changeset 3464:be4ca325525a

Merge.
author Thomas Wuerthinger <thomas@wuerthinger.net>
date Wed, 27 Jul 2011 17:32:44 -0700
parents 7c4b4daac19b (current diff) 9b0ca45cd756 (diff)
children 8cd198d7cbc1 2c5b4dd06d3b a6c1f49a7319
files .hgignore .hgtags make/linux/Makefile make/linux/makefiles/cscope.make make/linux/makefiles/vm.make make/solaris/makefiles/cscope.make src/cpu/x86/vm/assembler_x86.cpp src/cpu/x86/vm/frame_x86.cpp src/cpu/x86/vm/sharedRuntime_x86_64.cpp src/os/linux/vm/os_linux.cpp src/os/windows/vm/os_windows.cpp src/share/vm/c1/c1_LIRGenerator.cpp src/share/vm/c1/c1_Runtime1.cpp src/share/vm/ci/ciCallProfile.hpp src/share/vm/ci/ciEnv.cpp src/share/vm/ci/ciEnv.hpp src/share/vm/ci/ciField.cpp src/share/vm/ci/ciObject.cpp src/share/vm/ci/ciObject.hpp src/share/vm/classfile/systemDictionary.cpp src/share/vm/classfile/systemDictionary.hpp src/share/vm/classfile/vmSymbols.hpp src/share/vm/code/codeBlob.hpp src/share/vm/code/nmethod.cpp src/share/vm/code/nmethod.hpp src/share/vm/code/pcDesc.cpp src/share/vm/compiler/compileBroker.cpp src/share/vm/compiler/oopMap.cpp src/share/vm/gc_implementation/g1/heapRegion.cpp src/share/vm/interpreter/interpreterRuntime.cpp src/share/vm/interpreter/rewriter.cpp src/share/vm/oops/instanceKlass.cpp src/share/vm/oops/klass.cpp src/share/vm/oops/methodOop.cpp src/share/vm/oops/methodOop.hpp src/share/vm/prims/jni.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/deoptimization.cpp src/share/vm/runtime/deoptimization.hpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/javaCalls.cpp src/share/vm/runtime/sharedRuntime.cpp src/share/vm/runtime/sharedRuntime.hpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/vframeArray.cpp src/share/vm/utilities/exceptions.cpp
diffstat 341 files changed, 21726 insertions(+), 6199 deletions(-)
--- a/.hgignore	Wed Jul 27 17:24:11 2011 -0700
+++ b/.hgignore	Wed Jul 27 17:32:44 2011 -0700
@@ -34,4 +34,5 @@
 ^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
 ^src/share/tools/IdealGraphVisualizer/build/
 ^src/share/tools/IdealGraphVisualizer/dist/
+^.hgtip
 ^make/solaris/solaris_amd64_compiler1/
--- a/.hgtags	Wed Jul 27 17:24:11 2011 -0700
+++ b/.hgtags	Wed Jul 27 17:32:44 2011 -0700
@@ -167,3 +167,19 @@
 611e19a16519d6fb5deea9ab565336e6e6ee475d jdk7-b139
 611e19a16519d6fb5deea9ab565336e6e6ee475d hs21-b09
 a197fd9e273c692767407654c4caf858460a413f Tested with fop, lusearch, eclipse and jtt with Xcomp flag
+d283b82966712b353fa307845a1316da42a355f4 jdk7-b140
+d283b82966712b353fa307845a1316da42a355f4 hs21-b10
+5d07913abd59261c77f24cc04a759cb75d804099 jdk7-b141
+3aea9e9feb073f5500e031be6186666bcae89aa2 hs21-b11
+9ad1548c6b63d596c411afc35147ffd5254426d9 jdk7-b142
+9ad1548c6b63d596c411afc35147ffd5254426d9 hs21-b12
+c149193c768b8b7233da4c3a3fdc0756b975848e hs21-b13
+c149193c768b8b7233da4c3a3fdc0756b975848e jdk7-b143
+fe189d4a44e9e8f0c7d78fcbd1c63701745752ca jdk7-b144
+62f39d40ebf176306a916812729df586f9d10f43 hs21-b14
+82a81d5c5700a69333e12532bf0c4d33e885c7fc jdk7-b145
+82a81d5c5700a69333e12532bf0c4d33e885c7fc hs21-b15
+38fa55e5e79232d48f1bb8cf27d88bc094c9375a jdk7-b146
+38fa55e5e79232d48f1bb8cf27d88bc094c9375a hs21-b16
+81d815b05abb564aa1f4100ae13491c949b9a07e jdk7-b147
+81d815b05abb564aa1f4100ae13491c949b9a07e hs21-b17
--- a/THIRD_PARTY_README	Wed Jul 27 17:24:11 2011 -0700
+++ b/THIRD_PARTY_README	Wed Jul 27 17:32:44 2011 -0700
@@ -1,38 +1,12 @@
 DO NOT TRANSLATE OR LOCALIZE.
-
-%% This notice is provided with respect to Thai dictionary for text breaking, which may be included with this software: 
-
---- begin of LICENSE file ---
-
-Copyright (C) 1982 The Royal Institute, Thai Royal Government.
-
-Copyright (C) 1998 National Electronics and Computer Technology Center,
- National Science and Technology Development Agency,
- Ministry of Science Technology and Environment,
- Thai Royal Government.
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without
-limitation the rights to use, copy, modify, merge, publish, distribute,
-sublicense, and/or sell copies of the Software, and to permit persons to
-whom the Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-USE OR OTHER DEALINGS IN THE SOFTWARE.
-
---- end of LICENSE file ---
-%% This notice is provided with respect to ASM, which may be included with this software: 
-Copyright (c) 2000-2007 INRIA, France Telecom
+-----------------------------
+
+%% This notice is provided with respect to ASM Bytecode Manipulation 
+Framework v3.1, which is included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+Copyright (c) 2000-2005 INRIA, France Telecom
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -61,18 +35,3110 @@
 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 THE POSSIBILITY OF SUCH DAMAGE.
-%% This notice is provided with respect to zlib 1.1.3, which may be included with this software:   
-
-Acknowledgments:
-
-  The deflate format used by zlib was defined by Phil Katz. The deflate
-  and zlib specifications were written by L. Peter Deutsch. Thanks to all the
-  people who reported problems and suggested various improvements in zlib;
-  they are too numerous to cite here.
-
-Copyright notice:
-
- (C) 1995-1998 Jean-loup Gailly and Mark Adler
+
+--- end of LICENSE ---
+
+--------------------------------------------------------------------------------
+
+%% This notice is provided with respect to CodeViewer 1.0, which is included 
+with JDK 7.
+
+--- begin of LICENSE ---
+
+Copyright 1999 by CoolServlets.com.
+
+Any errors or suggested improvements to this class can be reported as
+instructed on CoolServlets.com. We hope you enjoy this program... your
+comments will encourage further development!  This software is distributed
+under the terms of the BSD License.  Redistribution and use in source and
+binary forms, with or without modification, are permitted provided that the
+following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+Neither name of CoolServlets.com nor the names of its contributors may be
+used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY COOLSERVLETS.COM AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
+
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Cryptix AES 3.2.0, which is
+included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+Cryptix General License
+
+Copyright (c) 1995-2005 The Cryptix Foundation Limited.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  1. Redistributions of source code must retain the copyright notice,
+     this list of conditions and the following disclaimer.
+
+  2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE CRYPTIX FOUNDATION LIMITED AND
+CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE CRYPTIX FOUNDATION LIMITED OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to CUP Parser Generator for 
+Java 0.10k, which is included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+Copyright 1996-1999 by Scott Hudson, Frank Flannery, C. Scott Ananian
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted, provided
+that the above copyright notice appear in all copies and that both the
+copyright notice and this permission notice and warranty disclaimer appear in
+supporting documentation, and that the names of the authors or their
+employers not be used in advertising or publicity pertaining to distribution of
+the software without specific, written prior permission.
+
+The authors and their employers disclaim all warranties with regard to
+this software, including all implied warranties of merchantability and fitness.
+In no event shall the authors or their employers be liable for any special,
+indirect or consequential damages or any damages whatsoever resulting from
+loss of use, data or profits, whether in an action of contract, negligence or
+other tortious action, arising out of or in connection with the use or
+performance of this software.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Document Object Model (DOM) Level 2
+& 3, which is included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+W3C SOFTWARE NOTICE AND LICENSE
+
+http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
+
+This work (and included software, documentation such as READMEs, or other
+related items) is being provided by the copyright holders under the following
+license. By obtaining, using and/or copying this work, you (the licensee)
+agree that you have read, understood, and will comply with the following terms
+and conditions.
+
+Permission to copy, modify, and distribute this software and its
+documentation, with or without modification, for any purpose and without fee
+or royalty is hereby granted, provided that you include the following on ALL
+copies of the software and documentation or portions thereof, including
+modifications:
+
+   1.The full text of this NOTICE in a location viewable to users of the
+   redistributed or derivative work.
+
+   2.Any pre-existing intellectual property disclaimers, notices, or terms and
+   conditions. If none exist, the W3C Software Short Notice should be included
+   (hypertext is preferred, text is permitted) within the body of any
+   redistributed or derivative code.
+
+   3.Notice of any changes or modifications to the files, including the date
+   changes were made. (We recommend you provide URIs to the location from
+   which the code is derived.)
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS
+MAKE NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT
+LIMITED TO, WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR
+PURPOSE OR THAT THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY
+THIRD PARTY PATENTS,COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
+
+COPYRIGHT HOLDERS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL
+OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE OF THE SOFTWARE OR
+DOCUMENTATION.  The name and trademarks of copyright holders may NOT be used
+in advertising or publicity pertaining to the software without specific,
+written prior permission. Title to copyright in this software and any
+associated documentation will at all times remain with copyright holders.
+
+____________________________________
+
+This formulation of W3C's notice and license became active on December 31
+2002. This version removes the copyright ownership notice such that this
+license can be used with materials other than those owned by the W3C, reflects
+that ERCIM is now a host of the W3C, includes references to this specific
+dated version of the license, and removes the ambiguous grant of "use".
+Otherwise, this version is the same as the previous version and is written so
+as to preserve the Free Software Foundation's assessment of GPL compatibility
+and OSI's certification under the Open Source Definition. Please see our
+Copyright FAQ for common questions about using materials from our site,
+including specific terms and conditions for packages like libwww, Amaya, and
+Jigsaw. Other questions about this notice can be directed to
+site-policy@w3.org.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Elliptic Curve Cryptography, which 
+is included with JRE 7, JDK 7, and OpenJDK 7.
+
+You are receiving a copy of the Elliptic Curve Cryptography library in source
+form with the JDK 7 source distribution and object code in the JRE 7 & JDK 7
+runtime.
+
+The terms of the Oracle license do NOT apply to the Elliptic Curve
+Cryptography library program; it is licensed under the following license,
+separately from the Oracle programs you receive. If you do not wish to install
+this program, you may delete the library named libsunec.so (on Solaris and
+Linux systems) or sunec.dll (on Windows systems) from the JRE bin directory
+reserved for native libraries.
+
+--- begin of LICENSE ---
+
+                  GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+                  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+                            NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to FontConfig 2.5, which is 
+included with JRE 7, JDK 7, and OpenJDK 7 source distributions on
+Linux and Solaris.
+
+--- begin of LICENSE ---
+
+Copyright © 2001,2003 Keith Packard
+
+Permission to use, copy, modify, distribute, and sell this software and its
+documentation for any purpose is hereby granted without fee, provided that the
+above copyright notice appear in all copies and that both that copyright
+notice and this permission notice appear in supporting documentation, and that
+the name of Keith Packard not be used in advertising or publicity pertaining
+to distribution of the software without specific, written prior permission.
+Keith Packard makes no representations about the suitability of this software
+for any purpose.  It is provided "as is" without express or implied warranty.
+
+KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
+ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL KEITH
+PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to IAIK PKCS#11 Wrapper, 
+which is included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+IAIK PKCS#11 Wrapper License
+
+Copyright (c) 2002 Graz University of Technology. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. The end-user documentation included with the redistribution, if any, must
+   include the following acknowledgment:
+
+   "This product includes software developed by IAIK of Graz University of
+    Technology."
+
+   Alternately, this acknowledgment may appear in the software itself, if and
+   wherever such third-party acknowledgments normally appear.
+
+4. The names "Graz University of Technology" and "IAIK of Graz University of
+   Technology" must not be used to endorse or promote products derived from this
+   software without prior written permission.
+
+5. Products derived from this software may not be called "IAIK PKCS Wrapper",
+   nor may "IAIK" appear in their name, without prior written permission of
+   Graz University of Technology.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+LICENSOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to ICU4C 4.0.1 and ICU4J 4.4, which 
+are included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+Copyright (c) 1995-2010 International Business Machines Corporation and others 
+
+All rights reserved. 
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, and/or sell copies of the
+Software, and to permit persons to whom the Software is furnished to do so,
+provided that the above copyright notice(s) and this permission notice appear
+in all copies of the Software and that both the above copyright notice(s) and
+this permission notice appear in supporting documentation.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN
+NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
+LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder shall not
+be used in advertising or otherwise to promote the sale, use or other dealings
+in this Software without prior written authorization of the copyright holder.
+All trademarks and registered trademarks mentioned herein are the property of
+their respective owners.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to IJG JPEG 6b, which is 
+included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+This software is copyright (C) 1991-1998, Thomas G. Lane.
+All Rights Reserved except as specified below.
+
+Permission is hereby granted to use, copy, modify, and distribute this
+software (or portions thereof) for any purpose, without fee, subject to these
+conditions:
+(1) If any part of the source code for this software is distributed, then this
+README file must be included, with this copyright and no-warranty notice
+unaltered; and any additions, deletions, or changes to the original files
+must be clearly indicated in accompanying documentation.
+(2) If only executable code is distributed, then the accompanying
+documentation must state that "this software is based in part on the work of
+the Independent JPEG Group".
+(3) Permission for use of this software is granted only if the user accepts
+full responsibility for any undesirable consequences; the authors accept
+NO LIABILITY for damages of any kind.
+
+These conditions apply to any software derived from or based on the IJG code,
+not just to the unmodified library.  If you use our work, you ought to
+acknowledge us.
+
+Permission is NOT granted for the use of any IJG author's name or company name
+in advertising or publicity relating to this software or products derived from
+it.  This software may be referred to only as "the Independent JPEG Group's
+software".
+
+We specifically permit and encourage the use of this software as the basis of
+commercial products, provided that all warranty or liability claims are
+assumed by the product vendor.
+
+--- end of LICENSE ---
+
+--------------------------------------------------------------------------------
+
+%% This notice is provided with respect to JOpt-Simple v3.0,  which is 
+included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+ Copyright (c) 2004-2009 Paul R. Holser, Jr.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+--- end of LICENSE ---
+
+--------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Kerberos functionality, which 
+is included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+ (C) Copyright IBM Corp. 1999 All Rights Reserved.
+ Copyright 1997 The Open Group Research Institute. All rights reserved.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Kerberos functionality from 
+FundsXpress, INC., which is included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+ Copyright (C) 1998 by the FundsXpress, INC.
+
+ All rights reserved.
+
+ Export of this software from the United States of America may require
+ a specific license from the United States Government.  It is the
+ responsibility of any person or organization contemplating export to
+ obtain such a license before exporting.
+
+ WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ distribute this software and its documentation for any purpose and
+ without fee is hereby granted, provided that the above copyright
+ notice appear in all copies and that both that copyright notice and
+ this permission notice appear in supporting documentation, and that
+ the name of FundsXpress. not be used in advertising or publicity pertaining
+ to distribution of the software without specific, written prior
+ permission.  FundsXpress makes no representations about the suitability of
+ this software for any purpose.  It is provided "as is" without express
+ or implied warranty.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Khronos OpenGL headers, which are 
+included with JDK 7 and OpenJDK 7 source distributions.
+
+--- begin of LICENSE ---
+
+ Copyright (c) 2007 The Khronos Group Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and/or associated documentation files (the "Materials"), to
+ deal in the Materials without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Materials, and to permit persons to whom the Materials are
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Materials.
+
+ THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE
+ MATERIALS.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% Portions Copyright Eastman Kodak Company 1992
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to libpng 1.2.18, which is 
+included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+This copy of the libpng notices is provided for your convenience.  In case of
+any discrepancy between this copy and the notices in the file png.h that is
+included in the libpng distribution, the latter shall prevail.
+
+COPYRIGHT NOTICE, DISCLAIMER, and LICENSE:
+
+If you modify libpng you may insert additional notices immediately following
+this sentence.
+
+libpng versions 1.2.6, August 15, 2004, through 1.2.18, May 15, 2007, are
+Copyright (c) 2004, 2006-2007 Glenn Randers-Pehrson, and are
+distributed according to the same disclaimer and license as libpng-1.2.5
+with the following individual added to the list of Contributing Authors
+
+   Cosmin Truta
+
+libpng versions 1.0.7, July 1, 2000, through 1.2.5 - October 3, 2002, are
+Copyright (c) 2000-2002 Glenn Randers-Pehrson, and are
+distributed according to the same disclaimer and license as libpng-1.0.6
+with the following individuals added to the list of Contributing Authors
+
+   Simon-Pierre Cadieux
+   Eric S. Raymond
+   Gilles Vollant
+
+and with the following additions to the disclaimer:
+
+   There is no warranty against interference with your enjoyment of the
+   library or against infringement.  There is no warranty that our
+   efforts or the library will fulfill any of your particular purposes
+   or needs.  This library is provided with all faults, and the entire
+   risk of satisfactory quality, performance, accuracy, and effort is with
+   the user.
+
+libpng versions 0.97, January 1998, through 1.0.6, March 20, 2000, are
+Copyright (c) 1998, 1999 Glenn Randers-Pehrson, and are
+distributed according to the same disclaimer and license as libpng-0.96,
+with the following individuals added to the list of Contributing Authors:
+
+   Tom Lane
+   Glenn Randers-Pehrson
+   Willem van Schaik
+
+libpng versions 0.89, June 1996, through 0.96, May 1997, are
+Copyright (c) 1996, 1997 Andreas Dilger
+Distributed according to the same disclaimer and license as libpng-0.88,
+with the following individuals added to the list of Contributing Authors:
+
+   John Bowler
+   Kevin Bracey
+   Sam Bushell
+   Magnus Holmgren
+   Greg Roelofs
+   Tom Tanner
+
+libpng versions 0.5, May 1995, through 0.88, January 1996, are
+Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.
+
+For the purposes of this copyright and license, "Contributing Authors"
+is defined as the following set of individuals:
+
+   Andreas Dilger
+   Dave Martindale
+   Guy Eric Schalnat
+   Paul Schmidt
+   Tim Wegner
+
+The PNG Reference Library is supplied "AS IS".  The Contributing Authors
+and Group 42, Inc. disclaim all warranties, expressed or implied,
+including, without limitation, the warranties of merchantability and of
+fitness for any purpose.  The Contributing Authors and Group 42, Inc.
+assume no liability for direct, indirect, incidental, special, exemplary,
+or consequential damages, which may result from the use of the PNG
+Reference Library, even if advised of the possibility of such damage.
+
+Permission is hereby granted to use, copy, modify, and distribute this
+source code, or portions hereof, for any purpose, without fee, subject
+to the following restrictions:
+
+1. The origin of this source code must not be misrepresented.
+
+2. Altered versions must be plainly marked as such and must not
+   be misrepresented as being the original source.
+
+3. This Copyright notice may not be removed or altered from any
+   source or altered source distribution.
+
+The Contributing Authors and Group 42, Inc. specifically permit, without
+fee, and encourage the use of this source code as a component to
+supporting the PNG file format in commercial products.  If you use this
+source code in a product, acknowledgment is not required but would be
+appreciated.
+
+
+A "png_get_copyright" function is available, for convenient use in "about"
+boxes and the like:
+
+   printf("%s",png_get_copyright(NULL));
+
+Also, the PNG logo (in PNG format, of course) is supplied in the
+files "pngbar.png" and "pngbar.jpg (88x31) and "pngnow.png" (98x31).
+
+Libpng is OSI Certified Open Source Software.  OSI Certified Open Source is a
+certification mark of the Open Source Initiative.
+
+Glenn Randers-Pehrson
+glennrp at users.sourceforge.net
+May 15, 2007
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to libungif 4.1.3, which is 
+included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+The GIFLIB distribution is Copyright (c) 1997  Eric S. Raymond
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Little CMS 2.0, which is 
+included with OpenJDK 7.
+
+--- begin of LICENSE ---
+
+Little CMS
+Copyright (c) 1998-2010 Marti Maria Saguer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% Lucida is a registered trademark or trademark of Bigelow & Holmes in the
+U.S. and other countries.
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Mesa 3D Graphics Library v4.1,
+which is included with JRE 7, JDK 7, and OpenJDK 7 source distributions.
+
+--- begin of LICENSE ---
+
+ Mesa 3-D graphics library
+ Version:  4.1
+
+ Copyright (C) 1999-2002  Brian Paul   All Rights Reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to PC/SC Lite for Suse Linux v.1.1.1,
+which is included with JRE 7, JDK 7, and OpenJDK 7 on Linux and Solaris.
+
+--- begin of LICENSE ---
+
+Copyright (c) 1999-2004 David Corcoran <corcoran@linuxnet.com>
+Copyright (c) 1999-2004 Ludovic Rousseau <ludovic.rousseau (at) free.fr>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+   must display the following acknowledgement:
+     This product includes software developed by: 
+      David Corcoran <corcoran@linuxnet.com>
+      http://www.linuxnet.com (MUSCLE)
+4. The name of the author may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+Changes to this license can be made only by the copyright author with 
+explicit written consent.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Relax NG Object/Parser v.20050510,
+which is included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+Copyright (c) Kohsuke Kawaguchi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions: The above copyright
+notice and this permission notice shall be included in all copies or
+substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to RelaxNGCC v1.12, which is 
+included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+Copyright (c) 2000-2003 Daisuke Okajima and Kohsuke Kawaguchi.  
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. The end-user documentation included with the redistribution, if any, must
+   include the following acknowledgment:
+
+    "This product includes software developed by Daisuke Okajima
+    and Kohsuke Kawaguchi (http://relaxngcc.sf.net/)."
+
+Alternately, this acknowledgment may appear in the software itself, if and
+wherever such third-party acknowledgments normally appear.
+
+4. The names of the copyright holders must not be used to endorse or promote
+   products derived from this software without prior written permission. For
+   written permission, please contact the copyright holders.
+
+5. Products derived from this software may not be called "RELAXNGCC", nor may
+  "RELAXNGCC" appear in their name, without prior written permission of the
+  copyright holders.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE APACHE
+SOFTWARE FOUNDATION OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Mozilla Rhino v1.7R3, which 
+is included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+                          MOZILLA PUBLIC LICENSE
+                                Version 1.1
+
+                              ---------------
+
+1. Definitions.
+
+     1.0.1. "Commercial Use" means distribution or otherwise making the
+     Covered Code available to a third party.
+
+     1.1. "Contributor" means each entity that creates or contributes to
+     the creation of Modifications.
+
+     1.2. "Contributor Version" means the combination of the Original
+     Code, prior Modifications used by a Contributor, and the Modifications
+     made by that particular Contributor.
+
+     1.3. "Covered Code" means the Original Code or Modifications or the
+     combination of the Original Code and Modifications, in each case
+     including portions thereof.
+
+     1.4. "Electronic Distribution Mechanism" means a mechanism generally
+     accepted in the software development community for the electronic
+     transfer of data.
+
+     1.5. "Executable" means Covered Code in any form other than Source
+     Code.
+
+     1.6. "Initial Developer" means the individual or entity identified
+     as the Initial Developer in the Source Code notice required by Exhibit
+     A.
+
+     1.7. "Larger Work" means a work which combines Covered Code or
+     portions thereof with code not governed by the terms of this License.
+
+     1.8. "License" means this document.
+
+     1.8.1. "Licensable" means having the right to grant, to the maximum
+     extent possible, whether at the time of the initial grant or
+     subsequently acquired, any and all of the rights conveyed herein.
+
+     1.9. "Modifications" means any addition to or deletion from the
+     substance or structure of either the Original Code or any previous
+     Modifications. When Covered Code is released as a series of files, a
+     Modification is:
+          A. Any addition to or deletion from the contents of a file
+          containing Original Code or previous Modifications.
+
+          B. Any new file that contains any part of the Original Code or
+          previous Modifications.
+
+     1.10. "Original Code" means Source Code of computer software code
+     which is described in the Source Code notice required by Exhibit A as
+     Original Code, and which, at the time of its release under this
+     License is not already Covered Code governed by this License.
+
+     1.10.1. "Patent Claims" means any patent claim(s), now owned or
+     hereafter acquired, including without limitation,  method, process,
+     and apparatus claims, in any patent Licensable by grantor.
+
+     1.11. "Source Code" means the preferred form of the Covered Code for
+     making modifications to it, including all modules it contains, plus
+     any associated interface definition files, scripts used to control
+     compilation and installation of an Executable, or source code
+     differential comparisons against either the Original Code or another
+     well known, available Covered Code of the Contributor's choice. The
+     Source Code can be in a compressed or archival form, provided the
+     appropriate decompression or de-archiving software is widely available
+     for no charge.
+
+     1.12. "You" (or "Your")  means an individual or a legal entity
+     exercising rights under, and complying with all of the terms of, this
+     License or a future version of this License issued under Section 6.1.
+     For legal entities, "You" includes any entity which controls, is
+     controlled by, or is under common control with You. For purposes of
+     this definition, "control" means (a) the power, direct or indirect,
+     to cause the direction or management of such entity, whether by
+     contract or otherwise, or (b) ownership of more than fifty percent
+     (50%) of the outstanding shares or beneficial ownership of such
+     entity.
+
+2. Source Code License.
+
+     2.1. The Initial Developer Grant.
+     The Initial Developer hereby grants You a world-wide, royalty-free,
+     non-exclusive license, subject to third party intellectual property
+     claims:
+          (a)  under intellectual property rights (other than patent or
+          trademark) Licensable by Initial Developer to use, reproduce,
+          modify, display, perform, sublicense and distribute the Original
+          Code (or portions thereof) with or without Modifications, and/or
+          as part of a Larger Work; and
+
+          (b) under Patents Claims infringed by the making, using or
+          selling of Original Code, to make, have made, use, practice,
+          sell, and offer for sale, and/or otherwise dispose of the
+          Original Code (or portions thereof).
+
+          (c) the licenses granted in this Section 2.1(a) and (b) are
+          effective on the date Initial Developer first distributes
+          Original Code under the terms of this License.
+
+          (d) Notwithstanding Section 2.1(b) above, no patent license is
+          granted: 1) for code that You delete from the Original Code; 2)
+          separate from the Original Code;  or 3) for infringements caused
+          by: i) the modification of the Original Code or ii) the
+          combination of the Original Code with other software or devices.
+
+     2.2. Contributor Grant.
+     Subject to third party intellectual property claims, each Contributor
+     hereby grants You a world-wide, royalty-free, non-exclusive license
+
+          (a)  under intellectual property rights (other than patent or
+          trademark) Licensable by Contributor, to use, reproduce, modify,
+          display, perform, sublicense and distribute the Modifications
+          created by such Contributor (or portions thereof) either on an
+          unmodified basis, with other Modifications, as Covered Code
+          and/or as part of a Larger Work; and
+
+          (b) under Patent Claims infringed by the making, using, or
+          selling of  Modifications made by that Contributor either alone
+          and/or in combination with its Contributor Version (or portions
+          of such combination), to make, use, sell, offer for sale, have
+          made, and/or otherwise dispose of: 1) Modifications made by that
+          Contributor (or portions thereof); and 2) the combination of
+          Modifications made by that Contributor with its Contributor
+          Version (or portions of such combination).
+
+          (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+          effective on the date Contributor first makes Commercial Use of
+          the Covered Code.
+
+          (d)    Notwithstanding Section 2.2(b) above, no patent license is
+          granted: 1) for any code that Contributor has deleted from the
+          Contributor Version; 2)  separate from the Contributor Version;
+          3)  for infringements caused by: i) third party modifications of
+          Contributor Version or ii)  the combination of Modifications made
+          by that Contributor with other software  (except as part of the
+          Contributor Version) or other devices; or 4) under Patent Claims
+          infringed by Covered Code in the absence of Modifications made by
+          that Contributor.
+
+3. Distribution Obligations.
+
+     3.1. Application of License.
+     The Modifications which You create or to which You contribute are
+     governed by the terms of this License, including without limitation
+     Section 2.2. The Source Code version of Covered Code may be
+     distributed only under the terms of this License or a future version
+     of this License released under Section 6.1, and You must include a
+     copy of this License with every copy of the Source Code You
+     distribute. You may not offer or impose any terms on any Source Code
+     version that alters or restricts the applicable version of this
+     License or the recipients' rights hereunder. However, You may include
+     an additional document offering the additional rights described in
+     Section 3.5.
+
+     3.2. Availability of Source Code.
+     Any Modification which You create or to which You contribute must be
+     made available in Source Code form under the terms of this License
+     either on the same media as an Executable version or via an accepted
+     Electronic Distribution Mechanism to anyone to whom you made an
+     Executable version available; and if made available via Electronic
+     Distribution Mechanism, must remain available for at least twelve (12)
+     months after the date it initially became available, or at least six
+     (6) months after a subsequent version of that particular Modification
+     has been made available to such recipients. You are responsible for
+     ensuring that the Source Code version remains available even if the
+     Electronic Distribution Mechanism is maintained by a third party.
+
+     3.3. Description of Modifications.
+     You must cause all Covered Code to which You contribute to contain a
+     file documenting the changes You made to create that Covered Code and
+     the date of any change. You must include a prominent statement that
+     the Modification is derived, directly or indirectly, from Original
+     Code provided by the Initial Developer and including the name of the
+     Initial Developer in (a) the Source Code, and (b) in any notice in an
+     Executable version or related documentation in which You describe the
+     origin or ownership of the Covered Code.
+
+     3.4. Intellectual Property Matters
+          (a) Third Party Claims.
+          If Contributor has knowledge that a license under a third party's
+          intellectual property rights is required to exercise the rights
+          granted by such Contributor under Sections 2.1 or 2.2,
+          Contributor must include a text file with the Source Code
+          distribution titled "LEGAL" which describes the claim and the
+          party making the claim in sufficient detail that a recipient will
+          know whom to contact. If Contributor obtains such knowledge after
+          the Modification is made available as described in Section 3.2,
+          Contributor shall promptly modify the LEGAL file in all copies
+          Contributor makes available thereafter and shall take other steps
+          (such as notifying appropriate mailing lists or newsgroups)
+          reasonably calculated to inform those who received the Covered
+          Code that new knowledge has been obtained.
+
+          (b) Contributor APIs.
+          If Contributor's Modifications include an application programming
+          interface and Contributor has knowledge of patent licenses which
+          are reasonably necessary to implement that API, Contributor must
+          also include this information in the LEGAL file.
+
+               (c)    Representations.
+          Contributor represents that, except as disclosed pursuant to
+          Section 3.4(a) above, Contributor believes that Contributor's
+          Modifications are Contributor's original creation(s) and/or
+          Contributor has sufficient rights to grant the rights conveyed by
+          this License.
+
+     3.5. Required Notices.
+     You must duplicate the notice in Exhibit A in each file of the Source
+     Code.  If it is not possible to put such notice in a particular Source
+     Code file due to its structure, then You must include such notice in a
+     location (such as a relevant directory) where a user would be likely
+     to look for such a notice.  If You created one or more Modification(s)
+     You may add your name as a Contributor to the notice described in
+     Exhibit A.  You must also duplicate this License in any documentation
+     for the Source Code where You describe recipients' rights or ownership
+     rights relating to Covered Code.  You may choose to offer, and to
+     charge a fee for, warranty, support, indemnity or liability
+     obligations to one or more recipients of Covered Code. However, You
+     may do so only on Your own behalf, and not on behalf of the Initial
+     Developer or any Contributor. You must make it absolutely clear than
+     any such warranty, support, indemnity or liability obligation is
+     offered by You alone, and You hereby agree to indemnify the Initial
+     Developer and every Contributor for any liability incurred by the
+     Initial Developer or such Contributor as a result of warranty,
+     support, indemnity or liability terms You offer.
+
+     3.6. Distribution of Executable Versions.
+     You may distribute Covered Code in Executable form only if the
+     requirements of Section 3.1-3.5 have been met for that Covered Code,
+     and if You include a notice stating that the Source Code version of
+     the Covered Code is available under the terms of this License,
+     including a description of how and where You have fulfilled the
+     obligations of Section 3.2. The notice must be conspicuously included
+     in any notice in an Executable version, related documentation or
+     collateral in which You describe recipients' rights relating to the
+     Covered Code. You may distribute the Executable version of Covered
+     Code or ownership rights under a license of Your choice, which may
+     contain terms different from this License, provided that You are in
+     compliance with the terms of this License and that the license for the
+     Executable version does not attempt to limit or alter the recipient's
+     rights in the Source Code version from the rights set forth in this
+     License. If You distribute the Executable version under a different
+     license You must make it absolutely clear that any terms which differ
+     from this License are offered by You alone, not by the Initial
+     Developer or any Contributor. You hereby agree to indemnify the
+     Initial Developer and every Contributor for any liability incurred by
+     the Initial Developer or such Contributor as a result of any such
+     terms You offer.
+
+     3.7. Larger Works.
+     You may create a Larger Work by combining Covered Code with other code
+     not governed by the terms of this License and distribute the Larger
+     Work as a single product. In such a case, You must make sure the
+     requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+     If it is impossible for You to comply with any of the terms of this
+     License with respect to some or all of the Covered Code due to
+     statute, judicial order, or regulation then You must: (a) comply with
+     the terms of this License to the maximum extent possible; and (b)
+     describe the limitations and the code they affect. Such description
+     must be included in the LEGAL file described in Section 3.4 and must
+     be included with all distributions of the Source Code. Except to the
+     extent prohibited by statute or regulation, such description must be
+     sufficiently detailed for a recipient of ordinary skill to be able to
+     understand it.
+
+5. Application of this License.
+
+     This License applies to code to which the Initial Developer has
+     attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+     6.1. New Versions.
+     Netscape Communications Corporation ("Netscape") may publish revised
+     and/or new versions of the License from time to time. Each version
+     will be given a distinguishing version number.
+
+     6.2. Effect of New Versions.
+     Once Covered Code has been published under a particular version of the
+     License, You may always continue to use it under the terms of that
+     version. You may also choose to use such Covered Code under the terms
+     of any subsequent version of the License published by Netscape. No one
+     other than Netscape has the right to modify the terms applicable to
+     Covered Code created under this License.
+
+     6.3. Derivative Works.
+     If You create or use a modified version of this License (which you may
+     only do in order to apply it to code which is not already Covered Code
+     governed by this License), You must (a) rename Your license so that
+     the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+     "MPL", "NPL" or any confusingly similar phrase do not appear in your
+     license (except to note that your license differs from this License)
+     and (b) otherwise make it clear that Your version of the license
+     contains terms which differ from the Mozilla Public License and
+     Netscape Public License. (Filling in the name of the Initial
+     Developer, Original Code or Contributor in the notice described in
+     Exhibit A shall not of themselves be deemed to be modifications of
+     this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+     COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+     WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+     WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+     DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+     THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+     IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+     YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+     COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+     OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+     ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+     8.1.  This License and the rights granted hereunder will terminate
+     automatically if You fail to comply with terms herein and fail to cure
+     such breach within 30 days of becoming aware of the breach. All
+     sublicenses to the Covered Code which are properly granted shall
+     survive any termination of this License. Provisions which, by their
+     nature, must remain in effect beyond the termination of this License
+     shall survive.
+
+     8.2.  If You initiate litigation by asserting a patent infringement
+     claim (excluding declatory judgment actions) against Initial Developer
+     or a Contributor (the Initial Developer or Contributor against whom
+     You file such action is referred to as "Participant")  alleging that:
+
+     (a)  such Participant's Contributor Version directly or indirectly
+     infringes any patent, then any and all rights granted by such
+     Participant to You under Sections 2.1 and/or 2.2 of this License
+     shall, upon 60 days notice from Participant terminate prospectively,
+     unless if within 60 days after receipt of notice You either: (i)
+     agree in writing to pay Participant a mutually agreeable reasonable
+     royalty for Your past and future use of Modifications made by such
+     Participant, or (ii) withdraw Your litigation claim with respect to
+     the Contributor Version against such Participant.  If within 60 days
+     of notice, a reasonable royalty and payment arrangement are not
+     mutually agreed upon in writing by the parties or the litigation claim
+     is not withdrawn, the rights granted by Participant to You under
+     Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+     the 60 day notice period specified above.
+
+     (b)  any software, hardware, or device, other than such Participant's
+     Contributor Version, directly or indirectly infringes any patent, then
+     any rights granted to You by such Participant under Sections 2.1(b)
+     and 2.2(b) are revoked effective as of the date You first made, used,
+     sold, distributed, or had made, Modifications made by that
+     Participant.
+
+     8.3.  If You assert a patent infringement claim against Participant
+     alleging that such Participant's Contributor Version directly or
+     indirectly infringes any patent where such claim is resolved (such as
+     by license or settlement) prior to the initiation of patent
+     infringement litigation, then the reasonable value of the licenses
+     granted by such Participant under Sections 2.1 or 2.2 shall be taken
+     into account in determining the amount or value of any payment or
+     license.
+
+     8.4.  In the event of termination under Sections 8.1 or 8.2 above,
+     all end user license agreements (excluding distributors and resellers)
+     which have been validly granted by You or any distributor hereunder
+     prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+     (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+     DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+     OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+     ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+     CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+     WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+     COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+     INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+     LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+     RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+     PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+     EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+     THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+     The Covered Code is a "commercial item," as that term is defined in
+     48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+     software" and "commercial computer software documentation," as such
+     terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+     C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+     all U.S. Government End Users acquire Covered Code with only those
+     rights set forth herein.
+
+11. MISCELLANEOUS.
+
+     This License represents the complete agreement concerning subject
+     matter hereof. If any provision of this License is held to be
+     unenforceable, such provision shall be reformed only to the extent
+     necessary to make it enforceable. This License shall be governed by
+     California law provisions (except to the extent applicable law, if
+     any, provides otherwise), excluding its conflict-of-law provisions.
+     With respect to disputes in which at least one party is a citizen of,
+     or an entity chartered or registered to do business in the United
+     States of America, any litigation relating to this License shall be
+     subject to the jurisdiction of the Federal Courts of the Northern
+     District of California, with venue lying in Santa Clara County,
+     California, with the losing party responsible for costs, including
+     without limitation, court costs and reasonable attorneys' fees and
+     expenses. The application of the United Nations Convention on
+     Contracts for the International Sale of Goods is expressly excluded.
+     Any law or regulation which provides that the language of a contract
+     shall be construed against the drafter shall not apply to this
+     License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+     As between Initial Developer and the Contributors, each party is
+     responsible for claims and damages arising, directly or indirectly,
+     out of its utilization of rights under this License and You agree to
+     work with Initial Developer and Contributors to distribute such
+     responsibility on an equitable basis. Nothing herein is intended or
+     shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+     Initial Developer may designate portions of the Covered Code as
+     "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
+     Developer permits you to utilize portions of the Covered Code under
+     Your choice of the NPL or the alternative licenses, if any, specified
+     by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A - Mozilla Public License.
+
+     ``The contents of this file are subject to the Mozilla Public License
+     Version 1.1 (the "License"); you may not use this file except in
+     compliance with the License. You may obtain a copy of the License at
+     http://www.mozilla.org/MPL/
+
+     Software distributed under the License is distributed on an "AS IS"
+     basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+     License for the specific language governing rights and limitations
+     under the License.
+
+     The Original Code is ______________________________________.
+
+     The Initial Developer of the Original Code is ________________________.
+     Portions created by ______________________ are Copyright (C) ______
+     _______________________. All Rights Reserved.
+
+     Contributor(s): ______________________________________.
+
+     Alternatively, the contents of this file may be used under the terms
+     of the _____ license (the  "[___] License"), in which case the
+     provisions of [______] License are applicable instead of those
+     above.  If you wish to allow use of your version of this file only
+     under the terms of the [____] License and not to allow others to use
+     your version of this file under the MPL, indicate your decision by
+     deleting  the provisions above and replace  them with the notice and
+     other provisions required by the [___] License.  If you do not delete
+     the provisions above, a recipient may use your version of this file
+     under either the MPL or the [___] License."
+
+     [NOTE: The text of this Exhibit A may differ slightly from the text of
+     the notices in the Source Code files of the Original Code. You should
+     use the text of this Exhibit A rather than the text found in the
+     Original Code Source Code for Your Modifications.]
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to SAX 2.0.1, which is included 
+with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+ SAX is free!
+
+ In fact, it's not possible to own a license to SAX, since it's been placed in
+ the public domain.
+
+ No Warranty
+
+ Because SAX is released to the public domain, there is no warranty for the
+ design or for the software implementation, to the extent permitted by
+ applicable law. Except when otherwise stated in writing the copyright holders
+ and/or other parties provide SAX "as is" without warranty of any kind, either
+ expressed or implied, including, but not limited to, the implied warranties
+ of merchantability and fitness for a particular purpose. The entire risk as
+ to the quality and performance of SAX is with you. Should SAX prove
+ defective, you assume the cost of all necessary servicing, repair or
+ correction.
+
+ In no event unless required by applicable law or agreed to in writing will
+ any copyright holder, or any other party who may modify and/or redistribute
+ SAX, be liable to you for damages, including any general, special, incidental
+ or consequential damages arising out of the use or inability to use SAX
+ (including but not limited to loss of data or data being rendered inaccurate
+ or losses sustained by you or third parties or a failure of the SAX to
+ operate with any other programs), even if such holder or other party has been
+ advised of the possibility of such damages.
+
+ Copyright Disclaimers 
+
+ This page includes statements to that effect by David Megginson, who would
+ have been able to claim copyright for the original work.
+
+ SAX 1.0
+
+ Version 1.0 of the Simple API for XML (SAX), created collectively by the
+ membership of the XML-DEV mailing list, is hereby released into the public
+ domain.
+
+ No one owns SAX: you may use it freely in both commercial and non-commercial
+ applications, bundle it with your software distribution, include it on a
+ CD-ROM, list the source code in a book, mirror the documentation at your own
+ web site, or use it in any other way you see fit.
+
+ David Megginson, sax@megginson.com
+ 1998-05-11
+
+ SAX 2.0 
+
+ I hereby abandon any property rights to SAX 2.0 (the Simple API for XML), and
+ release all of the SAX 2.0 source code, compiled code, and documentation
+ contained in this distribution into the Public Domain. SAX comes with NO
+ WARRANTY or guarantee of fitness for any purpose.
+
+ David Megginson, david@megginson.com
+ 2000-05-05
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to SoftFloat version 2b, which is 
+included with JRE 7, JDK 7, and OpenJDK 7 on Linux/ARM.
+
+--- begin of LICENSE ---
+
+Use of any of this software is governed by the terms of the license below:
+
+SoftFloat was written by me, John R. Hauser. This work was made possible in 
+part by the International Computer Science Institute, located at Suite 600, 
+1947 Center Street, Berkeley, California 94704. Funding was partially 
+provided by the National Science Foundation under grant MIP-9311980. The 
+original version of this code was written as part of a project to build 
+a fixed-point vector processor in collaboration with the University of 
+California at Berkeley, overseen by Profs. Nelson Morgan and John Wawrzynek. 
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort 
+has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT 
+TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO 
+PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL 
+LOSSES, COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO 
+FURTHERMORE EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER 
+SCIENCE INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, 
+COSTS, OR OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE 
+SOFTWARE. 
+
+Derivative works are acceptable, even for commercial purposes, provided 
+that the minimal documentation requirements stated in the source code are 
+satisfied. 
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% Portions licensed from Taligent, Inc.
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Thai Dictionary, which is 
+included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+Copyright (C) 1982 The Royal Institute, Thai Royal Government.
+
+Copyright (C) 1998 National Electronics and Computer Technology Center,
+National Science and Technology Development Agency,
+Ministry of Science Technology and Environment,
+Thai Royal Government.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Unicode 6.0.0, CLDR v1.4.1, & CLDR
+v1.9, which are included with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+Unicode Terms of Use
+
+For the general privacy policy governing access to this site, see the Unicode
+Privacy Policy. For trademark usage, see the Unicode® Consortium Name and
+Trademark Usage Policy.
+
+A. Unicode Copyright.
+   1. Copyright © 1991-2011 Unicode, Inc. All rights reserved.
+
+   2. Certain documents and files on this website contain a legend indicating
+      that "Modification is permitted." Any person is hereby authorized,
+      without fee, to modify such documents and files to create derivative
+      works conforming to the Unicode® Standard, subject to Terms and
+      Conditions herein.
+
+    3. Any person is hereby authorized, without fee, to view, use, reproduce,
+       and distribute all documents and files solely for informational
+       purposes in the creation of products supporting the Unicode Standard,
+       subject to the Terms and Conditions herein.
+
+    4. Further specifications of rights and restrictions pertaining to the use
+       of the particular set of data files known as the "Unicode Character
+       Database" can be found in Exhibit 1.
+
+    5. Each version of the Unicode Standard has further specifications of
+       rights and restrictions of use. For the book editions (Unicode 5.0 and
+       earlier), these are found on the back of the title page. The online
+       code charts carry specific restrictions. All other files, including
+       online documentation of the core specification for Unicode 6.0 and
+       later, are covered under these general Terms of Use.
+
+    6. No license is granted to "mirror" the Unicode website where a fee is
+       charged for access to the "mirror" site.
+
+    7. Modification is not permitted with respect to this document. All copies
+       of this document must be verbatim.
+
+B. Restricted Rights Legend. Any technical data or software which is licensed
+   to the United States of America, its agencies and/or instrumentalities
+   under this Agreement is commercial technical data or commercial computer
+   software developed exclusively at private expense as defined in FAR 2.101,
+   or DFARS 252.227-7014 (June 1995), as applicable. For technical data, use,
+   duplication, or disclosure by the Government is subject to restrictions as
+   set forth in DFARS 202.227-7015 Technical Data, Commercial and Items (Nov
+   1995) and this Agreement. For Software, in accordance with FAR 12-212 or
+   DFARS 227-7202, as applicable, use, duplication or disclosure by the
+   Government is subject to the restrictions set forth in this Agreement.
+
+C. Warranties and Disclaimers.
+   1. This publication and/or website may include technical or typographical
+      errors or other inaccuracies. Changes are periodically added to the
+      information herein; these changes will be incorporated in new editions
+      of the publication and/or website. Unicode may make improvements and/or
+      changes in the product(s) and/or program(s) described in this
+      publication and/or website at any time.
+
+    2. If this file has been purchased on magnetic or optical media from
+       Unicode, Inc. the sole and exclusive remedy for any claim will be
+       exchange of the defective media within ninety (90) days of original
+       purchase.
+
+    3. EXCEPT AS PROVIDED IN SECTION C.2, THIS PUBLICATION AND/OR SOFTWARE IS
+       PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND EITHER EXPRESS, IMPLIED,
+       OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF
+       MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
+       UNICODE AND ITS LICENSORS ASSUME NO RESPONSIBILITY FOR ERRORS OR
+       OMISSIONS IN THIS PUBLICATION AND/OR SOFTWARE OR OTHER DOCUMENTS WHICH
+       ARE REFERENCED BY OR LINKED TO THIS PUBLICATION OR THE UNICODE WEBSITE.
+
+D. Waiver of Damages. In no event shall Unicode or its licensors be liable for
+   any special, incidental, indirect or consequential damages of any kind, or
+   any damages whatsoever, whether or not Unicode was advised of the
+   possibility of the damage, including, without limitation, those resulting
+   from the following: loss of use, data or profits, in connection with the
+   use, modification or distribution of this information or its derivatives.
+
+E. Trademarks & Logos.
+   1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode,
+      Inc.  “The Unicode Consortium” and “Unicode, Inc.” are trade names of
+      Unicode, Inc.  Use of the information and materials found on this
+      website indicates your acknowledgement of Unicode, Inc.’s exclusive
+      worldwide rights in the Unicode Word Mark, the Unicode Logo, and the
+      Unicode trade names.
+
+   2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark
+      Policy”) are incorporated herein by reference and you agree to abide by
+      the provisions of the Trademark Policy, which may be changed from time
+      to time in the sole discretion of Unicode, Inc.
+
+   3. All third party trademarks referenced herein are the property of their
+      respective owners.
+
+F. Miscellaneous.
+   1. Jurisdiction and Venue. This server is operated from a location in the
+      State of California, United States of America. Unicode makes no
+      representation that the materials are appropriate for use in other
+      locations. If you access this server from other locations, you are
+      responsible for compliance with local laws. This Agreement, all use of
+      this site and any claims and damages resulting from use of this site are
+      governed solely by the laws of the State of California without regard to
+      any principles which would apply the laws of a different jurisdiction.
+      The user agrees that any disputes regarding this site shall be resolved
+      solely in the courts located in Santa Clara County, California. The user
+      agrees said courts have personal jurisdiction and agree to waive any
+      right to transfer the dispute to any other forum.
+
+   2. Modification by Unicode.  Unicode shall have the right to modify this
+      Agreement at any time by posting it to this site. The user may not
+      assign any part of this Agreement without Unicode’s prior written
+      consent.
+
+   3. Taxes. The user agrees to pay any taxes arising from access to this
+      website or use of the information herein, except for those based on
+      Unicode’s net income.
+
+   4. Severability.  If any provision of this Agreement is declared invalid or
+      unenforceable, the remaining provisions of this Agreement shall remain
+      in effect.
+
+   5. Entire Agreement. This Agreement constitutes the entire agreement
+      between the parties.
+
+EXHIBIT 1
+UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
+
+Unicode Data Files include all data files under the directories
+http://www.unicode.org/Public/, http://www.unicode.org/reports/, and
+http://www.unicode.org/cldr/data/. Unicode Data Files do not include PDF
+online code charts under the directory http://www.unicode.org/Public/.
+Software includes any source code published in the Unicode Standard or under
+the directories http://www.unicode.org/Public/,
+http://www.unicode.org/reports/, and http://www.unicode.org/cldr/data/.
+
+NOTICE TO USER: Carefully read the following legal agreement. BY DOWNLOADING,
+INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA FILES ("DATA
+FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO
+BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT
+AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR
+SOFTWARE.
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright © 1991-2011 Unicode, Inc. All rights reserved. Distributed under the
+Terms of Use in http://www.unicode.org/copyright.html.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of the Unicode data files and any associated documentation (the "Data Files")
+or Unicode software and any associated documentation (the "Software") to deal
+in the Data Files or Software without restriction, including without
+limitation the rights to use, copy, modify, merge, publish, distribute, and/or
+sell copies of the Data Files or Software, and to permit persons to whom the
+Data Files or Software are furnished to do so, provided that (a) the above
+copyright notice(s) and this permission notice appear with all copies of the
+Data Files or Software, (b) both the above copyright notice(s) and this
+permission notice appear in associated documentation, and (c) there is clear
+notice in each modified Data File or in the Software as well as in the
+documentation associated with the Data File(s) or Software that the data or
+software has been modified.
+
+THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD
+PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN
+THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE
+DATA FILES OR SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder shall not
+be used in advertising or otherwise to promote the sale, use or other dealings
+in these Data Files or Software without prior written authorization of the
+copyright holder.
+
+Unicode and the Unicode logo are trademarks of Unicode, Inc. in the United
+States and other countries. All third party trademarks referenced herein are
+the property of their respective owners.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to UPX v3.01, which is included 
+with JRE 7 on Windows.
+
+--- begin of LICENSE ---
+
+Use of any of this software is governed by the terms of the license below:
+
+
+                 ooooo     ooo ooooooooo.   ooooooo  ooooo
+                 `888'     `8' `888   `Y88.  `8888    d8'
+                  888       8   888   .d88'    Y888..8P
+                  888       8   888ooo88P'      `8888'
+                  888       8   888            .8PY888.
+                  `88.    .8'   888           d8'  `888b
+                    `YbodP'    o888o        o888o  o88888o
+
+
+                    The Ultimate Packer for eXecutables
+          Copyright (c) 1996-2000 Markus Oberhumer & Laszlo Molnar
+               http://wildsau.idv.uni-linz.ac.at/mfx/upx.html
+                          http://www.nexus.hu/upx
+                            http://upx.tsx.org
+
+
+PLEASE CAREFULLY READ THIS LICENSE AGREEMENT, ESPECIALLY IF YOU PLAN
+TO MODIFY THE UPX SOURCE CODE OR USE A MODIFIED UPX VERSION.
+
+
+ABSTRACT
+========
+
+   UPX and UCL are copyrighted software distributed under the terms
+   of the GNU General Public License (hereinafter the "GPL").
+
+   The stub which is imbedded in each UPX compressed program is part
+   of UPX and UCL, and contains code that is under our copyright. The
+   terms of the GNU General Public License still apply as compressing
+   a program is a special form of linking with our stub.
+
+   As a special exception we grant the free usage of UPX for all
+   executables, including commercial programs.
+   See below for details and restrictions.
+
+
+COPYRIGHT
+=========
+
+   UPX and UCL are copyrighted software. All rights remain with the authors.
+
+   UPX is Copyright (C) 1996-2000 Markus Franz Xaver Johannes Oberhumer
+   UPX is Copyright (C) 1996-2000 Laszlo Molnar
+
+   UCL is Copyright (C) 1996-2000 Markus Franz Xaver Johannes Oberhumer
+
+
+GNU GENERAL PUBLIC LICENSE
+==========================
+
+   UPX and the UCL library are free software; you can redistribute them
+   and/or modify them under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of
+   the License, or (at your option) any later version.
+
+   UPX and UCL are distributed in the hope that they will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; see the file COPYING.
+
+
+SPECIAL EXCEPTION FOR COMPRESSED EXECUTABLES
+============================================
+
+   The stub which is imbedded in each UPX compressed program is part
+   of UPX and UCL, and contains code that is under our copyright. The
+   terms of the GNU General Public License still apply as compressing
+   a program is a special form of linking with our stub.
+
+   Hereby Markus F.X.J. Oberhumer and Laszlo Molnar grant you special
+   permission to freely use and distribute all UPX compressed programs
+   (including commercial ones), subject to the following restrictions:
+
+   1. You must compress your program with a completely unmodified UPX
+      version; either with our precompiled version, or (at your option)
+      with a self compiled version of the unmodified UPX sources as
+      distributed by us.
+   2. This also implies that the UPX stub must be completely unmodified, i.e.
+      the stub imbedded in your compressed program must be byte-identical
+      to the stub that is produced by the official unmodified UPX version.
+   3. The decompressor and any other code from the stub must exclusively get
+      used by the unmodified UPX stub for decompressing your program at
+      program startup. No portion of the stub may get read, copied,
+      called or otherwise get used or accessed by your program.
+
+
+ANNOTATIONS
+===========
+
+  - You can use a modified UPX version or modified UPX stub only for
+    programs that are compatible with the GNU General Public License.
+
+  - We grant you special permission to freely use and distribute all UPX
+    compressed programs. But any modification of the UPX stub (such as,
+    but not limited to, removing our copyright string or making your
+    program non-decompressible) will immediately revoke your right to
+    use and distribute a UPX compressed program.
+
+  - UPX is not a software protection tool; by requiring that you use
+    the unmodified UPX version for your proprietary programs we
+    make sure that any user can decompress your program. This protects
+    both you and your users as nobody can hide malicious code -
+    any program that cannot be decompressed is highly suspicious
+    by definition.
+
+  - You can integrate all or part of UPX and UCL into projects that
+    are compatible with the GNU GPL, but obviously you cannot grant
+    any special exceptions beyond the GPL for our code in your project.
+
+  - We want to actively support manufacturers of virus scanners and
+    similar security software. Please contact us if you would like to
+    incorporate parts of UPX or UCL into such a product.
+
+
+
+Markus F.X.J. Oberhumer                   Laszlo Molnar
+markus.oberhumer@jk.uni-linz.ac.at        ml1050@cdata.tvnet.hu
+
+Linz, Austria, 25 Feb 2000
+
+Additional License(s)
+
+The UPX license file is at http://upx.sourceforge.net/upx-license.html.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to XFree86-VidMode Extension 1.0,
+which is included with JRE 7, JDK 7, and OpenJDK 7 on Linux and Solaris.
+
+--- begin of LICENSE ---
+
+Version 1.1 of XFree86 Project Licence.
+
+Copyright (C) 1994-2004 The XFree86 Project, Inc. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicence, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+   1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions, and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution, and in the same place
+   and form as other copyright, license and disclaimer information.
+
+   3. The end-user documentation included with the redistribution, if any, must
+   include the following acknowledgment: "This product includes
+   software developed by The XFree86 Project, Inc (http://www.xfree86.org/) and
+   its contributors", in the same place and form as other third-party
+   acknowledgments. Alternately, this acknowledgment may appear in the software
+   itself, in the same form and location as other such third-party
+   acknowledgments.
+
+    4. Except as contained in this notice, the name of The XFree86 Project, Inc.
+    shall not be used in advertising or otherwise to promote the sale, use
+    or other dealings in this Software without prior written authorization from
+    The XFree86 Project, Inc.
+
+    THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+    WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+    MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+    EVENT SHALL THE XFREE86 PROJECT, INC OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+    LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+    OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+    DAMAGE.  
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to X Window System 6.8.2, which is 
+included with JRE 7, JDK 7, and OpenJDK 7 on Linux and Solaris.
+
+--- begin of LICENSE ---
+
+          Licenses
+The X.Org Foundation March 2004
+
+1. Introduction
+
+The X.Org Foundation X Window System distribution is a compilation of code and
+documentation from many sources. This document is intended primarily as a
+guide to the licenses used in the distribution: you must check each file
+and/or package for precise redistribution terms. Nonetheless, this summary
+may be useful to many users. No software incorporating the XFree86 1.1 license
+has been incorporated.
+
+This document is based on the compilation from XFree86.
+
+2. XFree86 License
+
+XFree86 code without an explicit copyright is covered by the following
+copyright/license:
+
+Copyright (C) 1994-2003 The XFree86 Project, Inc. All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+XFREE86 PROJECT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+Except as contained in this notice, the name of the XFree86 Project shall not
+be used in advertising or otherwise to promote the sale, use or other dealings
+in this Software without prior written authorization from the XFree86 Project.
+
+3. Other Licenses
+
+Portions of code are covered by the following licenses/copyrights. See
+individual files for the copyright dates.
+
+3.1. X/MIT Copyrights
+
+3.1.1. X Consortium
+
+Copyright (C) <date> X Consortium
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X
+CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+Except as contained in this notice, the name of the X Consortium shall not be
+used in advertising or otherwise to promote the sale, use or other dealings in
+this Software without prior written authorization from the X Consortium.
+
+X Window System is a trademark of X Consortium, Inc.
+
+3.1.2. The Open Group
+
+Copyright <date> The Open Group
+
+Permission to use, copy, modify, distribute, and sell this software and its
+documentation for any purpose is hereby granted without fee, provided that the
+above copyright notice appear in all copies and that both that copyright
+notice and this permission notice appear in supporting documentation.
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+Except as contained in this notice, the name of The Open Group shall not be
+used in advertising or otherwise to promote the sale, use or other dealings in
+this Software without prior written authorization from The Open Group.
+
+3.2. Berkeley-based copyrights:
+
+3.2.1. General
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   3. The name of the author may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+3.2.2. UCB/LBL
+
+Copyright (c) 1993 The Regents of the University of California. All rights
+reserved.
+
+This software was developed by the Computer Systems Engineering group at
+Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and contributed to
+Berkeley.
+
+All advertising materials mentioning features or use of this software must
+display the following acknowledgement: This product includes software
+developed by the University of California, Lawrence Berkeley Laboratory.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   3. All advertising materials mentioning features or use of this software
+   must display the following acknowledgement: This product includes software
+   developed by the University of California, Berkeley and its contributors.
+
+   4. Neither the name of the University nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+3.2.3. The NetBSD Foundation, Inc.
+
+Copyright (c) 2003 The NetBSD Foundation, Inc. All rights reserved.
+
+This code is derived from software contributed to The NetBSD Foundation by Ben
+Collver <collver1@attbi.com>
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   3. All advertising materials mentioning features or use of this software
+   must display the following acknowledgement: This product includes software
+   developed by the NetBSD Foundation, Inc. and its contributors.
+
+   4. Neither the name of The NetBSD Foundation nor the names of its
+   contributors may be used to endorse or promote products derived from this
+   software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS ``AS
+IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+3.2.4. Theodore Ts'o.
+
+Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights
+reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+   and the entire permission notice in its entirety, including the disclaimer
+   of warranties.
+
+   2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   3. The name of the author may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE, ALL OF WHICH ARE HEREBY DISCLAIMED. IN NO
+EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+3.2.5. Theo de Raadt and Damien Miller
+
+Copyright (c) 1995,1999 Theo de Raadt. All rights reserved. Copyright (c)
+2001-2002 Damien Miller. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+3.2.6. Todd C. Miller
+
+Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+
+Permission to use, copy, modify, and distribute this software for any purpose
+with or without fee is hereby granted, provided that the above copyright
+notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND TODD C. MILLER DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL TODD C. MILLER BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+3.2.7. Thomas Winischhofer
+
+Copyright (C) 2001-2004 Thomas Winischhofer
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   3. The name of the author may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+3.3. NVIDIA Corp
+
+Copyright (c) 1996 NVIDIA, Corp. All rights reserved.
+
+NOTICE TO USER: The source code is copyrighted under U.S. and international
+laws. NVIDIA, Corp. of Sunnyvale, California owns the copyright and has design
+patents pending on the design and interface of the NV chips. Users and
+possessors of this source code are hereby granted a nonexclusive, royalty-free
+copyright and design patent license to use this code in individual and
+commercial software.
+
+Any use of this source code must include, in the user documentation and
+internal comments to the code, notices to the end user as follows:
+
+Copyright (c) 1996 NVIDIA, Corp. NVIDIA design patents pending in the U.S. and
+foreign countries.
+
+NVIDIA, CORP. MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
+CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED
+WARRANTY OF ANY KIND. NVIDIA, CORP. DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA, CORP. BE LIABLE
+FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE.
+
+3.4. GLX Public License
+
+GLX PUBLIC LICENSE (Version 1.0 (2/11/99)) ("License")
+
+Subject to any third party claims, Silicon Graphics, Inc. ("SGI") hereby
+grants permission to Recipient (defined below), under Recipient's copyrights
+in the Original Software (defined below), to use, copy, modify, merge,
+publish, distribute, sublicense and/or sell copies of Subject Software
+(defined below), and to permit persons to whom the Subject Software is
+furnished in accordance with this License to do the same, subject to all of
+the following terms and conditions, which Recipient accepts by engaging in any
+such use, copying, modifying, merging, publishing, distributing, sublicensing
+or selling:
+
+1. Definitions.
+
+    (a) "Original Software" means source code of computer software code which
+    is described in Exhibit A as Original Software.
+
+    (b) "Modifications" means any addition to or deletion from the substance
+    or structure of either the Original Software or any previous
+    Modifications. When Subject Software is released as a series of files, a
+    Modification means (i) any addition to or deletion from the contents of a
+    file containing Original Software or previous Modifications and (ii) any
+    new file that contains any part of the Original Code or previous
+    Modifications.
+
+    (c) "Subject Software" means the Original Software or Modifications or the
+    combination of the Original Software and Modifications, or portions of any
+    of the foregoing.
+
+    (d) "Recipient" means an individual or a legal entity exercising rights
+    under, and complying with all of the terms of, this License. For legal
+    entities, "Recipient" includes any entity which controls, is controlled
+    by, or is under common control with Recipient. For purposes of this
+    definition, "control" of an entity means (a) the power, direct or
+    indirect, to direct or manage such entity, or (b) ownership of fifty
+    percent (50%) or more of the outstanding shares or beneficial ownership of
+    such entity.
+
+2. Redistribution of Source Code Subject to These Terms. Redistributions of
+Subject Software in source code form must retain the notice set forth in
+Exhibit A, below, in every file. A copy of this License must be included in
+any documentation for such Subject Software where the recipients' rights
+relating to Subject Software are described. Recipient may distribute the
+source code version of Subject Software under a license of Recipient's choice,
+which may contain terms different from this License, provided that (i)
+Recipient is in compliance with the terms of this License, and (ii) the
+license terms include this Section 2 and Sections 3, 4, 7, 8, 10, 12 and 13 of
+this License, which terms may not be modified or superseded by any other terms
+of such license. If Recipient distributes the source code version under a
+different license Recipient must make it absolutely clear that any terms which
+differ from this License are offered by Recipient alone, not by SGI. Recipient
+hereby agrees to indemnify SGI for any liability incurred by SGI as a result
+of any such terms Recipient offers.
+
+3. Redistribution in Executable Form. The notice set forth in Exhibit A must
+be conspicuously included in any notice in an executable version of Subject
+Software, related documentation or collateral in which Recipient describes the
+user's rights relating to the Subject Software. Recipient may distribute the
+executable version of Subject Software under a license of Recipient's choice,
+which may contain terms different from this License, provided that (i)
+Recipient is in compliance with the terms of this License, and (ii) the
+license terms include this Section 3 and Sections 4, 7, 8, 10, 12 and 13 of
+this License, which terms may not be modified or superseded by any other terms
+of such license. If Recipient distributes the executable version under a
+different license Recipient must make it absolutely clear that any terms which
+differ from this License are offered by Recipient alone, not by SGI. Recipient
+hereby agrees to indemnify SGI for any liability incurred by SGI as a result
+of any such terms Recipient offers.
+
+4. Termination. This License and the rights granted hereunder will terminate
+automatically if Recipient fails to comply with terms herein and fails to cure
+such breach within 30 days of the breach. Any sublicense to the Subject
+Software which is properly granted shall survive any termination of this
+License absent termination by the terms of such sublicense. Provisions which,
+by their nature, must remain in effect beyond the termination of this License
+shall survive.
+
+5. No Trademark Rights. This License does not grant any rights to use any
+trade name, trademark or service mark whatsoever. No trade name, trademark or
+service mark of SGI may be used to endorse or promote products derived from
+the Subject Software without prior written permission of SGI.
+
+6. No Other Rights. This License does not grant any rights with respect to the
+OpenGL API or to any software or hardware implementation thereof or to any
+other software whatsoever, nor shall any other rights or licenses not
+expressly granted hereunder arise by implication, estoppel or otherwise with
+respect to the Subject Software. Title to and ownership of the Original
+Software at all times remains with SGI. All rights in the Original Software
+not expressly granted under this License are reserved.
+
+7. Compliance with Laws; Non-Infringement. Recipient shall comply with all
+applicable laws and regulations in connection with use and distribution of the
+Subject Software, including but not limited to, all export and import control
+laws and regulations of the U.S. government and other countries. Recipient may
+not distribute Subject Software that (i) in any way infringes (directly or
+contributorily) the rights (including patent, copyright, trade secret,
+trademark or other intellectual property rights of any kind) of any other
+person or entity or (ii) breaches any representation or warranty, express,
+implied or statutory, which under any applicable law it might be deemed to
+have been distributed.
+
+8. Claims of Infringement. If Recipient at any time has knowledge of any one
+or more third party claims that reproduction, modification, use, distribution,
+import or sale of Subject Software (including particular functionality or code
+incorporated in Subject Software) infringes the third party's intellectual
+property rights, Recipient must place in a well-identified web page bearing
+the title "LEGAL" a description of each such claim and a description of the
+party making each such claim in sufficient detail that a user of the Subject
+Software will know whom to contact regarding the claim. Also, upon gaining
+such knowledge of any such claim, Recipient must conspicuously include the URL
+for such web page in the Exhibit A notice required under Sections 2 and 3,
+above, and in the text of any related documentation, license agreement or
+collateral in which Recipient describes end user's rights relating to the
+Subject Software. If Recipient obtains such knowledge after it makes Subject
+Software available to any other person or entity, Recipient shall take other
+steps (such as notifying appropriate mailing lists or newsgroups) reasonably
+calculated to inform those who received the Subject Software that new
+knowledge has been obtained.
+
+9. DISCLAIMER OF WARRANTY. SUBJECT SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
+WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT
+LIMITATION, WARRANTIES THAT THE SUBJECT SOFTWARE IS FREE OF DEFECTS,
+MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. SGI ASSUMES NO
+RISK AS TO THE QUALITY AND PERFORMANCE OF THE SOFTWARE. SHOULD ANY SOFTWARE
+PROVE DEFECTIVE IN ANY RESPECT, SGI ASSUMES NO COST OR LIABILITY FOR ANY
+SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
+ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY SUBJECT SOFTWARE IS AUTHORIZED
+HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+10. LIMITATION OF LIABILITY. UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY,
+WHETHER TORT (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE OR STRICT LIABILITY),
+CONTRACT, OR OTHERWISE, SHALL SGI OR ANY SGI LICENSOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK
+STOPPAGE, LOSS OF DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF
+THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY
+TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SGI's NEGLIGENCE TO
+THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT
+ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+THAT EXCLUSION AND LIMITATION MAY NOT APPLY TO RECIPIENT.
+
+11. Indemnity. Recipient shall be solely responsible for damages arising,
+directly or indirectly, out of its utilization of rights under this License.
+Recipient will defend, indemnify and hold harmless Silicon Graphics, Inc. from
+and against any loss, liability, damages, costs or expenses (including the
+payment of reasonable attorneys fees) arising out of Recipient's use,
+modification, reproduction and distribution of the Subject Software or out of
+any representation or warranty made by Recipient.
+
+12. U.S. Government End Users. The Subject Software is a "commercial item"
+consisting of "commercial computer software" as such terms are defined in
+title 48 of the Code of Federal Regulations and all U.S. Government End Users
+acquire only the rights set forth in this License and are subject to the terms
+of this License.
+
+13. Miscellaneous. This License represents the complete agreement concerning
+subject matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed so as to achieve as nearly as
+possible the same economic effect as the original provision and the remainder
+of this License will remain in effect. This License shall be governed by and
+construed in accordance with the laws of the United States and the State of
+California as applied to agreements entered into and to be performed entirely
+within California between California residents. Any litigation relating to
+this License shall be subject to the exclusive jurisdiction of the Federal
+Courts of the Northern District of California (or, absent subject matter
+jurisdiction in such courts, the courts of the State of California), with
+venue lying exclusively in Santa Clara County, California, with the losing
+party responsible for costs, including without limitation, court costs and
+reasonable attorneys fees and expenses. The application of the United Nations
+Convention on Contracts for the International Sale of Goods is expressly
+excluded. Any law or regulation which provides that the language of a contract
+shall be construed against the drafter shall not apply to this License.
+
+Exhibit A
+
+The contents of this file are subject to Sections 2, 3, 4, 7, 8, 10, 12 and 13
+of the GLX Public License Version 1.0 (the "License"). You may not use this
+file except in compliance with those sections of the License. You may obtain a
+copy of the License at Silicon Graphics, Inc., attn: Legal Services, 2011 N.
+Shoreline Blvd., Mountain View, CA 94043 or at
+http://www.sgi.com/software/opensource/glx/license.html.
+
+Software distributed under the License is distributed on an "AS IS" basis. ALL
+WARRANTIES ARE DISCLAIMED, INCLUDING, WITHOUT LIMITATION, ANY IMPLIED
+WARRANTIES OF MERCHANTABILITY, OF FITNESS FOR A PARTICULAR PURPOSE OR OF NON-
+INFRINGEMENT. See the License for the specific language governing rights and
+limitations under the License.
+
+The Original Software is GLX version 1.2 source code, released February, 1999.
+The developer of the Original Software is Silicon Graphics, Inc. Those
+portions of the Subject Software created by Silicon Graphics, Inc. are
+Copyright (c) 1991-9 Silicon Graphics, Inc. All Rights Reserved.
+
+3.5. CID Font Code Public License
+
+CID FONT CODE PUBLIC LICENSE (Version 1.0 (3/31/99))("License")
+
+Subject to any applicable third party claims, Silicon Graphics, Inc. ("SGI")
+hereby grants permission to Recipient (defined below), under SGI's copyrights
+in the Original Software (defined below), to use, copy, modify, merge,
+publish, distribute, sublicense and/or sell copies of Subject Software
+(defined below) in both source code and executable form, and to permit persons
+to whom the Subject Software is furnished in accordance with this License to
+do the same, subject to all of the following terms and conditions, which
+Recipient accepts by engaging in any such use, copying, modifying, merging,
+publication, distributing, sublicensing or selling:
+
+1. Definitions.
+
+    a. "Original Software" means source code of computer software code that is
+    described in Exhibit A as Original Software.
+
+    b. "Modifications" means any addition to or deletion from the substance or
+    structure of either the Original Software or any previous Modifications.
+    When Subject Software is released as a series of files, a Modification
+    means (i) any addition to or deletion from the contents of a file
+    containing Original Software or previous Modifications and (ii) any new
+    file that contains any part of the Original Code or previous
+    Modifications.
+
+    c. "Subject Software" means the Original Software or Modifications or the
+    combination of the Original Software and Modifications, or portions of any
+    of the foregoing.
+
+    d. "Recipient" means an individual or a legal entity exercising rights
+    under the terms of this License. For legal entities, "Recipient" includes
+    any entity that controls, is controlled by, or is under common control
+    with Recipient. For purposes of this definition, "control" of an entity
+    means (i) the power, direct or indirect, to direct or manage such entity,
+    or (ii) ownership of fifty percent (50%) or more of the outstanding shares
+    or beneficial ownership of such entity.
+
+    e. "Required Notice" means the notice set forth in Exhibit A to this
+    License.
+
+    f. "Accompanying Technology" means any software or other technology that
+    is not a Modification and that is distributed or made publicly available
+    by Recipient with the Subject Software. Separate software files that do
+    not contain any Original Software or any previous Modification shall not
+    be deemed a Modification, even if such software files are aggregated as
+    part of a product, or in any medium of storage, with any file that does
+    contain Original Software or any previous Modification.
+
+2. License Terms. All distribution of the Subject Software must be made
+subject to the terms of this License. A copy of this License and the Required
+Notice must be included in any documentation for Subject Software where
+Recipient's rights relating to Subject Software and/or any Accompanying
+Technology are described. Distributions of Subject Software in source code
+form must also include the Required Notice in every file distributed. In
+addition, a ReadMe file entitled "Important Legal Notice" must be distributed
+with each distribution of one or more files that incorporate Subject Software.
+That file must be included with distributions made in both source code and
+executable form. A copy of the License and the Required Notice must be
+included in that file. Recipient may distribute Accompanying Technology under
+a license of Recipient's choice, which may contain terms different from this
+License, provided that (i) Recipient is in compliance with the terms of this
+License, (ii) such other license terms do not modify or supersede the terms of
+this License as applicable to the Subject Software, (iii) Recipient hereby
+indemnifies SGI for any liability incurred by SGI as a result of the
+distribution of Accompanying Technology or the use of other license terms.
+
+3. Termination. This License and the rights granted hereunder will terminate
+automatically if Recipient fails to comply with terms herein and fails to cure
+such breach within 30 days of the breach. Any sublicense to the Subject
+Software that is properly granted shall survive any termination of this
+License absent termination by the terms of such sublicense. Provisions which,
+by their nature, must remain in effect beyond the termination of this License
+shall survive.
+
+4. Trademark Rights. This License does not grant any rights to use any trade
+name, trademark or service mark whatsoever. No trade name, trademark or
+service mark of SGI may be used to endorse or promote products derived from or
+incorporating any Subject Software without prior written permission of SGI.
+
+5. No Other Rights. No rights or licenses not expressly granted hereunder
+shall arise by implication, estoppel or otherwise. Title to and ownership of
+the Original Software at all times remains with SGI. All rights in the
+Original Software not expressly granted under this License are reserved.
+
+6. Compliance with Laws; Non-Infringement. Recipient shall comply with all
+applicable laws and regulations in connection with use and distribution of the
+Subject Software, including but not limited to, all export and import control
+laws and regulations of the U.S. government and other countries. Recipient may
+not distribute Subject Software that (i) in any way infringes (directly or
+contributorily) the rights (including patent, copyright, trade secret,
+trademark or other intellectual property rights of any kind) of any other
+person or entity, or (ii) breaches any representation or warranty, express,
+implied or statutory, which under any applicable law it might be deemed to
+have been distributed.
+
+7. Claims of Infringement. If Recipient at any time has knowledge of any one
+or more third party claims that reproduction, modification, use, distribution,
+import or sale of Subject Software (including particular functionality or code
+incorporated in Subject Software) infringes the third party's intellectual
+property rights, Recipient must place in a well-identified web page bearing
+the title "LEGAL" a description of each such claim and a description of the
+party making each such claim in sufficient detail that a user of the Subject
+Software will know whom to contact regarding the claim. Also, upon gaining
+such knowledge of any such claim, Recipient must conspicuously include the URL
+for such web page in the Required Notice, and in the text of any related
+documentation, license agreement or collateral in which Recipient describes
+end user's rights relating to the Subject Software. If Recipient obtains such
+knowledge after it makes Subject Software available to any other person or
+entity, Recipient shall take other steps (such as notifying appropriate
+mailing lists or newsgroups) reasonably calculated to provide such knowledge
+to those who received the Subject Software.
+
+8. DISCLAIMER OF WARRANTY. SUBJECT SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
+WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT
+LIMITATION, WARRANTIES THAT THE SUBJECT SOFTWARE IS FREE OF DEFECTS,
+MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. SGI ASSUMES NO
+RISK AS TO THE QUALITY AND PERFORMANCE OF THE SOFTWARE. SHOULD ANY SOFTWARE
+PROVE DEFECTIVE IN ANY RESPECT, SGI ASSUMES NO COST OR LIABILITY FOR ANY
+SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
+ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY SUBJECT SOFTWARE IS AUTHORIZED
+HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+9. LIMITATION OF LIABILITY. UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY,
+WHETHER TORT (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE OR STRICT LIABILITY),
+CONTRACT, OR OTHERWISE, SHALL SGI OR ANY SGI LICENSOR BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SUBJECT SOFTWARE OR
+THE USE OR OTHER DEALINGS IN THE SUBJECT SOFTWARE. SOME JURISDICTIONS DO NOT
+ALLOW THE EXCLUSION OR LIMITATION OF CERTAIN DAMAGES, SO THIS EXCLUSION AND
+LIMITATION MAY NOT APPLY TO RECIPIENT TO THE EXTENT SO DISALLOWED.
+
+10. Indemnity. Recipient shall be solely responsible for damages arising,
+directly or indirectly, out of its utilization of rights under this License.
+Recipient will defend, indemnify and hold SGI and its successors and assigns
+harmless from and against any loss, liability, damages, costs or expenses
+(including the payment of reasonable attorneys fees) arising out of
+Recipient's use, modification, reproduction and distribution of the Subject
+Software or out of any representation or warranty made by Recipient.
+
+11. U.S. Government End Users. The Subject Software is a "commercial item"
+consisting of "commercial computer software" as such terms are defined in
+title 48 of the Code of Federal Regulations and all U.S. Government End Users
+acquire only the rights set forth in this License and are subject to the terms
+of this License.
+
+12. Miscellaneous. This License represents the complete agreement concerning
+subject matter hereof. If any provision of this License is held to be
+unenforceable by any judicial or administrative authority having proper
+jurisdiction with respect thereto, such provision shall be reformed so as to
+achieve as nearly as possible the same economic effect as the original
+provision and the remainder of this License will remain in effect. This
+License shall be governed by and construed in accordance with the laws of the
+United States and the State of California as applied to agreements entered
+into and to be performed entirely within California between California
+residents. Any litigation relating to this License shall be subject to the
+exclusive jurisdiction of the Federal Courts of the Northern District of
+California (or, absent subject matter jurisdiction in such courts, the courts
+of the State of California), with venue lying exclusively in Santa Clara
+County, California, with the losing party responsible for costs, including
+without limitation, court costs and reasonable attorneys fees and expenses.
+The application of the United Nations Convention on Contracts for the
+International Sale of Goods is expressly excluded. Any law or regulation that
+provides that the language of a contract shall be construed against the
+drafter shall not apply to this License.
+
+Exhibit A
+
+Copyright (c) 1994-1999 Silicon Graphics, Inc.
+
+The contents of this file are subject to the CID Font Code Public License
+Version 1.0 (the "License"). You may not use this file except in compliance
+with the License. You may obtain a copy of the License at Silicon Graphics,
+Inc., attn: Legal Services, 2011 N. Shoreline Blvd., Mountain View, CA 94043
+or at http://www.sgi.com/software/opensource/cid/license.html
+
+Software distributed under the License is distributed on an "AS IS" basis. ALL
+WARRANTIES ARE DISCLAIMED, INCLUDING, WITHOUT LIMITATION, ANY IMPLIED
+WARRANTIES OF MERCHANTABILITY, OF FITNESS FOR A PARTICULAR PURPOSE OR OF
+NON-INFRINGEMENT. See the License for the specific language governing rights
+and limitations under the License.
+
+The Original Software (as defined in the License) is CID font code that was
+developed by Silicon Graphics, Inc. Those portions of the Subject Software (as
+defined in the License) that were created by Silicon Graphics, Inc. are
+Copyright (c) 1994-1999 Silicon Graphics, Inc. All Rights Reserved.
+
+[NOTE: When using this text in connection with Subject Software delivered
+solely in object code form, Recipient may replace the words "this file" with
+"this software" in both the first and second sentences.] 3.6. Bitstream Vera
+Fonts Copyright
+
+The fonts have a generous copyright, allowing derivative works (as long as
+"Bitstream" or "Vera" are not in the names), and full redistribution (so long
+as they are not *sold* by themselves). They can be bundled, redistributed
+and sold with any software.
+
+The fonts are distributed under the following copyright:
+
+Copyright (c) 2003 by Bitstream, Inc. All Rights Reserved. Bitstream Vera is a
+trademark of Bitstream, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of the fonts accompanying this license ("Fonts") and associated documentation
+files (the "Font Software"), to reproduce and distribute the Font Software,
+including without limitation the rights to use, copy, merge, publish,
+distribute, and/or sell copies of the Font Software, and to permit persons to
+whom the Font Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright and trademark notices and this permission notice shall be
+included in all copies of one or more of the Font Software typefaces.
+
+The Font Software may be modified, altered, or added to, and in particular the
+designs of glyphs or characters in the Fonts may be modified and additional
+glyphs or characters may be added to the Fonts, only if the fonts are renamed
+to names not containing either the words "Bitstream" or the word "Vera".
+
+This License becomes null and void to the extent applicable to Fonts or Font
+Software that has been modified and is distributed under the "Bitstream Vera"
+names.
+
+The Font Software may be sold as part of a larger software package but no copy
+of one or more of the Font Software typefaces may be sold by itself.
+
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT,
+TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL BITSTREAM OR THE GNOME FOUNDATION
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL,
+SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO
+USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
+
+Except as contained in this notice, the names of Gnome, the Gnome Foundation,
+and Bitstream Inc., shall not be used in advertising or otherwise to promote
+the sale, use or other dealings in this Font Software without prior written
+authorization from the Gnome Foundation or Bitstream Inc., respectively. For
+further information, contact: fonts at gnome dot org.
+
+3.7. Bigelow & Holmes Inc and URW++ GmbH Luxi font license
+
+Luxi fonts copyright (c) 2001 by Bigelow & Holmes Inc. Luxi font instruction
+code copyright (c) 2001 by URW++ GmbH. All Rights Reserved. Luxi is a
+registered trademark of Bigelow & Holmes Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of these Fonts and associated documentation files (the "Font Software"), to
+deal in the Font Software, including without limitation the rights to use,
+copy, merge, publish, distribute, sublicense, and/or sell copies of the Font
+Software, and to permit persons to whom the Font Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright and trademark notices and this permission notice shall be
+included in all copies of one or more of the Font Software.
+
+The Font Software may not be modified, altered, or added to, and in particular
+the designs of glyphs or characters in the Fonts may not be modified nor may
+additional glyphs or characters be added to the Fonts. This License becomes
+null and void when the Fonts or Font Software have been modified.
+
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT,
+TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL BIGELOW & HOLMES INC. OR URW++
+GMBH. BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY
+GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN
+AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR
+INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT
+SOFTWARE.
+
+Except as contained in this notice, the names of Bigelow & Holmes Inc. and
+URW++ GmbH. shall not be used in advertising or otherwise to promote the sale,
+use or other dealings in this Font Software without prior written
+authorization from Bigelow & Holmes Inc. and URW++ GmbH.
+
+For further information, contact:
+
+info@urwpp.de or design@bigelowandholmes.com
+
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to zlib v1.2.3, which is included 
+with JRE 7, JDK 7, and OpenJDK 7.
+
+--- begin of LICENSE ---
+
+  version 1.2.3, July 18th, 2005
+
+  Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler
 
   This software is provided 'as-is', without any express or implied
   warranty.  In no event will the authors be held liable for any damages
@@ -93,686 +3159,23 @@
   Jean-loup Gailly        Mark Adler
   jloup@gzip.org          madler@alumni.caltech.edu
 
-If you use the zlib library in a product, we would appreciate *not*
-receiving lengthy legal documents to sign. The sources are provided
-for free but without warranty of any kind.  The library has been
-entirely written by Jean-loup Gailly and Mark Adler; it does not
-include third-party code.
-
-If you redistribute modified sources, we would appreciate that you include
-in the file ChangeLog history information documenting your changes.
-
-%% This notice is provided with respect to W3C (DTD for XML Signatures), which may be included with this software: 
-W3C® SOFTWARE NOTICE AND LICENSE
-Copyright © 1994-2002 World Wide Web Consortium, (Massachusetts Institute of Technology, Institut National de Recherche en Informatique et en Automatique, Keio University). All Rights Reserved. http://www.w3.org/Consortium/Legal/
-This W3C work (including software, documents, or other related items) is being provided by the copyright holders under the following license. By obtaining, using and/or copying this work, you (the licensee) agree that you have read, understood, and will comply with the following terms and conditions:
-Permission to use, copy, modify, and distribute this software and its documentation, with or without modification,  for any purpose and without fee or royalty is hereby granted, provided that you include the following on ALL copies of the software and documentation or portions thereof, including modifications, that you make:
-1.The full text of this NOTICE in a location viewable to users of the redistributed or derivative work. 
-2.Any pre-existing intellectual property disclaimers, notices, or terms and conditions. If none exist, a short notice of the following form (hypertext is preferred, text is permitted) should be used within the body of any redistributed or derivative code: "Copyright © [$date-of-software] World Wide Web Consortium, (Massachusetts Institute of Technology, Institut National de Recherche en Informatique et en Automatique, Keio University). All Rights Reserved. http://www.w3.org/Consortium/Legal/" 
-3.Notice of any changes or modifications to the W3C files, including the date changes were made. (We recommend you provide URIs to the location from which the code is derived.) 
-THIS SOFTWARE AND DOCUMENTATION IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
-COPYRIGHT HOLDERS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE OF THE SOFTWARE OR DOCUMENTATION.
-The name and trademarks of copyright holders may NOT be used in advertising or publicity pertaining to the software without specific, written prior permission. Title to copyright in this software and any associated documentation will at all times remain with copyright holders.
-____________________________________
-This formulation of W3C's notice and license became active on August 14 1998 so as to improve compatibility with GPL. This version ensures that W3C software licensing terms are no more restrictive than GPL and consequently W3C software may be distributed in GPL packages. See the older formulation for the policy prior to this date. Please see our Copyright FAQ for common questions about using materials from our site, including specific terms and conditions for packages like libwww, Amaya, and Jigsaw. Other questions about this notice can be directed to site-policy@w3.org.

-%% This notice is provided with respect to jscheme.jar, which may be included with this software: 
-Software License Agreement
-Copyright © 1998-2002 by Peter Norvig. 
-Permission is granted to anyone to use this software, in source or object code form, on any computer system, and to modify, compile, decompile, run, and redistribute it to anyone else, subject to the following restrictions: 
-1.The author makes no warranty of any kind, either expressed or implied, about the suitability of this software for any purpose.
-2.The author accepts no liability of any kind for damages or other consequences of the use of this software, even if they arise from defects in the software.
-3.The origin of this software must not be misrepresented, either by explicit claim or by omission.
-4.Altered versions must be plainly marked as such, and must not be misrepresented as being the original software. Altered versions may be distributed in packages under other licenses (such as the GNU license). 
-If you find this software useful, it would be nice if you let me (peter@norvig.com) know about it, and nicer still if you send me modifications that you are willing to share. However, you are not required to do so.
-
-
-%% This notice is provided with respect to PC/SC Lite for Suse Linux v. 1.1.1, which may be included with this software: 
-
-Copyright (c) 1999-2004 David Corcoran 
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the distribution.
-3. The name of the author may not be used to endorse or promote products
-   derived from this software without specific prior written permission.
-
-Changes to this license can be made only by the copyright author with 
-explicit written consent.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-%% This notice is provided with respect to IAIK PKCS Wrapper, which may be included with this software: 
-
-Copyright (c) 2002 Graz University of Technology. All rights reserved.
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-3. The end-user documentation included with the redistribution, if any, must include the following acknowledgment:
-
-   "This product includes software developed by IAIK of Graz University of Technology."
-
-   Alternately, this acknowledgment may appear in the software itself, if and wherever such third-party acknowledgments normally appear.
-
-4. The names "Graz University of Technology" and "IAIK of Graz University of Technology" must not be used to endorse or promote products derived from this software without prior written permission.
-
-5. Products derived from this software may not be called "IAIK PKCS Wrapper", nor may "IAIK" appear in their name, without prior written permission of Graz University of Technology.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE LICENSOR BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
-OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
-OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-%% This notice is provided with respect to Document Object Model (DOM) v. Level 3, which may be included with this software: 
-
-W3C® SOFTWARE NOTICE AND LICENSE
-
-http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
-
-This work (and included software, documentation such as READMEs, or other related items) is being
-provided by the copyright holders under the following license. By obtaining, using and/or copying this work, you
-(the licensee) agree that you have read, understood, and will comply with the following terms and conditions.
-
-Permission to copy, modify, and distribute this software and its documentation, with or without modification, for
-any purpose and without fee or royalty is hereby granted, provided that you include the following on ALL copies
-of the software and documentation or portions thereof, including modifications:
-   1.The full text of this NOTICE in a location viewable to users of the redistributed or derivative work. 
-   2.Any pre-existing intellectual property disclaimers, notices, or terms and conditions. If none exist, the
-     W3C Software Short Notice should be included (hypertext is preferred, text is permitted) within the body
-     of any redistributed or derivative code. 
-   3.Notice of any changes or modifications to the files, including the date changes were made. (We
-     recommend you provide URIs to the location from which the code is derived.) 
-THIS SOFTWARE AND DOCUMENTATION IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO,
-WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
-
-COPYRIGHT HOLDERS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE OF THE SOFTWARE OR DOCUMENTATION.
-The name and trademarks of copyright holders may NOT be used in advertising or publicity pertaining to the
-software without specific, written prior permission. Title to copyright in this software and any associated
-documentation will at all times remain with copyright holders.
-
-____________________________________
-
-This formulation of W3C's notice and license became active on December 31 2002. This version removes the
-copyright ownership notice such that this license can be used with materials other than those owned by the
-W3C, reflects that ERCIM is now a host of the W3C, includes references to this specific dated version of the
-license, and removes the ambiguous grant of "use". Otherwise, this version is the same as the previous
-version and is written so as to preserve the Free Software Foundation's assessment of GPL compatibility and
-OSI's certification under the Open Source Definition. Please see our Copyright FAQ for common questions
-about using materials from our site, including specific terms and conditions for packages like libwww, Amaya,
-and Jigsaw. Other questions about this notice can be directed to 
-site-policy@w3.org.
-
-%% This notice is provided with respect to Xalan, Xerces, which may be included with this software: 
-
-/*
- * The Apache Software License, Version 1.1
- *
- *
- * Copyright (c) 1999-2003 The Apache Software Foundation.  All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. The end-user documentation included with the redistribution,
- *    if any, must include the following acknowledgment:  
- *       "This product includes software developed by the
- *        Apache Software Foundation (http://www.apache.org/)."
- *    Alternately, this acknowledgment may appear in the software itself,
- *    if and wherever such third-party acknowledgments normally appear.
- *
- * 4. The names "Xerces" and "Apache Software Foundation" must
- *    not be used to endorse or promote products derived from this
- *    software without prior written permission. For written 
- *    permission, please contact apache@apache.org.
- *
- * 5. Products derived from this software may not be called "Apache",
- *    nor may "Apache" appear in their name, without prior written
- *    permission of the Apache Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED.  IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * ====================================================================
- *
- * This software consists of voluntary contributions made by many
- * individuals on behalf of the Apache Software Foundation and was
- * originally based on software copyright (c) 1999, International
- * Business Machines, Inc., http://www.ibm.com.  For more
- * information on the Apache Software Foundation, please see
- * 
-
-%% This notice is provided with respect to JavaScript, which may be included with this software: 
-
-AMENDMENTS 
-The Netscape Public License Version 1.1 ("NPL") consists of the Mozilla Public License Version 1.1 with the following Amendments, including Exhibit A-Netscape Public License.  Files identified with "Exhibit A-Netscape Public License" are governed by the Netscape Public License Version 1.1. 
-Additional Terms applicable to the Netscape Public License. 
-I. Effect. 
-These additional terms described in this Netscape Public License -- Amendments shall apply to the Mozilla Communicator client code and to all Covered Code under this License. 
-II. ''Netscape's Branded Code'' means Covered Code that Netscape distributes and/or permits others to distribute under one or more trademark(s) which are controlled by Netscape but which are not licensed for use under this License. 
-III. Netscape and logo. 
-This License does not grant any rights to use the trademarks "Netscape'', the "Netscape N and horizon'' logo or the "Netscape lighthouse" logo, "Netcenter", "Gecko", "Java" or "JavaScript", "Smart Browsing" even if such marks are included in the Original Code or Modifications. 
-IV. Inability to Comply Due to Contractual Obligation. 
-Prior to licensing the Original Code under this License, Netscape has licensed third party code for use in Netscape's Branded Code. To the extent that Netscape is limited contractually from making such third party code available under this License, Netscape may choose to reintegrate such code into Covered Code without being required to distribute such code in Source Code form, even if such code would otherwise be considered ''Modifications'' under this License. 
-V. Use of Modifications and Covered Code by Initial Developer. 
-V.1. In General. 
-The obligations of Section 3 apply to Netscape, except to the extent specified in this Amendment, Section V.2 and V.3. 
-V.2. Other Products. 
-Netscape may include Covered Code in products other than the Netscape's Branded Code which are released by Netscape during the two (2) years following the release date of the Original Code, without such additional products becoming subject to the terms of this License, and may license such additional products on different terms from those contained in this License. 
-V.3. Alternative Licensing. 
-Netscape may license the Source Code of Netscape's Branded Code, including Modifications incorporated therein, without such Netscape Branded Code becoming subject to the terms of this License, and may license such Netscape Branded Code on different terms from those contained in this License. 

-VI. Litigation. 
-Notwithstanding the limitations of Section 11 above, the provisions regarding litigation in Section 11(a), (b) and (c) of the License shall apply to all disputes relating to this License.
-
-EXHIBIT A-Netscape Public License. 

-''The contents of this file are subject to the Netscape Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/NPL/ 
-Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. 
-The Original Code is Mozilla Communicator client code, released March 31, 1998. 
-The Initial Developer of the Original Code is Netscape Communications Corporation. Portions created by Netscape are Copyright (C) 1998-1999 Netscape Communications Corporation. All Rights Reserved. 
-Contributor(s): ______________________________________.

-Alternatively, the contents of this file may be used under the terms of the _____ license (the  "[___] License"), in which case the provisions of [______] License are applicable  instead of those above.  If you wish to allow use of your version of this file only under the terms of the [____] License and not to allow others to use your version of this file under the NPL, indicate your decision by deleting  the provisions above and replace  them with the notice and other provisions required by the [___] License.  If you do not delete the provisions above, a recipient may use your version of this file under either the NPL or the [___] License."
-
-MOZILLA PUBLIC LICENSE 
-Version 1.1 
-
-1. Definitions. 
-1.0.1. "Commercial Use" means distribution or otherwise making the Covered Code available to a third party. 
-1.1. ''Contributor'' means each entity that creates or contributes to the creation of Modifications. 
-1.2. ''Contributor Version'' means the combination of the Original Code, prior Modifications used by a Contributor, and the Modifications made by that particular Contributor. 
-1.3. ''Covered Code'' means the Original Code or Modifications or the combination of the Original Code and Modifications, in each case including portions thereof. 
-1.4. ''Electronic Distribution Mechanism'' means a mechanism generally accepted in the software development community for the electronic transfer of data. 
-1.5. ''Executable'' means Covered Code in any form other than Source Code. 
-1.6. ''Initial Developer'' means the individual or entity identified as the Initial Developer in the Source Code notice required by Exhibit A. 
-1.7. ''Larger Work'' means a work which combines Covered Code or portions thereof with code not governed by the terms of this License. 
-1.8. ''License'' means this document. 
-1.8.1. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. 
-1.9. ''Modifications'' means any addition to or deletion from the substance or structure of either the Original Code or any previous Modifications. When Covered Code is released as a series of files, a Modification is: 
-A. Any addition to or deletion from the contents of a file containing Original Code or previous Modifications. 
-B. Any new file that contains any part of the Original Code or previous Modifications. 

-1.10. ''Original Code'' means Source Code of computer software code which is described in the Source Code notice required by Exhibit A as Original Code, and which, at the time of its release under this License is not already Covered Code governed by this License. 
-1.10.1. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation,  method, process, and apparatus claims, in any patent Licensable by grantor. 
-1.11. ''Source Code'' means the preferred form of the Covered Code for making modifications to it, including all modules it contains, plus any associated interface definition files, scripts used to control compilation and installation of an Executable, or source code differential comparisons against either the Original Code or another well known, available Covered Code of the Contributor's choice. The Source Code can be in a compressed or archival form, provided the appropriate decompression or de-archiving software is widely available for no charge. 
-1.12. "You'' (or "Your")  means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License or a future version of this License issued under Section 6.1. For legal entities, "You'' includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control'' means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-2. Source Code License. 
-2.1. The Initial Developer Grant. 
-The Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license, subject to third party intellectual property claims: 
-(a)  under intellectual property rights (other than patent or trademark) Licensable by Initial Developer to use, reproduce, modify, display, perform, sublicense and distribute the Original Code (or portions thereof) with or without Modifications, and/or as part of a Larger Work; and 
-(b) under Patents Claims infringed by the making, using or selling of Original Code, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Code (or portions thereof). 

-(c) the licenses granted in this Section 2.1(a) and (b) are effective on the date Initial Developer first distributes Original Code under the terms of this License. 
-(d) Notwithstanding Section 2.1(b) above, no patent license is granted: 1) for code that You delete from the Original Code; 2) separate from the Original Code;  or 3) for infringements caused by: i) the modification of the Original Code or ii) the combination of the Original Code with other software or devices. 

-2.2. Contributor Grant. 
-Subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license 

-(a)  under intellectual property rights (other than patent or trademark) Licensable by Contributor, to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof) either on an unmodified basis, with other Modifications, as Covered Code and/or as part of a Larger Work; and 
-(b) under Patent Claims infringed by the making, using, or selling of  Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: 1) Modifications made by that Contributor (or portions thereof); and 2) the combination of  Modifications made by that Contributor with its Contributor Version (or portions of such combination). 
-(c) the licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first makes Commercial Use of the Covered Code. 
-(d)    Notwithstanding Section 2.2(b) above, no patent license is granted: 1) for any code that Contributor has deleted from the Contributor Version; 2)  separate from the Contributor Version;  3)  for infringements caused by: i) third party modifications of Contributor Version or ii)  the combination of Modifications made by that Contributor with other software  (except as part of the Contributor Version) or other devices; or 4) under Patent Claims infringed by Covered Code in the absence of Modifications made by that Contributor.
-
-3. Distribution Obligations. 
-3.1. Application of License. 
-The Modifications which You create or to which You contribute are governed by the terms of this License, including without limitation Section 2.2. The Source Code version of Covered Code may be distributed only under the terms of this License or a future version of this License released under Section 6.1, and You must include a copy of this License with every copy of the Source Code You distribute. You may not offer or impose any terms on any Source Code version that alters or restricts the applicable version of this License or the recipients' rights hereunder. However, You may include an additional document offering the additional rights described in Section 3.5. 
-3.2. Availability of Source Code. 
-Any Modification which You create or to which You contribute must be made available in Source Code form under the terms of this License either on the same media as an Executable version or via an accepted Electronic Distribution Mechanism to anyone to whom you made an Executable version available; and if made available via Electronic Distribution Mechanism, must remain available for at least twelve (12) months after the date it initially became available, or at least six (6) months after a subsequent version of that particular Modification has been made available to such recipients. You are responsible for ensuring that the Source Code version remains available even if the Electronic Distribution Mechanism is maintained by a third party. 
-3.3. Description of Modifications. 
-You must cause all Covered Code to which You contribute to contain a file documenting the changes You made to create that Covered Code and the date of any change. You must include a prominent statement that the Modification is derived, directly or indirectly, from Original Code provided by the Initial Developer and including the name of the Initial Developer in (a) the Source Code, and (b) in any notice in an Executable version or related documentation in which You describe the origin or ownership of the Covered Code. 
-3.4. Intellectual Property Matters 
-(a) Third Party Claims. 
-If Contributor has knowledge that a license under a third party's intellectual property rights is required to exercise the rights granted by such Contributor under Sections 2.1 or 2.2, Contributor must include a text file with the Source Code distribution titled "LEGAL'' which describes the claim and the party making the claim in sufficient detail that a recipient will know whom to contact. If Contributor obtains such knowledge after the Modification is made available as described in Section 3.2, Contributor shall promptly modify the LEGAL file in all copies Contributor makes available thereafter and shall take other steps (such as notifying appropriate mailing lists or newsgroups) reasonably calculated to inform those who received the Covered Code that new knowledge has been obtained. 
-(b) Contributor APIs. 
-If Contributor's Modifications include an application programming interface and Contributor has knowledge of patent licenses which are reasonably necessary to implement that API, Contributor must also include this information in the LEGAL file. 

-          (c)    Representations. 
-Contributor represents that, except as disclosed pursuant to Section 3.4(a) above, Contributor believes that Contributor's Modifications are Contributor's original creation(s) and/or Contributor has sufficient rights to grant the rights conveyed by this License.
-
-3.5. Required Notices. 
-You must duplicate the notice in Exhibit A in each file of the Source Code.  If it is not possible to put such notice in a particular Source Code file due to its structure, then You must include such notice in a location (such as a relevant directory) where a user would be likely to look for such a notice.  If You created one or more Modification(s) You may add your name as a Contributor to the notice described in Exhibit A.  You must also duplicate this License in any documentation for the Source Code where You describe recipients' rights or ownership rights relating to Covered Code.  You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Code. However, You may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear than any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. 
-3.6. Distribution of Executable Versions. 
-You may distribute Covered Code in Executable form only if the requirements of Section 3.1-3.5 have been met for that Covered Code, and if You include a notice stating that the Source Code version of the Covered Code is available under the terms of this License, including a description of how and where You have fulfilled the obligations of Section 3.2. The notice must be conspicuously included in any notice in an Executable version, related documentation or collateral in which You describe recipients' rights relating to the Covered Code. You may distribute the Executable version of Covered Code or ownership rights under a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable version does not attempt to limit or alter the recipient's rights in the Source Code version from the rights set forth in this License. If You distribute the Executable version under a different license You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or any Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. 
-3.7. Larger Works. 
-You may create a Larger Work by combining Covered Code with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Code.
-4. Inability to Comply Due to Statute or Regulation. 
-If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Code due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be included in the LEGAL file described in Section 3.4 and must be included with all distributions of the Source Code. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it.
-5. Application of this License. 
-This License applies to code to which the Initial Developer has attached the notice in Exhibit A and to related Covered Code.
-6. Versions of the License. 
-6.1. New Versions. 
-Netscape Communications Corporation (''Netscape'') may publish revised and/or new versions of the License from time to time. Each version will be given a distinguishing version number. 
-6.2. Effect of New Versions. 
-Once Covered Code has been published under a particular version of the License, You may always continue to use it under the terms of that version. You may also choose to use such Covered Code under the terms of any subsequent version of the License published by Netscape. No one other than Netscape has the right to modify the terms applicable to Covered Code created under this License. 
-6.3. Derivative Works. 
-If You create or use a modified version of this License (which you may only do in order to apply it to code which is not already Covered Code governed by this License), You must (a) rename Your license so that the phrases ''Mozilla'', ''MOZILLAPL'', ''MOZPL'', ''Netscape'', "MPL", ''NPL'' or any confusingly similar phrase do not appear in your license (except to note that your license differs from this License) and (b) otherwise make it clear that Your version of the license contains terms which differ from the Mozilla Public License and Netscape Public License. (Filling in the name of the Initial Developer, Original Code or Contributor in the notice described in Exhibit A shall not of themselves be deemed to be modifications of this License.)
-7. DISCLAIMER OF WARRANTY. 
-COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS'' BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-8. TERMINATION. 
-8.1.  This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. All sublicenses to the Covered Code which are properly granted shall survive any termination of this License. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. 
-8.2.  If You initiate litigation by asserting a patent infringement claim (excluding declatory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You file such action is referred to as "Participant")  alleging that: 
-(a)  such Participant's Contributor Version directly or indirectly infringes any patent, then any and all rights granted by such Participant to You under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively, unless if within 60 days after receipt of notice You either: (i)  agree in writing to pay Participant a mutually agreeable reasonable royalty for Your past and future use of Modifications made by such Participant, or (ii) withdraw Your litigation claim with respect to the Contributor Version against such Participant.  If within 60 days of notice, a reasonable royalty and payment arrangement are not mutually agreed upon in writing by the parties or the litigation claim is not withdrawn, the rights granted by Participant to You under Sections 2.1 and/or 2.2 automatically terminate at the expiration of the 60 day notice period specified above. 
-(b)  any software, hardware, or device, other than such Participant's Contributor Version, directly or indirectly infringes any patent, then any rights granted to You by such Participant under Sections 2.1(b) and 2.2(b) are revoked effective as of the date You first made, used, sold, distributed, or had made, Modifications made by that Participant. 
-8.3.  If You assert a patent infringement claim against Participant alleging that such Participant's Contributor Version directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license. 
-8.4.  In the event of termination under Sections 8.1 or 8.2 above,  all end user license agreements (excluding distributors and resellers) which have been validly granted by You or any distributor hereunder prior to termination shall survive termination.
-9. LIMITATION OF LIABILITY. 
-UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-10. U.S. GOVERNMENT END USERS. 
-The Covered Code is a ''commercial item,'' as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of ''commercial computer software'' and ''commercial computer software documentation,'' as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Code with only those rights set forth herein.
-11. MISCELLANEOUS. 
-This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by California law provisions (except to the extent applicable law, if any, provides otherwise), excluding its conflict-of-law provisions. With respect to disputes in which at least one party is a citizen of, or an entity chartered or registered to do business in the United States of America, any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California, with venue lying in Santa Clara County, California, with the losing party responsible for costs, including without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License.
-12. RESPONSIBILITY FOR CLAIMS. 
-As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
-13. MULTIPLE-LICENSED CODE. 
-Initial Developer may designate portions of the Covered Code as "Multiple-Licensed".  "Multiple-Licensed" means that the Initial Developer permits you to utilize portions of the Covered Code under Your choice of the NPL or the alternative licenses, if any, specified by the Initial Developer in the file described in Exhibit A.
-
-EXHIBIT A -Mozilla Public License. 
-``The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at 
-http://www.mozilla.org/MPL/ 
-Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF 
-ANY KIND, either express or implied. See the License for the specific language governing rights and 
-limitations under the License. 
-The Original Code is ______________________________________. 
-The Initial Developer of the Original Code is ________________________. Portions created by 
- ______________________ are Copyright (C) ______ _______________________. All Rights 
-Reserved. 
-Contributor(s): ______________________________________. 
-Alternatively, the contents of this file may be used under the terms of the _____ license (the  "[___] License"), in which case the provisions of [______] License are applicable  instead of those above.  If you wish to allow use of your version of this file only under the terms of the [____] License and not to allow others to use your version of this file under the MPL, indicate your decision by deleting  the provisions above and replace  them with the notice and other provisions required by the [___] License.  If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the [___] License." 
-[NOTE: The text of this Exhibit A may differ slightly from the text of the notices in the Source Code files of the Original Code. You should use the text of this Exhibit A rather than the text found in the Original Code Source Code for Your Modifications.] 
-
-%% This notice is provided with respect to Mesa 3-D graphics library v. 5, which may be included with this software: 
-
-Copyright (c) 2007 The Khronos Group Inc. 
-
-Permission is hereby granted, free of charge, to any person obtaining a 
-copy of this software and/or associated documentation files (the 
-"Materials"), to deal in the Materials without restriction, including 
-without limitation the rights to use, copy, modify, merge, publish, 
-distribute, sublicense, and/or sell copies of the Materials, and to 
-permit persons to whom the Materials are furnished to do so, subject to 
-the following conditions: 
-
-The above copyright notice and this permission notice shall be included 
-in all copies or substantial portions of the Materials. 
-
-THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 
-MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
-
-%% This notice is provided with respect to Byte Code Engineering Library (BCEL), which may be included with this software: 
-
-Apache Software License
-
-/*
- * ====================================================================
- * The Apache Software License, Version 1.1
- *
- * Copyright (c) 2001 The Apache Software Foundation.  All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. The end-user documentation included with the redistribution,
- *    if any, must include the following acknowledgment:
- *       "This product includes software developed by the
- *        Apache Software Foundation (http://www.apache.org/)."
- *    Alternately, this acknowledgment may appear in the software itself,
- *    if and wherever such third-party acknowledgments normally appear.
- *
- * 4. The names "Apache" and "Apache Software Foundation" and
- *    "Apache BCEL" must not be used to endorse or promote products
- *    derived from this software without prior written permission. For
- *    written permission, please contact apache@apache.org.
- *
- * 5. Products derived from this software may not be called "Apache",
- *    "Apache BCEL", nor may "Apache" appear in their name, without
- *    prior written permission of the Apache Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED.  IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * ====================================================================
- *
- * This software consists of voluntary contributions made by many
- * individuals on behalf of the Apache Software Foundation.  For more
- * information on the Apache Software Foundation, please see
- * .
- */
-
-%% This notice is provided with respect to Regexp, Regular Expression Package, which may be included with this software: 
-
-The Apache Software License, Version 1.1
-Copyright (c) 2001 The Apache Software Foundation.  All rights
-reserved.
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in
-the documentation and/or other materials provided with the
-distribution.
-
-3. The end-user documentation included with the redistribution,
-if any, must include the following acknowledgment:
-"This product includes software developed by the
-Apache Software Foundation (http://www.apache.org/)."
-Alternately, this acknowledgment may appear in the software itself,
-if and wherever such third-party acknowledgments normally appear.
-
-4. The names "Apache" and "Apache Software Foundation" and 
-"Apache Turbine" must not be used to endorse or promote products 
-derived from this software without prior written permission. For 
-written permission, please contact apache@apache.org.
-
-5. Products derived from this software may not be called "Apache",
-"Apache Turbine", nor may "Apache" appear in their name, without 
-prior written permission of the Apache Software Foundation.
-
-THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED.  IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
-ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
-USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
-                                                             
-====================================================================
-This software consists of voluntary contributions made by many
-individuals on behalf of the Apache Software Foundation.  For more
-information on the Apache Software Foundation, please see
-                                                             
-http://www.apache.org.
-
-%% This notice is provided with respect to CUP Parser Generator for Java, which may be included with this software: 
-
-CUP Parser Generator Copyright Notice, License, and Disclaimer
-
-Copyright 1996-1999 by Scott Hudson, Frank Flannery, C. Scott Ananian 
-Permission to use, copy, modify, and distribute this software and its
-documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies and that both the copyright notice and this permission notice and warranty disclaimer appear in
-supporting documentation, and that the names of the authors or their employers not be used in advertising or publicity pertaining to distribution of
-the software without specific, written prior permission.
-
-The authors and their employers disclaim all warranties with regard to this software, including all implied warranties of merchantability and
-fitness. In no event shall the authors or their employers be liable for any special, indirect or consequential damages or any damages whatsoever
-resulting from loss of use, data or profits, whether in an action of contract, negligence or other tortious action, arising out of or in connection with the use or performance of this software.
-
-%% This notice is provided with respect to SAX v. 2.0.1, which may be included with this software: 
-
-Copyright Status
-
-                         SAX is free!
-
-                         In fact, it's not possible to own a license to SAX, since it's been placed in the public
-                         domain. 
-
-                         No Warranty
-
-                         Because SAX is released to the public domain, there is no warranty for the design or for
-                         the software implementation, to the extent permitted by applicable law. Except when
-                         otherwise stated in writing the copyright holders and/or other parties provide SAX "as is"
-                         without warranty of any kind, either expressed or implied, including, but not limited to, the
-                         implied warranties of merchantability and fitness for a particular purpose. The entire risk as
-                         to the quality and performance of SAX is with you. Should SAX prove defective, you
-                         assume the cost of all necessary servicing, repair or correction.
-
-                         In no event unless required by applicable law or agreed to in writing will any copyright
-                         holder, or any other party who may modify and/or redistribute SAX, be liable to you for
-                         damages, including any general, special, incidental or consequential damages arising out of
-                         the use or inability to use SAX (including but not limited to loss of data or data being
-                         rendered inaccurate or losses sustained by you or third parties or a failure of the SAX to
-                         operate with any other programs), even if such holder or other party has been advised of
-                         the possibility of such damages.
-
-                         Copyright Disclaimers 
-
-                         This page includes statements to that effect by David Megginson, who would have been
-                         able to claim copyright for the original work. 
-                         SAX 1.0 
-
-                         Version 1.0 of the Simple API for XML (SAX), created collectively by the membership of
-                         the XML-DEV mailing list, is hereby released into the public domain.
-
-                         No one owns SAX: you may use it freely in both commercial and non-commercial
-                         applications, bundle it with your software distribution, include it on a CD-ROM, list the
-                         source code in a book, mirror the documentation at your own web site, or use it in any
-                         other way you see fit.
-
-                         David Megginson, sax@megginson.com
-                         1998-05-11
-
-                         SAX 2.0 
-
-                         I hereby abandon any property rights to SAX 2.0 (the Simple API for XML), and release
-                         all of the SAX 2.0 source code, compiled code, and documentation contained in this
-                         distribution into the Public Domain. SAX comes with NO WARRANTY or guarantee of
-                         fitness for any purpose.
-
-                         David Megginson, david@megginson.com
-                         2000-05-05
-
-%% This notice is provided with respect to Cryptix, which may be included with this software: 
-
-Cryptix General License
-
-Copyright © 1995-2003 The Cryptix Foundation Limited. All rights reserved.
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-   1. Redistributions of source code must retain the copyright notice, this list of conditions and the following disclaimer.
-   2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-THIS SOFTWARE IS PROVIDED BY THE CRYPTIX FOUNDATION LIMITED AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CRYPTIX FOUNDATION LIMITED OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-%% This notice is provided with respect to X Window System, which may be included with this software: 
-
-Copyright © The Open Group
-
-Permission to use, copy, modify, distribute, and sell this software and its documentation for any purpose is hereby granted without fee, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation.
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-Except as contained in this notice, the name of The Open Group shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization from The Open Group.
-
-Portions also covered by other licenses as noted in the above URL.
-
-%% This notice is provided with respect to Retroweaver, which may be included with this software: 
-
-Copyright (c) February 2004, Toby Reyelts
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
-Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
-Neither the name of Toby Reyelts nor the names of his contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-%% This notice is provided with respect to stripper, which may be included with this software: 
-
-Stripper : debug information stripper
- Copyright (c) 2003 Kohsuke Kawaguchi
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. Neither the name of the copyright holders nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-%% This notice is provided with respect to libpng official PNG reference library, which may be included with this software: 
-
-This copy of the libpng notices is provided for your convenience.  In case of any discrepancy between this copy and the notices in the file png.h that is included in the libpng distribution, the latter shall prevail.
-
-COPYRIGHT NOTICE, DISCLAIMER, and LICENSE:
-
-If you modify libpng you may insert additional notices immediately following this sentence.
-
-libpng version 1.2.6, December 3, 2004, is
-Copyright (c) 2004 Glenn Randers-Pehrson, and is
-distributed according to the same disclaimer and license as libpng-1.2.5 with the following individual added to the list of Contributing Authors
-   Cosmin Truta
-
-libpng versions 1.0.7, July 1, 2000, through 1.2.5 - October 3, 2002, are Copyright (c) 2000-2002 Glenn Randers-Pehrson, and are
-distributed according to the same disclaimer and license as libpng-1.0.6 with the following individuals added to the list of Contributing Authors
-   Simon-Pierre Cadieux
-   Eric S. Raymond
-   Gilles Vollant
-
-and with the following additions to the disclaimer:
-
-   There is no warranty against interference with your enjoyment of the   library or against infringement.  There is no warranty that our
-   efforts or the library will fulfill any of your particular purposes   or needs.  This library is provided with all faults, and the entire   risk of satisfactory quality, performance, accuracy, and effort is with   the user.
-
-libpng versions 0.97, January 1998, through 1.0.6, March 20, 2000, are Copyright (c) 1998, 1999 Glenn Randers-Pehrson, and are
-distributed according to the same disclaimer and license as libpng-0.96, with the following individuals added to the list of Contributing Authors:
-   Tom Lane
-   Glenn Randers-Pehrson
-   Willem van Schaik
-
-libpng versions 0.89, June 1996, through 0.96, May 1997, are
-Copyright (c) 1996, 1997 Andreas Dilger
-Distributed according to the same disclaimer and license as libpng-0.88, with the following individuals added to the list of Contributing Authors:
-   John Bowler
-   Kevin Bracey
-   Sam Bushell
-   Magnus Holmgren
-   Greg Roelofs
-   Tom Tanner
-
-libpng versions 0.5, May 1995, through 0.88, January 1996, are
-Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.
-
-For the purposes of this copyright and license, "Contributing Authors" is defined as the following set of individuals:
-
-   Andreas Dilger
-   Dave Martindale
-   Guy Eric Schalnat
-   Paul Schmidt
-   Tim Wegner
-
-The PNG Reference Library is supplied "AS IS".  The Contributing Authors and Group 42, Inc. disclaim all warranties, expressed or implied,
-including, without limitation, the warranties of merchantability and of fitness for any purpose.  The Contributing Authors and Group 42, Inc.
-assume no liability for direct, indirect, incidental, special, exemplary, or consequential damages, which may result from the use of the PNG
-Reference Library, even if advised of the possibility of such damage.
-
-Permission is hereby granted to use, copy, modify, and distribute this source code, or portions hereof, for any purpose, without fee, subject to the following restrictions:
-
-1. The origin of this source code must not be misrepresented.
-
-2. Altered versions must be plainly marked as such and must not
-   be misrepresented as being the original source.
-
-3. This Copyright notice may not be removed or altered from any
-   source or altered source distribution.
-
-The Contributing Authors and Group 42, Inc. specifically permit, without fee, and encourage the use of this source code as a component to
-supporting the PNG file format in commercial products.  If you use this source code in a product, acknowledgment is not required but would be
-appreciated.
-
-
-A "png_get_copyright" function is available, for convenient use in "about"boxes and the like:
-
-   printf("%s",png_get_copyright(NULL));
-
-Also, the PNG logo (in PNG format, of course) is supplied in the
-files "pngbar.png" and "pngbar.jpg (88x31) and "pngnow.png" (98x31).
-
-Libpng is OSI Certified Open Source Software.  OSI Certified Open Source is a certification mark of the Open Source Initiative.
-
-Glenn Randers-Pehrson
-glennrp at users.sourceforge.net
-December 3, 2004
-
-%% This notice is provided with respect to Libungif - An uncompressed GIF library, which may be included with this software: 
-
-The GIFLIB distribution is Copyright (c) 1997  Eric S. Raymond
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-%% This notice is provided with respect to XML Resolver library, Xalan J2, and StAX API, which may be included with this software: 
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to the following which is 
+included with JRE 7, JDK 7, and OpenJDK 7, except where noted:
+
+  Apache Derby 10.8.1.2        [included with JDK 7 only]
+  Apache Jakarta BCEL 5.2 
+  Apache Jakarta Regexp 1.4 
+  Apache Santuario XMLSec-Java 1.4.2
+  Apache Xalan-Java 2.7.1 
+  Apache Xerces2 Java 2.10.0 
+  Apache XML Resolver 1.1 
+
+
+--- begin of LICENSE ---
 
                                  Apache License
                            Version 2.0, January 2004
@@ -782,17 +3185,26 @@
 
    1. Definitions.
 
-      "License" shall mean the terms and conditions for use, reproduction,      and distribution as defined by Sections 1 through 9 of this document.
-      "Licensor" shall mean the copyright owner or entity authorized by      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all      other entities that control, are controlled by, or are under common      control with that entity. For the purposes of this definition,
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
       "control" means (i) the power, direct or indirect, to cause the
       direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the      outstanding shares, or (iii) beneficial ownership of such entity.
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
       "You" (or "Your") shall mean an individual or Legal Entity
       exercising permissions granted by this License.
 
-      "Source" form shall mean the preferred form for making modifications,      including but not limited to software source code, documentation      source, and configuration files.
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
 
       "Object" form shall mean any form resulting from mechanical
       transformation or translation of a Source form, including but
@@ -800,25 +3212,56 @@
       and conversions to other media types.
 
       "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a      copyright notice that is included in or attached to the work
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
       (an example is provided in the Appendix below).
 
-      "Derivative Works" shall mean any work, whether in Source or Object      form, that is based on (or derived from) the Work and for which the      editorial revisions, annotations, elaborations, or other modifications      represent, as a whole, an original work of authorship. For the purposes      of this License, Derivative Works shall not include works that remain      separable from, or merely link (or bind by name) to the interfaces of,      the Work and Derivative Works thereof.
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
 
       "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner      or by an individual or Legal Entity authorized to submit on behalf of      the copyright owner. For the purposes of this definition, "submitted"      means any form of electronic, verbal, or written communication sent      to the Licensor or its representatives, including but not limited to      communication on electronic mailing lists, source code control systems,      and issue tracking systems that are managed by, or on behalf of, the      Licensor for the purpose of discussing and improving the Work, but      excluding communication that is conspicuously marked or otherwise      designated in writing by the copyright owner as "Not a Contribution."
-      "Contributor" shall mean Licensor and any individual or Legal Entity      on behalf of whom a Contribution has been received by Licensor and      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of      this License, each Contributor hereby grants to You a perpetual,      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
       copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the      Work and such Derivative Works in Source or Object form.
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
 
    3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,      use, offer to sell, sell, import, and otherwise transfer the Work,      where such license applies only to those patent claims licensable      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)      with the Work to which such Contribution(s) was submitted. If You      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work      or a Contribution incorporated within the Work constitutes direct      or contributory patent infringement, then any patent licenses
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
       granted to You under this License for that Work shall terminate
       as of the date such litigation is filed.
 
@@ -830,7 +3273,8 @@
       (a) You must give any other recipients of the Work or
           Derivative Works a copy of this License; and
 
-      (b) You must cause any modified files to carry prominent notices          stating that You changed the files; and
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
 
       (c) You must retain, in the Source form of any Derivative Works
           that You distribute, all copyright, patent, trademark, and
@@ -839,35 +3283,74 @@
           the Derivative Works; and
 
       (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must          include a readable copy of the attribution notices contained          within such NOTICE file, excluding those notices that do not          pertain to any part of the Derivative Works, in at least one          of the following places: within a NOTICE text file distributed          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents          of the NOTICE file are for informational purposes only and
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
           do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside          or as an addendum to the NOTICE text from the Work, provided          that such additional attribution notices cannot be construed          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and      may provide additional or different license terms and conditions      for use, reproduction, or distribution of Your modifications, or      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,      any Contribution intentionally submitted for inclusion in the Work      by You to the Licensor shall be under the terms and conditions of      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify      the terms of any separate license agreement you may have executed      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade      names, trademarks, service marks, or product names of the Licensor,      except as required for reasonable and customary use in describing the      origin of the Work and reproducing the content of the NOTICE file.
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
    7. Disclaimer of Warranty. Unless required by applicable law or
       agreed to in writing, Licensor provides the Work (and each
       Contributor provides its Contributions) on an "AS IS" BASIS,
       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the      appropriateness of using or redistributing the Work and assume any      risks associated with Your exercise of permissions under this License.
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
    8. Limitation of Liability. In no event and under no legal theory,
       whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly      negligent acts) or agreed to in writing, shall any Contributor be      liable to You for damages, including any direct, indirect, special,      incidental, or consequential damages of any character arising as a      result of this License or out of the use or inability to use the      Work (including but not limited to damages for loss of goodwill,      work stoppage, computer failure or malfunction, or any and all
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
       other commercial damages or losses), even if such Contributor
       has been advised of the possibility of such damages.
 
-   9. Accepting Warranty or Additional Liability. While redistributing      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,      or other liability obligations and/or rights consistent with this      License. However, in accepting such obligations, You may act only      on Your own behalf and on Your sole responsibility, not on behalf      of any other Contributor, and only if You agree to indemnify,
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
       defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason      of your accepting any such warranty or additional liability.
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
 
    END OF TERMS AND CONDITIONS
 
@@ -878,7 +3361,8 @@
       replaced with your own identifying information. (Don't include
       the brackets!)  The text should be enclosed in the appropriate
       comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the      same "printed page" as the copyright notice for easier
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
    Copyright [yyyy] [name of copyright owner]
@@ -889,728 +3373,13 @@
 
        http://www.apache.org/licenses/LICENSE-2.0
 
-   Unless required by applicable law or agreed to in writing, software   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   See the License for the specific language governing permissions and   limitations under the License.
-
-%% Some Portions licensed from IBM are available at: 
-http://www.ibm.com/software/globalization/icu/
-
-%% This notice is provided with respect to ICU4J, ICU 1.8.1 and later, which may be included with this software: 
-
-ICU License - ICU 1.8.1 and later COPYRIGHT AND PERMISSION NOTICE Copyright (c)
-1995-2003 International Business Machines Corporation and others All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, provided that the above copyright notice(s) and this permission notice appear in all copies of the Software and that both the above copyright notice(s) and this permission notice appear in supporting documentation. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization of the copyright holder.
-
-%% This notice is provided with respect to Jing, which may be included with this software: 
-
-Jing Copying Conditions
-
-Copyright (c) 2001-2003 Thai Open Source Software Center Ltd
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-    * Neither the name of the Thai Open Source Software Center Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-%% This notice is provided with respect to RELAX NG Object Model/Parser, which may be included with this software: 
-
-
-The MIT License
-
-Copyright (c)  
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-%% This notice is provided with respect to XFree86-VidMode Extension, which may be included with this software: 
-
-Version 1.1 of XFree86 Project Licence.
-
-    Copyright (C) 1994-2004 The XFree86 Project, Inc.    All rights reserved.
-
-    Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-       1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer.
-       2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution, and in the same place and form as other copyright, license and disclaimer information.
-       3. The end-user documentation included with the redistribution, if any, must include the following acknowledgment: "This product includes software developed by The XFree86 Project, Inc (http://www.xfree86.org/) and its contributors", in the same place and form as other third-party acknowledgments. Alternately, this acknowledgment may appear in the software itself, in the same form and location as other such third-party acknowledgments.
-       4. Except as contained in this notice, the name of The XFree86 Project, Inc shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization from The XFree86 Project, Inc.
-
-    THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE XFREE86 PROJECT, INC OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-%% This notice is provided with respect to XML Security, which may be included with this software: 
-
-  The Apache Software License, Version 1.1
-
-  Copyright (C) 2002 The Apache Software Foundation. All rights reserved.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met: 1. Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimer.
-  2. Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimer in the
-  documentation and/or other materials provided with the distribution.
-  3. The end-user documentation included with the redistribution, if any,
-  must include the following acknowledgment: "This product includes
-  software developed by the Apache Software Foundation
-  (http://www.apache.org/)." Alternately, this acknowledgment may appear
-  in the software itself, if and wherever such third-party
-  acknowledgments normally appear. 4. The names "Apache Forrest" and
-  "Apache Software Foundation" must not be used to endorse or promote
-  products derived from this software without prior written permission.
-  For written permission, please contact apache@apache.org. 5. Products
-  derived from this software may not be called "Apache", nor may "Apache"
-  appear in their name, without prior written permission of the Apache
-  Software Foundation. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY
-  EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE
-  FOUNDATION OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-  DAMAGE. This software consists of voluntary contributions made by many
-  individuals on behalf of the Apache Software Foundation. For more
-  information on the Apache Software Foundation, please see .
-
-%% This notice is provided with respect to Independent JPEG Group's software (libjpeg), which may be included with this software: 
-
-In plain English:
-
-1. We don't promise that this software works.  (But if you find any bugs,
-   please let us know!)
-2. You can use this software for whatever you want.  You don't have to pay us.
-3. You may not pretend that you wrote this software.  If you use it in a program, you must acknowledge somewhere in your documentation that you've used the IJG code.
-
-In legalese:
-
-The authors make NO WARRANTY or representation, either express or implied, with respect to this software, its quality, accuracy, merchantability, or fitness for a particular purpose.  This software is provided "AS IS", and you, its user, assume the entire risk as to its quality and accuracy.
-
-This software is copyright (C) 1991-1998, Thomas G. Lane.
-All Rights Reserved except as specified below.
-
-Permission is hereby granted to use, copy, modify, and distribute this software (or portions thereof) for any purpose, without fee, subject to these conditions:
-
-(1) If any part of the source code for this software is distributed, then this
-README file must be included, with this copyright and no-warranty notice unaltered; and any additions, deletions, or changes to the original files must be clearly indicated in accompanying documentation.
-
-(2) If only executable code is distributed, then the accompanying documentation must state that "this software is based in part on the work of the Independent JPEG Group".
-
-(3) Permission for use of this software is granted only if the user accepts full responsibility for any undesirable consequences; the authors accept NO LIABILITY for damages of any kind.
-
-These conditions apply to any software derived from or based on the IJG code, not just to the unmodified library.  If you use our work, you ought to acknowledge us.
-
-Permission is NOT granted for the use of any IJG author's name or company name in advertising or publicity relating to this software or products derived from it.  This software may be referred to only as "the Independent JPEG Group's software".
-
-We specifically permit and encourage the use of this software as the basis of commercial products, provided that all warranty or liability claims are assumed by the product vendor.
-
-ansi2knr.c is included in this distribution by permission of L. Peter Deutsch, sole proprietor of its copyright holder, Aladdin Enterprises of Menlo Park, CA. ansi2knr.c is NOT covered by the above copyright and conditions, but instead by the usual distribution terms of the Free Software Foundation; principally, that you must include source code if you redistribute it.  (See the file ansi2knr.c for full details.)  However, since ansi2knr.c is not needed as part of any program generated from the IJG code, this does not limit you more than the foregoing paragraphs do.
-
-The Unix configuration script "configure" was produced with GNU Autoconf. It is copyright by the Free Software Foundation but is freely distributable. The same holds for its supporting scripts (config.guess, config.sub, ltconfig, ltmain.sh).  Another support script, install-sh, is copyright by M.I.T. but is also freely distributable.
-
-It appears that the arithmetic coding option of the JPEG spec is covered by patents owned by IBM, AT&T, and Mitsubishi.  Hence arithmetic coding cannot legally be used without obtaining one or more licenses.  For this reason, support for arithmetic coding has been removed from the free JPEG software. (Since arithmetic coding provides only a marginal gain over the unpatented Huffman mode, it is unlikely that very many implementations will support it.) So far as we are aware, there are no patent restrictions on the remaining code.
-
-The IJG distribution formerly included code to read and write GIF files. To avoid entanglement with the Unisys LZW patent, GIF reading support has been removed altogether, and the GIF writer has been simplified to produce "uncompressed GIFs".  This technique does not use the LZW algorithm; the resulting GIF files are larger than usual, but are readable by all standard GIF decoders.
-
-We are required to state that
-    "The Graphics Interchange Format(c) is the Copyright property of
-    CompuServe Incorporated.  GIF(sm) is a Service Mark property of
-    CompuServe Incorporated."
-
-%% This notice is provided with respect to X Resize and Rotate (Xrandr) Extension, which may be included with this software: 
-2. XFree86 License
-
-XFree86 code without an explicit copyright is covered by the following
-copyright/license:
-
-Copyright (C) 1994-2003 The XFree86 Project, Inc. All Rights Reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE XFREE86
-PROJECT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-Except as contained in this notice, the name of the XFree86 Project shall not be
-used in advertising or otherwise to promote the sale, use or other dealings in
-this Software without prior written authorization from the XFree86 Project.
-
-%% This notice is provided with respect to fontconfig, which may be included with this software: 
-Id: COPYING,v 1.3 2003/04/04 20:17:40 keithp Exp $
-Copyright 2001,2003 Keith Packard
-
-Permission to use, copy, modify, distribute, and sell this software and its
-documentation for any purpose is hereby granted without fee, provided that
-the above copyright notice appear in all copies and that both that
-copyright notice and this permission notice appear in supporting
-documentation, and that the name of Keith Packard not be used in
-advertising or publicity pertaining to distribution of the software without
-specific, written prior permission.  Keith Packard makes no
-representations about the suitability of this software for any purpose.  It
-is provided "as is" without express or implied warranty.
-
-KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
-EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
-DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-%% This notice is provided with respect to XFree86, which may be included with this software: 
-Copyright (C) 1994-2002 The XFree86 Project, Inc. All Rights Reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated
-documentation files (the "Software"), to deal in the Software without
-restriction, including without limitation
-the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to the
-following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the
-Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT
-NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE XFREE86 PROJECT BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-Except as contained in this notice, the name of the XFree86 Project shall not be
-used in advertising or otherwise
-to promote the sale, use or other dealings in this Software without prior
-written authorization from the XFree86
-Project.
-%% This notice is provided with respect to Fast Infoset, which may be included with this software: 
-* Fast Infoset ver. 0.1 software ("Software") 
-* 
-* Copyright (c) 2004, 2005, Oracle and/or its affiliates. All rights reserved.
-* 
-* Software is licensed under the Apache License, Version 2.0 (the "License"); 
-* you may not use this file except in compliance with the License. You may 
-* obtain a copy of the License at: 
- * 
- * http://www.apache.org/licenses/LICENSE-2.0 
- * 
- * Unless required by applicable law or agreed to in writing, software 
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 
-* License for the specific language governing permissions and limitations. 
-* 
-* Oracle supports and benefits from the global community of open source 
-* developers, and thanks the community for its important contributions and 
-* open standards-based technology, which Oracle has adopted into many of its 
-* products. 
-* 
-* Please note that portions of Software may be provided with notices and 
-* open source licenses from such communities and third parties that govern the 
-* use of those portions, and any licenses granted hereunder do not alter any 
-* rights and obligations you may have under such open source licenses, 
-* however, the disclaimer of warranty and limitation of liability provisions 
-* in this License will apply to all Software in this distribution. 
-* 
-* You acknowledge that the Software is not designed, licensed or intended 
-* for use in the design, construction, operation or maintenance of any nuclear 
-* facility. 
-* 
-* Apache License 
-* Version 2.0, January 2004 
-* http://www.apache.org/licenses/ 
-* 
-*/ 
-/* 
-* ==================================================================== 
-* 
-* This code is subject to the freebxml License, Version 1.1 
-* 
-* Copyright (c) 2001 - 2005 freebxml.org. All rights reserved. 
-* 
-* $Header: /cvs/fi/FastInfoset/src/com/sun/xml/internal/fastinfoset/AbstractResourceBundle.java,v 1.2 
-*  ==================================================================== 
-*/ 
-%% This notice is provided with respect to Kerberos, which may be included with this software: 
-
-/* 
- * Copyright (C) 1998 by the FundsXpress, INC. 
- * 
- * All rights reserved. 
- * 
- * Export of this software from the United States of America may require 
- * a specific license from the United States Government.  It is the 
- * responsibility of any person or organization contemplating export to 
- * obtain such a license before exporting. 
- * 
- * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and 
- * distribute this software and its documentation for any purpose and 
- * without fee is hereby granted, provided that the above copyright 
- * notice appear in all copies and that both that copyright notice and 
- * this permission notice appear in supporting documentation, and that 
- * the name of FundsXpress. not be used in advertising or publicity pertaining 
- * to distribution of the software without specific, written prior 
- * permission. FundsXpress makes no representations about the suitability of 
- * this software for any purpose. It is provided "as is" without express 
- * or implied warranty. 
- * 
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
- */ 
-
-%% This notice is provided with respect to Unicode's CLDR data repository, which may be included with this software: 
-
- Unicode Copyright
-
-    For the general privacy policy governing access to this site, see the 
-Unicode Privacy Policy. For trademark usage, see the Unicode Consortium
-Trademarks and Logo Policy.
-    Notice to End User: Terms of Use
-    Carefully read the following legal agreement ("Agreement"). Use or copying
-of the software and/or codes provided with this agreement (The "Software")
-constitutes your acceptance of these terms
-
-       1. Unicode Copyright.
-             1. Copyright © 1991-2005 Unicode, Inc. All rights reserved.
-             2. Certain documents and files on this website contain a legend
-indicating that "Modification is permitted." Any person is hereby authorized,
-without fee, to modify such documents and files to create derivative works
-conforming to the Unicode® Standard, subject to Terms and Conditions herein.
-             3. Any person is hereby authorized, without fee, to view, use,
-reproduce, and distribute all documents and files solely for informational
-purposes in the creation of products supporting the Unicode Standard, subject to
-the Terms and Conditions herein.
-             4. Further specifications of rights and restrictions pertaining to
-the use of the particular set of data files known as the "Unicode Character
-Database" can be found in Exhibit 1.
-             5. Further specifications of rights and restrictions pertaining to
-the use of the particular set of files that constitute the online edition of The
-Unicode Standard, Version 4.0, may be found in V4.0 online edition.
-             6. No license is granted to "mirror" the Unicode website where a
-fee is charged for access to the "mirror" site.
-             7. Modification is not permitted with respect to this document. All
-copies of this document must be verbatim.
-       2. Restricted Rights Legend. Any technical data or software which is
-licensed to the United States of America, its agencies and/or instrumentalities
-under this Agreement is commercial technical data or commercial computer
-software developed exclusively at private expense as defined in FAR 2.101, or
-DFARS 252.227-7014 (June 1995), as applicable. For technical data, use,
-duplication, or disclosure by the Government is subject to restrictions as set
-forth in DFARS 202.227-7015 Technical Data, Commercial and Items (Nov 1995) and
-this Agreement. For Software, in accordance with FAR 12-212 or DFARS 227-7202,
-as applicable, use, duplication or disclosure by the Government is subject to
-the restrictions set forth in this Agreement.
-       3. Warranties and Disclaimers.
-             1. This publication and/or website may include technical or
-typographical errors or other inaccuracies . Changes are periodically added to
-the information herein; these changes will be incorporated in new editions of
-the publication and/or website. Unicode may make improvements and/or changes in
-the product(s) and/or program(s) described in this publication and/or website at
-any time.
-             2. If this file has been purchased on magnetic or optical media
-from Unicode, Inc. the sole and exclusive remedy for any claim will be exchange
-of the defective media within ninety (90) days of original purchase.
-             3. EXCEPT AS PROVIDED IN SECTION C.2, THIS PUBLICATION AND/OR
-SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND EITHER EXPRESS,
-IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. UNICODE
-AND ITS LICENSORS ASSUME NO RESPONSIBILITY FOR ERRORS OR OMISSIONS IN THIS
-PUBLICATION AND/OR SOFTWARE OR OTHER DOCUMENTS WHICH ARE REFERENCED BY OR LINKED
-TO THIS PUBLICATION OR THE UNICODE WEBSITE.
-       4. Waiver of Damages. In no event shall Unicode or its licensors be
-liable for any special, incidental, indirect or consequential damages of any
-kind, or any damages whatsoever, whether or not Unicode was advised of the
-possibility of the damage, including, without limitation, those resulting from
-the following: loss of use, data or profits, in connection with the use,
-modification or distribution of this information or its derivatives.
-       5. Trademarks.
-             1. Unicode and the Unicode logo are registered trademarks of
-Unicode, Inc. 
-             2. This site contains product names and corporate names of other
-companies. All product names and company names and logos mentioned herein are
-the trademarks or registered trademarks of their respective owners. Other
-products and corporate names mentioned herein which are trademarks of a third
-party are used only for explanation and for the owners' benefit and with no
-intent to infringe.
-             3. Use of third party products or information referred to herein is
-at the user's risk.
-       6. Miscellaneous.
-             1. Jurisdiction and Venue. This server is operated from a location
-in the State of California, United States of America. Unicode makes no
-representation that the materials are appropriate for use in other locations. If
-you access this server from other locations, you are responsible for compliance
-with local laws. This Agreement, all use of this site and any claims and damages
-resulting from use of this site are governed solely by the laws of the State of
-California without regard to any principles which would apply the laws of a
-different jurisdiction. The user agrees that any disputes regarding this site
-shall be resolved solely in the courts located in Santa Clara County,
-California. The user agrees said courts have personal jurisdiction and agree to
-waive any right to transfer the dispute to any other forum.
-             2. Modification by Unicode. Unicode shall have the right to modify
-this Agreement at any time by posting it to this site. The user may not assign
-any part of this Agreement without Unicode's prior written consent.
-             3. Taxes. The user agrees to pay any taxes arising from access to
-this website or use of the information herein, except for those based on
-Unicode's net income.
-             4. Severability.  If any provision of this Agreement is declared
-invalid or unenforceable, the remaining provisions of this Agreement shall
-remain in effect.
-             5. Entire Agreement. This Agreement constitutes the entire
-agreement between the parties. 
-
-EXHIBIT 1
-UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
-
-    Unicode Data Files include all data files under the directories
-http://www.unicode.org/Public/ and http://www.unicode.org/reports/. Unicode
-Software includes any source code under the directories
-http://www.unicode.org/Public/ and http://www.unicode.org/reports/.
-
-    NOTICE TO USER: Carefully read the following legal agreement. BY
-DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA FILES
-("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT, AND
-AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU
-DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES
-OR SOFTWARE.
-
-    COPYRIGHT AND PERMISSION NOTICE
-
-    Copyright © 1991-2004 Unicode, Inc. All rights reserved. Distributed under
-the Terms of Use in http://www.unicode.org/copyright.html.
-
-    Permission is hereby granted, free of charge, to any person obtaining a copy
-of the Unicode data files and associated documentation (the "Data Files") or
-Unicode software and associated documentation (the "Software") to deal in the
-Data Files or Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, and/or sell copies of
-the Data Files or Software, and to permit persons to whom the Data Files or
-Software are furnished to do so, provided that (a) the above copyright notice(s)
-and this permission notice appear with all copies of the Data Files or Software,
-(b) both the above copyright notice(s) and this permission notice appear in
-associated documentation, and (c) there is clear notice in each modified Data
-File or in the Software as well as in the documentation associated with the Data
-File(s) or Software that the data or software has been modified.
-
-    THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
-KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD
-PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
-NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
-DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
-OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA FILES OR SOFTWARE.
-
-    Except as contained in this notice, the name of a copyright holder shall not
-be used in advertising or otherwise to promote the sale, use or other dealings
-in these Data Files or Software without prior written authorization of the
-copyright holder.
-
-    Unicode and the Unicode logo are trademarks of Unicode, Inc., and may be
-registered in some jurisdictions. All other trademarks and registered trademarks
-mentioned herein are the property of their respective owners.
-%% This notice is provided with respect to RSA PKCS#11 Header Files & Specification, which may be included with this software: 
-
-/* 
- * Copyright (C) 1998 by the FundsXpress, INC. 
- * 
- * All rights reserved. 
- * 
- * Export of this software from the United States of America may require 
- * a specific license from the United States Government.  It is the 
- * responsibility of any person or organization contemplating export to 
- * obtain such a license before exporting. 
- * 
- * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and 
- * distribute this software and its documentation for any purpose and 
- * without fee is hereby granted, provided that the above copyright 
- * notice appear in all copies and that both that copyright notice and 
- * this permission notice appear in supporting documentation, and that 
- * the name of FundsXpress. not be used in advertising or publicity pertaining 
- * to distribution of the software without specific, written prior 
- * permission.  FundsXpress makes no representations about the suitability of 
- * this software for any purpose.  It is provided "as is" without express 
- * or implied warranty. 
- * 
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
- */ 
-
-%% This notice is provided with respect to certain files/code which may included in the implementation of AWT within the software: 
-
-****************************************************** 
-BEGIN  src/solaris/native/sun/awt/HPkeysym.h 
-Copyright 1987, 1998  The Open Group 
-
-All Rights Reserved. 
-
-The above copyright notice and this permission notice shall be included 
-in all copies or substantial portions of the Software. 
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR 
-OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 
-OTHER DEALINGS IN THE SOFTWARE. 
-
-Except as contained in this notice, the name of The Open Group shall 
-not be used in advertising or otherwise to promote the sale, use or 
-other dealings in this Software without prior written authorization 
-from The Open Group. 
-
-Copyright 1987 by Digital Equipment Corporation, Maynard, Massachusetts, 
-
-All Rights Reserved 
-
-Permission to use, copy, modify, and distribute this software and its 
-documentation for any purpose and without fee is hereby granted, 
-provided that the above copyright notice appear in all copies and that 
-both that copyright notice and this permission notice appear in 
-supporting documentation, and that the names of Hewlett Packard 
-or Digital not be 
-used in advertising or publicity pertaining to distribution of the 
-software without specific, written prior permission. 
-
-DIGITAL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 
-ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL 
-DIGITAL BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 
-ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, 
-WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, 
-ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS 
-SOFTWARE. 
-
-HEWLETT-PACKARD MAKES NO WARRANTY OF ANY KIND WITH REGARD 
-TO THIS SOFWARE, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
-PURPOSE.  Hewlett-Packard shall not be liable for errors 
-contained herein or direct, indirect, special, incidental or 
-consequential damages in connection with the furnishing, 
-performance, or use of this material. 
-
-END  src/solaris/native/sun/awt/HPkeysym.h 
-****************************************************** 
-****************************************************** 
-BEGIN src/solaris/native/sun/awt/Xrandr.h 
-/* 
- * $XFree86: xc/lib/Xrandr/Xrandr.h,v 1.9 2002/09/29 23:39:44 keithp Exp $ 
- * 
- * Copyright © 2000 Compaq Computer Corporation, Inc. 
- * Copyright © 2002 Hewlett-Packard Company, Inc. 
- * 
- * Permission to use, copy, modify, distribute, and sell this software and its 
- * documentation for any purpose is hereby granted without fee, provided that 
- * the above copyright notice appear in all copies and that both that 
- * copyright notice and this permission notice appear in supporting 
- * documentation, and that the name of Compaq not be used in advertising or 
- * publicity pertaining to distribution of the software without specific, 
- * written prior permission.  HP makes no representations about the 
- * suitability of this software for any purpose.  It is provided "as is" 
- * without express or implied warranty. 
- * 
- * HP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL COMPAQ 
- * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION 
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * 
- * Author:  Jim Gettys, HP Labs, HP. 
- */ 
-
-
-END src/solaris/native/sun/awt/Xrandr.h 
-****************************************************** 
-BEGIN src/solaris/native/sun/awt/extutil.h 
-/* 
- * $Xorg: extutil.h,v 1.3 2000/08/18 04:05:45 coskrey Exp $ 
- * 
-Copyright 1989, 1998  The Open Group 
-
-All Rights Reserved. 
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE 
-OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 
-AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-
-Except as contained in this notice, the name of The Open Group shall not be 
-used in advertising or otherwise to promote the sale, use or other dealings 
-in this Software without prior written authorization from The Open Group. 
- * 
- * Author:  Jim Fulton, MIT The Open Group 
- * 
- *                     Xlib Extension-Writing Utilities 
- * 
- * This package contains utilities for writing the client API for various 
- * protocol extensions.  THESE INTERFACES ARE NOT PART OF THE X STANDARD AND 
- * ARE SUBJECT TO CHANGE! 
- */ 
-/* $XFree86: xc/include/extensions/extutil.h,v 1.5 2001/01/17 17:53:20 dawes Exp $ */ 
-
-END src/solaris/native/sun/awt/extutil.h 
-****************************************************** 
-BEGIN   src/solaris/native/sun/awt/fontconfig.h 
-/* 
- * $RCSId: xc/lib/fontconfig/fontconfig/fontconfig.h,v 1.30 2002/09/26 00:17:27 
-keithp Exp $ 
- * 
- * Copyright © 2001 Keith Packard 
- * 
- * Permission to use, copy, modify, distribute, and sell this software and its 
- * documentation for any purpose is hereby granted without fee, provided that 
- * the above copyright notice appear in all copies and that both that 
- * copyright notice and this permission notice appear in supporting 
- * documentation, and that the name of Keith Packard not be used in 
- * advertising or publicity pertaining to distribution of the software without 
- * specific, written prior permission.  Keith Packard makes no 
- * representations about the suitability of this software for any purpose.  It 
- * is provided "as is" without express or implied warranty. 
- * 
- * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, 
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO 
- * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR 
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, 
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 
- * PERFORMANCE OF THIS SOFTWARE. 
-
-END   src/solaris/native/sun/awt/fontconfig.h 
-****************************************************** 
-BEGIN src/solaris/native/sun/awt/list.c 
-AND  src/solaris/native/sun/awt/list.h 
-AND src/solaris/native/sun/awt/multiVis.c 
-AND  src/solaris/native/sun/awt/multiVis.h 
-AND  src/solaris/native/sun/awt/wsutils.h 
-
-Copyright (c) 1994 Hewlett-Packard Co. 
-Copyright (c) 1996  X Consortium 
-
-Permission is hereby granted, free of charge, to any person obtaining 
-a copy of this software and associated documentation files (the 
-"Software"), to deal in the Software without restriction, including 
-without limitation the rights to use, copy, modify, merge, publish, 
-distribute, sublicense, and sell copies of the Software, and to 
-permit persons to whom the Software is furnished to do so, subject to 
-the following conditions: 
-
-The above copyright notice and this permission notice shall be included 
-in all copies or substantial portions of the Software. 
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR 
-OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 
-OTHER DEALINGS IN THE SOFTWARE. 
-
-Except as contained in this notice, the name of the X Consortium shall 
-not be used in advertising or otherwise to promote the sale, use or 
-other dealings in this Software without prior written authorization 
-from the X Consortium. 
-
-END src/solaris/native/sun/awt/list.c 
-AND  src/solaris/native/sun/awt/list.h 
-AND src/solaris/native/sun/awt/multiVis.c 
-AND  src/solaris/native/sun/awt/multiVis.h 
-AND  src/solaris/native/sun/awt/wsutils.h 
-
-***************************************************************** 
-BEGIN src/solaris/native/sun/awt/randr.h 
-
- * 
- * Copyright © 2000, Compaq Computer Corporation, 
- * Copyright © 2002, Hewlett Packard, Inc. 
- * 
- * Permission to use, copy, modify, distribute, and sell this software and its 
- * documentation for any purpose is hereby granted without fee, provided that 
- * the above copyright notice appear in all copies and that both that 
- * copyright notice and this permission notice appear in supporting 
- * documentation, and that the name of Compaq or HP not be used in advertising 
- * or publicity pertaining to distribution of the software without specific, 
- * written prior permission.  HP makes no representations about the 
- * suitability of this software for any purpose.  It is provided "as is" 
- * without express or implied warranty. 
- * 
- * HP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL HP 
- * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION 
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * 
- * Author:  Jim Gettys, HP Labs, Hewlett-Packard, Inc. 
-
-END src/solaris/native/sun/awt/randr.h 
-***************************************************** 
-
-BEGIN src/solaris/native/sun/java2d/opengl/J2D_GL/glx.h 
- * Mesa 3-D graphics library 
- * Version:  4.1 
- * 
- * Copyright (C) 1999-2002  Brian Paul   All Rights Reserved. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a 
- * copy of this software and associated documentation files (the "Software"), 
- * to deal in the Software without restriction, including without limitation 
- * the rights to use, copy, modify, merge, publish, distribute, sublicense, 
- * and/or sell copies of the Software, and to permit persons to whom the 
- * Software is furnished to do so, subject to the following conditions: 
- * 
- * The above copyright notice and this permission notice shall be included 
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL 
- * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-
-END src/solaris/native/sun/java2d/opengl/J2D_GL/glx.h 
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
--- a/agent/make/Makefile	Wed Jul 27 17:24:11 2011 -0700
+++ b/agent/make/Makefile	Wed Jul 27 17:32:44 2011 -0700
@@ -257,7 +257,7 @@
 all: filelist
 	@mkdir -p $(OUTPUT_DIR)
 	@echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
-	$(JAVAC) -source 1.4 -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
+	$(JAVAC) -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
 	$(RMIC) -classpath $(OUTPUT_DIR) -d $(OUTPUT_DIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	rm -f $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql/sa.js
 	cp $(SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql
@@ -269,7 +269,7 @@
 allprof: filelist
 	@mkdir -p $(OUTPUT_DIR)
 	@echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
-	$(JAVAC) -source 1.4 -J-Xprof -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
+	$(JAVAC) -J-Xprof -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
 	$(RMIC) -classpath $(OUTPUT_DIR) -d $(OUTPUT_DIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	rm -f $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql/sa.js
 	cp $(SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql
--- a/agent/src/os/solaris/proc/libproc.h	Wed Jul 27 17:24:11 2011 -0700
+++ b/agent/src/os/solaris/proc/libproc.h	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -420,7 +420,22 @@
 /*
  * Stack frame iteration interface.
  */
+#ifdef SOLARIS_11_B159_OR_LATER
+/* building on Nevada-B159 or later so define the new callback */
+typedef int proc_stack_f(
+    void *,             /* the cookie given to Pstack_iter() */
+    const prgregset_t,  /* the frame's registers */
+    uint_t,             /* argc for the frame's function */
+    const long *,       /* argv for the frame's function */
+    int,                /* bitwise flags describing the frame (see below) */
+    int);               /* a signal number */
+
+#define PR_SIGNAL_FRAME    1    /* called by a signal handler */
+#define PR_FOUND_SIGNAL    2    /* we found the corresponding signal number */
+#else
+/* building on Nevada-B158 or earlier so define the old callback */
 typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
+#endif
 
 extern int Pstack_iter(struct ps_prochandle *,
     const prgregset_t, proc_stack_f *, void *);
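
The hunk above only changes the callback typedef. For orientation, a minimal, hypothetical caller of the newer six-argument form could look like the sketch below; only proc_stack_f, Pstack_iter, prgregset_t, uint_t and PR_SIGNAL_FRAME come from the Solaris headers, every other name is invented, and this builds only on Solaris against the patched libproc.h with SOLARIS_11_B159_OR_LATER defined.

    #include <stdio.h>
    #include <sys/types.h>      /* uint_t */
    #include <procfs.h>         /* prgregset_t */
    #include "libproc.h"        /* the patched header above */

    /* Count the frames of a stopped process and note signal-handler frames. */
    static int count_frames(void *cookie, const prgregset_t regs, uint_t argc,
                            const long *argv, int frame_flags, int sig) {
      int *frames = (int *) cookie;
      ++*frames;
      if (frame_flags & PR_SIGNAL_FRAME) {
        printf("frame %d was entered from a signal handler (sig = %d)\n", *frames, sig);
      }
      return 0;   /* 0 keeps the walk going, as in the SA callbacks in this changeset */
    }

    /* Usage, given a ps_prochandle 'ph' and initial registers 'regs' obtained
       elsewhere:  int n = 0;  Pstack_iter(ph, regs, count_frames, &n);       */
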
--- a/agent/src/os/solaris/proc/salibproc.h	Wed Jul 27 17:24:11 2011 -0700
+++ b/agent/src/os/solaris/proc/salibproc.h	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,23 @@
 /*
  * Stack frame iteration interface.
  */
+#ifdef SOLARIS_11_B159_OR_LATER
+/* building on Nevada-B159 or later so define the new callback */
+typedef int proc_stack_f(
+    void *,             /* the cookie given to Pstack_iter() */
+    const prgregset_t,  /* the frame's registers */
+    uint_t,             /* argc for the frame's function */
+    const long *,       /* argv for the frame's function */
+    int,                /* bitwise flags describing the frame (see below) */
+    int);               /* a signal number */
+
+#define PR_SIGNAL_FRAME    1    /* called by a signal handler */
+#define PR_FOUND_SIGNAL    2    /* we found the corresponding signal number */
+#else
+/* building on Nevada-B158 or earlier so define the old callback */
 typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
+#endif
+
 extern int Pstack_iter(struct ps_prochandle *,
     const prgregset_t, proc_stack_f *, void *);
 
--- a/agent/src/os/solaris/proc/saproc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/agent/src/os/solaris/proc/saproc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,9 @@
 
 #include "salibproc.h"
 #include "sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal.h"
+#ifndef SOLARIS_11_B159_OR_LATER
+#include <sys/utsname.h>
+#endif
 #include <thread_db.h>
 #include <strings.h>
 #include <limits.h>
@@ -40,8 +43,22 @@
 #define SYMBOL_BUF_SIZE  256
 #define ERR_MSG_SIZE     (PATH_MAX + 256)
 
-// debug mode
+// debug modes
 static int _libsaproc_debug = 0;
+#ifndef SOLARIS_11_B159_OR_LATER
+static bool _Pstack_iter_debug = false;
+
+static void dprintf_2(const char* format,...) {
+  if (_Pstack_iter_debug) {
+    va_list alist;
+
+    va_start(alist, format);
+    fputs("Pstack_iter DEBUG: ", stderr);
+    vfprintf(stderr, format, alist);
+    va_end(alist);
+  }
+}
+#endif // !SOLARIS_11_B159_OR_LATER
 
 static void print_debug(const char* format,...) {
   if (_libsaproc_debug) {
@@ -450,6 +467,7 @@
   return 0;
 }
 
+// Pstack_iter() proc_stack_f callback prior to Nevada-B159
 static int
 fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc, const long *argv) {
   DebuggerWith2Objects* dbgo2 = (DebuggerWith2Objects*) cd;
@@ -472,6 +490,14 @@
   return 0;
 }
 
+// Pstack_iter() proc_stack_f callback in Nevada-B159 or later
+/*ARGSUSED*/
+static int
+wrapper_fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc,
+                         const long *argv, int frame_flags, int sig) {
+  return(fill_cframe_list(cd, regs, argc, argv));
+}
+
 // part of the class sharing workaround
 
 // FIXME: !!HACK ALERT!!
@@ -970,6 +996,11 @@
                    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 }
 
+#ifndef SOLARIS_11_B159_OR_LATER
+// building on Nevada-B158 or earlier so more hoops to jump through
+static bool has_newer_Pstack_iter = false;  // older version by default
+#endif
+
 /*
  * Class:       sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
  * Method:      fillCFrameList0
@@ -997,7 +1028,24 @@
 
   env->ReleaseLongArrayElements(regsArray, ptr, JNI_ABORT);
   CHECK_EXCEPTION_(0);
-  Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs, fill_cframe_list, &dbgo2);
+
+#ifdef SOLARIS_11_B159_OR_LATER
+  // building on Nevada-B159 or later so use the new callback
+  Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
+              wrapper_fill_cframe_list, &dbgo2);
+#else
+  // building on Nevada-B158 or earlier so figure out which callback to use
+
+  if (has_newer_Pstack_iter) {
+    // Since we're building on Nevada-B158 or earlier, we have to
+    // cast wrapper_fill_cframe_list to make the compiler happy.
+    Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
+                (proc_stack_f *)wrapper_fill_cframe_list, &dbgo2);
+  } else {
+    Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
+                fill_cframe_list, &dbgo2);
+  }
+#endif // SOLARIS_11_B159_OR_LATER
   return dbgo2.obj;
 }
 
@@ -1218,6 +1266,102 @@
   return res;
 }
 
+#ifndef SOLARIS_11_B159_OR_LATER
+// Determine if the OS we're running on has the newer version
+// of libproc's Pstack_iter.
+//
+// Set env var PSTACK_ITER_DEBUG=true to debug this logic.
+// Set env var PSTACK_ITER_DEBUG_RELEASE to simulate a 'release' value.
+// Set env var PSTACK_ITER_DEBUG_VERSION to simulate a 'version' value.
+//
+// frankenputer 'uname -r -v': 5.10 Generic_141445-09
+// jurassic 'uname -r -v':     5.11 snv_164
+// lonepeak 'uname -r -v':     5.11 snv_127
+//
+static void set_has_newer_Pstack_iter(JNIEnv *env) {
+  static bool done_set = false;
+
+  if (done_set) {
+    // already set has_newer_Pstack_iter
+    return;
+  }
+
+  struct utsname name;
+  if (uname(&name) == -1) {
+    THROW_NEW_DEBUGGER_EXCEPTION("uname() failed!");
+  }
+  dprintf_2("release='%s'  version='%s'\n", name.release, name.version);
+
+  if (_Pstack_iter_debug) {
+    char *override = getenv("PSTACK_ITER_DEBUG_RELEASE");
+    if (override != NULL) {
+      strncpy(name.release, override, SYS_NMLN - 1);
+      name.release[SYS_NMLN - 2] = '\0';
+      dprintf_2("overriding with release='%s'\n", name.release);
+    }
+    override = getenv("PSTACK_ITER_DEBUG_VERSION");
+    if (override != NULL) {
+      strncpy(name.version, override, SYS_NMLN - 1);
+      name.version[SYS_NMLN - 2] = '\0';
+      dprintf_2("overriding with version='%s'\n", name.version);
+    }
+  }
+
+  // the major number corresponds to the old SunOS major number
+  int major = atoi(name.release);
+  if (major >= 6) {
+    dprintf_2("release is SunOS 6 or later\n");
+    has_newer_Pstack_iter = true;
+    done_set = true;
+    return;
+  }
+  if (major < 5) {
+    dprintf_2("release is SunOS 4 or earlier\n");
+    done_set = true;
+    return;
+  }
+
+  // some SunOS 5.* build so now check for Solaris versions
+  char *dot = strchr(name.release, '.');
+  int minor = 0;
+  if (dot != NULL) {
+    // release is major.minor format
+    *dot = '\0';
+    minor = atoi(dot + 1);
+  }
+
+  if (minor <= 10) {
+    dprintf_2("release is Solaris 10 or earlier\n");
+    done_set = true;
+    return;
+  } else if (minor >= 12) {
+    dprintf_2("release is Solaris 12 or later\n");
+    has_newer_Pstack_iter = true;
+    done_set = true;
+    return;
+  }
+
+  // some Solaris 11 build so now check for internal build numbers
+  if (strncmp(name.version, "snv_", 4) != 0) {
+    dprintf_2("release is Solaris 11 post-GA or later\n");
+    has_newer_Pstack_iter = true;
+    done_set = true;
+    return;
+  }
+
+  // version begins with "snv_" so a pre-GA build of Solaris 11
+  int build = atoi(&name.version[4]);
+  if (build >= 159) {
+    dprintf_2("release is Nevada-B159 or later\n");
+    has_newer_Pstack_iter = true;
+  } else {
+    dprintf_2("release is Nevada-B158 or earlier\n");
+  }
+
+  done_set = true;
+}
+#endif // !SOLARIS_11_B159_OR_LATER
+
 /*
  * Class:       sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
  * Method:      initIDs
@@ -1237,6 +1381,14 @@
   if (libproc_handle == 0)
      THROW_NEW_DEBUGGER_EXCEPTION("can't load libproc.so, if you are using Solaris 5.7 or below, copy libproc.so from 5.8!");
 
+#ifndef SOLARIS_11_B159_OR_LATER
+  _Pstack_iter_debug = getenv("PSTACK_ITER_DEBUG") != NULL;
+
+  set_has_newer_Pstack_iter(env);
+  CHECK_EXCEPTION;
+  dprintf_2("has_newer_Pstack_iter=%d\n", has_newer_Pstack_iter);
+#endif
+
   p_ps_prochandle_ID = env->GetFieldID(clazz, "p_ps_prochandle", "J");
   CHECK_EXCEPTION;
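
To make the release/version rules in set_has_newer_Pstack_iter() above easier to follow, here is a stripped-down, hypothetical restatement of just that decision in plain C++ (no JNI plumbing, debug overrides or caching); the sample inputs are the ones quoted in the comments above.

    #include <cstdlib>
    #include <cstring>

    // Decide from `uname -r` (release) and `uname -v` (version) whether the
    // running libproc provides the newer six-argument Pstack_iter callback.
    static bool has_newer_pstack_iter(const char* release, const char* version) {
      int major = atoi(release);                    // the old SunOS major number
      if (major >= 6) return true;                  // SunOS 6 or later
      if (major < 5)  return false;                 // SunOS 4 or earlier

      const char* dot = strchr(release, '.');
      int minor = (dot != NULL) ? atoi(dot + 1) : 0;
      if (minor <= 10) return false;                // Solaris 10 or earlier
      if (minor >= 12) return true;                 // Solaris 12 or later

      // Some Solaris 11 build: pre-GA builds report "snv_NNN", anything else is post-GA.
      if (strncmp(version, "snv_", 4) != 0) return true;
      return atoi(version + 4) >= 159;              // the new callback appeared in Nevada-B159
    }

    // has_newer_pstack_iter("5.11", "snv_164")           -> true   (jurassic)
    // has_newer_pstack_iter("5.11", "snv_127")           -> false  (lonepeak)
    // has_newer_pstack_iter("5.10", "Generic_141445-09") -> false  (frankenputer)
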
 
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Jul 27 17:24:11 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Jul 27 17:32:44 2011 -0700
@@ -1028,7 +1028,12 @@
                                     if (AddressOps.equal(val, value)) {
                                         if (!printed) {
                                             printed = true;
-                                            blob.printOn(out);
+                                            try {
+                                                blob.printOn(out);
+                                            } catch (Exception e) {
+                                                out.println("Exception printing blob at " + base);
+                                                e.printStackTrace();
+                                            }
                                         }
                                         out.println("found at " + base + "\n");
                                     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/AdapterBlob.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class AdapterBlob extends CodeBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("AdapterBlob");
+
+    // FIXME: add any needed fields
+  }
+
+  public AdapterBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isAdapterBlob() {
+    return true;
+  }
+
+  public String getName() {
+    return "AdapterBlob: " + super.getName();
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Wed Jul 27 17:24:11 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,6 +93,8 @@
   public boolean isUncommonTrapStub()   { return false; }
   public boolean isExceptionStub()      { return false; }
   public boolean isSafepointStub()      { return false; }
+  public boolean isRicochetBlob()       { return false; }
+  public boolean isAdapterBlob()        { return false; }
 
   // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod()
   public boolean isJavaMethod()         { return false; }
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Wed Jul 27 17:24:11 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,6 +57,8 @@
     virtualConstructor.addMapping("BufferBlob", BufferBlob.class);
     virtualConstructor.addMapping("nmethod", NMethod.class);
     virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
+    virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
+    virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
     virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
     virtualConstructor.addMapping("DeoptimizationBlob", DeoptimizationBlob.class);
     if (VM.getVM().isServerCompiler()) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+/** RicochetBlob (currently only used by Compiler 2) */
+
+public class RicochetBlob extends SingletonBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("RicochetBlob");
+
+    // FIXME: add any needed fields
+  }
+
+  public RicochetBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isRicochetBlob() {
+    return true;
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ServiceThread.java	Wed Jul 27 17:24:11 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ServiceThread.java	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/make/altsrc.make	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/altsrc.make	Wed Jul 27 17:32:44 2011 -0700
@@ -24,7 +24,8 @@
 
 # This file defines variables and macros which are used in the makefiles to 
 # allow distributions to augment or replace common hotspot code with 
-# distribution-specific source files.
+# distribution-specific source files. This capability is disabled when
+# an OPENJDK build is requested, unless HS_ALT_SRC_REL has been set externally.
 
 # Requires: GAMMADIR
 # Provides:
@@ -33,14 +34,17 @@
 
 HS_COMMON_SRC_REL=src
 
-# This needs to be changed to a more generic location, but we keep it as this 
-# for now for compatibility
-HS_ALT_SRC_REL=src/closed
+ifneq ($(OPENJDK),true)
+  # This needs to be changed to a more generic location, but we keep it
+  # as it is for now for compatibility.
+  HS_ALT_SRC_REL=src/closed
+else
+  HS_ALT_SRC_REL=NO_SUCH_PATH
+endif
 
 HS_COMMON_SRC=$(GAMMADIR)/$(HS_COMMON_SRC_REL)
 HS_ALT_SRC=$(GAMMADIR)/$(HS_ALT_SRC_REL)
 
-
 ## altsrc-equiv 
 # 
 # Convert a common source path to an alternative source path
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/cscope.make	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,141 @@
+#
+# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# The cscope.out file is generated in the current directory.  The old cscope.out
+# file is *not* removed because cscope is smart enough to only build what has
+# changed.  cscope can be confused if files are renamed or removed, so it may be
+# necessary to remove cscope.out (gmake cscope.clean) if a lot of reorganization
+# has occurred.
+
+include $(GAMMADIR)/make/scm.make
+
+RM	= rm -f
+HG	= hg
+CS_TOP	= $(GAMMADIR)
+
+CSDIRS	= $(CS_TOP)/src $(CS_TOP)/make
+CSINCS	= $(CSDIRS:%=-I%)
+
+CSCOPE		= cscope
+CSCOPE_OUT	= cscope.out
+CSCOPE_FLAGS	= -b
+
+# Allow .java files to be added from the environment (CSCLASSES=yes).
+ifdef	CSCLASSES
+ADDCLASSES=	-o -name '*.java'
+endif
+
+# Adding CClassHeaders also pushes the file count of a full workspace up about
+# 200 files (these files also don't exist in a new workspace, and thus will
+# cause the recreation of the database as they get created, which might seem
+# a little confusing).  Thus allow these files to be added from the environment
+# (CSHEADERS=yes).
+ifndef	CSHEADERS
+RMCCHEADERS=	-o -name CClassHeaders
+endif
+
+# Ignore build products.
+CS_PRUNE_GENERATED	= -o -name '${OSNAME}_*_core' -o \
+			     -name '${OSNAME}_*_compiler?'
+
+# O/S-specific files for all systems are included by default.  Set CS_OS to a
+# space-separated list of identifiers to include only those systems.
+ifdef	CS_OS
+CS_PRUNE_OS	= $(patsubst %,-o -name '*%*',\
+		    $(filter-out ${CS_OS},linux macos solaris windows))
+endif
+
+# CPU-specific files for all processors are included by default.  Set CS_CPU to a
+# space-separated list of identifiers to include only those CPUs.
+ifdef	CS_CPU
+CS_PRUNE_CPU	= $(patsubst %,-o -name '*%*',\
+		    $(filter-out ${CS_CPU},arm ppc sparc x86 zero))
+endif
+
+# What files should we include?  A simple rule might be just those files under
+# SCCS control, however this would miss files we create like the opcodes and
+# CClassHeaders.  The following attempts to find everything that is *useful*.
+# (.del files are created by sccsrm, demo directories contain many .java files
+# that probably aren't useful for development, and the pkgarchive may contain
+# duplicates of files within the source hierarchy).
+
+# Directories to exclude.
+CS_PRUNE_STD	= $(SCM_DIRS) \
+		  -o -name '.del-*' \
+		  -o -name '*demo' \
+		  -o -name pkgarchive
+
+# Placeholder for user-defined excludes.
+CS_PRUNE_EX	=
+
+CS_PRUNE	= $(CS_PRUNE_STD) \
+		  $(CS_PRUNE_OS) \
+		  $(CS_PRUNE_CPU) \
+		  $(CS_PRUNE_GENERATED) \
+		  $(CS_PRUNE_EX) \
+		  $(RMCCHEADERS)
+
+# File names to include.
+CSFILENAMES	= -name '*.[ch]pp' \
+		  -o -name '*.[Ccshlxy]' \
+		  $(CS_ADD_GENERATED) \
+		  -o -name '*.d' \
+		  -o -name '*.il' \
+		  -o -name '*.cc' \
+		  -o -name '*[Mm]akefile*' \
+		  -o -name '*.gmk' \
+		  -o -name '*.make' \
+		  -o -name '*.ad' \
+		  $(ADDCLASSES)
+
+.PHONY:		cscope cscope.clean cscope.scratch TAGS.clean FORCE
+.PRECIOUS:	cscope.out
+
+cscope $(CSCOPE_OUT): cscope.files FORCE
+	$(CSCOPE) -f $(CSCOPE_OUT) $(CSCOPE_FLAGS)
+
+cscope.clean:
+	$(QUIETLY) $(RM) $(CSCOPE_OUT) cscope.files
+
+cscope.scratch:  cscope.clean cscope
+
+# The raw list is reordered so cscope displays the most relevant files first.
+cscope.files:
+	$(QUIETLY)						\
+	raw=cscope.$$$$;					\
+	find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o	\
+	    -type f \( $(CSFILENAMES) \) -print > $$raw;	\
+	{							\
+	echo "$(CSINCS)";					\
+	egrep -v "\.java|/make/" $$raw;				\
+	fgrep ".java" $$raw;					\
+	fgrep "/make/" $$raw;					\
+	} > $@;							\
+	rm -f $$raw
+
+TAGS:  cscope.files FORCE
+	egrep -v '^-|^$$' $< | etags --members -
+
+TAGS.clean:
+	$(RM) TAGS
--- a/make/hotspot_version	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/hotspot_version	Wed Jul 27 17:32:44 2011 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=21
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=09
+HS_BUILD_NUMBER=17
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/jdk6_hotspot_distro	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,32 @@
+# 
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+# 
+
+#
+# This file format must remain compatible with both
+# GNU Makefile and Microsoft nmake formats.
+#
+
+# Don't put quotes around these values (quotes break the Windows build).
+HOTSPOT_VM_DISTRO=Java HotSpot(TM)
+COMPANY_NAME=Sun Microsystems, Inc.
+PRODUCT_NAME=Java(TM) Platform SE
--- a/make/jprt.gmk	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/jprt.gmk	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,24 @@
   ZIPFLAGS=-q -y
 endif
 
+jprt_build_productEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_product
+
+jprt_build_debugEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_debug
+
+jprt_build_fastdebugEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_fastdebug
+
+jprt_build_productOpen:
+	$(MAKE) OPENJDK=true jprt_build_product
+
+jprt_build_debugOpen:
+	$(MAKE) OPENJDK=true jprt_build_debug
+
+jprt_build_fastdebugOpen:
+	$(MAKE) OPENJDK=true jprt_build_fastdebug
+
 jprt_build_product: all_product copy_product_jdk export_product_jdk
 	( $(CD) $(JDK_IMAGE_DIR) && \
 	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
--- a/make/jprt.properties	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/jprt.properties	Wed Jul 27 17:32:44 2011 -0700
@@ -202,16 +202,21 @@
     ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
     ${jprt.my.windows.x64}-{product|fastdebug|debug}
 
+jprt.build.targets.open= \
+    ${jprt.my.solaris.i586}-{productOpen}, \
+    ${jprt.my.solaris.x64}-{debugOpen}, \
+    ${jprt.my.linux.x64}-{productOpen}
+
 jprt.build.targets.embedded= \
-    ${jprt.my.linux.i586}-{product|fastdebug|debug}, \
-    ${jprt.my.linux.ppc}-{product|fastdebug}, \
-    ${jprt.my.linux.ppcv2}-{product|fastdebug}, \
-    ${jprt.my.linux.ppcsflt}-{product|fastdebug}, \
-    ${jprt.my.linux.armvfp}-{product|fastdebug}, \
-    ${jprt.my.linux.armsflt}-{product|fastdebug}
+    ${jprt.my.linux.i586}-{productEmb|fastdebugEmb|debugEmb}, \
+    ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.armvfp}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
 
 jprt.build.targets.all=${jprt.build.targets.standard}, \
-    ${jprt.build.targets.embedded}
+    ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
 
 jprt.build.targets.jdk7=${jprt.build.targets.all}
 jprt.build.targets.jdk7temp=${jprt.build.targets.all}
@@ -453,6 +458,12 @@
     ${jprt.my.windows.x64}-product-c2-jbb_G1, \
     ${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
 
+# Some basic "smoke" tests for OpenJDK builds
+jprt.test.targets.open = \
+    ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98_tiered, \
+    ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98_tiered, \
+    ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98_tiered
+
 # Testing for actual embedded builds is different to standard
 jprt.my.linux.i586.test.targets.embedded = \
     linux_i586_2.6-product-c1-scimark
@@ -461,6 +472,7 @@
 # Note: no PPC or ARM tests at this stage
 
 jprt.test.targets.standard = \
+  ${jprt.my.linux.i586.test.targets.embedded}, \
   ${jprt.my.solaris.sparc.test.targets}, \
   ${jprt.my.solaris.sparcv9.test.targets}, \
   ${jprt.my.solaris.i586.test.targets}, \
@@ -468,7 +480,8 @@
   ${jprt.my.linux.i586.test.targets}, \
   ${jprt.my.linux.x64.test.targets}, \
   ${jprt.my.windows.i586.test.targets}, \
-  ${jprt.my.windows.x64.test.targets}
+  ${jprt.my.windows.x64.test.targets}, \
+  ${jprt.test.targets.open}
 
 jprt.test.targets.embedded= 		\
   ${jprt.my.linux.i586.test.targets.embedded}, \
--- a/make/linux/Makefile	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/linux/Makefile	Wed Jul 27 17:32:44 2011 -0700
@@ -359,7 +359,7 @@
 
 clean:  clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_docs
 
-include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
+include $(GAMMADIR)/make/cscope.make
 
 #-------------------------------------------------------------------------------
 
--- a/make/linux/README	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/linux/README	Wed Jul 27 17:32:44 2011 -0700
@@ -1,4 +1,4 @@
-Copyright (c) 2007 Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   
 This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/cscope.make	Wed Jul 27 17:24:11 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,160 +0,0 @@
-#
-# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-#
-# The cscope.out file is made in the current directory and spans the entire
-# source tree.
-#
-# Things to note:
-#	1. We use relative names for cscope.
-#	2. We *don't* remove the old cscope.out file, because cscope is smart
-#	   enough to only build what has changed.  It can be confused, however,
-#	   if files are renamed or removed, so it may be necessary to manually
-#	   remove cscope.out if a lot of reorganization has occurred.
-#
-
-include $(GAMMADIR)/make/scm.make
-
-NAWK	= awk
-RM	= rm -f
-HG	= hg
-CS_TOP	= ../..
-
-CSDIRS	= $(CS_TOP)/src $(CS_TOP)/build
-CSINCS	= $(CSDIRS:%=-I%)
-
-CSCOPE		= cscope
-CSCOPE_FLAGS	= -b
-
-# Allow .java files to be added from the environment (CSCLASSES=yes).
-ifdef	CSCLASSES
-ADDCLASSES=	-o -name '*.java'
-endif
-
-# Adding CClassHeaders also pushes the file count of a full workspace up about
-# 200 files (these files also don't exist in a new workspace, and thus will
-# cause the recreation of the database as they get created, which might seem
-# a little confusing).  Thus allow these files to be added from the environment
-# (CSHEADERS=yes).
-ifndef	CSHEADERS
-RMCCHEADERS=	-o -name CClassHeaders
-endif
-
-# Use CS_GENERATED=x to include auto-generated files in the build directories.
-ifdef	CS_GENERATED
-CS_ADD_GENERATED	= -o -name '*.incl'
-else
-CS_PRUNE_GENERATED	= -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?'
-endif
-
-# OS-specific files for other systems are excluded by default.  Use CS_OS=yes
-# to include platform-specific files for other platforms.
-ifndef	CS_OS
-CS_OS		= linux macos solaris win32
-CS_PRUNE_OS	= $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS}))
-endif
-
-# Processor-specific files for other processors are excluded by default.  Use
-# CS_CPU=x to include platform-specific files for other platforms.
-ifndef	CS_CPU
-CS_CPU		= i486 sparc amd64 ia64
-CS_PRUNE_CPU	= $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU}))
-endif
-
-# What files should we include?  A simple rule might be just those files under
-# SCCS control, however this would miss files we create like the opcodes and
-# CClassHeaders.  The following attempts to find everything that is *useful*.
-# (.del files are created by sccsrm, demo directories contain many .java files
-# that probably aren't useful for development, and the pkgarchive may contain
-# duplicates of files within the source hierarchy).
-
-# Directories to exclude.
-CS_PRUNE_STD	= $(SCM_DIRS) \
-		  -o -name '.del-*' \
-		  -o -name '*demo' \
-		  -o -name pkgarchive
-
-CS_PRUNE	= $(CS_PRUNE_STD) \
-		  $(CS_PRUNE_OS) \
-		  $(CS_PRUNE_CPU) \
-		  $(CS_PRUNE_GENERATED) \
-		  $(RMCCHEADERS)
-
-# File names to include.
-CSFILENAMES	= -name '*.[ch]pp' \
-		  -o -name '*.[Ccshlxy]' \
-		  $(CS_ADD_GENERATED) \
-		  -o -name '*.il' \
-		  -o -name '*.cc' \
-		  -o -name '*[Mm]akefile*' \
-		  -o -name '*.gmk' \
-		  -o -name '*.make' \
-		  -o -name '*.ad' \
-		  $(ADDCLASSES)
-
-.PRECIOUS:	cscope.out
-
-cscope cscope.out: cscope.files FORCE
-	$(CSCOPE) $(CSCOPE_FLAGS)
-
-# The .raw file is reordered here in an attempt to make cscope display the most
-# relevant files first.
-cscope.files: .cscope.files.raw
-	echo "$(CSINCS)" > $@
-	-egrep -v "\.java|\/make\/"	$< >> $@
-	-fgrep ".java"			$< >> $@
-	-fgrep "/make/"		$< >> $@
-
-.cscope.files.raw:  .nametable.files
-	-find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
-	    -type f \( $(CSFILENAMES) \) -print > $@
-
-cscope.clean:  nametable.clean
-	-$(RM) cscope.out cscope.files .cscope.files.raw
-
-TAGS:  cscope.files FORCE
-	egrep -v '^-|^$$' $< | etags --members -
-
-TAGS.clean:  nametable.clean
-	-$(RM) TAGS
-
-# .nametable.files and .nametable.files.tmp are used to determine if any files
-# were added to/deleted from/renamed in the workspace.  If not, then there's
-# normally no need to rebuild the cscope database. To force a rebuild of
-# the cscope database: gmake nametable.clean.
-.nametable.files:  .nametable.files.tmp
-	( cmp -s $@ $< ) || ( cp $< $@ )
-	-$(RM) $<
-
-# `hg status' is slightly faster than `hg fstatus'. Both are
-# quite a bit slower on an NFS mounted file system, so this is
-# really geared towards repos on local file systems.
-.nametable.files.tmp:
-	-$(HG) fstatus -acmn > $@
-nametable.clean:
-	-$(RM) .nametable.files .nametable.files.tmp
-
-FORCE:
-
-.PHONY:		cscope cscope.clean TAGS.clean nametable.clean FORCE
--- a/make/linux/makefiles/gcc.make	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/linux/makefiles/gcc.make	Wed Jul 27 17:32:44 2011 -0700
@@ -205,7 +205,7 @@
 SHARED_FLAG = -shared
 
 # Keep symbols even if they are not used
-AOUT_FLAGS += -export-dynamic
+AOUT_FLAGS += -Xlinker -export-dynamic
 
 #------------------------------------------------------------------------
 # Debug flags
--- a/make/linux/makefiles/vm.make	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/linux/makefiles/vm.make	Wed Jul 27 17:32:44 2011 -0700
@@ -102,6 +102,10 @@
 CFLAGS += $(EXTRA_CFLAGS)
 LFLAGS += $(EXTRA_CFLAGS)
 
+# Don't set the executable bit on the stack segment
+# (the same could be done with a separate execstack command)
+LFLAGS += -Xlinker -z -Xlinker noexecstack
+
 LIBS += -lm -ldl -lpthread
 
 # By default, link the *.o into the library, not the executable.
--- a/make/solaris/Makefile	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/solaris/Makefile	Wed Jul 27 17:32:44 2011 -0700
@@ -296,7 +296,7 @@
 
 clean:  clean_compiler2 clean_compiler1 clean_core clean_docs clean_kernel
 
-include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
+include $(GAMMADIR)/make/cscope.make
 
 #-------------------------------------------------------------------------------
 
--- a/make/solaris/makefiles/cscope.make	Wed Jul 27 17:24:11 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,162 +0,0 @@
-#
-# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-#
-# The cscope.out file is made in the current directory and spans the entire
-# source tree.
-#
-# Things to note:
-#	1. We use relative names for cscope.
-#	2. We *don't* remove the old cscope.out file, because cscope is smart
-#	   enough to only build what has changed.  It can be confused, however,
-#	   if files are renamed or removed, so it may be necessary to manually
-#	   remove cscope.out if a lot of reorganization has occurred.
-#
-
-include $(GAMMADIR)/make/scm.make
-
-NAWK	= /usr/xpg4/bin/awk
-RM	= rm -f
-HG	= hg
-CS_TOP	= ../..
-
-CSDIRS	= $(CS_TOP)/src $(CS_TOP)/make
-CSINCS	= $(CSDIRS:%=-I%)
-
-CSCOPE		= cscope
-CSCOPE_FLAGS	= -b
-
-# Allow .java files to be added from the environment (CSCLASSES=yes).
-ifdef	CSCLASSES
-ADDCLASSES=	-o -name '*.java'
-endif
-
-# Adding CClassHeaders also pushes the file count of a full workspace up about
-# 200 files (these files also don't exist in a new workspace, and thus will
-# cause the recreation of the database as they get created, which might seem
-# a little confusing).  Thus allow these files to be added from the environment
-# (CSHEADERS=yes).
-ifndef	CSHEADERS
-RMCCHEADERS=	-o -name CClassHeaders
-endif
-
-# Use CS_GENERATED=x to include auto-generated files in the make directories.
-ifdef	CS_GENERATED
-CS_ADD_GENERATED	= -o -name '*.incl'
-else
-CS_PRUNE_GENERATED	= -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?'
-endif
-
-# OS-specific files for other systems are excluded by default.  Use CS_OS=yes
-# to include platform-specific files for other platforms.
-ifndef	CS_OS
-CS_OS		= linux macos solaris win32
-CS_PRUNE_OS	= $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS}))
-endif
-
-# Processor-specific files for other processors are excluded by default.  Use
-# CS_CPU=x to include platform-specific files for other platforms.
-ifndef	CS_CPU
-CS_CPU		= i486 sparc amd64 ia64
-CS_PRUNE_CPU	= $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU}))
-endif
-
-# What files should we include?  A simple rule might be just those files under
-# SCCS control, however this would miss files we create like the opcodes and
-# CClassHeaders.  The following attempts to find everything that is *useful*.
-# (.del files are created by sccsrm, demo directories contain many .java files
-# that probably aren't useful for development, and the pkgarchive may contain
-# duplicates of files within the source hierarchy).
-
-# Directories to exclude.
-CS_PRUNE_STD	= $(SCM_DIRS) \
-		  -o -name '.del-*' \
-		  -o -name '*demo' \
-		  -o -name pkgarchive
-
-CS_PRUNE	= $(CS_PRUNE_STD) \
-		  $(CS_PRUNE_OS) \
-		  $(CS_PRUNE_CPU) \
-		  $(CS_PRUNE_GENERATED) \
-		  $(RMCCHEADERS)
-
-# File names to include.
-CSFILENAMES	= -name '*.[ch]pp' \
-		  -o -name '*.[Ccshlxy]' \
-		  $(CS_ADD_GENERATED) \
-		  -o -name '*.d' \
-		  -o -name '*.il' \
-		  -o -name '*.cc' \
-		  -o -name '*[Mm]akefile*' \
-		  -o -name '*.gmk' \
-		  -o -name '*.make' \
-		  -o -name '*.ad' \
-		  $(ADDCLASSES)
-
-.PRECIOUS:	cscope.out
-
-cscope cscope.out: cscope.files FORCE
-	$(CSCOPE) $(CSCOPE_FLAGS)
-
-# The .raw file is reordered here in an attempt to make cscope display the most
-# relevant files first.
-cscope.files: .cscope.files.raw
-	echo "$(CSINCS)" > $@
-	-egrep -v "\.java|\/make\/"	$< >> $@
-	-fgrep ".java"			$< >> $@
-	-fgrep "/make/"		$< >> $@
-
-.cscope.files.raw:  .nametable.files
-	-find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
-	    -type f \( $(CSFILENAMES) \) -print > $@
-
-cscope.clean:  nametable.clean
-	-$(RM) cscope.out cscope.files .cscope.files.raw
-
-TAGS:  cscope.files FORCE
-	egrep -v '^-|^$$' $< | etags --members -
-
-TAGS.clean:  nametable.clean
-	-$(RM) TAGS
-
-# .nametable.files and .nametable.files.tmp are used to determine if any files
-# were added to/deleted from/renamed in the workspace.  If not, then there's
-# normally no need to rebuild the cscope database. To force a rebuild of
-# the cscope database: gmake nametable.clean.
-.nametable.files:  .nametable.files.tmp
-	( cmp -s $@ $< ) || ( cp $< $@ )
-	-$(RM) $<
-
-# `hg status' is slightly faster than `hg fstatus'. Both are
-# quite a bit slower on an NFS mounted file system, so this is
-# really geared towards repos on local file systems.
-.nametable.files.tmp:
-	-$(HG) fstatus -acmn > $@
-
-nametable.clean:
-	-$(RM) .nametable.files .nametable.files.tmp
-
-FORCE:
-
-.PHONY:		cscope cscope.clean TAGS.clean nametable.clean FORCE
--- a/make/solaris/makefiles/saproc.make	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/solaris/makefiles/saproc.make	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,33 @@
 SA_LFLAGS += -mt -xnolib -norunpath
 endif
 
+# The libproc Pstack_iter() interface changed in Nevada-B159.
+# Use 'uname -r -v' to determine the Solaris version as per
+# Solaris Nevada team request. This logic needs to match:
+# agent/src/os/solaris/proc/saproc.cpp: set_has_newer_Pstack_iter():
+#   - skip SunOS 4 or older
+#   - skip Solaris 10 or older
+#   - skip two digit internal Nevada builds
+#   - skip three digit internal Nevada builds thru 149
+#   - skip internal Nevada builds 150-158
+#   - if not skipped, print define for Nevada-B159 or later
+SOLARIS_11_B159_OR_LATER := \
+$(shell uname -r -v \
+    | sed -n \
+          -e '/^[0-4]\. /b' \
+          -e '/^5\.[0-9] /b' \
+          -e '/^5\.10 /b' \
+          -e '/ snv_[0-9][0-9]$$/b' \
+          -e '/ snv_[01][0-4][0-9]$$/b' \
+          -e '/ snv_15[0-8]$$/b' \
+          -e 's/.*/-DSOLARIS_11_B159_OR_LATER/' \
+          -e 'p' \
+          )
+
+# Uncomment the following to simulate building on Nevada-B159 or later
+# when actually building on Nevada-B158 or earlier:
+#SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER
+
 $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
 	$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
 	  echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@@ -68,6 +95,7 @@
 	           -I$(GENERATED)                                       \
 	           -I$(BOOT_JAVA_HOME)/include                          \
 	           -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family)    \
+	           $(SOLARIS_11_B159_OR_LATER)                          \
 	           $(SASRCFILES)                                        \
 	           $(SA_LFLAGS)                                         \
 	           -o $@                                                \
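The sed pipeline added above encodes the version test spelled out in the comment: emit -DSOLARIS_11_B159_OR_LATER unless `uname -r -v` identifies SunOS 4.x, Solaris 10 or older, or an internal Nevada build at or below snv_158. The following is a standalone C++ sketch of the same decision, for illustration only; the function name and the parsing are assumptions, not the logic in agent/src/os/solaris/proc/saproc.cpp.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// uname_rv is the combined "uname -r -v" line, e.g. "5.11 snv_159".
static bool is_nevada_b159_or_later(const char* uname_rv) {
  int major = 0, minor = 0;
  if (sscanf(uname_rv, "%d.%d", &major, &minor) == 2) {
    if (major <= 4) return false;                 // SunOS 4 or older
    if (major == 5 && minor <= 10) return false;  // Solaris 10 or older
  }
  const char* snv = strstr(uname_rv, " snv_");
  if (snv != NULL && atoi(snv + 5) <= 158) {
    return false;                                 // internal Nevada builds thru snv_158
  }
  return true;                                    // Nevada-B159 or later
}

int main() {
  printf("%d\n", is_nevada_b159_or_later("5.11 snv_158"));            // 0
  printf("%d\n", is_nevada_b159_or_later("5.11 snv_159"));            // 1
  printf("%d\n", is_nevada_b159_or_later("5.10 Generic_142910-17"));  // 0
  return 0;
}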
--- a/make/solaris/makefiles/sparcWorks.make	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/solaris/makefiles/sparcWorks.make	Wed Jul 27 17:32:44 2011 -0700
@@ -100,11 +100,6 @@
 
 LINK_LIB.CC/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1;
 
-# Some interfaces (_lwp_create) changed with LP64 and Solaris 7
-SOLARIS_7_OR_LATER := \
-$(shell uname -r | awk -F. '{ if ($$2 >= 7) print "-DSOLARIS_7_OR_LATER"; }')
-CFLAGS += ${SOLARIS_7_OR_LATER}
-
 # New architecture options started in SS12 (5.9), we need both styles to build.
 #   The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as.
 #   Note: default for 32bit sparc is now the same as v8plus, so the
--- a/make/windows/build.make	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/windows/build.make	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,25 @@
 # or make/hotspot_distro.
 !ifndef HOTSPOT_VM_DISTRO
 !if exists($(WorkSpace)\src\closed)
+
+# If the build is for JDK 6 or an earlier version, it should include jdk6_hotspot_distro
+# instead of hotspot_distro.
+JDK6_OR_EARLIER=0
+!if "$(JDK_MAJOR_VERSION)" != "" && "$(JDK_MINOR_VERSION)" != "" && "$(JDK_MICRO_VERSION)" != ""
+!if $(JDK_MAJOR_VERSION) == 1 && $(JDK_MINOR_VERSION) < 7
+JDK6_OR_EARLIER=1
+!endif
+!else
+!if $(JDK_MAJOR_VER) == 1 && $(JDK_MINOR_VER) < 7
+JDK6_OR_EARLIER=1
+!endif
+!endif
+
+!if $(JDK6_OR_EARLIER) == 1
+!include $(WorkSpace)\make\jdk6_hotspot_distro
+!else
 !include $(WorkSpace)\make\hotspot_distro
+!endif
 !else
 !include $(WorkSpace)\make\openjdk_distro
 !endif
@@ -260,7 +278,7 @@
 	@ echo Variant=$(realVariant)				>> $@
 	@ echo WorkSpace=$(WorkSpace)				>> $@
 	@ echo BootStrapDir=$(BootStrapDir)			>> $@
-        @ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME)	>> $@
+	@ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME)	>> $@
 	@ echo HS_VER=$(HS_VER)					>> $@
 	@ echo HS_DOTVER=$(HS_DOTVER)				>> $@
 	@ echo HS_COMPANY=$(COMPANY_NAME)			>> $@
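The nested !if blocks added above reduce to a single predicate on the 1.x version numbers. A trivial C++ model of the check, assuming the usual JDK numbering where JDK 6 is reported as 1.6:

#include <cstdio>

// JDK 6 or earlier means major version 1 and minor version below 7.
static bool jdk6_or_earlier(int jdk_major, int jdk_minor) {
  return jdk_major == 1 && jdk_minor < 7;
}

int main() {
  printf("1.6 -> %d\n", jdk6_or_earlier(1, 6));  // 1: include jdk6_hotspot_distro
  printf("1.7 -> %d\n", jdk6_or_earlier(1, 7));  // 0: include hotspot_distro
  return 0;
}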
--- a/make/windows/makefiles/compile.make	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/windows/makefiles/compile.make	Wed Jul 27 17:32:44 2011 -0700
@@ -81,7 +81,6 @@
 !endif
 
 CPP_FLAGS=$(CPP_FLAGS) /D "WIN32" /D "_WINDOWS"
-
 # Must specify this for sharedRuntimeTrig.cpp
 CPP_FLAGS=$(CPP_FLAGS) /D "VM_LITTLE_ENDIAN"
 
@@ -232,6 +231,11 @@
  uuid.lib Wsock32.lib winmm.lib /nologo /machine:$(MACHINE) /opt:REF \
  /opt:ICF,8 /map /debug
 
+
+!if $(MSC_VER) >= 1600 
+LINK_FLAGS= $(LINK_FLAGS) psapi.lib
+!endif
+
 # Resource compiler settings
 RC=rc.exe
 RC_FLAGS=/D "HS_VER=$(HS_VER)" \
--- a/make/windows/projectfiles/kernel/Makefile	Wed Jul 27 17:24:11 2011 -0700
+++ b/make/windows/projectfiles/kernel/Makefile	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #   
 # This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -42,6 +42,12 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #endif
 
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#endif
+
 // Convert the raw encoding form into the form expected by the
 // constructor for Address.
 Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
@@ -1072,6 +1078,12 @@
     check_and_forward_exception(Gtemp);
   }
 
+#ifdef ASSERT
+  set(badHeapWordVal, G3);
+  set(badHeapWordVal, G4);
+  set(badHeapWordVal, G5);
+#endif
+
   // get oop result if there is one and reset the value in the thread
   if (oop_result->is_valid()) {
     get_vm_result(oop_result);
@@ -1177,6 +1189,11 @@
   call(entry_point, relocInfo::runtime_call_type);
   delayed()->nop();
   restore_thread(thread_cache);
+#ifdef ASSERT
+  set(badHeapWordVal, G3);
+  set(badHeapWordVal, G4);
+  set(badHeapWordVal, G5);
+#endif
 }
 
 
@@ -1518,7 +1535,7 @@
 // save_frame: given number of "extra" words in frame,
 // issue approp. save instruction (p 200, v8 manual)
 
-void MacroAssembler::save_frame(int extraWords = 0) {
+void MacroAssembler::save_frame(int extraWords) {
   int delta = -total_frame_size_in_bytes(extraWords);
   if (is_simm13(delta)) {
     save(SP, delta, SP);
@@ -1730,6 +1747,7 @@
 
   if (reg == G0)  return;       // always NULL, which is always an oop
 
+  BLOCK_COMMENT("verify_oop {");
   char buffer[64];
 #ifdef COMPILER1
   if (CommentedAssembly) {
@@ -1768,6 +1786,7 @@
   delayed()->nop();
   // recover frame size
   add(SP, 8*8,SP);
+  BLOCK_COMMENT("} verify_oop");
 }
 
 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
@@ -2040,7 +2059,7 @@
   }
   else
      ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
-  assert(false, "error");
+  assert(false, err_msg("DEBUG MESSAGE: %s", msg));
 }
 
 
@@ -3230,6 +3249,7 @@
 
 
 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
+                                                   Register temp_reg,
                                                    int extra_slot_offset) {
   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
   int stackElementSize = Interpreter::stackElementSize;
@@ -3238,18 +3258,19 @@
     offset += arg_slot.as_constant() * stackElementSize;
     return offset;
   } else {
-    Register temp = arg_slot.as_register();
-    sll_ptr(temp, exact_log2(stackElementSize), temp);
+    assert(temp_reg != noreg, "must specify");
+    sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
     if (offset != 0)
-      add(temp, offset, temp);
-    return temp;
+      add(temp_reg, offset, temp_reg);
+    return temp_reg;
   }
 }
 
 
 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
+                                         Register temp_reg,
                                          int extra_slot_offset) {
-  return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
+  return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
 }
 
 
@@ -4257,34 +4278,14 @@
 ///////////////////////////////////////////////////////////////////////////////////
 #ifndef SERIALGC
 
-static uint num_stores = 0;
-static uint num_null_pre_stores = 0;
-
-static void count_null_pre_vals(void* pre_val) {
-  num_stores++;
-  if (pre_val == NULL) num_null_pre_stores++;
-  if ((num_stores % 1000000) == 0) {
-    tty->print_cr(UINT32_FORMAT " stores, " UINT32_FORMAT " (%5.2f%%) with null pre-vals.",
-                  num_stores, num_null_pre_stores,
-                  100.0*(float)num_null_pre_stores/(float)num_stores);
-  }
-}
-
-static address satb_log_enqueue_with_frame = 0;
-static u_char* satb_log_enqueue_with_frame_end = 0;
-
-static address satb_log_enqueue_frameless = 0;
-static u_char* satb_log_enqueue_frameless_end = 0;
+static address satb_log_enqueue_with_frame = NULL;
+static u_char* satb_log_enqueue_with_frame_end = NULL;
+
+static address satb_log_enqueue_frameless = NULL;
+static u_char* satb_log_enqueue_frameless_end = NULL;
 
 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
 
-// The calls to this don't work.  We'd need to do a fair amount of work to
-// make it work.
-static void check_index(int ind) {
-  assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
-         "Invariants.");
-}
-
 static void generate_satb_log_enqueue(bool with_frame) {
   BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
   CodeBuffer buf(bb);
@@ -4388,13 +4389,27 @@
   }
 }
 
-void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) {
-  assert(offset == 0 || index == noreg, "choose one");
-
-  if (G1DisablePreBarrier) return;
-  // satb_log_barrier(tmp, obj, offset, preserve_o_regs);
+void MacroAssembler::g1_write_barrier_pre(Register obj,
+                                          Register index,
+                                          int offset,
+                                          Register pre_val,
+                                          Register tmp,
+                                          bool preserve_o_regs) {
   Label filtered;
-  // satb_log_barrier_work0(tmp, filtered);
+
+  if (obj == noreg) {
+    // We are not loading the previous value so make
+    // sure that we don't trash the value in pre_val
+    // with the code below.
+    assert_different_registers(pre_val, tmp);
+  } else {
+    // We will be loading the previous value
+    // in this code so...
+    assert(offset == 0 || index == noreg, "choose one");
+    assert(pre_val == noreg, "check this code");
+  }
+
+  // Is marking active?
   if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
     ld(G2,
        in_bytes(JavaThread::satb_mark_queue_offset() +
@@ -4413,61 +4428,46 @@
   br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
   delayed() -> nop();
 
-  // satb_log_barrier_work1(tmp, offset);
-  if (index == noreg) {
-    if (Assembler::is_simm13(offset)) {
-      load_heap_oop(obj, offset, tmp);
+  // Do we need to load the previous value?
+  if (obj != noreg) {
+    // Load the previous value...
+    if (index == noreg) {
+      if (Assembler::is_simm13(offset)) {
+        load_heap_oop(obj, offset, tmp);
+      } else {
+        set(offset, tmp);
+        load_heap_oop(obj, tmp, tmp);
+      }
     } else {
-      set(offset, tmp);
-      load_heap_oop(obj, tmp, tmp);
+      load_heap_oop(obj, index, tmp);
     }
-  } else {
-    load_heap_oop(obj, index, tmp);
+    // Previous value has been loaded into tmp
+    pre_val = tmp;
   }
 
-  // satb_log_barrier_work2(obj, tmp, offset);
-
-  // satb_log_barrier_work3(tmp, filtered, preserve_o_regs);
-
-  const Register pre_val = tmp;
-
-  if (G1SATBBarrierPrintNullPreVals) {
-    save_frame(0);
-    mov(pre_val, O0);
-    // Save G-regs that target may use.
-    mov(G1, L1);
-    mov(G2, L2);
-    mov(G3, L3);
-    mov(G4, L4);
-    mov(G5, L5);
-    call(CAST_FROM_FN_PTR(address, &count_null_pre_vals));
-    delayed()->nop();
-    // Restore G-regs that target may have used.
-    mov(L1, G1);
-    mov(L2, G2);
-    mov(L3, G3);
-    mov(L4, G4);
-    mov(L5, G5);
-    restore(G0, G0, G0);
-  }
-
+  assert(pre_val != noreg, "must have a real register");
+
+  // Is the previous value null?
   // Check on whether to annul.
   br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
   delayed() -> nop();
 
   // OK, it's not filtered, so we'll need to call enqueue.  In the normal
-  // case, pre_val will be a scratch G-reg, but there's some cases in which
-  // it's an O-reg.  In the first case, do a normal call.  In the latter,
-  // do a save here and call the frameless version.
+  // case, pre_val will be a scratch G-reg, but there are some cases in
+  // which it's an O-reg.  In the first case, do a normal call.  In the
+  // latter, do a save here and call the frameless version.
 
   guarantee(pre_val->is_global() || pre_val->is_out(),
             "Or we need to think harder.");
+
   if (pre_val->is_global() && !preserve_o_regs) {
-    generate_satb_log_enqueue_if_necessary(true); // with frame.
+    generate_satb_log_enqueue_if_necessary(true); // with frame
+
     call(satb_log_enqueue_with_frame);
     delayed()->mov(pre_val, O0);
   } else {
-    generate_satb_log_enqueue_if_necessary(false); // with frameless.
+    generate_satb_log_enqueue_if_necessary(false); // frameless
+
     save_frame(0);
     call(satb_log_enqueue_frameless);
     delayed()->mov(pre_val->after_save(), O0);
@@ -4614,7 +4614,6 @@
   MacroAssembler* post_filter_masm = this;
 
   if (new_val == G0) return;
-  if (G1DisablePostBarrier) return;
 
   G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
   assert(bs->kind() == BarrierSet::G1SATBCT ||
@@ -4626,6 +4625,7 @@
 #else
     srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
 #endif
+
     if (G1PrintCTFilterStats) {
       guarantee(tmp->is_global(), "Or stats won't work...");
       // This is a sleazy hack: I'm temporarily hijacking G2, which I
@@ -4927,4 +4927,3 @@
   // Caller should set it:
   // add(G0, 1, result); // equals
 }
-
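The reworked g1_write_barrier_pre above adds an explicit pre_val register so callers that have already loaded the value being overwritten (obj == noreg) can skip the load, while the filtering itself is unchanged: do nothing when marking is inactive or the previous value is null, otherwise enqueue it. Below is a minimal C++ model of that filtering; SATBQueueModel and oop are stand-ins, not HotSpot types.

#include <cstdio>
#include <vector>

typedef const void* oop;

struct SATBQueueModel {
  bool active;                                // mirrors the PtrQueue "active" byte
  std::vector<oop> buffer;                    // stands in for the SATB log buffer
  void enqueue(oop pre_val) { buffer.push_back(pre_val); }
};

// field is the location being overwritten, or NULL when the caller has
// already loaded the previous value into pre_val (the obj == noreg case).
static void g1_pre_barrier_model(SATBQueueModel& q, oop* field, oop pre_val) {
  if (!q.active) return;                      // marking inactive: filtered
  if (field != NULL) pre_val = *field;        // load the previous value ourselves
  if (pre_val == NULL) return;                // null previous value: filtered
  q.enqueue(pre_val);                         // log it for concurrent marking
}

int main() {
  SATBQueueModel q;
  q.active = true;
  oop slot = "old value";
  g1_pre_barrier_model(q, &slot, NULL);       // loads "old value" and enqueues it
  g1_pre_barrier_model(q, NULL, slot);        // pre_val supplied directly by the caller
  printf("logged %zu values\n", q.buffer.size());
  return 0;
}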
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -309,12 +309,14 @@
 #endif
 
   // accessors
-  Register base()      const { return _base; }
-  Register index()     const { return _index_or_disp.as_register(); }
-  int      disp()      const { return _index_or_disp.as_constant(); }
-
-  bool     has_index() const { return _index_or_disp.is_register(); }
-  bool     has_disp()  const { return _index_or_disp.is_constant(); }
+  Register base()             const { return _base; }
+  Register index()            const { return _index_or_disp.as_register(); }
+  int      disp()             const { return _index_or_disp.as_constant(); }
+
+  bool     has_index()        const { return _index_or_disp.is_register(); }
+  bool     has_disp()         const { return _index_or_disp.is_constant(); }
+
+  bool     uses(Register reg) const { return base() == reg || (has_index() && index() == reg); }
 
   const relocInfo::relocType rtype() { return _rspec.type(); }
   const RelocationHolder&    rspec() { return _rspec; }
@@ -330,6 +332,10 @@
     Address a(base(), disp() + plusdisp);
     return a;
   }
+  bool is_same_address(Address a) const {
+    // disregard _rspec
+    return base() == a.base() && (has_index() ? index() == a.index() : disp() == a.disp());
+  }
 
   Address after_save() const {
     Address a = (*this);
@@ -436,6 +442,10 @@
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 
+  AddressLiteral(oop* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
   AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
@@ -455,6 +465,21 @@
   }
 };
 
+// Convenience classes
+class ExternalAddress: public AddressLiteral {
+ private:
+  static relocInfo::relocType reloc_for_target(address target) {
+    // Sometimes ExternalAddress is used for values which aren't
+    // exactly addresses, like the card table base.
+    // external_word_type can't be used for values in the first page
+    // so just skip the reloc in that case.
+    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
+  }
+
+ public:
+  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(          target)) {}
+  ExternalAddress(oop*    target) : AddressLiteral(target, reloc_for_target((address) target)) {}
+};
 
 inline Address RegisterImpl::address_in_saved_window() const {
    return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
@@ -855,9 +880,8 @@
   // and be sign-extended. Check the range.
 
   static void assert_signed_range(intptr_t x, int nbits) {
-    assert( nbits == 32
-        ||  -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1),
-      "value out of range");
+    assert(nbits == 32 || (-(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1)),
+           err_msg("value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits));
   }
 
   static void assert_signed_word_disp_range(intptr_t x, int nbits) {
@@ -2210,15 +2234,11 @@
   void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
 
 #ifndef SERIALGC
-  // Array store and offset
-  void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs);
-
+  // General G1 pre-barrier generator.
+  void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
+
+  // General G1 post-barrier generator
   void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
-
-  // May do filtering, depending on the boolean arguments.
-  void g1_card_table_write(jbyte* byte_map_base,
-                           Register tmp, Register obj, Register new_val,
-                           bool region_filter, bool null_filter);
 #endif // SERIALGC
 
   // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
@@ -2291,7 +2311,7 @@
   int total_frame_size_in_bytes(int extraWords);
 
   // used when extraWords known statically
-  void save_frame(int extraWords);
+  void save_frame(int extraWords = 0);
   void save_frame_c1(int size_in_bytes);
   // make a frame, and simultaneously pass up one or two register value
   // into the new register window
@@ -2460,9 +2480,11 @@
   // offset relative to Gargs of argument at tos[arg_slot].
   // (arg_slot == 0 means the last argument, not the first).
   RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
+                                     Register temp_reg,
                                      int extra_slot_offset = 0);
   // Address of Gargs and argument_offset.
   Address            argument_address(RegisterOrConstant arg_slot,
+                                      Register temp_reg,
                                       int extra_slot_offset = 0);
 
   // Stack overflow checking
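argument_offset() and argument_address() now take an explicit temp_reg: a constant arg_slot still folds to a plain byte offset and needs no scratch register, while a register arg_slot is scaled into temp_reg so the incoming register is left untouched. A rough model of the new contract follows; the register names, element size, and SlotOffset type are placeholders, not HotSpot code.

#include <cassert>
#include <cstdio>

enum Reg { noreg = 0, G1_scratch, O4_argslot };
static const int stackElementSize = 8;   // placeholder word size

struct SlotOffset {          // either a resolved byte offset or a register holding it
  bool is_constant;
  int  byte_offset;          // valid when is_constant
  Reg  reg;                  // valid otherwise
};

static SlotOffset argument_offset(bool slot_is_constant, int slot_const, Reg slot_reg,
                                  Reg temp_reg, int extra_slot_offset = 0) {
  int offset = extra_slot_offset * stackElementSize;
  if (slot_is_constant) {
    return SlotOffset{ true, offset + slot_const * stackElementSize, noreg };
  }
  // Register case: the emitted code scales slot_reg into temp_reg
  // (sll + add), leaving slot_reg itself untouched.
  assert(temp_reg != noreg && "must specify a scratch register");
  (void) slot_reg;
  return SlotOffset{ false, 0, temp_reg };
}

int main() {
  SlotOffset c = argument_offset(true, 3, noreg, noreg, 1);
  printf("constant slot 3 (+1 extra) -> %d bytes\n", c.byte_offset);    // 32
  SlotOffset r = argument_offset(false, 0, O4_argslot, G1_scratch);
  printf("register slot -> offset lives in reg %d\n", (int) r.reg);
  return 0;
}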
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -255,7 +255,11 @@
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) { relocate(a.rspec(offset)); stf(w, d, a.base(), a.disp() + offset); }
+inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
+  relocate(a.rspec(offset));
+  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index()        ); }
+  else               {                          stf(w, d, a.base(), a.disp() + offset); }
+}
 
 inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();   emit_long( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -408,13 +408,20 @@
 #ifndef SERIALGC
 
 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
+  // At this point we know that marking is in progress.
+  // If do_load() is true then we have to emit the
+  // load of the previous value; otherwise it has already
+  // been loaded into _pre_val.
+
   __ bind(_entry);
 
   assert(pre_val()->is_register(), "Precondition.");
-
   Register pre_val_reg = pre_val()->as_register();
 
-  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
+  if (do_load()) {
+    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
+  }
+
   if (__ is_in_wdisp16_range(_continuation)) {
     __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
                       pre_val_reg, _continuation);
@@ -431,6 +438,96 @@
 
 }
 
+void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
+  // At this point we know that offset == referent_offset.
+  //
+  // So we might have to emit:
+  //   if (src == null) goto continuation.
+  //
+  // and we definitely have to emit:
+  //   if (klass(src).reference_type == REF_NONE) goto continuation
+  //   if (!marking_active) goto continuation
+  //   if (pre_val == null) goto continuation
+  //   call pre_barrier(pre_val)
+  //   goto continuation
+  //
+  __ bind(_entry);
+
+  assert(src()->is_register(), "sanity");
+  Register src_reg = src()->as_register();
+
+  if (gen_src_check()) {
+    // The original src operand was not a constant.
+    // Generate src == null?
+    if (__ is_in_wdisp16_range(_continuation)) {
+      __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
+                        src_reg, _continuation);
+    } else {
+      __ cmp(src_reg, G0);
+      __ brx(Assembler::equal, false, Assembler::pt, _continuation);
+    }
+    __ delayed()->nop();
+  }
+
+  // Generate src->_klass->_reference_type() == REF_NONE?
+  assert(tmp()->is_register(), "sanity");
+  Register tmp_reg = tmp()->as_register();
+
+  __ load_klass(src_reg, tmp_reg);
+
+  Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
+  __ ld(ref_type_adr, tmp_reg);
+
+  if (__ is_in_wdisp16_range(_continuation)) {
+    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
+                      tmp_reg, _continuation);
+  } else {
+    __ cmp(tmp_reg, G0);
+    __ brx(Assembler::equal, false, Assembler::pt, _continuation);
+  }
+  __ delayed()->nop();
+
+  // Is marking active?
+  assert(thread()->is_register(), "precondition");
+  Register thread_reg = thread()->as_pointer_register();
+
+  Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
+                                       PtrQueue::byte_offset_of_active()));
+
+  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
+    __ ld(in_progress, tmp_reg);
+  } else {
+    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
+    __ ldsb(in_progress, tmp_reg);
+  }
+  if (__ is_in_wdisp16_range(_continuation)) {
+    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
+                      tmp_reg, _continuation);
+  } else {
+    __ cmp(tmp_reg, G0);
+    __ brx(Assembler::equal, false, Assembler::pt, _continuation);
+  }
+  __ delayed()->nop();
+
+  // val == null?
+  assert(val()->is_register(), "Precondition.");
+  Register val_reg = val()->as_register();
+
+  if (__ is_in_wdisp16_range(_continuation)) {
+    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
+                      val_reg, _continuation);
+  } else {
+    __ cmp(val_reg, G0);
+    __ brx(Assembler::equal, false, Assembler::pt, _continuation);
+  }
+  __ delayed()->nop();
+
+  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
+  __ delayed()->mov(val_reg, G4);
+  __ br(Assembler::always, false, Assembler::pt, _continuation);
+  __ delayed()->nop();
+}
+
 jbyte* G1PostBarrierStub::_byte_map_base = NULL;
 
 jbyte* G1PostBarrierStub::byte_map_base_slow() {
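The new G1UnsafeGetObjSATBBarrierStub above emits exactly the chain of checks listed in its opening comment. The sketch below models that slow path in plain C++ so the control flow is easy to follow; the struct layout, REF_NONE value, and function names are illustrative, not HotSpot's.

#include <cstdio>

enum ReferenceType { REF_NONE = 0, REF_SOFT, REF_WEAK, REF_PHANTOM };

struct KlassModel  { ReferenceType reference_type; };
struct ObjectModel { KlassModel* klass; };

static bool marking_active = true;            // stands in for the SATB "active" byte
static void pre_barrier(ObjectModel* pre_val) { printf("enqueue %p\n", (void*) pre_val); }

// Mirrors the stub: optional src null check, reference_type check,
// marking-active check, pre_val null check, then the pre-barrier call.
static void unsafe_get_obj_satb_barrier(ObjectModel* src, ObjectModel* pre_val,
                                        bool gen_src_check) {
  if (gen_src_check && src == NULL) return;              // goto continuation
  if (src->klass->reference_type == REF_NONE) return;    // not a Reference subclass
  if (!marking_active) return;
  if (pre_val == NULL) return;
  pre_barrier(pre_val);
}

int main() {
  KlassModel weak_ref_klass = { REF_WEAK };
  ObjectModel referent = { NULL };
  ObjectModel src = { &weak_ref_klass };
  unsafe_get_obj_satb_barrier(&src, &referent, /*gen_src_check=*/ true);
  return 0;
}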
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -387,7 +387,8 @@
 
   if (obj_store) {
     // Needs GC write barriers.
-    pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
+    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
+                true /* do_load */, false /* patch */, NULL);
   }
   __ move(value.result(), array_addr, null_check_info);
   if (obj_store) {
@@ -687,7 +688,8 @@
   __ add(obj.result(), offset.result(), addr);
 
   if (type == objectType) {  // Write-barrier needed for Object fields.
-    pre_barrier(addr, false, NULL);
+    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
+                true /* do_load */, false /* patch */, NULL);
   }
 
   if (type == objectType)
@@ -1187,7 +1189,8 @@
       }
 
       if (is_obj) {
-        pre_barrier(LIR_OprFact::address(addr), false, NULL);
+        pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
+                    true /* do_load */, false /* patch */, NULL);
         // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
       }
       __ move(data, addr);
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -551,6 +551,26 @@
   return NULL;
 }
 
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#ifndef SERIALGC
+  if (UseG1GC) {
+    // We need to have a routine that generates code to:
+    //   * load the value in the referent field
+    //   * pass that value to the pre-barrier.
+    //
+    // In the case of G1 this will record the value of the
+    // referent in an SATB buffer if marking is active.
+    // This will cause concurrent marking to mark the referent
+    // field as live.
+    Unimplemented();
+  }
+#endif // SERIALGC
+
+  // If G1 is not enabled then attempt to go through the accessor entry point
+  // Reference.get is an accessor
+  return generate_accessor_entry();
+}
+
 //
 // Interpreter stub for calling a native method. (C++ interpreter)
 // This sets up a somewhat different looking stack for calling the native method
@@ -2156,6 +2176,7 @@
                                            int tempcount, // Number of slots on java expression stack in use
                                            int popframe_extra_args,
                                            int moncount,  // Number of active monitors
+                                           int caller_actual_parameters,
                                            int callee_param_size,
                                            int callee_locals_size,
                                            frame* caller,
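The comment in generate_Reference_get_entry() above describes what the (here still Unimplemented) G1 path must do. Conceptually the generated entry behaves like the sketch below; g1_pre_barrier and reference_get_model are stand-ins for illustration, not real HotSpot functions.

#include <cstdio>

typedef const void* oop;

static void g1_pre_barrier(oop pre_val) {     // stand-in for the SATB enqueue
  if (pre_val != NULL) printf("SATB enqueue %p\n", pre_val);
}

// Conceptual shape of the Reference.get fast entry under G1: load the
// referent field, report it to the pre-barrier so concurrent marking
// treats it as live, then return it.
static oop reference_get_model(oop* referent_field) {
  oop value = *referent_field;
  g1_pre_barrier(value);
  return value;
}

int main() {
  oop referent = "referent object";
  reference_get_model(&referent);
  return 0;
}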
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -513,6 +513,8 @@
   // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
   // explicitly recognized.
 
+  if (is_ricochet_frame())    return sender_for_ricochet_frame(map);
+
   bool frame_is_interpreted = is_interpreted_frame();
   if (frame_is_interpreted) {
     map->make_integer_regs_unsaved();
@@ -806,3 +808,34 @@
   int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize) - 1;
   return &interpreter_frame_tos_address()[index];
 }
+
+
+#ifdef ASSERT
+
+#define DESCRIBE_FP_OFFSET(name) \
+  values.describe(frame_no, fp() + frame::name##_offset, #name)
+
+void frame::describe_pd(FrameValues& values, int frame_no) {
+  for (int w = 0; w < frame::register_save_words; w++) {
+    values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
+  }
+
+  if (is_interpreted_frame()) {
+    DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
+    DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
+    DESCRIBE_FP_OFFSET(interpreter_frame_padding);
+    DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
+  }
+
+  if (!is_compiled_frame()) {
+    if (frame::callee_aggregate_return_pointer_words != 0) {
+      values.describe(frame_no, sp() + frame::callee_aggregate_return_pointer_sp_offset, "callee_aggregate_return_pointer_word");
+    }
+    for (int w = 0; w < frame::callee_register_argument_save_area_words; w++) {
+      values.describe(frame_no, sp() + frame::callee_register_argument_save_area_sp_offset + w,
+                      err_msg("callee_register_argument_save_area_words %d", w));
+    }
+  }
+}
+
+#endif
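DESCRIBE_FP_OFFSET above relies on token pasting and stringizing so a single macro argument yields both the frame::<name>_offset constant and its printable label. A minimal demo of the same preprocessor pattern; the offset value and frame_model type are made up for illustration.

#include <cstdio>

struct frame_model {
  enum { interpreter_frame_oop_temp_offset = -7 };   // made-up offset, in words
};

#define DESCRIBE_FP_OFFSET(name) \
  printf("fp%+d : %s\n", (int) frame_model::name##_offset, #name)

int main() {
  // Expands to: printf("fp%+d : %s\n", frame_model::interpreter_frame_oop_temp_offset,
  //                    "interpreter_frame_oop_temp");
  DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
  return 0;
}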
--- a/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
   address generate_empty_entry(void);
   address generate_accessor_entry(void);
+  address generate_Reference_get_entry(void);
   void lock_method(void);
   void save_native_result(void);
   void restore_native_result(void);
--- a/src/cpu/sparc/vm/interpreter_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/interpreter_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -407,6 +407,8 @@
     case Interpreter::java_lang_math_abs     :                                                                             break;
     case Interpreter::java_lang_math_log     :                                                                             break;
     case Interpreter::java_lang_math_log10   :                                                                             break;
+    case Interpreter::java_lang_ref_reference_get
+                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
     default                                  : ShouldNotReachHere();                                                       break;
   }
 
@@ -421,25 +423,6 @@
   return true;
 }
 
-// This method tells the deoptimizer how big an interpreted frame must be:
-int AbstractInterpreter::size_activation(methodOop method,
-                                         int tempcount,
-                                         int popframe_extra_args,
-                                         int moncount,
-                                         int callee_param_count,
-                                         int callee_locals,
-                                         bool is_top_frame) {
-  return layout_activation(method,
-                           tempcount,
-                           popframe_extra_args,
-                           moncount,
-                           callee_param_count,
-                           callee_locals,
-                           (frame*)NULL,
-                           (frame*)NULL,
-                           is_top_frame);
-}
-
 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
 
   // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -69,6 +69,460 @@
   return me;
 }
 
+// stack walking support
+
+frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
+  //RicochetFrame* f = RicochetFrame::from_frame(fr);
+  // Cf. is_interpreted_frame path of frame::sender
+  intptr_t* younger_sp = fr.sp();
+  intptr_t* sp         = fr.sender_sp();
+  map->make_integer_regs_unsaved();
+  map->shift_window(sp, younger_sp);
+  bool this_frame_adjusted_stack = true;  // I5_savedSP is live in this RF
+  return frame(sp, younger_sp, this_frame_adjusted_stack);
+}
+
+void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
+  ResourceMark rm;
+  RicochetFrame* f = RicochetFrame::from_frame(fr);
+
+  // pick up the argument type descriptor:
+  Thread* thread = Thread::current();
+  Handle cookie(thread, f->compute_saved_args_layout(true, true));
+
+  // process fixed part
+  blk->do_oop((oop*)f->saved_target_addr());
+  blk->do_oop((oop*)f->saved_args_layout_addr());
+
+  // process variable arguments:
+  if (cookie.is_null())  return;  // no arguments to describe
+
+  // The cookie is actually the invokeExact method for my target;
+  // its argument signature is what I'm interested in.
+  assert(cookie->is_method(), "");
+  methodHandle invoker(thread, methodOop(cookie()));
+  assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
+  assert(!invoker->is_static(), "must have MH argument");
+  int slot_count = invoker->size_of_parameters();
+  assert(slot_count >= 1, "must include 'this'");
+  intptr_t* base = f->saved_args_base();
+  intptr_t* retval = NULL;
+  if (f->has_return_value_slot())
+    retval = f->return_value_slot_addr();
+  int slot_num = slot_count - 1;
+  intptr_t* loc = &base[slot_num];
+  //blk->do_oop((oop*) loc);   // original target, which is irrelevant
+  int arg_num = 0;
+  for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
+    if (ss.at_return_type())  continue;
+    BasicType ptype = ss.type();
+    if (ptype == T_ARRAY)  ptype = T_OBJECT; // fold all refs to T_OBJECT
+    assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
+    slot_num -= type2size[ptype];
+    loc = &base[slot_num];
+    bool is_oop = (ptype == T_OBJECT && loc != retval);
+    if (is_oop)  blk->do_oop((oop*)loc);
+    arg_num += 1;
+  }
+  assert(slot_num == 0, "must have processed all the arguments");
+}
+
+// Ricochet Frames
+const Register MethodHandles::RicochetFrame::L1_continuation      = L1;
+const Register MethodHandles::RicochetFrame::L2_saved_target      = L2;
+const Register MethodHandles::RicochetFrame::L3_saved_args_layout = L3;
+const Register MethodHandles::RicochetFrame::L4_saved_args_base   = L4; // cf. Gargs = G4
+const Register MethodHandles::RicochetFrame::L5_conversion        = L5;
+#ifdef ASSERT
+const Register MethodHandles::RicochetFrame::L0_magic_number_1    = L0;
+#endif //ASSERT
+
+oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
+  if (read_cache) {
+    oop cookie = saved_args_layout();
+    if (cookie != NULL)  return cookie;
+  }
+  oop target = saved_target();
+  oop mtype  = java_lang_invoke_MethodHandle::type(target);
+  oop mtform = java_lang_invoke_MethodType::form(mtype);
+  oop cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
+  if (write_cache)  {
+    (*saved_args_layout_addr()) = cookie;
+  }
+  return cookie;
+}
+
+void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
+                                                          // output params:
+                                                          int* bounce_offset,
+                                                          int* exception_offset,
+                                                          int* frame_size_in_words) {
+  (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
+
+  address start = __ pc();
+
+#ifdef ASSERT
+  __ illtrap(0); __ illtrap(0); __ illtrap(0);
+  // here's a hint of something special:
+  __ set(MAGIC_NUMBER_1, G0);
+  __ set(MAGIC_NUMBER_2, G0);
+#endif //ASSERT
+  __ illtrap(0);  // not reached
+
+  // Return values are in registers.
+  // L1_continuation contains a cleanup continuation we must return
+  // to.
+
+  (*bounce_offset) = __ pc() - start;
+  BLOCK_COMMENT("ricochet_blob.bounce");
+
+  if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
+  trace_method_handle(_masm, "ricochet_blob.bounce");
+
+  __ JMP(L1_continuation, 0);
+  __ delayed()->nop();
+  __ illtrap(0);
+
+  DEBUG_ONLY(__ set(MAGIC_NUMBER_2, G0));
+
+  (*exception_offset) = __ pc() - start;
+  BLOCK_COMMENT("ricochet_blob.exception");
+
+  // compare this to Interpreter::rethrow_exception_entry, which is parallel code
+  // for example, see TemplateInterpreterGenerator::generate_throw_exception
+  // Live registers in:
+  //   Oexception  (O0): exception
+  //   Oissuing_pc (O1): return address/pc that threw exception (ignored, always equal to bounce addr)
+  __ verify_oop(Oexception);
+
+  // Take down the frame.
+
+  // Cf. InterpreterMacroAssembler::remove_activation.
+  leave_ricochet_frame(_masm, /*recv_reg=*/ noreg, I5_savedSP, I7);
+
+  // We are done with this activation frame; find out where to go next.
+  // The continuation point will be an exception handler, which expects
+  // the following registers set up:
+  //
+  // Oexception: exception
+  // Oissuing_pc: the local call that threw exception
+  // Other On: garbage
+  // In/Ln:  the contents of the caller's register window
+  //
+  // We do the required restore at the last possible moment, because we
+  // need to preserve some state across a runtime call.
+  // (Remember that the caller activation is unknown--it might not be
+  // interpreted, so things like Lscratch are useless in the caller.)
+  __ mov(Oexception,  Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
+  __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
+  __ call_VM_leaf(L7_thread_cache,
+                  CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
+                  G2_thread, Oissuing_pc->after_save());
+
+  // The caller's SP was adjusted upon method entry to accommodate
+  // the callee's non-argument locals. Undo that adjustment.
+  __ JMP(O0, 0);                         // return exception handler in caller
+  __ delayed()->restore(I5_savedSP, G0, SP);
+
+  // (same old exception object is already in Oexception; see above)
+  // Note that an "issuing PC" is actually the next PC after the call
+}
+
+void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
+                                                        Register recv_reg,
+                                                        Register argv_reg,
+                                                        address return_handler) {
+  // does not include the __ save()
+  assert(argv_reg == Gargs, "");
+  Address G3_mh_vmtarget(   recv_reg, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
+  Address G3_amh_conversion(recv_reg, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
+
+  // Create the RicochetFrame.
+  // Unlike on x86 we can store all required information in local
+  // registers.
+  BLOCK_COMMENT("push RicochetFrame {");
+  __ set(ExternalAddress(return_handler),          L1_continuation);
+  __ load_heap_oop(G3_mh_vmtarget,                 L2_saved_target);
+  __ mov(G0,                                       L3_saved_args_layout);
+  __ mov(Gargs,                                    L4_saved_args_base);
+  __ lduw(G3_amh_conversion,                       L5_conversion);  // 32-bit field
+  // I5, I6, I7 are already set up
+  DEBUG_ONLY(__ set((int32_t) MAGIC_NUMBER_1,      L0_magic_number_1));
+  BLOCK_COMMENT("} RicochetFrame");
+}
+
+void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
+                                                        Register recv_reg,
+                                                        Register new_sp_reg,
+                                                        Register sender_pc_reg) {
+  assert(new_sp_reg == I5_savedSP, "exact_sender_sp already in place");
+  assert(sender_pc_reg == I7, "in a fixed place");
+  // does not include the __ ret() & __ restore()
+  assert_different_registers(recv_reg, new_sp_reg, sender_pc_reg);
+  // Take down the frame.
+  // Cf. InterpreterMacroAssembler::remove_activation.
+  BLOCK_COMMENT("end_ricochet_frame {");
+  if (recv_reg->is_valid())
+    __ mov(L2_saved_target, recv_reg);
+  BLOCK_COMMENT("} end_ricochet_frame");
+}
+
+// Emit code to verify that FP is pointing at a valid ricochet frame.
+#ifdef ASSERT
+enum {
+  ARG_LIMIT = 255, SLOP = 45,
+  // use this parameter for checking for garbage stack movements:
+  UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
+  // the slop defends against false alarms due to fencepost errors
+};
+
+void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
+  // The stack should look like this:
+  //    ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
+  // Check various invariants.
+
+  Register O7_temp = O7, O5_temp = O5;
+
+  Label L_ok_1, L_ok_2, L_ok_3, L_ok_4;
+  BLOCK_COMMENT("verify_clean {");
+  // Magic numbers must check out:
+  __ set((int32_t) MAGIC_NUMBER_1, O7_temp);
+  __ cmp(O7_temp, L0_magic_number_1);
+  __ br(Assembler::equal, false, Assembler::pt, L_ok_1);
+  __ delayed()->nop();
+  __ stop("damaged ricochet frame: MAGIC_NUMBER_1 not found");
+
+  __ BIND(L_ok_1);
+
+  // Arguments pointer must look reasonable:
+#ifdef _LP64
+  Register FP_temp = O5_temp;
+  __ add(FP, STACK_BIAS, FP_temp);
+#else
+  Register FP_temp = FP;
+#endif
+  __ cmp(L4_saved_args_base, FP_temp);
+  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok_2);
+  __ delayed()->nop();
+  __ stop("damaged ricochet frame: L4 < FP");
+
+  __ BIND(L_ok_2);
+  // Disabled until we decide on its fate.
+  // __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp);
+  // __ cmp(O7_temp, FP_temp);
+  // __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3);
+  // __ delayed()->nop();
+  // __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP");
+
+  __ BIND(L_ok_3);
+  extract_conversion_dest_type(_masm, L5_conversion, O7_temp);
+  __ cmp(O7_temp, T_VOID);
+  __ br(Assembler::equal, false, Assembler::pt, L_ok_4);
+  __ delayed()->nop();
+  extract_conversion_vminfo(_masm, L5_conversion, O5_temp);
+  __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp);
+  assert(__ is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
+  __ cmp(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER);
+  __ brx(Assembler::equal, false, Assembler::pt, L_ok_4);
+  __ delayed()->nop();
+  __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found");
+  __ BIND(L_ok_4);
+  BLOCK_COMMENT("} verify_clean");
+}
+#endif //ASSERT
+
+void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
+  if (VerifyMethodHandles)
+    verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), temp_reg, temp2_reg,
+                 "AMH argument is a Class");
+  __ load_heap_oop(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
+}
+
+void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg) {
+  assert(CONV_VMINFO_SHIFT == 0, "preshifted");
+  assert(CONV_VMINFO_MASK == right_n_bits(BitsPerByte), "else change type of following load");
+  __ ldub(conversion_field_addr.plus_disp(BytesPerInt - 1), reg);
+}
+
+void MethodHandles::extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
+  assert(CONV_VMINFO_SHIFT == 0, "preshifted");
+  __ and3(conversion_field_reg, CONV_VMINFO_MASK, reg);
+}
+
+void MethodHandles::extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
+  __ srl(conversion_field_reg, CONV_DEST_TYPE_SHIFT, reg);
+  __ and3(reg, 0x0F, reg);
+}
+
+void MethodHandles::load_stack_move(MacroAssembler* _masm,
+                                    Address G3_amh_conversion,
+                                    Register stack_move_reg) {
+  BLOCK_COMMENT("load_stack_move {");
+  __ ldsw(G3_amh_conversion, stack_move_reg);
+  __ sra(stack_move_reg, CONV_STACK_MOVE_SHIFT, stack_move_reg);
+  if (VerifyMethodHandles) {
+    Label L_ok, L_bad;
+    int32_t stack_move_limit = 0x0800;  // extra-large
+    __ cmp(stack_move_reg, stack_move_limit);
+    __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
+    __ delayed()->nop();
+    __ cmp(stack_move_reg, -stack_move_limit);
+    __ br(Assembler::greater, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ BIND(L_bad);
+    __ stop("load_stack_move of garbage value");
+    __ BIND(L_ok);
+  }
+  BLOCK_COMMENT("} load_stack_move");
+}
+
+#ifdef ASSERT
+void MethodHandles::RicochetFrame::verify() const {
+  assert(magic_number_1() == MAGIC_NUMBER_1, "");
+  if (!Universe::heap()->is_gc_active()) {
+    if (saved_args_layout() != NULL) {
+      assert(saved_args_layout()->is_method(), "must be valid oop");
+    }
+    if (saved_target() != NULL) {
+      assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value");
+    }
+  }
+  int conv_op = adapter_conversion_op(conversion());
+  assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS ||
+         conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS ||
+         conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF,
+         "must be a sane conversion");
+  if (has_return_value_slot()) {
+    assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, "");
+  }
+}
+
+void MethodHandles::verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
+  // Verify that argslot lies within (Gargs, FP].
+  Label L_ok, L_bad;
+  BLOCK_COMMENT("verify_argslot {");
+  __ add(FP, STACK_BIAS, temp_reg);  // STACK_BIAS is zero on !_LP64
+  __ cmp(argslot_reg, temp_reg);
+  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
+  __ delayed()->nop();
+  __ cmp(Gargs, argslot_reg);
+  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  __ BIND(L_bad);
+  __ stop(error_message);
+  __ BIND(L_ok);
+  BLOCK_COMMENT("} verify_argslot");
+}
+
+void MethodHandles::verify_argslots(MacroAssembler* _masm,
+                                    RegisterOrConstant arg_slots,
+                                    Register arg_slot_base_reg,
+                                    Register temp_reg,
+                                    Register temp2_reg,
+                                    bool negate_argslots,
+                                    const char* error_message) {
+  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
+  Label L_ok, L_bad;
+  BLOCK_COMMENT("verify_argslots {");
+  if (negate_argslots) {
+    if (arg_slots.is_constant()) {
+      arg_slots = -1 * arg_slots.as_constant();
+    } else {
+      __ neg(arg_slots.as_register(), temp_reg);
+      arg_slots = temp_reg;
+    }
+  }
+  __ add(arg_slot_base_reg, __ argument_offset(arg_slots, temp_reg), temp_reg);
+  __ add(FP, STACK_BIAS, temp2_reg);  // STACK_BIAS is zero on !_LP64
+  __ cmp(temp_reg, temp2_reg);
+  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
+  __ delayed()->nop();
+  // Gargs points to the first word so adjust by BytesPerWord
+  __ add(arg_slot_base_reg, BytesPerWord, temp_reg);
+  __ cmp(Gargs, temp_reg);
+  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  __ BIND(L_bad);
+  __ stop(error_message);
+  __ BIND(L_ok);
+  BLOCK_COMMENT("} verify_argslots");
+}
+
+// Make sure that arg_slots has the same sign as the given direction.
+// If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
+void MethodHandles::verify_stack_move(MacroAssembler* _masm,
+                                      RegisterOrConstant arg_slots, int direction) {
+  enum { UNREASONABLE_STACK_MOVE = 256 * 4 };  // limit of 255 arguments
+  bool allow_zero = arg_slots.is_constant();
+  if (direction == 0) { direction = +1; allow_zero = true; }
+  assert(stack_move_unit() == -1, "else add extra checks here");
+  if (arg_slots.is_register()) {
+    Label L_ok, L_bad;
+    BLOCK_COMMENT("verify_stack_move {");
+    // __ btst(-stack_move_unit() - 1, arg_slots.as_register());  // no need
+    // __ br(Assembler::notZero, false, Assembler::pn, L_bad);
+    // __ delayed()->nop();
+    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
+    if (direction > 0) {
+      __ br(allow_zero ? Assembler::less : Assembler::lessEqual, false, Assembler::pn, L_bad);
+      __ delayed()->nop();
+      __ cmp(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
+      __ br(Assembler::less, false, Assembler::pn, L_ok);
+      __ delayed()->nop();
+    } else {
+      __ br(allow_zero ? Assembler::greater : Assembler::greaterEqual, false, Assembler::pn, L_bad);
+      __ delayed()->nop();
+      __ cmp(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
+      __ br(Assembler::greater, false, Assembler::pn, L_ok);
+      __ delayed()->nop();
+    }
+    __ BIND(L_bad);
+    if (direction > 0)
+      __ stop("assert arg_slots > 0");
+    else
+      __ stop("assert arg_slots < 0");
+    __ BIND(L_ok);
+    BLOCK_COMMENT("} verify_stack_move");
+  } else {
+    intptr_t size = arg_slots.as_constant();
+    if (direction < 0)  size = -size;
+    assert(size >= 0, "correct direction of constant move");
+    assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
+  }
+}
+
+void MethodHandles::verify_klass(MacroAssembler* _masm,
+                                 Register obj_reg, KlassHandle klass,
+                                 Register temp_reg, Register temp2_reg,
+                                 const char* error_message) {
+  oop* klass_addr = klass.raw_value();
+  assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
+         klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
+         "must be one of the SystemDictionaryHandles");
+  Label L_ok, L_bad;
+  BLOCK_COMMENT("verify_klass {");
+  __ verify_oop(obj_reg);
+  __ br_null(obj_reg, false, Assembler::pn, L_bad);
+  __ delayed()->nop();
+  __ load_klass(obj_reg, temp_reg);
+  __ set(ExternalAddress(klass_addr), temp2_reg);
+  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
+  __ cmp(temp_reg, temp2_reg);
+  __ brx(Assembler::equal, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  intptr_t super_check_offset = klass->super_check_offset();
+  __ ld_ptr(Address(temp_reg, super_check_offset), temp_reg);
+  __ set(ExternalAddress(klass_addr), temp2_reg);
+  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
+  __ cmp(temp_reg, temp2_reg);
+  __ brx(Assembler::equal, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  __ BIND(L_bad);
+  __ stop(error_message);
+  __ BIND(L_ok);
+  BLOCK_COMMENT("} verify_klass");
+}
+#endif // ASSERT
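extract_conversion_vminfo, extract_conversion_dest_type, and load_stack_move above all decode bit fields out of the 32-bit AdapterMethodHandle conversion word: shift right, then mask, with an arithmetic shift for the signed stack-move field. A plain C++ model of that decoding follows; the two non-zero shift values are illustrative placeholders, while CONV_VMINFO_SHIFT == 0 and the one-byte vminfo mask come from the asserts in the code above.

#include <cstdint>
#include <cstdio>

static const int      CONV_VMINFO_SHIFT     = 0;      // asserted "preshifted" above
static const uint32_t CONV_VMINFO_MASK      = 0xFF;   // right_n_bits(BitsPerByte)
static const int      CONV_DEST_TYPE_SHIFT  = 8;      // illustrative placeholder
static const int      CONV_STACK_MOVE_SHIFT = 16;     // illustrative placeholder

static int conv_vminfo(uint32_t c)    { return (c >> CONV_VMINFO_SHIFT) & CONV_VMINFO_MASK; }
static int conv_dest_type(uint32_t c) { return (c >> CONV_DEST_TYPE_SHIFT) & 0x0F; }
static int conv_stack_move(int32_t c) { return c >> CONV_STACK_MOVE_SHIFT; }  // arithmetic, like sra

int main() {
  uint32_t conversion = (3u << CONV_STACK_MOVE_SHIFT) | (0xA << CONV_DEST_TYPE_SHIFT) | 0x2A;
  printf("vminfo=%d dest_type=%d stack_move=%d\n",
         conv_vminfo(conversion), conv_dest_type(conversion),
         conv_stack_move((int32_t) conversion));   // vminfo=42 dest_type=10 stack_move=3
  return 0;
}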
 
 // Code generation
 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
@@ -94,8 +548,9 @@
   __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
   __ delayed()->nop();
   __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
-  // mov(G3_method_handle, G3_method_handle);  // already in this register
-  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
+  __ mov(G3_method_handle, G3_method_handle);  // already in this register
+  // O0 will be filled in with JavaThread in stub
+  __ jump_to(AddressLiteral(StubRoutines::throw_WrongMethodTypeException_entry()), O3_scratch);
   __ delayed()->nop();
 
   // here's where control starts out:
@@ -103,6 +558,9 @@
   address entry_point = __ pc();
 
   // fetch the MethodType from the method handle
+  // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
+  // This would simplify several touchy bits of code.
+  // See 6984712: JSR 292 method handle calls need a clean argument base pointer
   {
     Register tem = G5_method;
     for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
@@ -114,19 +572,25 @@
   // given the MethodType, find out where the MH argument is buried
   __ load_heap_oop(Address(O0_mtype,   __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,        O1_scratch)), O4_argslot);
   __ ldsw(         Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
-  __ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase);
+  __ add(__ argument_address(O4_argslot, O4_argslot, 1), O4_argbase);
   // Note: argument_address uses its input as a scratch register!
-  __ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle);
+  Address mh_receiver_slot_addr(O4_argbase, -Interpreter::stackElementSize);
+  __ ld_ptr(mh_receiver_slot_addr, G3_method_handle);
 
   trace_method_handle(_masm, "invokeExact");
 
   __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
+
+  // Nobody uses the MH receiver slot after this.  Make sure.
+  DEBUG_ONLY(__ set((int32_t) 0x999999, O1_scratch); __ st_ptr(O1_scratch, mh_receiver_slot_addr));
+
   __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 
   // for invokeGeneric (only), apply argument and result conversions on the fly
   __ bind(invoke_generic_slow_path);
 #ifdef ASSERT
-  { Label L;
+  if (VerifyMethodHandles) {
+    Label L;
     __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
     __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
     __ brx(Assembler::equal, false, Assembler::pt, L);
@@ -137,23 +601,13 @@
 #endif //ASSERT
 
   // make room on the stack for another pointer:
-  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, O4_argbase, O1_scratch, O2_scratch, O3_scratch);
+  insert_arg_slots(_masm, 2 * stack_move_unit(), O4_argbase, O1_scratch, O2_scratch, O3_scratch);
   // load up an adapter from the calling type (Java weaves this)
   Register O2_form    = O2_scratch;
   Register O3_adapter = O3_scratch;
   __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,               O1_scratch)), O2_form);
-  // load_heap_oop(Address(O2_form,  __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
-  // deal with old JDK versions:
-  __ add(          Address(O2_form,  __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
-  __ cmp(O3_adapter, O2_form);
-  Label sorry_no_invoke_generic;
-  __ brx(Assembler::lessUnsigned, false, Assembler::pn, sorry_no_invoke_generic);
-  __ delayed()->nop();
-
-  __ load_heap_oop(Address(O3_adapter, 0), O3_adapter);
-  __ tst(O3_adapter);
-  __ brx(Assembler::zero, false, Assembler::pn, sorry_no_invoke_generic);
-  __ delayed()->nop();
+  __ load_heap_oop(Address(O2_form,  __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
+  __ verify_oop(O3_adapter);
   __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize));
   // As a trusted first argument, pass the type being called, so the adapter knows
   // the actual types of the arguments and return values.
@@ -164,83 +618,91 @@
   trace_method_handle(_masm, "invokeGeneric");
   __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 
-  __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
-  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
-  // mov(G3_method_handle, G3_method_handle);  // already in this register
-  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
-  __ delayed()->nop();
-
   return entry_point;
 }
 
+// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
+static RegisterOrConstant constant(int value) {
+  return RegisterOrConstant(value);
+}
 
+static void load_vmargslot(MacroAssembler* _masm, Address vmargslot_addr, Register result) {
+  __ ldsw(vmargslot_addr, result);
+}
+
+static RegisterOrConstant adjust_SP_and_Gargs_down_by_slots(MacroAssembler* _masm,
+                                                            RegisterOrConstant arg_slots,
+                                                            Register temp_reg, Register temp2_reg) {
+  // Keep the stack pointer 2*wordSize aligned.
+  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
+  if (arg_slots.is_constant()) {
+    const int        offset = arg_slots.as_constant() << LogBytesPerWord;
+    const int masked_offset = round_to(offset, 2 * BytesPerWord);
+    const int masked_offset2 = (offset + 1*BytesPerWord) & ~TwoWordAlignmentMask;
+    assert(masked_offset == masked_offset2, "must agree");
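+    // Worked example (assuming LP64, so LogBytesPerWord == 3 and
+    // TwoWordAlignmentMask == 15): arg_slots == 3 gives offset == 24,
+    // round_to(24, 16) == 32 and (24 + 8) & ~15 == 32, so Gargs moves by
+    // exactly 24 bytes while SP stays 2*wordSize (16-byte) aligned.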
+    __ sub(Gargs,        offset, Gargs);
+    __ sub(SP,    masked_offset, SP   );
+    return offset;
+  } else {
 #ifdef ASSERT
-static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
-  // Verify that argslot lies within (Gargs, FP].
-  Label L_ok, L_bad;
-  BLOCK_COMMENT("{ verify_argslot");
-#ifdef _LP64
-  __ add(FP, STACK_BIAS, temp_reg);
-  __ cmp(argslot_reg, temp_reg);
-#else
-  __ cmp(argslot_reg, FP);
+    {
+      Label L_ok;
+      __ cmp(arg_slots.as_register(), 0);
+      __ br(Assembler::greaterEqual, false, Assembler::pt, L_ok);
+      __ delayed()->nop();
+      __ stop("negative arg_slots");
+      __ bind(L_ok);
+    }
 #endif
-  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
-  __ delayed()->nop();
-  __ cmp(Gargs, argslot_reg);
-  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
-  __ delayed()->nop();
-  __ bind(L_bad);
-  __ stop(error_message);
-  __ bind(L_ok);
-  BLOCK_COMMENT("} verify_argslot");
+    __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
+    __ add( temp_reg,  1*BytesPerWord,       temp2_reg);
+    __ andn(temp2_reg, TwoWordAlignmentMask, temp2_reg);
+    __ sub(Gargs, temp_reg,  Gargs);
+    __ sub(SP,    temp2_reg, SP   );
+    return temp_reg;
+  }
 }
-#endif
 
+static RegisterOrConstant adjust_SP_and_Gargs_up_by_slots(MacroAssembler* _masm,
+                                                          RegisterOrConstant arg_slots,
+                                                          Register temp_reg, Register temp2_reg) {
+  // Keep the stack pointer 2*wordSize aligned.
+  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
+  if (arg_slots.is_constant()) {
+    const int        offset = arg_slots.as_constant() << LogBytesPerWord;
+    const int masked_offset = offset & ~TwoWordAlignmentMask;
+    __ add(Gargs,        offset, Gargs);
+    __ add(SP,    masked_offset, SP   );
+    return offset;
+  } else {
+    __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
+    __ andn(temp_reg, TwoWordAlignmentMask, temp2_reg);
+    __ add(Gargs, temp_reg,  Gargs);
+    __ add(SP,    temp2_reg, SP   );
+    return temp_reg;
+  }
+}
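+// Note the rounding asymmetry between the two helpers above: the "down"
+// helper rounds the SP adjustment up (round_to), while the "up" helper masks
+// it down (& ~TwoWordAlignmentMask), so SP is always adjusted conservatively
+// and never ends up above argument slots that are still live.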
 
 // Helper to insert argument slots into the stack.
-// arg_slots must be a multiple of stack_move_unit() and <= 0
+// arg_slots must be a multiple of stack_move_unit() and < 0
+// argslot_reg is decremented to point to the new (shifted) location of the argslot
+// But, temp_reg ends up holding the original value of argslot_reg.
 void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                      RegisterOrConstant arg_slots,
-                                     int arg_mask,
                                      Register argslot_reg,
                                      Register temp_reg, Register temp2_reg, Register temp3_reg) {
-  assert(temp3_reg != noreg, "temp3 required");
+  // allow constant zero
+  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
+    return;
+
   assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                              (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
 
-#ifdef ASSERT
-  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
-  if (arg_slots.is_register()) {
-    Label L_ok, L_bad;
-    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ br(Assembler::greater, false, Assembler::pn, L_bad);
-    __ delayed()->nop();
-    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
-    __ br(Assembler::zero, false, Assembler::pt, L_ok);
-    __ delayed()->nop();
-    __ bind(L_bad);
-    __ stop("assert arg_slots <= 0 and clear low bits");
-    __ bind(L_ok);
-  } else {
-    assert(arg_slots.as_constant() <= 0, "");
-    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
-  }
-#endif // ASSERT
-
-#ifdef _LP64
-  if (arg_slots.is_register()) {
-    // Was arg_slots register loaded as signed int?
-    Label L_ok;
-    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
-    __ sra(temp_reg, BitsPerInt, temp_reg);
-    __ cmp(arg_slots.as_register(), temp_reg);
-    __ br(Assembler::equal, false, Assembler::pt, L_ok);
-    __ delayed()->nop();
-    __ stop("arg_slots register not loaded as signed int");
-    __ bind(L_ok);
-  }
-#endif
+  BLOCK_COMMENT("insert_arg_slots {");
+  if (VerifyMethodHandles)
+    verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
+  if (VerifyMethodHandles)
+    verify_stack_move(_masm, arg_slots, -1);
 
   // Make space on the stack for the inserted argument(s).
   // Then pull down everything shallower than argslot_reg.
@@ -250,26 +712,20 @@
   //   for (temp = sp + size; temp < argslot; temp++)
   //     temp[-size] = temp[0]
   //   argslot -= size;
-  BLOCK_COMMENT("insert_arg_slots {");
-  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
 
-  // Keep the stack pointer 2*wordSize aligned.
-  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
-  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
-  __ add(SP, masked_offset, SP);
-
-  __ mov(Gargs, temp_reg);  // source pointer for copy
-  __ add(Gargs, offset, Gargs);
+  // offset is temp3_reg in case of arg_slots being a register.
+  RegisterOrConstant offset = adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);
+  __ sub(Gargs, offset, temp_reg);  // source pointer for copy
 
   {
     Label loop;
     __ BIND(loop);
     // pull one word down each time through the loop
-    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
-    __ st_ptr(temp2_reg, Address(temp_reg, offset));
+    __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
+    __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
     __ add(temp_reg, wordSize, temp_reg);
     __ cmp(temp_reg, argslot_reg);
-    __ brx(Assembler::less, false, Assembler::pt, loop);
+    __ brx(Assembler::lessUnsigned, false, Assembler::pt, loop);
     __ delayed()->nop();  // FILLME
   }
 
@@ -280,39 +736,24 @@
 
 
 // Helper to remove argument slots from the stack.
-// arg_slots must be a multiple of stack_move_unit() and >= 0
+// arg_slots must be a multiple of stack_move_unit() and > 0
 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                      RegisterOrConstant arg_slots,
                                      Register argslot_reg,
                                      Register temp_reg, Register temp2_reg, Register temp3_reg) {
-  assert(temp3_reg != noreg, "temp3 required");
+  // allow constant zero
+  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
+    return;
   assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                              (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
 
-  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
+  BLOCK_COMMENT("remove_arg_slots {");
+  if (VerifyMethodHandles)
+    verify_argslots(_masm, arg_slots, argslot_reg, temp_reg, temp2_reg, false,
+                    "deleted argument(s) must fall within current frame");
+  if (VerifyMethodHandles)
+    verify_stack_move(_masm, arg_slots, +1);
 
-#ifdef ASSERT
-  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
-  __ add(argslot_reg, offset, temp2_reg);
-  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
-  if (arg_slots.is_register()) {
-    Label L_ok, L_bad;
-    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ br(Assembler::less, false, Assembler::pn, L_bad);
-    __ delayed()->nop();
-    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
-    __ br(Assembler::zero, false, Assembler::pt, L_ok);
-    __ delayed()->nop();
-    __ bind(L_bad);
-    __ stop("assert arg_slots >= 0 and clear low bits");
-    __ bind(L_ok);
-  } else {
-    assert(arg_slots.as_constant() >= 0, "");
-    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
-  }
-#endif // ASSERT
-
-  BLOCK_COMMENT("remove_arg_slots {");
   // Pull up everything shallower than argslot.
   // Then remove the excess space on the stack.
   // The stacked return address gets pulled up with everything else.
@@ -321,38 +762,271 @@
   //     temp[size] = temp[0]
   //   argslot += size;
   //   sp += size;
+
+  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
   __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
+
   {
-    Label loop;
-    __ BIND(loop);
+    Label L_loop;
+    __ BIND(L_loop);
     // pull one word up each time through the loop
-    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
-    __ st_ptr(temp2_reg, Address(temp_reg, offset));
+    __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
+    __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
     __ sub(temp_reg, wordSize, temp_reg);
     __ cmp(temp_reg, Gargs);
-    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
+    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_loop);
     __ delayed()->nop();  // FILLME
   }
 
-  // Now move the argslot up, to point to the just-copied block.
-  __ add(Gargs, offset, Gargs);
   // And adjust the argslot address to point at the deletion point.
   __ add(argslot_reg, offset, argslot_reg);
 
-  // Keep the stack pointer 2*wordSize aligned.
-  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
-  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
-  __ add(SP, masked_offset, SP);
+  // We don't need the offset at this point anymore, just adjust SP and Gargs.
+  (void) adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);
+
   BLOCK_COMMENT("} remove_arg_slots");
 }
 
+// Helper to copy argument slots to the top of the stack.
+// The sequence starts with argslot_reg and is counted by slot_count
+// slot_count must be a multiple of stack_move_unit() and >= 0
+// This function blows the temps but does not change argslot_reg.
+void MethodHandles::push_arg_slots(MacroAssembler* _masm,
+                                   Register argslot_reg,
+                                   RegisterOrConstant slot_count,
+                                   Register temp_reg, Register temp2_reg) {
+  // allow constant zero
+  if (slot_count.is_constant() && slot_count.as_constant() == 0)
+    return;
+  assert_different_registers(argslot_reg, temp_reg, temp2_reg,
+                             (!slot_count.is_register() ? Gargs : slot_count.as_register()),
+                             SP);
+  assert(Interpreter::stackElementSize == wordSize, "else change this code");
+
+  BLOCK_COMMENT("push_arg_slots {");
+  if (VerifyMethodHandles)
+    verify_stack_move(_masm, slot_count, 0);
+
+  RegisterOrConstant offset = adjust_SP_and_Gargs_down_by_slots(_masm, slot_count, temp2_reg, temp_reg);
+
+  if (slot_count.is_constant()) {
+    for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
+      __ ld_ptr(          Address(argslot_reg, i * wordSize), temp_reg);
+      __ st_ptr(temp_reg, Address(Gargs,       i * wordSize));
+    }
+  } else {
+    Label L_plural, L_loop, L_break;
+    // Emit code to dynamically check for the common cases, zero and one slot.
+    __ cmp(slot_count.as_register(), (int32_t) 1);
+    __ br(Assembler::greater, false, Assembler::pn, L_plural);
+    __ delayed()->nop();
+    __ br(Assembler::less, false, Assembler::pn, L_break);
+    __ delayed()->nop();
+    __ ld_ptr(          Address(argslot_reg, 0), temp_reg);
+    __ st_ptr(temp_reg, Address(Gargs,       0));
+    __ ba(false, L_break);
+    __ delayed()->nop();  // FILLME
+    __ BIND(L_plural);
+
+    // Loop for 2 or more:
+    //   top = &argslot[slot_count]
+    //   while (top > argslot)  *(--Gargs) = *(--top)
+    Register top_reg = temp_reg;
+    __ add(argslot_reg, offset, top_reg);
+    __ add(Gargs,       offset, Gargs  );  // move back up again so we can go down
+    __ BIND(L_loop);
+    __ sub(top_reg, wordSize, top_reg);
+    __ sub(Gargs,   wordSize, Gargs  );
+    __ ld_ptr(           Address(top_reg, 0), temp2_reg);
+    __ st_ptr(temp2_reg, Address(Gargs,   0));
+    __ cmp(top_reg, argslot_reg);
+    __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
+    __ delayed()->nop();  // FILLME
+    __ BIND(L_break);
+  }
+  BLOCK_COMMENT("} push_arg_slots");
+}
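+// C-level sketch of the net effect of push_arg_slots (assuming slot_count >= 0
+// and Interpreter::stackElementSize == wordSize; SP also drops by the
+// 2*wordSize-rounded equivalent):
+//   Gargs -= slot_count * wordSize;
+//   for (int i = 0; i < slot_count; i++)
+//     ((intptr_t*) Gargs)[i] = ((intptr_t*) argslot_reg)[i];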
+
+// in-place movement; no change to Gargs
+// blows temp_reg, temp2_reg
+void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
+                                      Register bottom_reg,  // invariant
+                                      Address  top_addr,    // can use temp_reg
+                                      RegisterOrConstant positive_distance_in_slots,  // destroyed if register
+                                      Register temp_reg, Register temp2_reg) {
+  assert_different_registers(bottom_reg,
+                             temp_reg, temp2_reg,
+                             positive_distance_in_slots.register_or_noreg());
+  BLOCK_COMMENT("move_arg_slots_up {");
+  Label L_loop, L_break;
+  Register top_reg = temp_reg;
+  if (!top_addr.is_same_address(Address(top_reg, 0))) {
+    __ add(top_addr, top_reg);
+  }
+  // Detect empty (or broken) loop:
+#ifdef ASSERT
+  if (VerifyMethodHandles) {
+    // Verify that &bottom < &top (non-empty interval)
+    Label L_ok, L_bad;
+    if (positive_distance_in_slots.is_register()) {
+      __ cmp(positive_distance_in_slots.as_register(), (int32_t) 0);
+      __ br(Assembler::lessEqual, false, Assembler::pn, L_bad);
+      __ delayed()->nop();
+    }
+    __ cmp(bottom_reg, top_reg);
+    __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ BIND(L_bad);
+    __ stop("valid bounds (copy up)");
+    __ BIND(L_ok);
+  }
+#endif
+  __ cmp(bottom_reg, top_reg);
+  __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pn, L_break);
+  __ delayed()->nop();
+  // work top down to bottom, copying contiguous data upwards
+  // In pseudo-code:
+  //   while (--top >= bottom) *(top + distance) = *(top + 0);
+  RegisterOrConstant offset = __ argument_offset(positive_distance_in_slots, positive_distance_in_slots.register_or_noreg());
+  __ BIND(L_loop);
+  __ sub(top_reg, wordSize, top_reg);
+  __ ld_ptr(           Address(top_reg, 0     ), temp2_reg);
+  __ st_ptr(temp2_reg, Address(top_reg, offset)           );
+  __ cmp(top_reg, bottom_reg);
+  __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
+  __ delayed()->nop();  // FILLME
+  assert(Interpreter::stackElementSize == wordSize, "else change loop");
+  __ BIND(L_break);
+  BLOCK_COMMENT("} move_arg_slots_up");
+}
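+// Example of the effect (hypothetical word array s[] at increasing addresses):
+// with bottom_reg == &s[0], top == &s[3] and positive_distance_in_slots == 2,
+// the loop copies s[2]->s[4], s[1]->s[3], s[0]->s[2]; the two lowest slots
+// keep their stale contents for the caller to overwrite or release.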
+
+// in-place movement; no change to rsp
+// blows temp_reg, temp2_reg
+void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
+                                        Address  bottom_addr,  // can use temp_reg
+                                        Register top_reg,      // invariant
+                                        RegisterOrConstant negative_distance_in_slots,  // destroyed if register
+                                        Register temp_reg, Register temp2_reg) {
+  assert_different_registers(top_reg,
+                             negative_distance_in_slots.register_or_noreg(),
+                             temp_reg, temp2_reg);
+  BLOCK_COMMENT("move_arg_slots_down {");
+  Label L_loop, L_break;
+  Register bottom_reg = temp_reg;
+  if (!bottom_addr.is_same_address(Address(bottom_reg, 0))) {
+    __ add(bottom_addr, bottom_reg);
+  }
+  // Detect empty (or broken) loop:
+#ifdef ASSERT
+  assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, "");
+  if (VerifyMethodHandles) {
+    // Verify that &bottom < &top (non-empty interval)
+    Label L_ok, L_bad;
+    if (negative_distance_in_slots.is_register()) {
+      __ cmp(negative_distance_in_slots.as_register(), (int32_t) 0);
+      __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
+      __ delayed()->nop();
+    }
+    __ cmp(bottom_reg, top_reg);
+    __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ BIND(L_bad);
+    __ stop("valid bounds (copy down)");
+    __ BIND(L_ok);
+  }
+#endif
+  __ cmp(bottom_reg, top_reg);
+  __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pn, L_break);
+  __ delayed()->nop();
+  // work bottom up to top, copying contiguous data downwards
+  // In pseudo-code:
+  //   while (bottom < top) *(bottom - distance) = *(bottom + 0), bottom++;
+  RegisterOrConstant offset = __ argument_offset(negative_distance_in_slots, negative_distance_in_slots.register_or_noreg());
+  __ BIND(L_loop);
+  __ ld_ptr(           Address(bottom_reg, 0     ), temp2_reg);
+  __ st_ptr(temp2_reg, Address(bottom_reg, offset)           );
+  __ add(bottom_reg, wordSize, bottom_reg);
+  __ cmp(bottom_reg, top_reg);
+  __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_loop);
+  __ delayed()->nop();  // FILLME
+  assert(Interpreter::stackElementSize == wordSize, "else change loop");
+  __ BIND(L_break);
+  BLOCK_COMMENT("} move_arg_slots_down");
+}
+
+// Copy from a field or array element to a stacked argument slot.
+// is_element says whether the caller is loading an array element instead of an instance field; it only affects the block comment below.
+void MethodHandles::move_typed_arg(MacroAssembler* _masm,
+                                   BasicType type, bool is_element,
+                                   Address value_src, Address slot_dest,
+                                   Register temp_reg) {
+  assert(!slot_dest.uses(temp_reg), "must be different register");
+  BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)");
+  if (type == T_OBJECT || type == T_ARRAY) {
+    __ load_heap_oop(value_src, temp_reg);
+    __ verify_oop(temp_reg);
+    __ st_ptr(temp_reg, slot_dest);
+  } else if (type != T_VOID) {
+    int  arg_size      = type2aelembytes(type);
+    bool arg_is_signed = is_signed_subword_type(type);
+    int  slot_size     = is_subword_type(type) ? type2aelembytes(T_INT) : arg_size;  // store int sub-words as int
+    __ load_sized_value( value_src, temp_reg, arg_size, arg_is_signed);
+    __ store_sized_value(temp_reg, slot_dest, slot_size              );
+  }
+  BLOCK_COMMENT("} move_typed_arg");
+}
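+// For example, a T_BYTE value (arg_size == 1, signed) is loaded with sign
+// extension and stored back through a 4-byte (T_INT-sized) stack slot, while
+// T_LONG and T_DOUBLE keep their full 8-byte size.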
+
+// Cf. TemplateInterpreterGenerator::generate_return_entry_for and
+// InterpreterMacroAssembler::save_return_value
+void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
+                                      Address return_slot) {
+  BLOCK_COMMENT("move_return_value {");
+  // Look at the type and pull the value out of the corresponding register.
+  if (type == T_VOID) {
+    // nothing to do
+  } else if (type == T_OBJECT) {
+    __ verify_oop(O0);
+    __ st_ptr(O0, return_slot);
+  } else if (type == T_INT || is_subword_type(type)) {
+    int type_size = type2aelembytes(T_INT);
+    __ store_sized_value(O0, return_slot, type_size);
+  } else if (type == T_LONG) {
+    // store the value by parts
+    // Note: We assume longs are contiguous (if misaligned) on the interpreter stack.
+#if !defined(_LP64) && defined(COMPILER2)
+    __ stx(G1, return_slot);
+#else
+  #ifdef _LP64
+    __ stx(O0, return_slot);
+  #else
+    if (return_slot.has_disp()) {
+      // The displacement is a constant
+      __ st(O0, return_slot);
+      __ st(O1, return_slot.plus_disp(Interpreter::stackElementSize));
+    } else {
+      __ std(O0, return_slot);
+    }
+  #endif
+#endif
+  } else if (type == T_FLOAT) {
+    __ stf(FloatRegisterImpl::S, Ftos_f, return_slot);
+  } else if (type == T_DOUBLE) {
+    __ stf(FloatRegisterImpl::D, Ftos_f, return_slot);
+  } else {
+    ShouldNotReachHere();
+  }
+  BLOCK_COMMENT("} move_return_value");
+}
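+// On entry the value to be saved sits in the interpreter's return registers:
+// O0 for T_OBJECT, T_INT and subword types; O0 (or the O0/O1 pair on !_LP64,
+// or G1 under !_LP64 COMPILER2) for T_LONG; Ftos_f for T_FLOAT and T_DOUBLE.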
 
 #ifndef PRODUCT
 extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
-                              oopDesc* mh) {
-  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
-  print_method_handle(mh);
+                              oopDesc* mh,
+                              intptr_t* saved_sp) {
+  bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have mh
+  tty->print_cr("MH %s mh=" INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, (intptr_t) saved_sp);
+  if (has_mh)
+    print_method_handle(mh);
 }
 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
   if (!TraceMethodHandles)  return;
@@ -361,6 +1035,7 @@
   __ save_frame(16);
   __ set((intptr_t) adaptername, O0);
   __ mov(G3_method_handle, O1);
+  __ mov(I5_savedSP, O2);
   __ mov(G3_method_handle, L3);
   __ mov(Gargs, L4);
   __ mov(G5_method_type, L5);
@@ -381,13 +1056,21 @@
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
+          // OP_PRIM_TO_REF is below...
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
-         //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
+          // OP_COLLECT_ARGS is below...
+         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
+         |(!UseRicochetFrames ? 0 :
+           java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
+           ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
+           |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
+           |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS)
+           )
+          )
          );
-  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
 }
 
 //------------------------------------------------------------------------------
@@ -396,19 +1079,25 @@
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
+  MethodHandles::EntryKind ek_orig = ek_original_kind(ek);
+
   // Here is the register state during an interpreted call,
   // as set up by generate_method_handle_interpreter_entry():
   // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
   // - G3: receiver method handle
   // - O5_savedSP: sender SP (must preserve)
 
-  const Register O0_argslot = O0;
+  const Register O0_scratch = O0;
   const Register O1_scratch = O1;
   const Register O2_scratch = O2;
   const Register O3_scratch = O3;
-  const Register G5_index   = G5;
+  const Register O4_scratch = O4;
+  const Register G5_scratch = G5;
 
-  // Argument registers for _raise_exception.
+  // Often used names:
+  const Register O0_argslot = O0;
+
+  // Argument registers for _raise_exception:
   const Register O0_code     = O0;
   const Register O1_actual   = O1;
   const Register O2_required = O2;
@@ -441,6 +1130,8 @@
 
   trace_method_handle(_masm, entry_name(ek));
 
+  BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));
+
   switch ((int) ek) {
   case _raise_exception:
     {
@@ -456,23 +1147,13 @@
       // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
       __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
       __ ld_ptr(Address(G5_method, 0), G5_method);
-      __ tst(G5_method);
-      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
-      __ delayed()->nop();
 
       const int jobject_oop_offset = 0;
       __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
-      __ tst(G5_method);
-      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
-      __ delayed()->nop();
 
       __ verify_oop(G5_method);
       __ jump_indirect_to(G5_method_fce, O3_scratch);  // jump to compiled entry
       __ delayed()->nop();
-
-      // Do something that is at least causes a valid throw from the interpreter.
-      __ bind(L_no_method);
-      __ unimplemented("call throw_WrongMethodType_entry");
     }
     break;
 
@@ -486,7 +1167,7 @@
       if (ek == _invokespecial_mh) {
         // Must load & check the first argument before entering the target method.
         __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
-        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
+        __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
         __ null_check(G3_method_handle);
         __ verify_oop(G3_method_handle);
       }
@@ -502,10 +1183,11 @@
 
       // Pick out the vtable index and receiver offset from the MH,
       // and then we can discard it:
+      Register O2_index = O2_scratch;
       __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
-      __ ldsw(G3_dmh_vmindex, G5_index);
+      __ ldsw(G3_dmh_vmindex, O2_index);
       // Note:  The verifier allows us to ignore G3_mh_vmtarget.
-      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
+      __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
       __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
 
       // Get receiver klass:
@@ -517,8 +1199,8 @@
       const int base = instanceKlass::vtable_start_offset() * wordSize;
       assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 
-      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
-      __ add(O0_klass, G5_index, O0_klass);
+      __ sll_ptr(O2_index, LogBytesPerWord, O2_index);
+      __ add(O0_klass, O2_index, O0_klass);
       Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
       __ ld_ptr(vtable_entry_addr, G5_method);
 
@@ -534,9 +1216,10 @@
       // minus the CP setup and profiling:
       __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
       Register O1_intf  = O1_scratch;
+      Register G5_index = G5_scratch;
       __ load_heap_oop(G3_mh_vmtarget, O1_intf);
       __ ldsw(G3_dmh_vmindex, G5_index);
-      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
+      __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
       __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
 
       // Get receiver klass:
@@ -577,16 +1260,14 @@
   case _bound_long_direct_mh:
     {
       const bool direct_to_method = (ek >= _bound_ref_direct_mh);
-      BasicType arg_type  = T_ILLEGAL;
-      int       arg_mask  = _INSERT_NO_MASK;
-      int       arg_slots = -1;
-      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
+      BasicType arg_type  = ek_bound_mh_arg_type(ek);
+      int       arg_slots = type2size[arg_type];
 
       // Make room for the new argument:
-      __ ldsw(G3_bmh_vmargslot, O0_argslot);
-      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+      load_vmargslot(_masm, G3_bmh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
 
-      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
+      insert_arg_slots(_masm, arg_slots * stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
 
       // Store bound argument into the new stack slot:
       __ load_heap_oop(G3_bmh_argument, O1_scratch);
@@ -594,9 +1275,10 @@
         __ st_ptr(O1_scratch, Address(O0_argslot, 0));
       } else {
         Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
-        const int arg_size = type2aelembytes(arg_type);
-        __ load_sized_value(prim_value_addr, O2_scratch, arg_size, is_signed_subword_type(arg_type));
-        __ store_sized_value(O2_scratch, Address(O0_argslot, 0), arg_size);  // long store uses O2/O3 on !_LP64
+        move_typed_arg(_masm, arg_type, false,
+                       prim_value_addr,
+                       Address(O0_argslot, 0),
+                       O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
       }
 
       if (direct_to_method) {
@@ -616,6 +1298,7 @@
   case _adapter_retype_raw:
     // Immediately jump to the next MH layer:
     __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
+    __ verify_oop(G3_method_handle);
     __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     // This is OK when all parameter types widen.
     // It is also OK when a return type narrows.
@@ -623,29 +1306,28 @@
 
   case _adapter_check_cast:
     {
-      // Temps:
-      Register G5_klass = G5_index;  // Interesting AMH data.
-
       // Check a reference argument before jumping to the next layer of MH:
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
-      Address vmarg = __ argument_address(O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      Address vmarg = __ argument_address(O0_argslot, O0_argslot);
 
       // What class are we casting to?
-      __ load_heap_oop(G3_amh_argument, G5_klass);  // This is a Class object!
-      __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
+      Register O1_klass = O1_scratch;  // Interesting AMH data.
+      __ load_heap_oop(G3_amh_argument, O1_klass);  // This is a Class object!
+      load_klass_from_Class(_masm, O1_klass, O2_scratch, O3_scratch);
 
-      Label done;
-      __ ld_ptr(vmarg, O1_scratch);
-      __ tst(O1_scratch);
-      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
+      Label L_done;
+      __ ld_ptr(vmarg, O2_scratch);
+      __ tst(O2_scratch);
+      __ brx(Assembler::zero, false, Assembler::pn, L_done);  // No cast if null.
       __ delayed()->nop();
-      __ load_klass(O1_scratch, O1_scratch);
+      __ load_klass(O2_scratch, O2_scratch);
 
       // Live at this point:
-      // - G5_klass        :  klass required by the target method
-      // - O1_scratch      :  argument klass to test
+      // - O0_argslot      :  argslot index in vmarg; may be required in the failing path
+      // - O1_klass        :  klass required by the target method
+      // - O2_scratch      :  argument klass to test
       // - G3_method_handle:  adapter method handle
-      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
+      __ check_klass_subtype(O2_scratch, O1_klass, O3_scratch, O4_scratch, L_done);
 
       // If we get here, the type check failed!
       __ load_heap_oop(G3_amh_argument,        O2_required);  // required class
@@ -653,7 +1335,7 @@
       __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
       __ delayed()->mov(Bytecodes::_checkcast, O0_code);      // who is complaining?
 
-      __ bind(done);
+      __ BIND(L_done);
       // Get the new MH:
       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
@@ -672,14 +1354,14 @@
   case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
     {
       // Perform an in-place conversion to int or an int subword.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
       Address value;
-      Address vmarg = __ argument_address(O0_argslot);
+      Address vmarg;
       bool value_left_justified = false;
 
       switch (ek) {
       case _adapter_opt_i2i:
-        value = vmarg;
+        value = vmarg = __ argument_address(O0_argslot, O0_argslot);
         break;
       case _adapter_opt_l2i:
         {
@@ -688,13 +1370,13 @@
           // In V9, longs are given 2 64-bit slots in the interpreter, but the
           // data is passed in only 1 slot.
           // Keep the second slot.
-          __ add(Gargs, __ argument_offset(O0_argslot, -1), O0_argslot);
+          __ add(__ argument_address(O0_argslot, O0_argslot, -1), O0_argslot);
           remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
           value = Address(O0_argslot, 4);  // Get least-significant 32-bit of 64-bit value.
           vmarg = Address(O0_argslot, Interpreter::stackElementSize);
 #else
           // Keep the first slot.
-          __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+          __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
           remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
           value = Address(O0_argslot, 0);
           vmarg = value;
@@ -703,6 +1385,7 @@
         break;
       case _adapter_opt_unboxi:
         {
+          vmarg = __ argument_address(O0_argslot, O0_argslot);
           // Load the value up from the heap.
           __ ld_ptr(vmarg, O1_scratch);
           int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
@@ -725,7 +1408,7 @@
       }
 
       // This check is required on _BIG_ENDIAN
-      Register G5_vminfo = G5_index;
+      Register G5_vminfo = G5_scratch;
       __ ldsw(G3_amh_conversion, G5_vminfo);
       assert(CONV_VMINFO_SHIFT == 0, "preshifted");
 
@@ -761,13 +1444,13 @@
   case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
     {
       // Perform an in-place int-to-long or ref-to-long conversion.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
 
       // On big-endian machine we duplicate the slot and store the MSW
       // in the first slot.
-      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot, 1), O0_argslot);
 
-      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
+      insert_arg_slots(_masm, stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
 
       Address arg_lsw(O0_argslot, 0);
       Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
@@ -829,103 +1512,84 @@
   case _adapter_opt_rot_2_up:
   case _adapter_opt_rot_2_down:
     {
-      int swap_bytes = 0, rotate = 0;
-      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
+      int swap_slots = ek_adapter_opt_swap_slots(ek);
+      int rotate     = ek_adapter_opt_swap_mode(ek);
 
       // 'argslot' is the position of the first argument to swap.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
-      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
+      if (VerifyMethodHandles)
+        verify_argslot(_masm, O0_argslot, O2_scratch, "swap point must fall within current frame");
 
       // 'vminfo' is the second.
       Register O1_destslot = O1_scratch;
-      __ ldsw(G3_amh_conversion, O1_destslot);
-      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
-      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
-      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);
+      load_conversion_vminfo(_masm, G3_amh_conversion, O1_destslot);
+      __ add(__ argument_address(O1_destslot, O1_destslot), O1_destslot);
+      if (VerifyMethodHandles)
+        verify_argslot(_masm, O1_destslot, O2_scratch, "swap point must fall within current frame");
 
+      assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here");
       if (!rotate) {
-        for (int i = 0; i < swap_bytes; i += wordSize) {
-          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
-          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
-          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
-          __ st_ptr(O2_scratch, Address(O1_destslot, i));
+        // simple swap
+        for (int i = 0; i < swap_slots; i++) {
+          __ ld_ptr(            Address(O0_argslot,  i * wordSize), O2_scratch);
+          __ ld_ptr(            Address(O1_destslot, i * wordSize), O3_scratch);
+          __ st_ptr(O3_scratch, Address(O0_argslot,  i * wordSize));
+          __ st_ptr(O2_scratch, Address(O1_destslot, i * wordSize));
         }
       } else {
-        // Save the first chunk, which is going to get overwritten.
-        switch (swap_bytes) {
-        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
-        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
-        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
-        default: ShouldNotReachHere();
+        // A rotate is actually a pair of moves, with an "odd slot" (or pair)
+        // changing place with a series of other slots.
+        // First, push the "odd slot", which is going to get overwritten.
+        switch (swap_slots) {
+        case 2 :  __ ld_ptr(Address(O0_argslot, 1 * wordSize), O4_scratch); // fall-thru
+        case 1 :  __ ld_ptr(Address(O0_argslot, 0 * wordSize), O3_scratch); break;
+        default:  ShouldNotReachHere();
         }
-
         if (rotate > 0) {
-          // Rorate upward.
-          __ sub(O0_argslot, swap_bytes, O0_argslot);
-#if ASSERT
-          {
-            // Verify that argslot > destslot, by at least swap_bytes.
-            Label L_ok;
-            __ cmp(O0_argslot, O1_destslot);
-            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
-            __ delayed()->nop();
-            __ stop("source must be above destination (upward rotation)");
-            __ bind(L_ok);
-          }
-#endif
-          // Work argslot down to destslot, copying contiguous data upwards.
-          // Pseudo-code:
+          // Here is rotate > 0:
+          // (low mem)                                          (high mem)
+          //     | dest:     more_slots...     | arg: odd_slot :arg+1 |
+          // =>
+          //     | dest: odd_slot | dest+1: more_slots...      :arg+1 |
+          // work argslot down to destslot, copying contiguous data upwards
+          // pseudo-code:
           //   argslot  = src_addr - swap_bytes
           //   destslot = dest_addr
-          //   while (argslot >= destslot) {
-          //     *(argslot + swap_bytes) = *(argslot + 0);
-          //     argslot--;
-          //   }
-          Label loop;
-          __ bind(loop);
-          __ ld_ptr(Address(O0_argslot, 0), G5_index);
-          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
-          __ sub(O0_argslot, wordSize, O0_argslot);
-          __ cmp(O0_argslot, O1_destslot);
-          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
-          __ delayed()->nop();  // FILLME
+          //   while (argslot >= destslot) *(argslot + swap_bytes) = *(argslot + 0), argslot--;
+          move_arg_slots_up(_masm,
+                            O1_destslot,
+                            Address(O0_argslot, 0),
+                            swap_slots,
+                            O0_argslot, O2_scratch);
         } else {
-          __ add(O0_argslot, swap_bytes, O0_argslot);
-#if ASSERT
-          {
-            // Verify that argslot < destslot, by at least swap_bytes.
-            Label L_ok;
-            __ cmp(O0_argslot, O1_destslot);
-            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
-            __ delayed()->nop();
-            __ stop("source must be above destination (upward rotation)");
-            __ bind(L_ok);
-          }
-#endif
-          // Work argslot up to destslot, copying contiguous data downwards.
-          // Pseudo-code:
+          // Here is the other direction, rotate < 0:
+          // (low mem)                                          (high mem)
+          //     | arg: odd_slot | arg+1: more_slots...       :dest+1 |
+          // =>
+          //     | arg:    more_slots...     | dest: odd_slot :dest+1 |
+          // work argslot up to destslot, copying contiguous data downwards
+          // pseudo-code:
           //   argslot  = src_addr + swap_bytes
           //   destslot = dest_addr
-          //   while (argslot >= destslot) {
-          //     *(argslot - swap_bytes) = *(argslot + 0);
-          //     argslot++;
-          //   }
-          Label loop;
-          __ bind(loop);
-          __ ld_ptr(Address(O0_argslot, 0), G5_index);
-          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
-          __ add(O0_argslot, wordSize, O0_argslot);
-          __ cmp(O0_argslot, O1_destslot);
-          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
-          __ delayed()->nop();  // FILLME
+          //   while (argslot <= destslot) *(argslot - swap_bytes) = *(argslot + 0), argslot++;
+          // dest_slot denotes an exclusive upper limit
+          int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS;
+          if (limit_bias != 0)
+            __ add(O1_destslot, - limit_bias * wordSize, O1_destslot);
+          move_arg_slots_down(_masm,
+                              Address(O0_argslot, swap_slots * wordSize),
+                              O1_destslot,
+                              -swap_slots,
+                              O0_argslot, O2_scratch);
+
+          __ sub(O1_destslot, swap_slots * wordSize, O1_destslot);
         }
-
-        // Store the original first chunk into the destination slot, now free.
-        switch (swap_bytes) {
-        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
-        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
-        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
-        default: ShouldNotReachHere();
+        // pop the original first chunk into the destination slot, now free
+        switch (swap_slots) {
+        case 2 :  __ st_ptr(O4_scratch, Address(O1_destslot, 1 * wordSize)); // fall-thru
+        case 1 :  __ st_ptr(O3_scratch, Address(O1_destslot, 0 * wordSize)); break;
+        default:  ShouldNotReachHere();
         }
       }
 
@@ -937,41 +1601,21 @@
   case _adapter_dup_args:
     {
       // 'argslot' is the position of the first argument to duplicate.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
-      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
 
       // 'stack_move' is negative number of words to duplicate.
-      Register G5_stack_move = G5_index;
-      __ ldsw(G3_amh_conversion, G5_stack_move);
-      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
-
-      // Remember the old Gargs (argslot[0]).
-      Register O1_oldarg = O1_scratch;
-      __ mov(Gargs, O1_oldarg);
-
-      // Move Gargs down to make room for dups.
-      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
-      __ add(Gargs, G5_stack_move, Gargs);
-
-      // Compute the new Gargs (argslot[0]).
-      Register O2_newarg = O2_scratch;
-      __ mov(Gargs, O2_newarg);
+      Register O1_stack_move = O1_scratch;
+      load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
 
-      // Copy from oldarg[0...] down to newarg[0...]
-      // Pseude-code:
-      //   O1_oldarg  = old-Gargs
-      //   O2_newarg  = new-Gargs
-      //   O0_argslot = argslot
-      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
-      Label loop;
-      __ bind(loop);
-      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
-      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
-      __ add(O0_argslot, wordSize, O0_argslot);
-      __ add(O2_newarg,  wordSize, O2_newarg);
-      __ cmp(O2_newarg, O1_oldarg);
-      __ brx(Assembler::less, false, Assembler::pt, loop);
-      __ delayed()->nop();  // FILLME
+      if (VerifyMethodHandles) {
+        verify_argslots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, true,
+                        "copied argument(s) must fall within current frame");
+      }
+
+      // insert location is always the bottom of the argument list:
+      __ neg(O1_stack_move);
+      push_arg_slots(_masm, O0_argslot, O1_stack_move, O2_scratch, O3_scratch);
 
       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
@@ -981,15 +1625,14 @@
   case _adapter_drop_args:
     {
       // 'argslot' is the position of the first argument to nuke.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
-      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
 
       // 'stack_move' is number of words to drop.
-      Register G5_stack_move = G5_index;
-      __ ldsw(G3_amh_conversion, G5_stack_move);
-      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
+      Register O1_stack_move = O1_scratch;
+      load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
 
-      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
+      remove_arg_slots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, O4_scratch);
 
       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
@@ -997,31 +1640,686 @@
     break;
 
   case _adapter_collect_args:
-    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
-    break;
-
+  case _adapter_fold_args:
   case _adapter_spread_args:
     // Handled completely by optimized cases.
     __ stop("init_AdapterMethodHandle should not issue this");
     break;
 
-  case _adapter_opt_spread_0:
-  case _adapter_opt_spread_1:
-  case _adapter_opt_spread_more:
+  case _adapter_opt_collect_ref:
+  case _adapter_opt_collect_int:
+  case _adapter_opt_collect_long:
+  case _adapter_opt_collect_float:
+  case _adapter_opt_collect_double:
+  case _adapter_opt_collect_void:
+  case _adapter_opt_collect_0_ref:
+  case _adapter_opt_collect_1_ref:
+  case _adapter_opt_collect_2_ref:
+  case _adapter_opt_collect_3_ref:
+  case _adapter_opt_collect_4_ref:
+  case _adapter_opt_collect_5_ref:
+  case _adapter_opt_filter_S0_ref:
+  case _adapter_opt_filter_S1_ref:
+  case _adapter_opt_filter_S2_ref:
+  case _adapter_opt_filter_S3_ref:
+  case _adapter_opt_filter_S4_ref:
+  case _adapter_opt_filter_S5_ref:
+  case _adapter_opt_collect_2_S0_ref:
+  case _adapter_opt_collect_2_S1_ref:
+  case _adapter_opt_collect_2_S2_ref:
+  case _adapter_opt_collect_2_S3_ref:
+  case _adapter_opt_collect_2_S4_ref:
+  case _adapter_opt_collect_2_S5_ref:
+  case _adapter_opt_fold_ref:
+  case _adapter_opt_fold_int:
+  case _adapter_opt_fold_long:
+  case _adapter_opt_fold_float:
+  case _adapter_opt_fold_double:
+  case _adapter_opt_fold_void:
+  case _adapter_opt_fold_1_ref:
+  case _adapter_opt_fold_2_ref:
+  case _adapter_opt_fold_3_ref:
+  case _adapter_opt_fold_4_ref:
+  case _adapter_opt_fold_5_ref:
     {
-      // spread an array out into a group of arguments
-      __ unimplemented(entry_name(ek));
+      // Given a fresh incoming stack frame, build a new ricochet frame.
+      // On entry, FP is the caller's frame pointer, O5_savedSP holds the
+      // caller's exact stack pointer (which we must also preserve), and
+      // G3_method_handle contains an AdapterMethodHandle of the indicated kind.
+
+      // Relevant AMH fields:
+      // amh.vmargslot:
+      //   points to the trailing edge of the arguments
+      //   to filter, collect, or fold.  For a boxing operation,
+      //   it points just after the single primitive value.
+      // amh.argument:
+      //   recursively called MH, on |collect| arguments
+      // amh.vmtarget:
+      //   final destination MH, on return value, etc.
+      // amh.conversion.dest:
+      //   tells what is the type of the return value
+      //   (not needed here, since dest is also derived from ek)
+      // amh.conversion.vminfo:
+      //   points to the trailing edge of the return value
+      //   when the vmtarget is to be called; this is
+      //   equal to vmargslot + (retained ? |collect| : 0)
+
+      // Pass 0 or more argument slots to the recursive target.
+      int collect_count_constant = ek_adapter_opt_collect_count(ek);
+
+      // The collected arguments are copied from the saved argument list:
+      int collect_slot_constant = ek_adapter_opt_collect_slot(ek);
+
+      assert(ek_orig == _adapter_collect_args ||
+             ek_orig == _adapter_fold_args, "");
+      bool retain_original_args = (ek_orig == _adapter_fold_args);
+
+      // The return value is replaced (or inserted) at the 'vminfo' argslot.
+      // Sometimes we can compute this statically.
+      int dest_slot_constant = -1;
+      if (!retain_original_args)
+        dest_slot_constant = collect_slot_constant;
+      else if (collect_slot_constant >= 0 && collect_count_constant >= 0)
+        // We are preserving all the arguments, and the return value is prepended,
+        // so the return slot is to the left (above) the |collect| sequence.
+        dest_slot_constant = collect_slot_constant + collect_count_constant;
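+      // For instance, with collect_slot_constant == 2 and
+      // collect_count_constant == 3, a non-retaining (collect) adapter puts
+      // the return value at slot 2, while a retaining (fold) adapter prepends
+      // it at slot 2 + 3 == 5, above the preserved |collect| sequence.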
+
+      // Replace all those slots by the result of the recursive call.
+      // The result type can be one of ref, int, long, float, double, void.
+      // In the case of void, nothing is pushed on the stack after return.
+      BasicType dest = ek_adapter_opt_collect_type(ek);
+      assert(dest == type2wfield[dest], "dest is a stack slot type");
+      int dest_count = type2size[dest];
+      assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size");
+
+      // Choose a return continuation.
+      EntryKind ek_ret = _adapter_opt_return_any;
+      if (dest != T_CONFLICT && OptimizeMethodHandles) {
+        switch (dest) {
+        case T_INT    : ek_ret = _adapter_opt_return_int;     break;
+        case T_LONG   : ek_ret = _adapter_opt_return_long;    break;
+        case T_FLOAT  : ek_ret = _adapter_opt_return_float;   break;
+        case T_DOUBLE : ek_ret = _adapter_opt_return_double;  break;
+        case T_OBJECT : ek_ret = _adapter_opt_return_ref;     break;
+        case T_VOID   : ek_ret = _adapter_opt_return_void;    break;
+        default       : ShouldNotReachHere();
+        }
+        if (dest == T_OBJECT && dest_slot_constant >= 0) {
+          EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant);
+          if (ek_try <= _adapter_opt_return_LAST &&
+              ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) {
+            ek_ret = ek_try;
+          }
+        }
+        assert(ek_adapter_opt_return_type(ek_ret) == dest, "");
+      }
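+      // E.g. dest == T_OBJECT with dest_slot_constant == 3 upgrades the
+      // generic _adapter_opt_return_ref continuation to
+      // _adapter_opt_return_S3_ref, provided that specialized kind exists.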
+
+      // Already pushed:  ... keep1 | collect | keep2 |
+
+      // Push a few extra argument words, if we need them to store the return value.
+      {
+        int extra_slots = 0;
+        if (retain_original_args) {
+          extra_slots = dest_count;
+        } else if (collect_count_constant == -1) {
+          extra_slots = dest_count;  // collect_count might be zero; be generous
+        } else if (dest_count > collect_count_constant) {
+          extra_slots = (dest_count - collect_count_constant);
+        } else {
+          // else we know we have enough dead space in |collect| to repurpose for return values
+        }
+        if (extra_slots != 0) {
+          __ sub(SP, round_to(extra_slots, 2) * Interpreter::stackElementSize, SP);
+        }
+      }
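+      // Example with the constants above: a fold adapter returning a long
+      // (dest_count == 2) always reserves 2 extra slots; a collect adapter
+      // returning a long over a single collected argument reserves
+      // 2 - 1 == 1 slot, which round_to() pads to 2 for stack alignment.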
+
+      // Set up Ricochet Frame.
+      __ mov(SP, O5_savedSP);  // record SP for the callee
+
+      // One extra (empty) slot for outgoing target MH (see Gargs computation below).
+      __ save_frame(2);  // Note: we need to add 2 slots since frame::memory_parameter_word_sp_offset is 23.
+
+      // Note: Gargs is live throughout the following, until we make our recursive call.
+      // And the RF saves a copy in L4_saved_args_base.
+
+      RicochetFrame::enter_ricochet_frame(_masm, G3_method_handle, Gargs,
+                                          entry(ek_ret)->from_interpreted_entry());
+
+      // Compute argument base:
+      // Set up Gargs for current frame, extra (empty) slot is for outgoing target MH (space reserved by save_frame above).
+      __ add(FP, STACK_BIAS - (1 * Interpreter::stackElementSize), Gargs);
+
+      // Now pushed:  ... keep1 | collect | keep2 | extra | [RF]
+
+#ifdef ASSERT
+      if (VerifyMethodHandles && dest != T_CONFLICT) {
+        BLOCK_COMMENT("verify AMH.conv.dest {");
+        extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O1_scratch);
+        Label L_dest_ok;
+        __ cmp(O1_scratch, (int) dest);
+        __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
+        __ delayed()->nop();
+        if (dest == T_INT) {
+          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
+            if (is_subword_type(BasicType(bt))) {
+              __ cmp(O1_scratch, (int) bt);
+              __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
+              __ delayed()->nop();
+            }
+          }
+        }
+        __ stop("bad dest in AMH.conv");
+        __ BIND(L_dest_ok);
+        BLOCK_COMMENT("} verify AMH.conv.dest");
+      }
+#endif //ASSERT
+
+      // Find out where the original copy of the recursive argument sequence begins.
+      Register O0_coll = O0_scratch;
+      {
+        RegisterOrConstant collect_slot = collect_slot_constant;
+        if (collect_slot_constant == -1) {
+          load_vmargslot(_masm, G3_amh_vmargslot, O1_scratch);
+          collect_slot = O1_scratch;
+        }
+        // collect_slot might be 0, but we need the move anyway.
+        __ add(RicochetFrame::L4_saved_args_base, __ argument_offset(collect_slot, collect_slot.register_or_noreg()), O0_coll);
+        // O0_coll now points at the trailing edge of |collect| and leading edge of |keep2|
+      }
+
+      // Replace the old AMH with the recursive MH.  (No going back now.)
+      // In the case of a boxing call, the recursive call is to a 'boxer' method,
+      // such as Integer.valueOf or Long.valueOf.  In the case of a filter
+      // or collect call, it will take one or more arguments, transform them,
+      // and return some result, to store back into argument_base[vminfo].
+      __ load_heap_oop(G3_amh_argument, G3_method_handle);
+      if (VerifyMethodHandles)  verify_method_handle(_masm, G3_method_handle, O1_scratch, O2_scratch);
+
+      // Calculate |collect|, the number of arguments we are collecting.
+      Register O1_collect_count = O1_scratch;
+      RegisterOrConstant collect_count;
+      if (collect_count_constant < 0) {
+        __ load_method_handle_vmslots(O1_collect_count, G3_method_handle, O2_scratch);
+        collect_count = O1_collect_count;
+      } else {
+        collect_count = collect_count_constant;
+#ifdef ASSERT
+        if (VerifyMethodHandles) {
+          BLOCK_COMMENT("verify collect_count_constant {");
+          __ load_method_handle_vmslots(O3_scratch, G3_method_handle, O2_scratch);
+          Label L_count_ok;
+          __ cmp(O3_scratch, collect_count_constant);
+          __ br(Assembler::equal, false, Assembler::pt, L_count_ok);
+          __ delayed()->nop();
+          __ stop("bad vminfo in AMH.conv");
+          __ BIND(L_count_ok);
+          BLOCK_COMMENT("} verify collect_count_constant");
+        }
+#endif //ASSERT
+      }
+
+      // copy |collect| slots directly to TOS:
+      push_arg_slots(_masm, O0_coll, collect_count, O2_scratch, O3_scratch);
+      // Now pushed:  ... keep1 | collect | keep2 | RF... | collect |
+      // O0_coll still points at the trailing edge of |collect| and leading edge of |keep2|
+
+      // If necessary, adjust the saved arguments to make room for the eventual return value.
+      // Normal adjustment:  ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
+      // If retaining args:  ... keep1 | +dest+ |  collect  | keep2 | RF... | collect |
+      // In the non-retaining case, this might move keep2 either up or down.
+      // We don't have to copy the whole | RF... collect | complex,
+      // but we must adjust RF.saved_args_base.
+      // Also, from now on, we will forget about the original copy of |collect|.
+      // If we are retaining it, we will treat it as part of |keep2|.
+      // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
+
+      BLOCK_COMMENT("adjust trailing arguments {");
+      // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
+      int                open_count  = dest_count;
+      RegisterOrConstant close_count = collect_count_constant;
+      Register O1_close_count = O1_collect_count;
+      if (retain_original_args) {
+        close_count = constant(0);
+      } else if (collect_count_constant == -1) {
+        close_count = O1_collect_count;
+      }
+
+      // How many slots need moving?  This is simply dest_slot (0 => no |keep3|).
+      RegisterOrConstant keep3_count;
+      Register O2_keep3_count = O2_scratch;
+      if (dest_slot_constant < 0) {
+        extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O2_keep3_count);
+        keep3_count = O2_keep3_count;
+      } else  {
+        keep3_count = dest_slot_constant;
+#ifdef ASSERT
+        if (VerifyMethodHandles && dest_slot_constant < 0) {
+          BLOCK_COMMENT("verify dest_slot_constant {");
+          extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O3_scratch);
+          Label L_vminfo_ok;
+          __ cmp(O3_scratch, dest_slot_constant);
+          __ br(Assembler::equal, false, Assembler::pt, L_vminfo_ok);
+          __ delayed()->nop();
+          __ stop("bad vminfo in AMH.conv");
+          __ BIND(L_vminfo_ok);
+          BLOCK_COMMENT("} verify dest_slot_constant");
+        }
+#endif //ASSERT
+      }
+
+      // tasks remaining:
+      bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
+      bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
+      bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
+
+      // Old and new argument locations (based at slot 0).
+      // Net shift (&new_argv - &old_argv) is (close_count - open_count).
+      bool zero_open_count = (open_count == 0);  // remember this bit of info
+      if (move_keep3 && fix_arg_base) {
+        // It will be easier to have everything in one register:
+        if (close_count.is_register()) {
+          // Deduct open_count from close_count register to get a clean +/- value.
+          __ sub(close_count.as_register(), open_count, close_count.as_register());
+        } else {
+          close_count = close_count.as_constant() - open_count;
+        }
+        open_count = 0;
+      }
+      Register L4_old_argv = RicochetFrame::L4_saved_args_base;
+      Register O3_new_argv = O3_scratch;
+      if (fix_arg_base) {
+        __ add(L4_old_argv, __ argument_offset(close_count, O4_scratch), O3_new_argv,
+               -(open_count * Interpreter::stackElementSize));
+      }
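Once |+dest+| has been folded into the close count as above, the whole argv adjustment is a single signed slot delta. A minimal sketch (illustrative only; net_arg_shift_slots is not a VM function):

#include <cassert>

// Net shift of the |keep3| block in slots: new_argv - old_argv.
// open_count  = |+dest+|    (slots opened for the return value)
// close_count = |-collect-| (slots closed because arguments were consumed)
static int net_arg_shift_slots(int open_count, int close_count) {
  return close_count - open_count;  // > 0: keep3 moves up, < 0: keep3 moves down
}

int main() {
  assert(net_arg_shift_slots(1, 3) ==  2);  // usual case: collecting shrinks the list
  assert(net_arg_shift_slots(2, 1) == -1);  // wide result, one collected slot: move down
  assert(net_arg_shift_slots(1, 1) ==  0);  // sizes match: nothing to move
  return 0;
}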
+
+      // First decide if any actual data are to be moved.
+      // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
+      // (As it happens, all movements involve an argument list size change.)
+
+      // If there are variable parameters, use dynamic checks to skip around the whole mess.
+      Label L_done;
+      if (keep3_count.is_register()) {
+        __ tst(keep3_count.as_register());
+        __ br(Assembler::zero, false, Assembler::pn, L_done);
+        __ delayed()->nop();
+      }
+      if (close_count.is_register()) {
+        __ cmp(close_count.as_register(), open_count);
+        __ br(Assembler::equal, false, Assembler::pn, L_done);
+        __ delayed()->nop();
+      }
+
+      if (move_keep3 && fix_arg_base) {
+        bool emit_move_down = false, emit_move_up = false, emit_guard = false;
+        if (!close_count.is_constant()) {
+          emit_move_down = emit_guard = !zero_open_count;
+          emit_move_up   = true;
+        } else if (open_count != close_count.as_constant()) {
+          emit_move_down = (open_count > close_count.as_constant());
+          emit_move_up   = !emit_move_down;
+        }
+        Label L_move_up;
+        if (emit_guard) {
+          __ cmp(close_count.as_register(), open_count);
+          __ br(Assembler::greater, false, Assembler::pn, L_move_up);
+          __ delayed()->nop();
+        }
+
+        if (emit_move_down) {
+          // Move arguments down if |+dest+| > |-collect-|
+          // (This is rare, except when arguments are retained.)
+          // This opens space for the return value.
+          if (keep3_count.is_constant()) {
+            for (int i = 0; i < keep3_count.as_constant(); i++) {
+              __ ld_ptr(            Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
+              __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize)            );
+            }
+          } else {
+            // Live: O1_close_count, O2_keep3_count, O3_new_argv
+            Register argv_top = O0_scratch;
+            __ add(L4_old_argv, __ argument_offset(keep3_count, O4_scratch), argv_top);
+            move_arg_slots_down(_masm,
+                                Address(L4_old_argv, 0),  // beginning of old argv
+                                argv_top,                 // end of old argv
+                                close_count,              // distance to move down (must be negative)
+                                O4_scratch, G5_scratch);
+          }
+        }
+
+        if (emit_guard) {
+          __ ba(false, L_done);  // assumes emit_move_up is true also
+          __ delayed()->nop();
+          __ BIND(L_move_up);
+        }
+
+        if (emit_move_up) {
+          // Move arguments up if |+dest+| < |-collect-|
+          // (This is usual, except when |keep3| is empty.)
+          // This closes up the space occupied by the now-deleted collect values.
+          if (keep3_count.is_constant()) {
+            for (int i = keep3_count.as_constant() - 1; i >= 0; i--) {
+              __ ld_ptr(            Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
+              __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize)            );
+            }
+          } else {
+            Address argv_top(L4_old_argv, __ argument_offset(keep3_count, O4_scratch));
+            // Live: O1_close_count, O2_keep3_count, O3_new_argv
+            move_arg_slots_up(_masm,
+                              L4_old_argv,  // beginning of old argv
+                              argv_top,     // end of old argv
+                              close_count,  // distance to move up (must be positive)
+                              O4_scratch, G5_scratch);
+          }
+        }
+      }
+      __ BIND(L_done);
+
+      if (fix_arg_base) {
+        // adjust RF.saved_args_base
+        __ mov(O3_new_argv, RicochetFrame::L4_saved_args_base);
+      }
+
+      if (stomp_dest) {
+        // Stomp the return slot, so it doesn't hold garbage.
+        // This isn't strictly necessary, but it may help detect bugs.
+        __ set(RicochetFrame::RETURN_VALUE_PLACEHOLDER, O4_scratch);
+        __ st_ptr(O4_scratch, Address(RicochetFrame::L4_saved_args_base,
+                                      __ argument_offset(keep3_count, keep3_count.register_or_noreg())));  // uses O2_keep3_count
+      }
+      BLOCK_COMMENT("} adjust trailing arguments");
+
+      BLOCK_COMMENT("do_recursive_call");
+      __ mov(SP, O5_savedSP);  // record SP for the callee
+      __ set(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr() - frame::pc_return_offset), O7);
+      // The globally unique bounce address has two purposes:
+      // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
+      // 2. When returned to, it cuts back the stack and redirects control flow
+      //    to the return handler.
+      // The return handler will further cut back the stack when it takes
+      // down the RF.  Perhaps there is a way to streamline this further.
+
+      // State during recursive call:
+      // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     }
     break;
 
-  case _adapter_flyby:
-  case _adapter_ricochet:
-    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+  case _adapter_opt_return_ref:
+  case _adapter_opt_return_int:
+  case _adapter_opt_return_long:
+  case _adapter_opt_return_float:
+  case _adapter_opt_return_double:
+  case _adapter_opt_return_void:
+  case _adapter_opt_return_S0_ref:
+  case _adapter_opt_return_S1_ref:
+  case _adapter_opt_return_S2_ref:
+  case _adapter_opt_return_S3_ref:
+  case _adapter_opt_return_S4_ref:
+  case _adapter_opt_return_S5_ref:
+    {
+      BasicType dest_type_constant = ek_adapter_opt_return_type(ek);
+      int       dest_slot_constant = ek_adapter_opt_return_slot(ek);
+
+      if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
+
+      if (dest_slot_constant == -1) {
+        // The current stub is a general handler for this dest_type.
+        // It can be called from _adapter_opt_return_any below.
+        // Stash the address in a little table.
+        assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob");
+        address return_handler = __ pc();
+        _adapter_return_handlers[dest_type_constant] = return_handler;
+        if (dest_type_constant == T_INT) {
+          // do the subword types too
+          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
+            if (is_subword_type(BasicType(bt)) &&
+                _adapter_return_handlers[bt] == NULL) {
+              _adapter_return_handlers[bt] = return_handler;
+            }
+          }
+        }
+      }
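The registration above lets a single generated T_INT continuation serve every subword destination type. A minimal sketch of the table fill (plain C++; the enum values mirror HotSpot's BasicType numbering, but everything here is illustrative):

#include <cassert>
#include <cstddef>

enum { T_BOOLEAN = 4, T_CHAR = 5, T_FLOAT = 6, T_DOUBLE = 7,
       T_BYTE = 8, T_SHORT = 9, T_INT = 10, CONV_TYPE_MASK = 0x0F };

static bool is_subword_type(int bt) {
  return bt == T_BOOLEAN || bt == T_CHAR || bt == T_BYTE || bt == T_SHORT;
}

typedef const void* handler_t;
static handler_t adapter_return_handlers[CONV_TYPE_MASK + 1];

// Register a general return handler; the T_INT handler also covers the
// subword types that have no handler of their own.
static void register_return_handler(int dest_type, handler_t h) {
  adapter_return_handlers[dest_type] = h;
  if (dest_type == T_INT) {
    for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
      if (is_subword_type(bt) && adapter_return_handlers[bt] == NULL)
        adapter_return_handlers[bt] = h;
    }
  }
}

int main() {
  int stub;  // stands in for a generated code address
  register_return_handler(T_INT, &stub);
  assert(adapter_return_handlers[T_SHORT] == &stub);   // shares the int handler
  assert(adapter_return_handlers[T_FLOAT] == NULL);    // float keeps its own handler
  return 0;
}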
+
+      // On entry to this continuation handler, make Gargs live again.
+      __ mov(RicochetFrame::L4_saved_args_base, Gargs);
+
+      Register O7_temp   = O7;
+      Register O5_vminfo = O5;
+
+      RegisterOrConstant dest_slot = dest_slot_constant;
+      if (dest_slot_constant == -1) {
+        extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O5_vminfo);
+        dest_slot = O5_vminfo;
+      }
+      // Store the result back into the argslot.
+      // This code uses the interpreter calling sequence, in which the return value
+      // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
+      // There are certain irregularities with floating point values, which can be seen
+      // in TemplateInterpreterGenerator::generate_return_entry_for.
+      move_return_value(_masm, dest_type_constant, __ argument_address(dest_slot, O7_temp));
+
+      RicochetFrame::leave_ricochet_frame(_masm, G3_method_handle, I5_savedSP, I7);
+
+      // Load the final target and go.
+      if (VerifyMethodHandles)  verify_method_handle(_masm, G3_method_handle, O0_scratch, O1_scratch);
+      __ restore(I5_savedSP, G0, SP);
+      __ jump_to_method_handle_entry(G3_method_handle, O0_scratch);
+      __ illtrap(0);
+    }
+    break;
+
+  case _adapter_opt_return_any:
+    {
+      Register O7_temp      = O7;
+      Register O5_dest_type = O5;
+
+      if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
+      extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O5_dest_type);
+      __ set(ExternalAddress((address) &_adapter_return_handlers[0]), O7_temp);
+      __ sll_ptr(O5_dest_type, LogBytesPerWord, O5_dest_type);
+      __ ld_ptr(O7_temp, O5_dest_type, O7_temp);
+
+#ifdef ASSERT
+      { Label L_ok;
+        __ br_notnull(O7_temp, false, Assembler::pt, L_ok);
+        __ delayed()->nop();
+        __ stop("bad method handle return");
+        __ BIND(L_ok);
+      }
+#endif //ASSERT
+      __ JMP(O7_temp, 0);
+      __ delayed()->nop();
+    }
+    break;
+
+  case _adapter_opt_spread_0:
+  case _adapter_opt_spread_1_ref:
+  case _adapter_opt_spread_2_ref:
+  case _adapter_opt_spread_3_ref:
+  case _adapter_opt_spread_4_ref:
+  case _adapter_opt_spread_5_ref:
+  case _adapter_opt_spread_ref:
+  case _adapter_opt_spread_byte:
+  case _adapter_opt_spread_char:
+  case _adapter_opt_spread_short:
+  case _adapter_opt_spread_int:
+  case _adapter_opt_spread_long:
+  case _adapter_opt_spread_float:
+  case _adapter_opt_spread_double:
+    {
+      // spread an array out into a group of arguments
+      int  length_constant    = ek_adapter_opt_spread_count(ek);
+      bool length_can_be_zero = (length_constant == 0);
+      if (length_constant < 0) {
+        // some adapters with variable length must handle the zero case
+        if (!OptimizeMethodHandles ||
+            ek_adapter_opt_spread_type(ek) != T_OBJECT)
+          length_can_be_zero = true;
+      }
+
+      // find the address of the array argument
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
+
+      // O0_argslot points both to the array and to the first output arg
+      Address vmarg = Address(O0_argslot, 0);
+
+      // Get the array value.
+      Register  O1_array       = O1_scratch;
+      Register  O2_array_klass = O2_scratch;
+      BasicType elem_type      = ek_adapter_opt_spread_type(ek);
+      int       elem_slots     = type2size[elem_type];  // 1 or 2
+      int       array_slots    = 1;  // array is always a T_OBJECT
+      int       length_offset  = arrayOopDesc::length_offset_in_bytes();
+      int       elem0_offset   = arrayOopDesc::base_offset_in_bytes(elem_type);
+      __ ld_ptr(vmarg, O1_array);
+
+      Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
+      if (length_can_be_zero) {
+        // handle the null pointer case, if zero is allowed
+        Label L_skip;
+        if (length_constant < 0) {
+          load_conversion_vminfo(_masm, G3_amh_conversion, O3_scratch);
+          __ br_zero(Assembler::notZero, false, Assembler::pn, O3_scratch, L_skip);
+          __ delayed()->nop();
+        }
+        __ br_null(O1_array, false, Assembler::pn, L_array_is_empty);
+        __ delayed()->nop();
+        __ BIND(L_skip);
+      }
+      __ null_check(O1_array, oopDesc::klass_offset_in_bytes());
+      __ load_klass(O1_array, O2_array_klass);
+
+      // Check the array type.
+      Register O3_klass = O3_scratch;
+      __ load_heap_oop(G3_amh_argument, O3_klass);  // this is a Class object!
+      load_klass_from_Class(_masm, O3_klass, O4_scratch, G5_scratch);
+
+      Label L_ok_array_klass, L_bad_array_klass, L_bad_array_length;
+      __ check_klass_subtype(O2_array_klass, O3_klass, O4_scratch, G5_scratch, L_ok_array_klass);
+      // If we get here, the type check failed!
+      __ ba(false, L_bad_array_klass);
+      __ delayed()->nop();
+      __ BIND(L_ok_array_klass);
+
+      // Check length.
+      if (length_constant >= 0) {
+        __ ldsw(Address(O1_array, length_offset), O4_scratch);
+        __ cmp(O4_scratch, length_constant);
+      } else {
+        Register O3_vminfo = O3_scratch;
+        load_conversion_vminfo(_masm, G3_amh_conversion, O3_vminfo);
+        __ ldsw(Address(O1_array, length_offset), O4_scratch);
+        __ cmp(O3_vminfo, O4_scratch);
+      }
+      __ br(Assembler::notEqual, false, Assembler::pn, L_bad_array_length);
+      __ delayed()->nop();
+
+      Register O2_argslot_limit = O2_scratch;
+
+      // Array length checks out.  Now insert any required stack slots.
+      if (length_constant == -1) {
+        // Form a pointer to the end of the affected region.
+        __ add(O0_argslot, Interpreter::stackElementSize, O2_argslot_limit);
+        // 'stack_move' is negative number of words to insert
+        // This number already accounts for elem_slots.
+        Register O3_stack_move = O3_scratch;
+        load_stack_move(_masm, G3_amh_conversion, O3_stack_move);
+        __ cmp(O3_stack_move, 0);
+        assert(stack_move_unit() < 0, "else change this comparison");
+        __ br(Assembler::less, false, Assembler::pn, L_insert_arg_space);
+        __ delayed()->nop();
+        __ br(Assembler::equal, false, Assembler::pn, L_copy_args);
+        __ delayed()->nop();
+        // single argument case, with no array movement
+        __ BIND(L_array_is_empty);
+        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
+                         O0_argslot, O1_scratch, O2_scratch, O3_scratch);
+        __ ba(false, L_args_done);  // no spreading to do
+        __ delayed()->nop();
+        __ BIND(L_insert_arg_space);
+        // come here in the usual case, stack_move < 0 (2 or more spread arguments)
+        // Live: O1_array, O2_argslot_limit, O3_stack_move
+        insert_arg_slots(_masm, O3_stack_move,
+                         O0_argslot, O4_scratch, G5_scratch, O1_scratch);
+        // reload from O2_argslot_limit since O0_argslot is now decremented
+        __ ld_ptr(Address(O2_argslot_limit, -Interpreter::stackElementSize), O1_array);
+      } else if (length_constant >= 1) {
+        int new_slots = (length_constant * elem_slots) - array_slots;
+        insert_arg_slots(_masm, new_slots * stack_move_unit(),
+                         O0_argslot, O2_scratch, O3_scratch, O4_scratch);
+      } else if (length_constant == 0) {
+        __ BIND(L_array_is_empty);
+        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
+                         O0_argslot, O1_scratch, O2_scratch, O3_scratch);
+      } else {
+        ShouldNotReachHere();
+      }
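For the constant-length cases just handled, the stack adjustment is a single slot count. A minimal sketch (illustrative; spread_slot_delta is not a VM function):

#include <cassert>

// Slots gained (or lost) by spreading an array of `length` elements whose
// elements each occupy elem_slots stack slots (1, or 2 for long/double).
static int spread_slot_delta(int length, int elem_slots) {
  const int array_slots = 1;                  // the array reference itself
  return length * elem_slots - array_slots;   // > 0: insert slots, -1: empty array
}

int main() {
  assert(spread_slot_delta(0, 1) == -1);  // empty array: just drop the array slot
  assert(spread_slot_delta(3, 1) ==  2);  // three ints need two extra slots
  assert(spread_slot_delta(2, 2) ==  3);  // two longs: four slots minus the array slot
  return 0;
}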
+
+      // Copy from the array to the new slots.
+      // Note: Stack change code preserves integrity of O0_argslot pointer.
+      // So even after slot insertions, O0_argslot still points to first argument.
+      // Beware:  Arguments that are shallow on the stack are deep in the array,
+      // and vice versa.  So a downward-growing stack (the usual) has to be copied
+      // elementwise in reverse order from the source array.
+      __ BIND(L_copy_args);
+      if (length_constant == -1) {
+        // [O0_argslot, O2_argslot_limit) is the area we are inserting into.
+        // Array element [0] goes at O2_argslot_limit[-wordSize].
+        Register O1_source = O1_array;
+        __ add(Address(O1_array, elem0_offset), O1_source);
+        Register O4_fill_ptr = O4_scratch;
+        __ mov(O2_argslot_limit, O4_fill_ptr);
+        Label L_loop;
+        __ BIND(L_loop);
+        __ add(O4_fill_ptr, -Interpreter::stackElementSize * elem_slots, O4_fill_ptr);
+        move_typed_arg(_masm, elem_type, true,
+                       Address(O1_source, 0), Address(O4_fill_ptr, 0),
+                       O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
+        __ add(O1_source, type2aelembytes(elem_type), O1_source);
+        __ cmp(O4_fill_ptr, O0_argslot);
+        __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
+        __ delayed()->nop();  // FILLME
+      } else if (length_constant == 0) {
+        // nothing to copy
+      } else {
+        int elem_offset = elem0_offset;
+        int slot_offset = length_constant * Interpreter::stackElementSize;
+        for (int index = 0; index < length_constant; index++) {
+          slot_offset -= Interpreter::stackElementSize * elem_slots;  // fill backward
+          move_typed_arg(_masm, elem_type, true,
+                         Address(O1_array, elem_offset), Address(O0_argslot, slot_offset),
+                         O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
+          elem_offset += type2aelembytes(elem_type);
+        }
+      }
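Because the stack grows downward while the array is laid out upward, the constant-length copy above walks the array forward and fills the stack slots backward. A minimal sketch of that index arithmetic (plain C++, 8-byte slots assumed):

#include <cassert>

int main() {
  const int length = 3, elem_slots = 1, stack_element_size = 8;
  int slot_offset_for_elem[length];
  int slot_offset = length * stack_element_size;       // one past the last slot
  for (int index = 0; index < length; index++) {
    slot_offset -= stack_element_size * elem_slots;    // fill backward
    slot_offset_for_elem[index] = slot_offset;         // element [index] lands here
  }
  assert(slot_offset_for_elem[0] == (length - 1) * stack_element_size);
  assert(slot_offset_for_elem[length - 1] == 0);
  return 0;
}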
+      __ BIND(L_args_done);
+
+      // Arguments are spread.  Move to next method handle.
+      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+
+      __ BIND(L_bad_array_klass);
+      assert(!vmarg.uses(O2_required), "must be different registers");
+      __ load_heap_oop(Address(O2_array_klass, java_mirror_offset), O2_required);  // required class
+      __ ld_ptr(       vmarg,                                       O1_actual);    // bad object
+      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
+      __ delayed()->mov(Bytecodes::_aaload,                         O0_code);      // who is complaining?
+
+      __ bind(L_bad_array_length);
+      assert(!vmarg.uses(O2_required), "must be different registers");
+      __ mov(   G3_method_handle,                O2_required);  // required class
+      __ ld_ptr(vmarg,                           O1_actual);    // bad object
+      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
+      __ delayed()->mov(Bytecodes::_arraylength, O0_code);      // who is complaining?
+    }
     break;
 
   default:
+    DEBUG_ONLY(tty->print_cr("bad ek=%d (%s)", (int)ek, entry_name(ek)));
     ShouldNotReachHere();
   }
+  BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
 
   address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
   __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/sparc/vm/methodHandles_sparc.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Platform-specific definitions for method handles.
+// These definitions are inlined into class MethodHandles.
+
+// Adapters
+enum /* platform_dependent_constants */ {
+  adapter_code_size = NOT_LP64(22000 DEBUG_ONLY(+ 40000)) LP64_ONLY(32000 DEBUG_ONLY(+ 80000))
+};
+
+public:
+
+class RicochetFrame : public ResourceObj {
+  friend class MethodHandles;
+
+ private:
+  /*
+    RF field            x86                 SPARC
+    sender_pc           *(rsp+0)            I7-0x8
+    sender_link         rbp                 I6+BIAS
+    exact_sender_sp     rsi/r13             I5_savedSP
+    conversion          *(rcx+&amh_conv)    L5_conv
+    saved_args_base     rax                 L4_sab (cf. Gargs = G4)
+    saved_args_layout   #NULL               L3_sal
+    saved_target        *(rcx+&mh_vmtgt)    L2_stgt
+    continuation        #STUB_CON           L1_cont
+   */
+  static const Register L1_continuation     ;  // what to do when control gets back here
+  static const Register L2_saved_target     ;  // target method handle to invoke on saved_args
+  static const Register L3_saved_args_layout;  // caching point for MethodTypeForm.vmlayout cookie
+  static const Register L4_saved_args_base  ;  // base of pushed arguments (slot 0, arg N) (-3)
+  static const Register L5_conversion       ;  // misc. information from original AdapterMethodHandle (-2)
+
+  frame _fr;
+
+  RicochetFrame(const frame& fr) : _fr(fr) { }
+
+  intptr_t* register_addr(Register reg) const  {
+    assert((_fr.sp() + reg->sp_offset_in_saved_window()) == _fr.register_addr(reg), "must agree");
+    return _fr.register_addr(reg);
+  }
+  intptr_t  register_value(Register reg) const { return *register_addr(reg); }
+
+ public:
+  intptr_t* continuation() const        { return (intptr_t*) register_value(L1_continuation); }
+  oop       saved_target() const        { return (oop)       register_value(L2_saved_target); }
+  oop       saved_args_layout() const   { return (oop)       register_value(L3_saved_args_layout); }
+  intptr_t* saved_args_base() const     { return (intptr_t*) register_value(L4_saved_args_base); }
+  intptr_t  conversion() const          { return             register_value(L5_conversion); }
+  intptr_t* exact_sender_sp() const     { return (intptr_t*) register_value(I5_savedSP); }
+  intptr_t* sender_link() const         { return _fr.sender_sp(); }  // XXX
+  address   sender_pc() const           { return _fr.sender_pc(); }
+
+  // This value is not used for much, but it apparently must be nonzero.
+  static int frame_size_in_bytes()              { return wordSize * 4; }
+
+  intptr_t* extended_sender_sp() const  { return saved_args_base(); }
+
+  intptr_t  return_value_slot_number() const {
+    return adapter_conversion_vminfo(conversion());
+  }
+  BasicType return_value_type() const {
+    return adapter_conversion_dest_type(conversion());
+  }
+  bool has_return_value_slot() const {
+    return return_value_type() != T_VOID;
+  }
+  intptr_t* return_value_slot_addr() const {
+    assert(has_return_value_slot(), "");
+    return saved_arg_slot_addr(return_value_slot_number());
+  }
+  intptr_t* saved_target_slot_addr() const {
+    return saved_arg_slot_addr(saved_args_length());
+  }
+  intptr_t* saved_arg_slot_addr(int slot) const {
+    assert(slot >= 0, "");
+    return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
+  }
+
+  jint      saved_args_length() const;
+  jint      saved_arg_offset(int arg) const;
+
+  // GC interface
+  oop*  saved_target_addr()                     { return (oop*)register_addr(L2_saved_target); }
+  oop*  saved_args_layout_addr()                { return (oop*)register_addr(L3_saved_args_layout); }
+
+  oop  compute_saved_args_layout(bool read_cache, bool write_cache);
+
+#ifdef ASSERT
+  // The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
+  enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
+  static const Register L0_magic_number_1   ;  // cookie for debugging, at start of RSA
+  static Address magic_number_2_addr()  { return Address(L4_saved_args_base, -wordSize); }
+  intptr_t magic_number_1() const       { return register_value(L0_magic_number_1); }
+  intptr_t magic_number_2() const       { return saved_args_base()[-1]; }
+#endif //ASSERT
+
+ public:
+  enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };
+
+  void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
+
+  static void generate_ricochet_blob(MacroAssembler* _masm,
+                                     // output params:
+                                     int* bounce_offset,
+                                     int* exception_offset,
+                                     int* frame_size_in_words);
+
+  static void enter_ricochet_frame(MacroAssembler* _masm,
+                                   Register recv_reg,
+                                   Register argv_reg,
+                                   address return_handler);
+
+  static void leave_ricochet_frame(MacroAssembler* _masm,
+                                   Register recv_reg,
+                                   Register new_sp_reg,
+                                   Register sender_pc_reg);
+
+  static RicochetFrame* from_frame(const frame& fr) {
+    RicochetFrame* rf = new RicochetFrame(fr);
+    rf->verify();
+    return rf;
+  }
+
+  static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
+};
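A minimal sketch (assuming 8-byte interpreter stack slots; not tied to a real frame) of the address arithmetic behind saved_arg_slot_addr() and, via the vminfo slot number, return_value_slot_addr():

#include <cassert>
#include <cstdint>

static intptr_t* saved_arg_slot_addr(intptr_t* saved_args_base, int slot,
                                     int stack_element_size = 8) {
  assert(slot >= 0);
  return (intptr_t*)((char*)saved_args_base + slot * stack_element_size);
}

int main() {
  intptr_t slots[8] = {0};
  int return_value_slot_number = 3;   // would come from the conversion's vminfo
  assert(saved_arg_slot_addr(slots, return_value_slot_number) == slots + 3);
  return 0;
}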
+
+// Additional helper methods for MethodHandles code generation:
+public:
+  static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);
+  static void load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg);
+  static void extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
+  static void extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
+
+  static void load_stack_move(MacroAssembler* _masm,
+                              Address G3_amh_conversion,
+                              Register G5_stack_move);
+
+  static void insert_arg_slots(MacroAssembler* _masm,
+                               RegisterOrConstant arg_slots,
+                               Register argslot_reg,
+                               Register temp_reg, Register temp2_reg, Register temp3_reg);
+
+  static void remove_arg_slots(MacroAssembler* _masm,
+                               RegisterOrConstant arg_slots,
+                               Register argslot_reg,
+                               Register temp_reg, Register temp2_reg, Register temp3_reg);
+
+  static void push_arg_slots(MacroAssembler* _masm,
+                             Register argslot_reg,
+                             RegisterOrConstant slot_count,
+                             Register temp_reg, Register temp2_reg);
+
+  static void move_arg_slots_up(MacroAssembler* _masm,
+                                Register bottom_reg,  // invariant
+                                Address  top_addr,    // can use temp_reg
+                                RegisterOrConstant positive_distance_in_slots,
+                                Register temp_reg, Register temp2_reg);
+
+  static void move_arg_slots_down(MacroAssembler* _masm,
+                                  Address  bottom_addr,  // can use temp_reg
+                                  Register top_reg,      // invariant
+                                  RegisterOrConstant negative_distance_in_slots,
+                                  Register temp_reg, Register temp2_reg);
+
+  static void move_typed_arg(MacroAssembler* _masm,
+                             BasicType type, bool is_element,
+                             Address value_src, Address slot_dest,
+                             Register temp_reg);
+
+  static void move_return_value(MacroAssembler* _masm, BasicType type,
+                                Address return_slot);
+
+  static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
+                             Register temp_reg,
+                             const char* error_message) NOT_DEBUG_RETURN;
+
+  static void verify_argslots(MacroAssembler* _masm,
+                              RegisterOrConstant argslot_count,
+                              Register argslot_reg,
+                              Register temp_reg,
+                              Register temp2_reg,
+                              bool negate_argslot,
+                              const char* error_message) NOT_DEBUG_RETURN;
+
+  static void verify_stack_move(MacroAssembler* _masm,
+                                RegisterOrConstant arg_slots,
+                                int direction) NOT_DEBUG_RETURN;
+
+  static void verify_klass(MacroAssembler* _masm,
+                           Register obj_reg, KlassHandle klass,
+                           Register temp_reg, Register temp2_reg,
+                           const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
+
+  static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
+                                   Register temp_reg, Register temp2_reg) {
+    verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(),
+                 temp_reg, temp2_reg,
+                 "reference is a MH");
+  }
+
+  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
--- a/src/cpu/sparc/vm/registerMap_sparc.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/registerMap_sparc.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 // machine-dependent implemention for register maps
   friend class frame;
+  friend class MethodHandles;
 
  private:
   intptr_t* _window;         // register window save area (for L and I regs)
--- a/src/cpu/sparc/vm/runtime_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/runtime_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,6 @@
 
 #define __ masm->
 
-ExceptionBlob      *OptoRuntime::_exception_blob;
-
 //------------------------------ generate_exception_blob ---------------------------
 // creates exception blob at the end
 // Using exception blob, this code is jumped from a compiled method.
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -47,18 +47,6 @@
 
 #define __ masm->
 
-#ifdef COMPILER2
-UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
-#endif // COMPILER2
-
-DeoptimizationBlob* SharedRuntime::_deopt_blob;
-SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
-SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
-RuntimeStub*        SharedRuntime::_wrong_method_blob;
-RuntimeStub*        SharedRuntime::_ic_miss_blob;
-RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
-RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
-RuntimeStub*        SharedRuntime::_resolve_static_call_blob;
 
 class RegisterSaver {
 
@@ -3492,7 +3480,7 @@
 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
 // Tricky, tricky, tricky...
 
-static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
 
   // allocate space for the code
@@ -3587,7 +3575,7 @@
 // but since this is generic code we don't know what they are and the caller
 // must do any gc of the args.
 //
-static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
+RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
 
   // allocate space for the code
@@ -3677,35 +3665,3 @@
   // frame_size_words or bytes??
   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
 }
-
-void SharedRuntime::generate_stubs() {
-
-  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
-                                             "wrong_method_stub");
-
-  _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
-                                        "ic_miss_stub");
-
-  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
-                                        "resolve_opt_virtual_call");
-
-  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
-                                        "resolve_virtual_call");
-
-  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
-                                        "resolve_static_call");
-
-  _polling_page_safepoint_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), false);
-
-  _polling_page_return_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), true);
-
-  generate_deopt_blob();
-
-#ifdef COMPILER2
-  generate_uncommon_trap_blob();
-#endif // COMPILER2
-}
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -440,7 +440,8 @@
 #undef __
 #define __ masm->
 
-  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {
+  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
+                                   Register arg1 = noreg, Register arg2 = noreg) {
 #ifdef ASSERT
     int insts_size = VerifyThread ? 1 * K : 600;
 #else
@@ -476,6 +477,13 @@
     __ set_last_Java_frame(last_java_sp, G0);
     if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
     __ save_thread(noreg);
+    if (arg1 != noreg) {
+      assert(arg2 != O1, "clobbered");
+      __ mov(arg1, O1);
+    }
+    if (arg2 != noreg) {
+      __ mov(arg2, O2);
+    }
     // do the call
     BLOCK_COMMENT("call runtime_entry");
     __ call(runtime_entry, relocInfo::runtime_call_type);
@@ -3240,6 +3248,14 @@
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
 #endif  // COMPILER2 !=> _LP64
+
+    // Build this early so it's available for the interpreter.  The
+    // stub expects the required and actual type to already be in O1
+    // and O2 respectively.
+    StubRoutines::_throw_WrongMethodTypeException_entry =
+      generate_throw_exception("WrongMethodTypeException throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
+                               false, G5_method_type, G3_method_handle);
   }
 
 
--- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,11 +44,6 @@
   code_size2 = 20000            // simply increase if too small (assembler will crash if too small)
 };
 
-// MethodHandles adapters
-enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 15000
-};
-
 class Sparc {
  friend class StubGenerator;
 
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -128,24 +128,6 @@
 }
 
 
-// Arguments are: required type in G5_method_type, and
-// failing object (or NULL) in G3_method_handle.
-address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception
-  // happened
-  __ empty_expression_stack();
-  // load exception object
-  __ call_VM(Oexception,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_WrongMethodTypeException),
-             G5_method_type,    // required
-             G3_method_handle); // actual
-  __ should_not_reach_here();
-  return entry;
-}
-
-
 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
   address entry = __ pc();
   // expression stack must be empty before entering the VM if an exception happened
@@ -763,6 +745,87 @@
   return NULL;
 }
 
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#ifndef SERIALGC
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+  //    2. The slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:-
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+  // This code is based on generate_accessor_entry.
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+
+    // In the G1 code we don't check if we need to reach a safepoint. We
+    // continue and the thread will safepoint at the next bytecode dispatch.
+
+    // Check if local 0 != NULL
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
+    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
+    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
+    __ delayed()->nop();
+
+
+    // Load the value of the referent field.
+    if (Assembler::is_simm13(referent_offset)) {
+      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
+    } else {
+      __ set(referent_offset, G3_scratch);
+      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
+    }
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer. Note with
+    // these parameters the pre-barrier does not generate
+    // the load of the previous value
+
+    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
+                            Otos_i /* pre_val */,
+                            G3_scratch /* tmp */,
+                            true /* preserve_o_regs */);
+
+    // _areturn
+    __ retl();                      // return from leaf routine
+    __ delayed()->mov(O5_savedSP, SP);
+
+    // Generate regular method entry
+    __ bind(slow_path);
+    (void) generate_normal_entry(false);
+    return entry;
+  }
+#endif // SERIALGC
+
+  // If G1 is not enabled then attempt to go through the accessor entry point
+  // Reference.get is an accessor
+  return generate_accessor_entry();
+}
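Stripped of assembler detail, the generated fast path has this shape. A minimal sketch in plain C++ (reference_get_fast_path, satb_log_pre_barrier and normal_entry are stand-ins, not VM entry points):

#include <cstddef>

typedef void* oop;

static oop logged_referent = NULL;                  // stands in for the SATB buffer
static void satb_log_pre_barrier(oop pre_val) {     // stand-in for g1_write_barrier_pre
  logged_referent = pre_val;                        // keep the old value alive for marking
}

static oop normal_entry(oop /*receiver*/) {         // stand-in for the regular method entry
  return NULL;                                      // would raise the NullPointerException
}

// Shape of the G1 fast path: null-check local 0, load referent, log it, return.
static oop reference_get_fast_path(oop receiver, oop* referent_field) {
  if (receiver == NULL) return normal_entry(receiver);  // slow path for a null receiver
  oop referent = *referent_field;                        // load Reference.referent
  satb_log_pre_barrier(referent);                        // log the already-loaded value
  return referent;                                       // _areturn
}

int main() {
  int dummy;
  oop field = &dummy;
  oop result = reference_get_fast_path(&field, &field);
  return (result == &dummy && logged_referent == &dummy) ? 0 : 1;
}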
+
 //
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the native method
@@ -1542,6 +1605,7 @@
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
+                                           int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_local_count,
                                            frame* caller,
@@ -1617,7 +1681,6 @@
                      popframe_extra_args;
 
     int local_words = method->max_locals() * Interpreter::stackElementWords;
-    int parm_words  = method->size_of_parameters() * Interpreter::stackElementWords;
     NEEDS_CLEANUP;
     intptr_t* locals;
     if (caller->is_interpreted_frame()) {
@@ -1625,12 +1688,13 @@
       intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
       // Note that this computation means we replace size_of_parameters() values from the caller
       // interpreter frame's expression stack with our argument locals
+      int parm_words  = caller_actual_parameters * Interpreter::stackElementWords;
       locals = Lesp_ptr + parm_words;
       int delta = local_words - parm_words;
       int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
       *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
     } else {
-      assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
+      assert(caller->is_compiled_frame() || caller->is_entry_frame() || caller->is_ricochet_frame(), "only possible cases");
       // Don't have Lesp available; lay out locals block in the caller
       // adjacent to the register window save area.
       //
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -57,7 +57,11 @@
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       {
-        __ g1_write_barrier_pre( base, index, offset, tmp, /*preserve_o_regs*/true);
+        // Load and record the previous value.
+        __ g1_write_barrier_pre(base, index, offset,
+                                noreg /* pre_val */,
+                                tmp, true /*preserve_o_regs*/);
+
         if (index == noreg ) {
           assert(Assembler::is_simm13(offset), "fix this code");
           __ store_heap_oop(val, base, offset);
@@ -262,7 +266,7 @@
 
 void TemplateTable::ldc(bool wide) {
   transition(vtos, vtos);
-  Label call_ldc, notInt, notString, notClass, exit;
+  Label call_ldc, notInt, isString, notString, notClass, exit;
 
   if (wide) {
     __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
@@ -313,8 +317,11 @@
 
   __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
+  __ brx(Assembler::equal, true, Assembler::pt, isString);
+  __ delayed()->cmp(O2, JVM_CONSTANT_Object);
   __ brx(Assembler::notEqual, true, Assembler::pt, notString);
   __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
+  __ bind(isString);
   __ ld_ptr(O0, O1, Otos_i);
   __ verify_oop(Otos_i);
   __ push(atos);
@@ -3289,8 +3296,6 @@
                              /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
   __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
 
-  __ verify_oop(G5_callsite);
-
   // profile this call
   __ profile_call(O4);
 
@@ -3303,8 +3308,10 @@
   __ sll(Rret, LogBytesPerWord, Rret);
   __ ld_ptr(Rtemp, Rret, Rret);  // get return address
 
+  __ verify_oop(G5_callsite);
   __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
   __ null_check(G3_method_handle);
+  __ verify_oop(G3_method_handle);
 
   // Adjust Rret first so Llast_SP can be same as Rret
   __ add(Rret, -frame::pc_return_offset, O7);
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -3804,6 +3804,14 @@
   emit_arith(0x03, 0xC0, dst, src);
 }
 
+void Assembler::andq(Address dst, int32_t imm32) {
+  InstructionMark im(this);
+  prefixq(dst);
+  emit_byte(0x81);
+  emit_operand(rsp, dst, 4);
+  emit_long(imm32);
+}
+
 void Assembler::andq(Register dst, int32_t imm32) {
   (void) prefixq_and_encode(dst->encoding());
   emit_arith(0x81, 0xE0, dst, imm32);
@@ -5090,7 +5098,7 @@
   } else {
     ttyLocker ttyl;
     ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
-    assert(false, "DEBUG MESSAGE");
+    assert(false, err_msg("DEBUG MESSAGE: %s", msg));
   }
   ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
 }
@@ -5653,6 +5661,7 @@
     ttyLocker ttyl;
     ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                     msg);
+    assert(false, err_msg("DEBUG MESSAGE: %s", msg));
   }
 }
 
@@ -5890,6 +5899,53 @@
   call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
 }
 
+void MacroAssembler::super_call_VM(Register oop_result,
+                                   Register last_java_sp,
+                                   address entry_point,
+                                   int number_of_arguments,
+                                   bool check_exceptions) {
+  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
+  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+                                   Register last_java_sp,
+                                   address entry_point,
+                                   Register arg_1,
+                                   bool check_exceptions) {
+  pass_arg1(this, arg_1);
+  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+                                   Register last_java_sp,
+                                   address entry_point,
+                                   Register arg_1,
+                                   Register arg_2,
+                                   bool check_exceptions) {
+
+  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+  pass_arg2(this, arg_2);
+  pass_arg1(this, arg_1);
+  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+                                   Register last_java_sp,
+                                   address entry_point,
+                                   Register arg_1,
+                                   Register arg_2,
+                                   Register arg_3,
+                                   bool check_exceptions) {
+  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+  pass_arg3(this, arg_3);
+  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+  pass_arg2(this, arg_2);
+  pass_arg1(this, arg_1);
+  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
+}
+
 void MacroAssembler::call_VM_base(Register oop_result,
                                   Register java_thread,
                                   Register last_java_sp,
@@ -6039,6 +6095,43 @@
   call_VM_leaf(entry_point, 3);
 }
 
+void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
+  pass_arg0(this, arg_0);
+  MacroAssembler::call_VM_leaf_base(entry_point, 1);
+}
+
+void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
+
+  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+  pass_arg1(this, arg_1);
+  pass_arg0(this, arg_0);
+  MacroAssembler::call_VM_leaf_base(entry_point, 2);
+}
+
+void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
+  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
+  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+  pass_arg2(this, arg_2);
+  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+  pass_arg1(this, arg_1);
+  pass_arg0(this, arg_0);
+  MacroAssembler::call_VM_leaf_base(entry_point, 3);
+}
+
+void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
+  LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
+  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+  pass_arg3(this, arg_3);
+  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
+  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+  pass_arg2(this, arg_2);
+  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+  pass_arg1(this, arg_1);
+  pass_arg0(this, arg_0);
+  MacroAssembler::call_VM_leaf_base(entry_point, 4);
+}
+
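The LP64_ONLY asserts in these overloads guard the reverse-order argument shuffle: a value bound for an earlier c_rarg must not already live in a later c_rarg, or an earlier pass_arg call would read a clobbered register. A minimal sketch of that rule for three arguments (register names here are just strings for illustration):

#include <cassert>
#include <string>

// Arguments are placed in reverse order: arg_3 -> c_rarg3, then arg_2 -> c_rarg2,
// then arg_1 -> c_rarg1.  A source register is "smashed" if a later-positioned
// outgoing register was already its home.
static bool shuffle_is_safe(const std::string& arg_1,
                            const std::string& arg_2,
                            const std::string& arg_3) {
  (void)arg_3;  // arg_3 is moved first, so nothing can smash it
  return arg_1 != "c_rarg3" && arg_2 != "c_rarg3" && arg_1 != "c_rarg2";
}

int main() {
  assert( shuffle_is_safe("rax", "rbx", "rcx"));
  assert(!shuffle_is_safe("c_rarg2", "rbx", "rcx"));  // pass_arg2 would overwrite it
  return 0;
}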
 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
 }
 
@@ -6902,26 +6995,39 @@
 #ifndef SERIALGC
 
 void MacroAssembler::g1_write_barrier_pre(Register obj,
-#ifndef _LP64
+                                          Register pre_val,
                                           Register thread,
-#endif
                                           Register tmp,
-                                          Register tmp2,
-                                          bool tosca_live) {
-  LP64_ONLY(Register thread = r15_thread;)
+                                          bool tosca_live,
+                                          bool expand_call) {
+
+  // If expand_call is true then we expand the call_VM_leaf macro
+  // directly to skip generating the check by
+  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
+
+#ifdef _LP64
+  assert(thread == r15_thread, "must be");
+#endif // _LP64
+
+  Label done;
+  Label runtime;
+
+  assert(pre_val != noreg, "check this code");
+
+  if (obj != noreg) {
+    assert_different_registers(obj, pre_val, tmp);
+    assert(pre_val != rax, "check this code");
+  }
+
   Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_active()));
-
   Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_index()));
   Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));
 
 
-  Label done;
-  Label runtime;
-
-  // if (!marking_in_progress) goto done;
+  // Is marking active?
   if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
     cmpl(in_progress, 0);
   } else {
@@ -6930,65 +7036,92 @@
   }
   jcc(Assembler::equal, done);
 
-  // if (x.f == NULL) goto done;
-#ifdef _LP64
-  load_heap_oop(tmp2, Address(obj, 0));
-#else
-  movptr(tmp2, Address(obj, 0));
-#endif
-  cmpptr(tmp2, (int32_t) NULL_WORD);
+  // Do we need to load the previous value?
+  if (obj != noreg) {
+    load_heap_oop(pre_val, Address(obj, 0));
+  }
+
+  // Is the previous value null?
+  cmpptr(pre_val, (int32_t) NULL_WORD);
   jcc(Assembler::equal, done);
 
   // Can we store original value in the thread's buffer?
-
-#ifdef _LP64
-  movslq(tmp, index);
-  cmpq(tmp, 0);
-#else
-  cmpl(index, 0);
-#endif
-  jcc(Assembler::equal, runtime);
-#ifdef _LP64
-  subq(tmp, wordSize);
-  movl(index, tmp);
-  addq(tmp, buffer);
-#else
-  subl(index, wordSize);
-  movl(tmp, buffer);
-  addl(tmp, index);
-#endif
-  movptr(Address(tmp, 0), tmp2);
+  // Is index == 0?
+  // (The index field is typed as size_t.)
+
+  movptr(tmp, index);                   // tmp := *index_adr
+  cmpptr(tmp, 0);                       // tmp == 0?
+  jcc(Assembler::equal, runtime);       // If yes, goto runtime
+
+  subptr(tmp, wordSize);                // tmp := tmp - wordSize
+  movptr(index, tmp);                   // *index_adr := tmp
+  addptr(tmp, buffer);                  // tmp := tmp + *buffer_adr
+
+  // Record the previous value
+  movptr(Address(tmp, 0), pre_val);
   jmp(done);
+
   bind(runtime);
   // save the live input values
   if(tosca_live) push(rax);
-  push(obj);
-#ifdef _LP64
-  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, r15_thread);
-#else
-  push(thread);
-  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
-  pop(thread);
-#endif
-  pop(obj);
+
+  if (obj != noreg && obj != rax)
+    push(obj);
+
+  if (pre_val != rax)
+    push(pre_val);
+
+  // Calling the runtime using the regular call_VM_leaf mechanism generates
+  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
+  // that checks that *(ebp + frame::interpreter_frame_last_sp) == NULL.
+  //
+  // If we are generating the pre-barrier without a frame (e.g. in the
+  // intrinsified Reference.get() routine) then ebp might be pointing to
+  // the caller frame and so this check will most likely fail at runtime.
+  //
+  // Expanding the call directly bypasses the generation of the check.
+  // So when we do not have a full interpreter frame on the stack,
+  // expand_call should be passed true.
+
+  NOT_LP64( push(thread); )
+
+  if (expand_call) {
+    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
+    pass_arg1(this, thread);
+    pass_arg0(this, pre_val);
+    MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
+  } else {
+    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+  }
+
+  NOT_LP64( pop(thread); )
+
+  // restore the live input values
+  if (pre_val != rax)
+    pop(pre_val);
+
+  if (obj != noreg && obj != rax)
+    pop(obj);
+
   if(tosca_live) pop(rax);
+
   bind(done);
-
 }
 
 void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                            Register new_val,
-#ifndef _LP64
                                            Register thread,
-#endif
                                            Register tmp,
                                            Register tmp2) {
-
-  LP64_ONLY(Register thread = r15_thread;)
+#ifdef _LP64
+  assert(thread == r15_thread, "must be");
+#endif // _LP64
+
   Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_index()));
   Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));
+
   BarrierSet* bs = Universe::heap()->barrier_set();
   CardTableModRefBS* ct = (CardTableModRefBS*)bs;
   Label done;
@@ -7067,7 +7200,6 @@
   pop(store_addr);
 
   bind(done);
-
 }
 
 #endif // SERIALGC
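
The rewritten g1_write_barrier_pre above boils down to a short decision sequence: skip if marking is inactive, optionally load the previous value, skip if it is null, then either enqueue it in the thread-local SATB buffer or fall back to SharedRuntime::g1_wb_pre. The C++ sketch below restates that sequence; SatbQueueSketch and the helper names are illustrative stand-ins, not the real PtrQueue/JavaThread layout.

    #include <cstddef>

    // Illustrative stand-ins for the thread-local SATB mark queue fields that
    // the generated code reads via JavaThread::satb_mark_queue_offset().
    struct SatbQueueSketch {
      bool    active;   // "is marking active?"
      size_t  index;    // byte offset of the next free slot; 0 means the buffer is full
      void**  buffer;   // base address of the thread-local buffer
    };

    // Conceptual equivalent of the assembly emitted by g1_write_barrier_pre:
    // record the value about to be overwritten (pre_val) so that concurrent
    // marking still sees it.
    inline void g1_pre_barrier_sketch(SatbQueueSketch& q, void* pre_val,
                                      void (*slow_path)(void* pre_val)) {
      if (!q.active)       return;   // marking not in progress
      if (pre_val == NULL) return;   // nothing to record
      if (q.index != 0) {            // fast path: room left in the buffer
        q.index -= sizeof(void*);
        *reinterpret_cast<void**>(
            reinterpret_cast<char*>(q.buffer) + q.index) = pre_val;
      } else {                       // slow path, cf. SharedRuntime::g1_wb_pre
        slow_path(pre_val);
      }
    }

The expand_call flag only changes how the slow-path call is emitted (inline argument set-up versus the checked call_VM_leaf path); the logic itself is the same either way.
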
--- a/src/cpu/x86/vm/assembler_x86.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -234,6 +234,20 @@
     a._disp += disp;
     return a;
   }
+  Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
+    Address a = (*this);
+    a._disp += disp.constant_or_zero() * scale_size(scale);
+    if (disp.is_register()) {
+      assert(!a.index()->is_valid(), "competing indexes");
+      a._index = disp.as_register();
+      a._scale = scale;
+    }
+    return a;
+  }
+  bool is_same_address(Address a) const {
+    // disregard _rspec
+    return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
+  }
 
   // The following two overloads are used in connection with the
   // ByteSize type (see sizes.hpp).  They simplify the use of
@@ -765,6 +779,7 @@
   void andl(Register dst, Address src);
   void andl(Register dst, Register src);
 
+  void andq(Address  dst, int32_t imm32);
   void andq(Register dst, int32_t imm32);
   void andq(Register dst, Address src);
   void andq(Register dst, Register src);
@@ -1453,6 +1468,7 @@
 class MacroAssembler: public Assembler {
   friend class LIR_Assembler;
   friend class Runtime1;      // as_Address()
+
  protected:
 
   Address as_Address(AddressLiteral adr);
@@ -1645,6 +1661,14 @@
                Register arg_1, Register arg_2, Register arg_3,
                bool check_exceptions = true);
 
+  // These always tightly bind to MacroAssembler::call_VM_base
+  // bypassing the virtual implementation
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
+
   void call_VM_leaf(address entry_point,
                     int number_of_arguments = 0);
   void call_VM_leaf(address entry_point,
@@ -1654,6 +1678,14 @@
   void call_VM_leaf(address entry_point,
                     Register arg_1, Register arg_2, Register arg_3);
 
+  // These always tightly bind to MacroAssembler::call_VM_leaf_base
+  // bypassing the virtual implementation
+  void super_call_VM_leaf(address entry_point);
+  void super_call_VM_leaf(address entry_point, Register arg_1);
+  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
+  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
+  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
+
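
The "tightly bind ... bypassing the virtual implementation" comments describe an ordinary C++ idiom: a call qualified with the class name is resolved statically, so a subclass override (here the interpreter's call_VM_leaf_base with its _last_sp check) is never entered. A minimal sketch with hypothetical names:

    #include <iostream>

    struct Base {
      virtual ~Base() {}
      virtual void leaf_call() { std::cout << "Base::leaf_call\n"; }
      // "super_" helper: always binds to Base::leaf_call, whatever the
      // dynamic type of *this is.
      void super_leaf_call()   { Base::leaf_call(); }
    };

    struct Derived : Base {
      // Override that adds an extra check, analogous to the interpreter's
      // call_VM_leaf_base verifying _last_sp.
      void leaf_call() { std::cout << "Derived::leaf_call\n"; }
    };

    int main() {
      Derived d;
      Base* p = &d;
      p->leaf_call();        // virtual dispatch: Derived::leaf_call
      p->super_leaf_call();  // qualified call inside: Base::leaf_call
      return 0;
    }
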
   // last Java Frame (fills frame anchor)
   void set_last_Java_frame(Register thread,
                            Register last_java_sp,
@@ -1674,21 +1706,22 @@
   void store_check(Register obj);                // store check for obj - register is destroyed afterwards
   void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)
 
+#ifndef SERIALGC
+
   void g1_write_barrier_pre(Register obj,
-#ifndef _LP64
+                            Register pre_val,
                             Register thread,
-#endif
                             Register tmp,
-                            Register tmp2,
-                            bool     tosca_live);
+                            bool tosca_live,
+                            bool expand_call);
+
   void g1_write_barrier_post(Register store_addr,
                              Register new_val,
-#ifndef _LP64
                              Register thread,
-#endif
                              Register tmp,
                              Register tmp2);
 
+#endif // SERIALGC
 
   // split store_check(Register obj) to enhance instruction interleaving
   void store_check_part_1(Register obj);
@@ -2019,6 +2052,10 @@
   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
   void addptr(Register dst, int32_t src);
   void addptr(Register dst, Register src);
+  void addptr(Register dst, RegisterOrConstant src) {
+    if (src.is_constant()) addptr(dst, (int) src.as_constant());
+    else                   addptr(dst,       src.as_register());
+  }
 
   void andptr(Register dst, int32_t src);
   void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
@@ -2080,7 +2117,10 @@
   void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
   void subptr(Register dst, int32_t src);
   void subptr(Register dst, Register src);
-
+  void subptr(Register dst, RegisterOrConstant src) {
+    if (src.is_constant()) subptr(dst, (int) src.as_constant());
+    else                   subptr(dst,       src.as_register());
+  }
 
   void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
   void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
@@ -2278,6 +2318,11 @@
 
   void movptr(Address dst, Register src);
 
+  void movptr(Register dst, RegisterOrConstant src) {
+    if (src.is_constant()) movptr(dst, src.as_constant());
+    else                   movptr(dst, src.as_register());
+  }
+
 #ifdef _LP64
   // Generally the next two are only used for moving NULL
   // Although there are situations in initializing the mark word where
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -466,15 +466,19 @@
 #ifndef SERIALGC
 
 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
-
-  // At this point we know that marking is in progress
+  // At this point we know that marking is in progress.
+  // If do_load() is true then we have to emit the
+  // load of the previous value; otherwise it has already
+  // been loaded into _pre_val.
 
   __ bind(_entry);
   assert(pre_val()->is_register(), "Precondition.");
 
   Register pre_val_reg = pre_val()->as_register();
 
-  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
+  if (do_load()) {
+    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
+  }
 
   __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
   __ jcc(Assembler::equal, _continuation);
@@ -484,6 +488,68 @@
 
 }
 
+void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
+  // At this point we know that offset == referent_offset.
+  //
+  // So we might have to emit:
+  //   if (src == null) goto continuation.
+  //
+  // and we definitely have to emit:
+  //   if (klass(src).reference_type == REF_NONE) goto continuation
+  //   if (!marking_active) goto continuation
+  //   if (pre_val == null) goto continuation
+  //   call pre_barrier(pre_val)
+  //   goto continuation
+  //
+  __ bind(_entry);
+
+  assert(src()->is_register(), "sanity");
+  Register src_reg = src()->as_register();
+
+  if (gen_src_check()) {
+    // The original src operand was not a constant.
+    // Generate src == null?
+    __ cmpptr(src_reg, (int32_t) NULL_WORD);
+    __ jcc(Assembler::equal, _continuation);
+  }
+
+  // Generate: src->_klass->_reference_type == REF_NONE?
+  assert(tmp()->is_register(), "sanity");
+  Register tmp_reg = tmp()->as_register();
+
+  __ load_klass(tmp_reg, src_reg);
+
+  Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
+  __ cmpl(ref_type_adr, REF_NONE);
+  __ jcc(Assembler::equal, _continuation);
+
+  // Is marking active?
+  assert(thread()->is_register(), "precondition");
+  Register thread_reg = thread()->as_pointer_register();
+
+  Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
+                                       PtrQueue::byte_offset_of_active()));
+
+  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
+    __ cmpl(in_progress, 0);
+  } else {
+    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
+    __ cmpb(in_progress, 0);
+  }
+  __ jcc(Assembler::equal, _continuation);
+
+  // val == null?
+  assert(val()->is_register(), "Precondition.");
+  Register val_reg = val()->as_register();
+
+  __ cmpptr(val_reg, (int32_t) NULL_WORD);
+  __ jcc(Assembler::equal, _continuation);
+
+  ce->store_parameter(val()->as_register(), 0);
+  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
+  __ jmp(_continuation);
+}
+
 jbyte* G1PostBarrierStub::_byte_map_base = NULL;
 
 jbyte* G1PostBarrierStub::byte_map_base_slow() {
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -47,7 +47,7 @@
 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
   // of 128-bits operands for SSE instructions.
-  jlong *operand = (jlong*)(((long)adr)&((long)(~0xF)));
+  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
   // Store the value to a 128-bits operand.
   operand[0] = lo;
   operand[1] = hi;
@@ -3113,7 +3113,6 @@
     // reload the register args properly if we go slow path. Yuck
 
     // These are proper for the calling convention
-
     store_parameter(length, 2);
     store_parameter(dst_pos, 1);
     store_parameter(dst, 0);
@@ -3351,12 +3350,15 @@
           __ jcc(Assembler::notEqual, *stub->entry());
         }
 
+       // Spill because stubs can use any register they like and it's
+       // easier to restore just those that we care about.
+       store_parameter(dst, 0);
+       store_parameter(dst_pos, 1);
+       store_parameter(length, 2);
+       store_parameter(src_pos, 3);
+       store_parameter(src, 4);
+
 #ifndef _LP64
-        // save caller save registers
-        store_parameter(rax, 2);
-        store_parameter(rcx, 1);
-        store_parameter(rdx, 0);
-
         __ movptr(tmp, dst_klass_addr);
         __ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
         __ push(tmp);
@@ -3372,17 +3374,6 @@
 #else
         __ movl2ptr(length, length); //higher 32bits must be null
 
-        // save caller save registers: copy them to callee save registers
-        __ mov(rbx, rdx);
-        __ mov(r13, r8);
-        __ mov(r14, r9);
-#ifndef _WIN64
-        store_parameter(rsi, 1);
-        store_parameter(rcx, 0);
-        // on WIN64 other incoming parameters are in rdi and rsi saved
-        // across the call
-#endif
-
         __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
         assert_different_registers(c_rarg0, dst, dst_pos, length);
         __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
@@ -3432,25 +3423,13 @@
 
         __ xorl(tmp, -1);
 
-#ifndef _LP64
-        // restore caller save registers
-        assert_different_registers(tmp, rdx, rcx, rax); // result of stub will be lost
-        __ movptr(rdx, Address(rsp, 0*BytesPerWord));
-        __ movptr(rcx, Address(rsp, 1*BytesPerWord));
-        __ movptr(rax, Address(rsp, 2*BytesPerWord));
-#else
-        // restore caller save registers
-        __ mov(rdx, rbx);
-        __ mov(r8, r13);
-        __ mov(r9, r14);
-#ifndef _WIN64
-        assert_different_registers(tmp, rdx, r8, r9, rcx, rsi); // result of stub will be lost
-        __ movptr(rcx, Address(rsp, 0*BytesPerWord));
-        __ movptr(rsi, Address(rsp, 1*BytesPerWord));
-#else
-        assert_different_registers(tmp, rdx, r8, r9); // result of stub will be lost
-#endif
-#endif
+        // Restore previously spilled arguments
+        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
+        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
+        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
+        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
+        __ movptr   (src,     Address(rsp, 4*BytesPerWord));
+
 
         __ subl(length, tmp);
         __ addl(src_pos, tmp);
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -326,7 +326,8 @@
 
   if (obj_store) {
     // Needs GC write barriers.
-    pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
+    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
+                true /* do_load */, false /* patch */, NULL);
     __ move(value.result(), array_addr, null_check_info);
     // Seems to be a precise
     post_barrier(LIR_OprFact::address(array_addr), value.result());
@@ -794,7 +795,8 @@
 
   if (type == objectType) {  // Write-barrier needed for Object fields.
     // Do the pre-write barrier, if any.
-    pre_barrier(addr, false, NULL);
+    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
+                true /* do_load */, false /* patch */, NULL);
   }
 
   LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
@@ -1339,7 +1341,8 @@
     bool is_obj = (type == T_ARRAY || type == T_OBJECT);
     if (is_obj) {
       // Do the pre-write barrier, if any.
-      pre_barrier(LIR_OprFact::address(addr), false, NULL);
+      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
+                  true /* do_load */, false /* patch */, NULL);
       __ move(data, addr);
       assert(src->is_register(), "must be register");
       // Seems to be a precise address
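
All three call sites above use the extended pre_barrier() signature with do_load = true and an illegal pre_val, i.e. the barrier loads the old value itself; the opposite mode (pre_val supplied, no load) exists for paths such as the intrinsified Reference.get() where the value is already in a register. A conceptual C++ sketch of that contract, with made-up helper names:

    #include <cassert>
    #include <cstddef>

    typedef void* oop;

    static void satb_enqueue(oop /*old_value*/) { /* SATB enqueue, elided */ }

    // Conceptual contract of pre_barrier(addr, pre_val, do_load, ...):
    //   do_load == true  -> the barrier loads the old value from addr itself.
    //   do_load == false -> the caller already produced the old value in pre_val.
    static void pre_barrier_sketch(oop* addr, oop pre_val, bool do_load) {
      if (do_load) {
        assert(addr != NULL && "do_load requires an address to load from");
        pre_val = *addr;            // emit the load of the value being overwritten
      }
      if (pre_val != NULL) {
        satb_enqueue(pre_val);      // record it for concurrent marking
      }
    }
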
--- a/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
   address generate_empty_entry(void);
   address generate_accessor_entry(void);
+  address generate_Reference_get_entry(void);
   void lock_method(void);
   void generate_stack_overflow_check(void);
 
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -936,6 +936,26 @@
 
 }
 
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#ifndef SERIALGC
+  if (UseG1GC) {
+    // We need to generate a routine that generates code to:
+    //   * load the value in the referent field
+    //   * pass that value to the pre-barrier.
+    //
+    // In the case of G1 this will record the value of the
+    // referent in an SATB buffer if marking is active.
+    // This will cause concurrent marking to mark the referent
+    // field as live.
+    Unimplemented();
+  }
+#endif // SERIALGC
+
+  // If G1 is not enabled then attempt to go through the accessor entry point;
+  // Reference.get() is an accessor.
+  return generate_accessor_entry();
+}
+
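
What the eventual intrinsic has to do can be restated in plain C++; the sketch below is conceptual only, with referent_offset, marking_is_active() and satb_enqueue() standing in for the real field offset and G1 SATB machinery. The generated entry will do the equivalent in assembly; until then, the code above falls back to the plain accessor entry.

    #include <cstddef>

    typedef void* oop;

    static bool marking_is_active()     { return true; }  // placeholder
    static void satb_enqueue(oop /*v*/) {}                // placeholder pre-barrier

    // Conceptual behaviour of an intrinsified Reference.get(): load the
    // referent and, under G1, report it to concurrent marking so it cannot
    // be collected while the caller still holds it.
    static oop reference_get_sketch(char* reference_obj, size_t referent_offset) {
      oop referent = *reinterpret_cast<oop*>(reference_obj + referent_offset);
      if (referent != NULL && marking_is_active()) {
        satb_enqueue(referent);
      }
      return referent;
    }
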
 //
 // C++ Interpreter stub for calling a native method.
 // This sets up a somewhat different looking stack for calling the native method
@@ -2210,6 +2230,8 @@
     case Interpreter::java_lang_math_log     : // fall thru
     case Interpreter::java_lang_math_log10   : // fall thru
     case Interpreter::java_lang_math_sqrt    : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);     break;
+    case Interpreter::java_lang_ref_reference_get
+                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
     default                                  : ShouldNotReachHere();                                                       break;
   }
 
@@ -2317,14 +2339,15 @@
 }
 
 int AbstractInterpreter::layout_activation(methodOop method,
-                                                int tempcount,  //
-                                                int popframe_extra_args,
-                                                int moncount,
-                                                int callee_param_count,
-                                                int callee_locals,
-                                                frame* caller,
-                                                frame* interpreter_frame,
-                                                bool is_top_frame) {
+                                           int tempcount,  //
+                                           int popframe_extra_args,
+                                           int moncount,
+                                           int caller_actual_parameters,
+                                           int callee_param_count,
+                                           int callee_locals,
+                                           frame* caller,
+                                           frame* interpreter_frame,
+                                           bool is_top_frame) {
 
   assert(popframe_extra_args == 0, "FIX ME");
   // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
--- a/src/cpu/x86/vm/frame_x86.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/frame_x86.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -339,7 +339,6 @@
   return fr;
 }
 
-
 //------------------------------------------------------------------------------
 // frame::verify_deopt_original_pc
 //
@@ -361,6 +360,55 @@
 }
 #endif
 
+//------------------------------------------------------------------------------
+// frame::adjust_unextended_sp
+void frame::adjust_unextended_sp() {
+  // If we are returning to a compiled MethodHandle call site, the
+  // saved_fp will in fact be a saved value of the unextended SP.  The
+  // simplest way to tell whether we are returning to such a call site
+  // is as follows:
+
+  nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
+  if (sender_nm != NULL) {
+    // If the sender PC is a deoptimization point, get the original
+    // PC.  For MethodHandle call site the unextended_sp is stored in
+    // saved_fp.
+    if (sender_nm->is_deopt_mh_entry(_pc)) {
+      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
+      _unextended_sp = _fp;
+    }
+    else if (sender_nm->is_deopt_entry(_pc)) {
+      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
+    }
+    else if (sender_nm->is_method_handle_return(_pc)) {
+      _unextended_sp = _fp;
+    }
+  }
+}
+
+//------------------------------------------------------------------------------
+// frame::update_map_with_saved_link
+void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
+  // The interpreter and compiler(s) always save EBP/RBP in a known
+  // location on entry. We must record where that location is
+  // so that if EBP/RBP was live on a callout from c2 we can find
+  // the saved copy no matter what it called.
+
+  // Since the interpreter always saves EBP/RBP, if we record where it is then
+  // we don't have to always save EBP/RBP on entry and exit to c2 compiled
+  // code; saving it on entry will be enough.
+  map->set_location(rbp->as_VMReg(), (address) link_addr);
+#ifdef AMD64
+  // This is weird: "H" ought to be at a higher address; however, the
+  // oopMaps seem to have the "H" regs at the same address as the
+  // vanilla register.
+  // XXXX make this go away
+  if (true) {
+    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
+  }
+#endif // AMD64
+}
+
 
 //------------------------------------------------------------------------------
 // frame::sender_for_interpreter_frame
@@ -372,54 +420,13 @@
   // This is the sp before any possible extension (adapter/locals).
   intptr_t* unextended_sp = interpreter_frame_sender_sp();
 
-  // Stored FP.
-  intptr_t* saved_fp = link();
-
-  address sender_pc = this->sender_pc();
-  CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
-  assert(sender_cb, "sanity");
-  nmethod* sender_nm = sender_cb->as_nmethod_or_null();
-
-  if (sender_nm != NULL) {
-    // If the sender PC is a deoptimization point, get the original
-    // PC.  For MethodHandle call site the unextended_sp is stored in
-    // saved_fp.
-    if (sender_nm->is_deopt_mh_entry(sender_pc)) {
-      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
-      unextended_sp = saved_fp;
-    }
-    else if (sender_nm->is_deopt_entry(sender_pc)) {
-      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
-    }
-    else if (sender_nm->is_method_handle_return(sender_pc)) {
-      unextended_sp = saved_fp;
-    }
-  }
-
-  // The interpreter and compiler(s) always save EBP/RBP in a known
-  // location on entry. We must record where that location is
-  // so this if EBP/RBP was live on callout from c2 we can find
-  // the saved copy no matter what it called.
-
-  // Since the interpreter always saves EBP/RBP if we record where it is then
-  // we don't have to always save EBP/RBP on entry and exit to c2 compiled
-  // code, on entry will be enough.
 #ifdef COMPILER2
   if (map->update_map()) {
-    map->set_location(rbp->as_VMReg(), (address) addr_at(link_offset));
-#ifdef AMD64
-    // this is weird "H" ought to be at a higher address however the
-    // oopMaps seems to have the "H" regs at the same address and the
-    // vanilla register.
-    // XXXX make this go away
-    if (true) {
-      map->set_location(rbp->as_VMReg()->next(), (address)addr_at(link_offset));
-    }
-#endif // AMD64
+    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
   }
 #endif // COMPILER2
 
-  return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
+  return frame(sender_sp, unextended_sp, link(), sender_pc());
 }
 
 
@@ -427,6 +434,7 @@
 // frame::sender_for_compiled_frame
 frame frame::sender_for_compiled_frame(RegisterMap* map) const {
   assert(map != NULL, "map must be set");
+  assert(!is_ricochet_frame(), "caller must handle this");
 
   // frame owned by optimizing compiler
   assert(_cb->frame_size() >= 0, "must have non-zero frame size");
@@ -438,31 +446,7 @@
 
   // This is the saved value of EBP which may or may not really be an FP.
   // It is only an FP if the sender is an interpreter frame (or C1?).
-  intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
-
-  // If we are returning to a compiled MethodHandle call site, the
-  // saved_fp will in fact be a saved value of the unextended SP.  The
-  // simplest way to tell whether we are returning to such a call site
-  // is as follows:
-  CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
-  assert(sender_cb, "sanity");
-  nmethod* sender_nm = sender_cb->as_nmethod_or_null();
-
-  if (sender_nm != NULL) {
-    // If the sender PC is a deoptimization point, get the original
-    // PC.  For MethodHandle call site the unextended_sp is stored in
-    // saved_fp.
-    if (sender_nm->is_deopt_mh_entry(sender_pc)) {
-      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
-      unextended_sp = saved_fp;
-    }
-    else if (sender_nm->is_deopt_entry(sender_pc)) {
-      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
-    }
-    else if (sender_nm->is_method_handle_return(sender_pc)) {
-      unextended_sp = saved_fp;
-    }
-  }
+  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);
 
   if (map->update_map()) {
     // Tell GC to use argument oopmaps for some runtime stubs that need it.
@@ -472,23 +456,15 @@
     if (_cb->oop_maps() != NULL) {
       OopMapSet::update_register_map(this, map);
     }
+
     // Since the prolog does the save and restore of EBP there is no oopmap
     // for it so we must fill in its location as if there was an oopmap entry
     // since if our caller was compiled code there could be live jvm state in it.
-    map->set_location(rbp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset));
-#ifdef AMD64
-    // this is weird "H" ought to be at a higher address however the
-    // oopMaps seems to have the "H" regs at the same address and the
-    // vanilla register.
-    // XXXX make this go away
-    if (true) {
-      map->set_location(rbp->as_VMReg()->next(), (address) (sender_sp - frame::sender_sp_offset));
-    }
-#endif // AMD64
+    update_map_with_saved_link(map, saved_fp_addr);
   }
 
   assert(sender_sp != sp(), "must have changed");
-  return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
+  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
 }
 
 
@@ -502,6 +478,7 @@
   if (is_entry_frame())       return sender_for_entry_frame(map);
   if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
   assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
+  if (is_ricochet_frame())    return sender_for_ricochet_frame(map);
 
   if (_cb != NULL) {
     return sender_for_compiled_frame(map);
@@ -669,3 +646,23 @@
   int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
   return &interpreter_frame_tos_address()[index];
 }
+
+#ifdef ASSERT
+
+#define DESCRIBE_FP_OFFSET(name) \
+  values.describe(frame_no, fp() + frame::name##_offset, #name)
+
+void frame::describe_pd(FrameValues& values, int frame_no) {
+  if (is_interpreted_frame()) {
+    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
+    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
+    DESCRIBE_FP_OFFSET(interpreter_frame_method);
+    DESCRIBE_FP_OFFSET(interpreter_frame_mdx);
+    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
+    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
+    DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
+    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
+  }
+
+}
+#endif
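
The refactoring above pulls two previously duplicated pieces of sender_for_interpreter_frame and sender_for_compiled_frame into helpers: adjust_unextended_sp() decides whether the saved-FP slot really holds an unextended SP (MethodHandle return and deopt-MH sites), and update_map_with_saved_link() records where RBP was saved. A minimal sketch of the shared decision, using toy types in place of frame/nmethod:

    #include <cstddef>
    #include <cstdint>

    // Toy stand-ins; the real nmethod answers these queries from its code ranges.
    struct NMethodSketch {
      bool deopt_mh_entry, mh_return;
      bool is_deopt_mh_entry(void*)       const { return deopt_mh_entry; }
      bool is_method_handle_return(void*) const { return mh_return; }
    };

    struct FrameSketch {
      void*      pc;
      intptr_t*  fp;
      intptr_t*  unextended_sp;

      // Same decision as frame::adjust_unextended_sp() above: at a
      // MethodHandle return (or its deopt entry) the saved-FP slot actually
      // holds the unextended SP, so adopt it.
      void adjust_unextended_sp(const NMethodSketch* nm) {
        if (nm == NULL) return;
        if (nm->is_deopt_mh_entry(pc) || nm->is_method_handle_return(pc)) {
          unextended_sp = fp;
        }
      }
    };
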
--- a/src/cpu/x86/vm/frame_x86.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/frame_x86.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -164,6 +164,7 @@
   // original sp we use that convention.
 
   intptr_t*     _unextended_sp;
+  void adjust_unextended_sp();
 
   intptr_t* ptr_at_addr(int offset) const {
     return (intptr_t*) addr_at(offset);
@@ -197,6 +198,9 @@
   // expression stack tos if we are nested in a java call
   intptr_t* interpreter_frame_last_sp() const;
 
+  // helper to update a map with callee-saved RBP
+  static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);
+
 #ifndef CC_INTERP
   // deoptimization support
   void interpreter_frame_set_last_sp(intptr_t* sp);
--- a/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -45,6 +45,7 @@
   _pc = pc;
   assert(pc != NULL, "no pc?");
   _cb = CodeCache::find_blob(pc);
+  adjust_unextended_sp();
 
   address original_pc = nmethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
@@ -62,6 +63,7 @@
   _pc = pc;
   assert(pc != NULL, "no pc?");
   _cb = CodeCache::find_blob(pc);
+  adjust_unextended_sp();
 
   address original_pc = nmethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
@@ -91,6 +93,7 @@
   // assert(_pc != NULL, "no pc?");
 
   _cb = CodeCache::find_blob(_pc);
+  adjust_unextended_sp();
 
   address original_pc = nmethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -383,32 +383,6 @@
   movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
 }
 
-void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
-  MacroAssembler::call_VM_leaf_base(entry_point, 0);
-}
-
-
-void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) {
-  push(arg_1);
-  MacroAssembler::call_VM_leaf_base(entry_point, 1);
-}
-
-
-void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
-  push(arg_2);
-  push(arg_1);
-  MacroAssembler::call_VM_leaf_base(entry_point, 2);
-}
-
-
-void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
-  push(arg_3);
-  push(arg_2);
-  push(arg_1);
-  MacroAssembler::call_VM_leaf_base(entry_point, 3);
-}
-
-
 void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
   // set sender sp
   lea(rsi, Address(rsp, wordSize));
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,12 +124,6 @@
   void load_ptr(int n, Register val);
   void store_ptr(int n, Register val);
 
-  // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
-  void super_call_VM_leaf(address entry_point);
-  void super_call_VM_leaf(address entry_point, Register arg_1);
-  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
-  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
-
   // Generate a subtype check: branch to ok_is_subtype if sub_klass is
   // a subtype of super_klass.  EAX holds the super_klass.  Blows ECX
   // and EDI.  Register sub_klass cannot be any of the above.
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -381,56 +381,6 @@
 }
 
 
-void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
-  MacroAssembler::call_VM_leaf_base(entry_point, 0);
-}
-
-
-void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
-                                                   Register arg_1) {
-  if (c_rarg0 != arg_1) {
-    mov(c_rarg0, arg_1);
-  }
-  MacroAssembler::call_VM_leaf_base(entry_point, 1);
-}
-
-
-void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
-                                                   Register arg_1,
-                                                   Register arg_2) {
-  assert(c_rarg0 != arg_2, "smashed argument");
-  assert(c_rarg1 != arg_1, "smashed argument");
-  if (c_rarg0 != arg_1) {
-    mov(c_rarg0, arg_1);
-  }
-  if (c_rarg1 != arg_2) {
-    mov(c_rarg1, arg_2);
-  }
-  MacroAssembler::call_VM_leaf_base(entry_point, 2);
-}
-
-void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
-                                                   Register arg_1,
-                                                   Register arg_2,
-                                                   Register arg_3) {
-  assert(c_rarg0 != arg_2, "smashed argument");
-  assert(c_rarg0 != arg_3, "smashed argument");
-  assert(c_rarg1 != arg_1, "smashed argument");
-  assert(c_rarg1 != arg_3, "smashed argument");
-  assert(c_rarg2 != arg_1, "smashed argument");
-  assert(c_rarg2 != arg_2, "smashed argument");
-  if (c_rarg0 != arg_1) {
-    mov(c_rarg0, arg_1);
-  }
-  if (c_rarg1 != arg_2) {
-    mov(c_rarg1, arg_2);
-  }
-  if (c_rarg2 != arg_3) {
-    mov(c_rarg2, arg_3);
-  }
-  MacroAssembler::call_VM_leaf_base(entry_point, 3);
-}
-
 void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
   // set sender sp
   lea(r13, Address(rsp, wordSize));
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -136,13 +136,6 @@
   void load_ptr(int n, Register val);
   void store_ptr(int n, Register val);
 
-  // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
-  void super_call_VM_leaf(address entry_point);
-  void super_call_VM_leaf(address entry_point, Register arg_1);
-  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
-  void super_call_VM_leaf(address entry_point,
-                          Register arg_1, Register arg_2, Register arg_3);
-
   // Generate a subtype check: branch to ok_is_subtype if sub_klass is
   // a subtype of super_klass.
   void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );
--- a/src/cpu/x86/vm/interpreterGenerator_x86.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/interpreterGenerator_x86.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
   address generate_empty_entry(void);
   address generate_accessor_entry(void);
+  address generate_Reference_get_entry();
   void lock_method(void);
   void generate_stack_overflow_check(void);
 
--- a/src/cpu/x86/vm/interpreter_x86.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/interpreter_x86.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -26,7 +26,9 @@
 #define CPU_X86_VM_INTERPRETER_X86_HPP
 
  public:
-  static Address::ScaleFactor stackElementScale() { return Address::times_4; }
+  static Address::ScaleFactor stackElementScale() {
+    return NOT_LP64(Address::times_4) LP64_ONLY(Address::times_8);
+  }
 
   // Offset from rsp (which points to the last stack element)
   static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
--- a/src/cpu/x86/vm/interpreter_x86_32.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/interpreter_x86_32.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -242,26 +242,6 @@
   return entry_point;
 }
 
-
-// This method tells the deoptimizer how big an interpreted frame must be:
-int AbstractInterpreter::size_activation(methodOop method,
-                                         int tempcount,
-                                         int popframe_extra_args,
-                                         int moncount,
-                                         int callee_param_count,
-                                         int callee_locals,
-                                         bool is_top_frame) {
-  return layout_activation(method,
-                           tempcount,
-                           popframe_extra_args,
-                           moncount,
-                           callee_param_count,
-                           callee_locals,
-                           (frame*) NULL,
-                           (frame*) NULL,
-                           is_top_frame);
-}
-
 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
 
   // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/src/cpu/x86/vm/interpreter_x86_64.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/interpreter_x86_64.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -362,20 +362,6 @@
 
 }
 
-// This method tells the deoptimizer how big an interpreted frame must be:
-int AbstractInterpreter::size_activation(methodOop method,
-                                         int tempcount,
-                                         int popframe_extra_args,
-                                         int moncount,
-                                         int callee_param_count,
-                                         int callee_locals,
-                                         bool is_top_frame) {
-  return layout_activation(method,
-                           tempcount, popframe_extra_args, moncount,
-                           callee_param_count, callee_locals,
-                           (frame*) NULL, (frame*) NULL, is_top_frame);
-}
-
 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
 
   // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
 #include "prims/methodHandles.hpp"
 
@@ -37,6 +38,11 @@
 
 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
+// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
+static RegisterOrConstant constant(int value) {
+  return RegisterOrConstant(value);
+}
+
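
The "overloading nastiness" the helper works around: a literal 0 converts equally well to an integer and to a null Register (a pointer-like handle), so passing 0 where a RegisterOrConstant is expected is ambiguous. A small reconstruction of the problem with simplified types (the real Register/RegisterOrConstant differ, but the shape is the same):

    #include <cstdint>

    class RegisterImpl;
    typedef RegisterImpl* Register;        // a register is a pointer-like handle

    struct RegOrConstSketch {
      RegOrConstSketch(Register r) : is_reg(true),  value((intptr_t) r) {}
      RegOrConstSketch(intptr_t c) : is_reg(false), value(c) {}
      bool     is_reg;
      intptr_t value;
    };

    // RegOrConstSketch x(0);              // error: 0 matches both constructors
    static RegOrConstSketch constant(int value) {
      return RegOrConstSketch((intptr_t) value);   // force the constant meaning
    }

    int main() {
      RegOrConstSketch zero = constant(0); // unambiguously the constant 0
      return zero.is_reg ? 1 : 0;
    }
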
 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                 address interpreted_entry) {
   // Just before the actual machine code entry point, allocate space
@@ -69,23 +75,476 @@
   return me;
 }
 
+// stack walking support
+
+frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
+  RicochetFrame* f = RicochetFrame::from_frame(fr);
+  if (map->update_map())
+    frame::update_map_with_saved_link(map, &f->_sender_link);
+  return frame(f->extended_sender_sp(), f->exact_sender_sp(), f->sender_link(), f->sender_pc());
+}
+
+void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
+  RicochetFrame* f = RicochetFrame::from_frame(fr);
+
+  // pick up the argument type descriptor:
+  Thread* thread = Thread::current();
+  Handle cookie(thread, f->compute_saved_args_layout(true, true));
+
+  // process fixed part
+  blk->do_oop((oop*)f->saved_target_addr());
+  blk->do_oop((oop*)f->saved_args_layout_addr());
+
+  // process variable arguments:
+  if (cookie.is_null())  return;  // no arguments to describe
+
+  // the cookie is actually the invokeExact method for my target
+  // his argument signature is what I'm interested in
+  assert(cookie->is_method(), "");
+  methodHandle invoker(thread, methodOop(cookie()));
+  assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
+  assert(!invoker->is_static(), "must have MH argument");
+  int slot_count = invoker->size_of_parameters();
+  assert(slot_count >= 1, "must include 'this'");
+  intptr_t* base = f->saved_args_base();
+  intptr_t* retval = NULL;
+  if (f->has_return_value_slot())
+    retval = f->return_value_slot_addr();
+  int slot_num = slot_count;
+  intptr_t* loc = &base[slot_num -= 1];
+  //blk->do_oop((oop*) loc);   // original target, which is irrelevant
+  int arg_num = 0;
+  for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
+    if (ss.at_return_type())  continue;
+    BasicType ptype = ss.type();
+    if (ptype == T_ARRAY)  ptype = T_OBJECT; // fold all refs to T_OBJECT
+    assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
+    loc = &base[slot_num -= type2size[ptype]];
+    bool is_oop = (ptype == T_OBJECT && loc != retval);
+    if (is_oop)  blk->do_oop((oop*)loc);
+    arg_num += 1;
+  }
+  assert(slot_num == 0, "must have processed all the arguments");
+}
+
+oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
+  oop cookie = NULL;
+  if (read_cache) {
+    cookie = saved_args_layout();
+    if (cookie != NULL)  return cookie;
+  }
+  oop target = saved_target();
+  oop mtype  = java_lang_invoke_MethodHandle::type(target);
+  oop mtform = java_lang_invoke_MethodType::form(mtype);
+  cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
+  if (write_cache)  {
+    (*saved_args_layout_addr()) = cookie;
+  }
+  return cookie;
+}
+
+void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
+                                                          // output params:
+                                                          int* bounce_offset,
+                                                          int* exception_offset,
+                                                          int* frame_size_in_words) {
+  (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
+
+  address start = __ pc();
+
 #ifdef ASSERT
-static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
-                           const char* error_message) {
+  __ hlt(); __ hlt(); __ hlt();
+  // here's a hint of something special:
+  __ push(MAGIC_NUMBER_1);
+  __ push(MAGIC_NUMBER_2);
+#endif //ASSERT
+  __ hlt();  // not reached
+
+  // A return PC has just been popped from the stack.
+  // Return values are in registers.
+  // The ebp points into the RicochetFrame, which contains
+  // a cleanup continuation we must return to.
+
+  (*bounce_offset) = __ pc() - start;
+  BLOCK_COMMENT("ricochet_blob.bounce");
+
+  if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
+  trace_method_handle(_masm, "return/ricochet_blob.bounce");
+
+  __ jmp(frame_address(continuation_offset_in_bytes()));
+  __ hlt();
+  DEBUG_ONLY(__ push(MAGIC_NUMBER_2));
+
+  (*exception_offset) = __ pc() - start;
+  BLOCK_COMMENT("ricochet_blob.exception");
+
+  // compare this to Interpreter::rethrow_exception_entry, which is parallel code
+  // for example, see TemplateInterpreterGenerator::generate_throw_exception
+  // Live registers in:
+  //   rax: exception
+  //   rdx: return address/pc that threw exception (ignored, always equal to bounce addr)
+  __ verify_oop(rax);
+
+  // no need to empty_FPU_stack or reinit_heapbase, since caller frame will do the same if needed
+
+  // Take down the frame.
+
+  // Cf. InterpreterMacroAssembler::remove_activation.
+  leave_ricochet_frame(_masm, /*rcx_recv=*/ noreg,
+                       saved_last_sp_register(),
+                       /*sender_pc_reg=*/ rdx);
+
+  // In between activations - previous activation type unknown yet
+  // compute continuation point - the continuation point expects the
+  // following registers set up:
+  //
+  // rax: exception
+  // rdx: return address/pc that threw exception
+  // rsp: expression stack of caller
+  // rbp: ebp of caller
+  __ push(rax);                                  // save exception
+  __ push(rdx);                                  // save return address
+  Register thread_reg = LP64_ONLY(r15_thread) NOT_LP64(rdi);
+  NOT_LP64(__ get_thread(thread_reg));
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address,
+                                   SharedRuntime::exception_handler_for_return_address),
+                  thread_reg, rdx);
+  __ mov(rbx, rax);                              // save exception handler
+  __ pop(rdx);                                   // restore return address
+  __ pop(rax);                                   // restore exception
+  __ jmp(rbx);                                   // jump to exception
+                                                 // handler of caller
+}
+
+void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
+                                                        Register rcx_recv,
+                                                        Register rax_argv,
+                                                        address return_handler,
+                                                        Register rbx_temp) {
+  const Register saved_last_sp = saved_last_sp_register();
+  Address rcx_mh_vmtarget(    rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
+  Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
+
+  // Push the RicochetFrame a word at a time.
+  // This creates something similar to an interpreter frame.
+  // Cf. TemplateInterpreterGenerator::generate_fixed_frame.
+  BLOCK_COMMENT("push RicochetFrame {");
+  DEBUG_ONLY(int rfo = (int) sizeof(RicochetFrame));
+  assert((rfo -= wordSize) == RicochetFrame::sender_pc_offset_in_bytes(), "");
+#define RF_FIELD(push_value, name)                                      \
+  { push_value;                                                         \
+    assert((rfo -= wordSize) == RicochetFrame::name##_offset_in_bytes(), ""); }
+  RF_FIELD(__ push(rbp),                   sender_link);
+  RF_FIELD(__ push(saved_last_sp),         exact_sender_sp);  // rsi/r13
+  RF_FIELD(__ pushptr(rcx_amh_conversion), conversion);
+  RF_FIELD(__ push(rax_argv),              saved_args_base);   // can be updated if args are shifted
+  RF_FIELD(__ push((int32_t) NULL_WORD),   saved_args_layout); // cache for GC layout cookie
+  if (UseCompressedOops) {
+    __ load_heap_oop(rbx_temp, rcx_mh_vmtarget);
+    RF_FIELD(__ push(rbx_temp),            saved_target);
+  } else {
+    RF_FIELD(__ pushptr(rcx_mh_vmtarget),  saved_target);
+  }
+  __ lea(rbx_temp, ExternalAddress(return_handler));
+  RF_FIELD(__ push(rbx_temp),              continuation);
+#undef RF_FIELD
+  assert(rfo == 0, "fully initialized the RicochetFrame");
+  // compute new frame pointer:
+  __ lea(rbp, Address(rsp, RicochetFrame::sender_link_offset_in_bytes()));
+  // Push guard word #1 in debug mode.
+  DEBUG_ONLY(__ push((int32_t) RicochetFrame::MAGIC_NUMBER_1));
+  // For debugging, leave behind an indication of which stub built this frame.
+  DEBUG_ONLY({ Label L; __ call(L, relocInfo::none); __ bind(L); });
+  BLOCK_COMMENT("} RicochetFrame");
+}
+
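
The RF_FIELD pattern above relies on the fact that pushing the fields of a struct in reverse declaration order on a downward-growing stack reproduces the struct's layout, and checks each push against the field's offset. The same idea in plain C++, with a toy layout (the real RicochetFrame has more fields and lives on the machine stack):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct FrameSketch {              // toy stand-in for RicochetFrame
      intptr_t continuation;          // offset 0: pushed last, lowest address
      intptr_t saved_target;
      intptr_t saved_args_base;
      intptr_t sender_link;           // highest offset: pushed first
    };

    // Each push is checked against offsetof, just as RF_FIELD checks the
    // corresponding *_offset_in_bytes() value.
    intptr_t* build_frame_sketch(intptr_t* sp, const FrameSketch& v) {
      size_t off = sizeof(FrameSketch);
    #define PUSH_FIELD(value, name)                                      \
      do { *--sp = (value); off -= sizeof(intptr_t);                     \
           assert(off == offsetof(FrameSketch, name)); } while (0)
      PUSH_FIELD(v.sender_link,     sender_link);
      PUSH_FIELD(v.saved_args_base, saved_args_base);
      PUSH_FIELD(v.saved_target,    saved_target);
      PUSH_FIELD(v.continuation,    continuation);
    #undef PUSH_FIELD
      assert(off == 0);               // every field initialized exactly once
      return sp;                      // base of the newly built frame
    }
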
+void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
+                                                        Register rcx_recv,
+                                                        Register new_sp_reg,
+                                                        Register sender_pc_reg) {
+  assert_different_registers(rcx_recv, new_sp_reg, sender_pc_reg);
+  const Register saved_last_sp = saved_last_sp_register();
+  // Take down the frame.
+  // Cf. InterpreterMacroAssembler::remove_activation.
+  BLOCK_COMMENT("end_ricochet_frame {");
+  // TO DO: If (exact_sender_sp - extended_sender_sp) > THRESH, compact the frame down.
+  // This will keep stack in bounds even with unlimited tailcalls, each with an adapter.
+  if (rcx_recv->is_valid())
+    __ movptr(rcx_recv,    RicochetFrame::frame_address(RicochetFrame::saved_target_offset_in_bytes()));
+  __ movptr(sender_pc_reg, RicochetFrame::frame_address(RicochetFrame::sender_pc_offset_in_bytes()));
+  __ movptr(saved_last_sp, RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes()));
+  __ movptr(rbp,           RicochetFrame::frame_address(RicochetFrame::sender_link_offset_in_bytes()));
+  __ mov(rsp, new_sp_reg);
+  BLOCK_COMMENT("} end_ricochet_frame");
+}
+
+// Emit code to verify that RBP is pointing at a valid ricochet frame.
+#ifdef ASSERT
+enum {
+  ARG_LIMIT = 255, SLOP = 4,
+  // use this parameter for checking for garbage stack movements:
+  UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
+  // the slop defends against false alarms due to fencepost errors
+};
+
+void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
+  // The stack should look like this:
+  //    ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
+  // Check various invariants.
+  verify_offsets();
+
+  Register rdi_temp = rdi;
+  Register rcx_temp = rcx;
+  { __ push(rdi_temp); __ push(rcx_temp); }
+#define UNPUSH_TEMPS \
+  { __ pop(rcx_temp);  __ pop(rdi_temp); }
+
+  Address magic_number_1_addr  = RicochetFrame::frame_address(RicochetFrame::magic_number_1_offset_in_bytes());
+  Address magic_number_2_addr  = RicochetFrame::frame_address(RicochetFrame::magic_number_2_offset_in_bytes());
+  Address continuation_addr    = RicochetFrame::frame_address(RicochetFrame::continuation_offset_in_bytes());
+  Address conversion_addr      = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
+  Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
+
+  Label L_bad, L_ok;
+  BLOCK_COMMENT("verify_clean {");
+  // Magic numbers must check out:
+  __ cmpptr(magic_number_1_addr, (int32_t) MAGIC_NUMBER_1);
+  __ jcc(Assembler::notEqual, L_bad);
+  __ cmpptr(magic_number_2_addr, (int32_t) MAGIC_NUMBER_2);
+  __ jcc(Assembler::notEqual, L_bad);
+
+  // Arguments pointer must look reasonable:
+  __ movptr(rcx_temp, saved_args_base_addr);
+  __ cmpptr(rcx_temp, rbp);
+  __ jcc(Assembler::below, L_bad);
+  __ subptr(rcx_temp, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize);
+  __ cmpptr(rcx_temp, rbp);
+  __ jcc(Assembler::above, L_bad);
+
+  load_conversion_dest_type(_masm, rdi_temp, conversion_addr);
+  __ cmpl(rdi_temp, T_VOID);
+  __ jcc(Assembler::equal, L_ok);
+  __ movptr(rcx_temp, saved_args_base_addr);
+  load_conversion_vminfo(_masm, rdi_temp, conversion_addr);
+  __ cmpptr(Address(rcx_temp, rdi_temp, Interpreter::stackElementScale()),
+            (int32_t) RETURN_VALUE_PLACEHOLDER);
+  __ jcc(Assembler::equal, L_ok);
+  __ BIND(L_bad);
+  UNPUSH_TEMPS;
+  __ stop("damaged ricochet frame");
+  __ BIND(L_ok);
+  UNPUSH_TEMPS;
+  BLOCK_COMMENT("} verify_clean");
+
+#undef UNPUSH_TEMPS
+
+}
+#endif //ASSERT
+
+void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
+  if (VerifyMethodHandles)
+    verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(),
+                 "AMH argument is a Class");
+  __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
+}
+
+void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
+  int bits   = BitsPerByte;
+  int offset = (CONV_VMINFO_SHIFT / bits);
+  int shift  = (CONV_VMINFO_SHIFT % bits);
+  __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
+  assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load");
+  assert(shift == 0, "no shift needed");
+}
+
+void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
+  int bits   = BitsPerByte;
+  int offset = (CONV_DEST_TYPE_SHIFT / bits);
+  int shift  = (CONV_DEST_TYPE_SHIFT % bits);
+  __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
+  assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load");
+  __ shrl(reg, shift);
+  DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1));
+  assert((shift + conv_type_bits) == bits, "left justified in byte");
+}
+
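
load_conversion_vminfo and load_conversion_dest_type exploit the fact that each field lives inside a single byte of the packed conversion word: the generated code loads only that byte and shifts within it. A worked C++ equivalent (the shift and mask below are made-up illustration values, not the real CONV_* constants):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Extract a bit-field from a packed 32-bit word by touching only the byte
    // that holds it, mirroring the load_unsigned_byte + shrl sequence above.
    // Assumes little-endian layout (true on x86) and a field that does not
    // straddle a byte boundary.
    static unsigned extract_byte_field(uint32_t packed, int shift, unsigned mask) {
      uint8_t bytes[sizeof(uint32_t)];
      std::memcpy(bytes, &packed, sizeof(bytes));
      int byte_index  = shift / 8;      // which byte holds the field
      int bit_in_byte = shift % 8;      // offset of the field within that byte
      return (bytes[byte_index] >> bit_in_byte) & mask;
    }

    int main() {
      const int      kShift = 16;       // illustration only
      const unsigned kMask  = 0xff;
      uint32_t word = (0x2au << kShift) | 0x1234u;
      assert(extract_byte_field(word, kShift, kMask) == 0x2a);
      return 0;
    }
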
+void MethodHandles::load_stack_move(MacroAssembler* _masm,
+                                    Register rdi_stack_move,
+                                    Register rcx_amh,
+                                    bool might_be_negative) {
+  BLOCK_COMMENT("load_stack_move {");
+  Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
+  __ movl(rdi_stack_move, rcx_amh_conversion);
+  __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
+#ifdef _LP64
+  if (might_be_negative) {
+    // clean high bits of stack motion register (was loaded as an int)
+    __ movslq(rdi_stack_move, rdi_stack_move);
+  }
+#endif //_LP64
+  if (VerifyMethodHandles) {
+    Label L_ok, L_bad;
+    int32_t stack_move_limit = 0x4000;  // extra-large
+    __ cmpptr(rdi_stack_move, stack_move_limit);
+    __ jcc(Assembler::greaterEqual, L_bad);
+    __ cmpptr(rdi_stack_move, -stack_move_limit);
+    __ jcc(Assembler::greater, L_ok);
+    __ bind(L_bad);
+    __ stop("load_stack_move of garbage value");
+    __ BIND(L_ok);
+  }
+  BLOCK_COMMENT("} load_stack_move");
+}
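
The three helpers above all pick apart the packed AdapterMethodHandle 'conversion' word: the vminfo and dest-type fields are fetched with a single byte load at CONV_*_SHIFT/8 followed by a shift of CONV_*_SHIFT%8, while the signed stack-move field is taken from the full 32-bit word with an arithmetic shift. As a sanity aid only, here is a small standalone C++ sketch of that equivalence; the CONV_* values below are hypothetical stand-ins (the real ones live in methodHandles.hpp) and a little-endian layout is assumed, as on x86.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Hypothetical stand-ins for the real CONV_* layout constants (methodHandles.hpp).
    static const int      CONV_VMINFO_SHIFT     = 0;
    static const uint32_t CONV_VMINFO_MASK      = 0xFF;   // fills the rest of its byte
    static const int      CONV_DEST_TYPE_SHIFT  = 12;
    static const uint32_t CONV_TYPE_MASK        = 0x0F;   // fills the rest of its byte
    static const int      CONV_STACK_MOVE_SHIFT = 16;

    // Reference extraction: shift the whole 32-bit word, then mask.
    static uint32_t extract_field(uint32_t conversion, int shift, uint32_t mask) {
      return (conversion >> shift) & mask;
    }

    // Byte-load extraction, as the generated code does it (little-endian x86):
    // load_unsigned_byte at offset shift/8, then shrl by shift%8.
    static uint32_t extract_field_bytewise(uint32_t conversion, int shift, uint32_t mask) {
      uint8_t bytes[sizeof conversion];
      std::memcpy(bytes, &conversion, sizeof conversion);
      uint32_t b = bytes[shift / 8];
      return (b >> (shift % 8)) & mask;
    }

    int main() {
      uint32_t conversion = 0x1234ABCDu;
      assert(extract_field(conversion, CONV_VMINFO_SHIFT, CONV_VMINFO_MASK)
          == extract_field_bytewise(conversion, CONV_VMINFO_SHIFT, CONV_VMINFO_MASK));
      assert(extract_field(conversion, CONV_DEST_TYPE_SHIFT, CONV_TYPE_MASK)
          == extract_field_bytewise(conversion, CONV_DEST_TYPE_SHIFT, CONV_TYPE_MASK));
      // The stack-move field is signed, so load_stack_move uses sarl (arithmetic shift):
      int32_t stack_move = (int32_t) conversion >> CONV_STACK_MOVE_SHIFT;
      assert(stack_move == 0x1234);   // high half of the sample word
      return 0;
    }
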
+
+#ifdef ASSERT
+void MethodHandles::RicochetFrame::verify_offsets() {
+  // Check compatibility of this struct with the more generally used offsets of class frame:
+  int ebp_off = sender_link_offset_in_bytes();  // offset from struct base to local rbp value
+  assert(ebp_off + wordSize*frame::interpreter_frame_method_offset      == saved_args_base_offset_in_bytes(), "");
+  assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset     == conversion_offset_in_bytes(), "");
+  assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset   == exact_sender_sp_offset_in_bytes(), "");
+  // These last two have to be exact:
+  assert(ebp_off + wordSize*frame::link_offset                          == sender_link_offset_in_bytes(), "");
+  assert(ebp_off + wordSize*frame::return_addr_offset                   == sender_pc_offset_in_bytes(), "");
+}
+
+void MethodHandles::RicochetFrame::verify() const {
+  verify_offsets();
+  assert(magic_number_1() == MAGIC_NUMBER_1, "");
+  assert(magic_number_2() == MAGIC_NUMBER_2, "");
+  if (!Universe::heap()->is_gc_active()) {
+    if (saved_args_layout() != NULL) {
+      assert(saved_args_layout()->is_method(), "must be valid oop");
+    }
+    if (saved_target() != NULL) {
+      assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value");
+    }
+  }
+  int conv_op = adapter_conversion_op(conversion());
+  assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS ||
+         conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS ||
+         conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF,
+         "must be a sane conversion");
+  if (has_return_value_slot()) {
+    assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, "");
+  }
+}
+#endif //ASSERT
+
+#ifdef ASSERT
+void MethodHandles::verify_argslot(MacroAssembler* _masm,
+                                   Register argslot_reg,
+                                   const char* error_message) {
   // Verify that argslot lies within (rsp, rbp].
   Label L_ok, L_bad;
-  BLOCK_COMMENT("{ verify_argslot");
+  BLOCK_COMMENT("verify_argslot {");
   __ cmpptr(argslot_reg, rbp);
   __ jccb(Assembler::above, L_bad);
   __ cmpptr(rsp, argslot_reg);
   __ jccb(Assembler::below, L_ok);
   __ bind(L_bad);
   __ stop(error_message);
-  __ bind(L_ok);
+  __ BIND(L_ok);
   BLOCK_COMMENT("} verify_argslot");
 }
-#endif
+
+void MethodHandles::verify_argslots(MacroAssembler* _masm,
+                                    RegisterOrConstant arg_slots,
+                                    Register arg_slot_base_reg,
+                                    bool negate_argslots,
+                                    const char* error_message) {
+  // Verify that [argslot..argslot+size) lies within (rsp, rbp).
+  Label L_ok, L_bad;
+  Register rdi_temp = rdi;
+  BLOCK_COMMENT("verify_argslots {");
+  __ push(rdi_temp);
+  if (negate_argslots) {
+    if (arg_slots.is_constant()) {
+      arg_slots = -1 * arg_slots.as_constant();
+    } else {
+      __ movptr(rdi_temp, arg_slots);
+      __ negptr(rdi_temp);
+      arg_slots = rdi_temp;
+    }
+  }
+  __ lea(rdi_temp, Address(arg_slot_base_reg, arg_slots, Interpreter::stackElementScale()));
+  __ cmpptr(rdi_temp, rbp);
+  __ pop(rdi_temp);
+  __ jcc(Assembler::above, L_bad);
+  __ cmpptr(rsp, arg_slot_base_reg);
+  __ jcc(Assembler::below, L_ok);
+  __ bind(L_bad);
+  __ stop(error_message);
+  __ BIND(L_ok);
+  BLOCK_COMMENT("} verify_argslots");
+}
 
+// Make sure that arg_slots has the same sign as the given direction.
+// If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
+void MethodHandles::verify_stack_move(MacroAssembler* _masm,
+                                      RegisterOrConstant arg_slots, int direction) {
+  bool allow_zero = arg_slots.is_constant();
+  if (direction == 0) { direction = +1; allow_zero = true; }
+  assert(stack_move_unit() == -1, "else add extra checks here");
+  if (arg_slots.is_register()) {
+    Label L_ok, L_bad;
+    BLOCK_COMMENT("verify_stack_move {");
+    // testl(arg_slots.as_register(), -stack_move_unit() - 1);  // no need
+    // jcc(Assembler::notZero, L_bad);
+    __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
+    if (direction > 0) {
+      __ jcc(allow_zero ? Assembler::less : Assembler::lessEqual, L_bad);
+      __ cmpptr(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
+      __ jcc(Assembler::less, L_ok);
+    } else {
+      __ jcc(allow_zero ? Assembler::greater : Assembler::greaterEqual, L_bad);
+      __ cmpptr(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
+      __ jcc(Assembler::greater, L_ok);
+    }
+    __ bind(L_bad);
+    if (direction > 0)
+      __ stop("assert arg_slots > 0");
+    else
+      __ stop("assert arg_slots < 0");
+    __ BIND(L_ok);
+    BLOCK_COMMENT("} verify_stack_move");
+  } else {
+    intptr_t size = arg_slots.as_constant();
+    if (direction < 0)  size = -size;
+    assert(size >= 0, "correct direction of constant move");
+    assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
+  }
+}
+
+void MethodHandles::verify_klass(MacroAssembler* _masm,
+                                 Register obj, KlassHandle klass,
+                                 const char* error_message) {
+  oop* klass_addr = klass.raw_value();
+  assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
+         klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
+         "must be one of the SystemDictionaryHandles");
+  Register temp = rdi;
+  Label L_ok, L_bad;
+  BLOCK_COMMENT("verify_klass {");
+  __ verify_oop(obj);
+  __ testptr(obj, obj);
+  __ jcc(Assembler::zero, L_bad);
+  __ push(temp);
+  __ load_klass(temp, obj);
+  __ cmpptr(temp, ExternalAddress((address) klass_addr));
+  __ jcc(Assembler::equal, L_ok);
+  intptr_t super_check_offset = klass->super_check_offset();
+  __ movptr(temp, Address(temp, super_check_offset));
+  __ cmpptr(temp, ExternalAddress((address) klass_addr));
+  __ jcc(Assembler::equal, L_ok);
+  __ pop(temp);
+  __ bind(L_bad);
+  __ stop(error_message);
+  __ BIND(L_ok);
+  __ pop(temp);
+  BLOCK_COMMENT("} verify_klass");
+}
+#endif //ASSERT
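
verify_klass accepts the object if its klass is the expected klass, or if the word found at klass + super_check_offset (the one-load fast superclass probe) is the expected klass. A rough standalone C++ sketch of just those two comparisons, with a deliberately fake klass layout (nothing like the real HotSpot Klass), might look like:

    #include <cassert>
    #include <cstddef>

    // Deliberately fake, flattened stand-in for a klass record.
    struct FakeKlass {
      FakeKlass* super_probe;   // pretend the super-check word lives at this offset
    };

    // The two comparisons verify_klass emits:
    //   cmpptr(temp, expected); je L_ok;                      -- exact match
    //   movptr(temp, Address(temp, super_check_offset));
    //   cmpptr(temp, expected); je L_ok;                      -- fast superclass probe
    static bool klass_matches(const FakeKlass* obj_klass, const FakeKlass* expected,
                              size_t super_check_offset) {
      if (obj_klass == expected)  return true;
      const FakeKlass* probe =
          *(const FakeKlass* const*)((const char*) obj_klass + super_check_offset);
      return probe == expected;
    }

    int main() {
      FakeKlass object_klass = { NULL };
      FakeKlass string_klass = { &object_klass };   // "subclass" whose probe hits Object
      assert(klass_matches(&object_klass, &object_klass, offsetof(FakeKlass, super_probe)));
      assert(klass_matches(&string_klass, &object_klass, offsetof(FakeKlass, super_probe)));
      return 0;
    }
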
 
 // Code generation
 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
@@ -103,19 +562,20 @@
   // emit WrongMethodType path first, to enable jccb back-branch from main path
   Label wrong_method_type;
   __ bind(wrong_method_type);
-  Label invoke_generic_slow_path;
+  Label invoke_generic_slow_path, invoke_exact_error_path;
   assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");;
   __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
   __ jcc(Assembler::notEqual, invoke_generic_slow_path);
-  __ push(rax_mtype);       // required mtype
-  __ push(rcx_recv);        // bad mh (1st stacked argument)
-  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
+  __ jmp(invoke_exact_error_path);
 
   // here's where control starts out:
   __ align(CodeEntryAlignment);
   address entry_point = __ pc();
 
   // fetch the MethodType from the method handle into rax (the 'check' register)
+  // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
+  // This would simplify several touchy bits of code.
+  // See 6984712: JSR 292 method handle calls need a clean argument base pointer
   {
     Register tem = rbx_method;
     for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
@@ -128,17 +588,28 @@
   __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
   Register rdx_vmslots = rdx_temp;
   __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
-  __ movptr(rcx_recv, __ argument_address(rdx_vmslots));
+  Address mh_receiver_slot_addr = __ argument_address(rdx_vmslots);
+  __ movptr(rcx_recv, mh_receiver_slot_addr);
 
   trace_method_handle(_masm, "invokeExact");
 
   __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
+
+  // Nobody uses the MH receiver slot after this.  Make sure.
+  DEBUG_ONLY(__ movptr(mh_receiver_slot_addr, (int32_t)0x999999));
+
   __ jump_to_method_handle_entry(rcx_recv, rdi_temp);
 
+  // error path for invokeExact (only)
+  __ bind(invoke_exact_error_path);
+  // Stub wants expected type in rax and the actual type in rcx
+  __ jump(ExternalAddress(StubRoutines::throw_WrongMethodTypeException_entry()));
+
   // for invokeGeneric (only), apply argument and result conversions on the fly
   __ bind(invoke_generic_slow_path);
 #ifdef ASSERT
-  { Label L;
+  if (VerifyMethodHandles) {
+    Label L;
     __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric);
     __ jcc(Assembler::equal, L);
     __ stop("bad methodOop::intrinsic_id");
@@ -150,22 +621,14 @@
   // make room on the stack for another pointer:
   Register rcx_argslot = rcx_recv;
   __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1));
-  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK,
+  insert_arg_slots(_masm, 2 * stack_move_unit(),
                    rcx_argslot, rbx_temp, rdx_temp);
 
   // load up an adapter from the calling type (Java weaves this)
-  __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
   Register rdx_adapter = rdx_temp;
-  // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes()));
-  // deal with old JDK versions:
-  __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
-  __ cmpptr(rdi_temp, rdx_temp);
-  Label sorry_no_invoke_generic;
-  __ jcc(Assembler::below, sorry_no_invoke_generic);
-
-  __ load_heap_oop(rdx_adapter, Address(rdi_temp, 0));
-  __ testptr(rdx_adapter, rdx_adapter);
-  __ jcc(Assembler::zero, sorry_no_invoke_generic);
+  __ load_heap_oop(rdx_temp,    Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,               rdi_temp)));
+  __ load_heap_oop(rdx_adapter, Address(rdx_temp,  __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
+  __ verify_oop(rdx_adapter);
   __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter);
   // As a trusted first argument, pass the type being called, so the adapter knows
   // the actual types of the arguments and return values.
@@ -176,49 +639,26 @@
   trace_method_handle(_masm, "invokeGeneric");
   __ jump_to_method_handle_entry(rcx, rdi_temp);
 
-  __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
-  __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize));  // recover original MH
-  __ push(rax_mtype);       // required mtype
-  __ push(rcx_recv);        // bad mh (1st stacked argument)
-  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
-
   return entry_point;
 }
 
 // Helper to insert argument slots into the stack.
-// arg_slots must be a multiple of stack_move_unit() and <= 0
+// arg_slots must be a multiple of stack_move_unit() and < 0
+// rax_argslot is decremented to point to the new (shifted) location of the argslot
+// But, rdx_temp ends up holding the original value of rax_argslot.
 void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                      RegisterOrConstant arg_slots,
-                                     int arg_mask,
                                      Register rax_argslot,
-                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
-  assert(temp3_reg == noreg, "temp3 not required");
+                                     Register rbx_temp, Register rdx_temp) {
+  // allow constant zero
+  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
+    return;
   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
-
-#ifdef ASSERT
-  verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame");
-  if (arg_slots.is_register()) {
-    Label L_ok, L_bad;
-    __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ jccb(Assembler::greater, L_bad);
-    __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
-    __ jccb(Assembler::zero, L_ok);
-    __ bind(L_bad);
-    __ stop("assert arg_slots <= 0 and clear low bits");
-    __ bind(L_ok);
-  } else {
-    assert(arg_slots.as_constant() <= 0, "");
-    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
-  }
-#endif //ASSERT
-
-#ifdef _LP64
-  if (arg_slots.is_register()) {
-    // clean high bits of stack motion register (was loaded as an int)
-    __ movslq(arg_slots.as_register(), arg_slots.as_register());
-  }
-#endif
+  if (VerifyMethodHandles)
+    verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame");
+  if (VerifyMethodHandles)
+    verify_stack_move(_masm, arg_slots, -1);
 
   // Make space on the stack for the inserted argument(s).
   // Then pull down everything shallower than rax_argslot.
@@ -230,59 +670,39 @@
   //   argslot -= size;
   BLOCK_COMMENT("insert_arg_slots {");
   __ mov(rdx_temp, rsp);                        // source pointer for copy
-  __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
+  __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale()));
   {
     Label loop;
     __ BIND(loop);
     // pull one word down each time through the loop
     __ movptr(rbx_temp, Address(rdx_temp, 0));
-    __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
+    __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp);
     __ addptr(rdx_temp, wordSize);
     __ cmpptr(rdx_temp, rax_argslot);
-    __ jccb(Assembler::less, loop);
+    __ jcc(Assembler::below, loop);
   }
 
   // Now move the argslot down, to point to the opened-up space.
-  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
+  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale()));
   BLOCK_COMMENT("} insert_arg_slots");
 }
 
 // Helper to remove argument slots from the stack.
-// arg_slots must be a multiple of stack_move_unit() and >= 0
+// arg_slots must be a multiple of stack_move_unit() and > 0
 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
-                                    RegisterOrConstant arg_slots,
-                                    Register rax_argslot,
-                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
-  assert(temp3_reg == noreg, "temp3 not required");
+                                     RegisterOrConstant arg_slots,
+                                     Register rax_argslot,
+                                     Register rbx_temp, Register rdx_temp) {
+  // allow constant zero
+  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
+    return;
   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
-
-#ifdef ASSERT
-  // Verify that [argslot..argslot+size) lies within (rsp, rbp).
-  __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
-  verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame");
-  if (arg_slots.is_register()) {
-    Label L_ok, L_bad;
-    __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ jccb(Assembler::less, L_bad);
-    __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
-    __ jccb(Assembler::zero, L_ok);
-    __ bind(L_bad);
-    __ stop("assert arg_slots >= 0 and clear low bits");
-    __ bind(L_ok);
-  } else {
-    assert(arg_slots.as_constant() >= 0, "");
-    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
-  }
-#endif //ASSERT
-
-#ifdef _LP64
-  if (false) {                  // not needed, since register is positive
-    // clean high bits of stack motion register (was loaded as an int)
-    if (arg_slots.is_register())
-      __ movslq(arg_slots.as_register(), arg_slots.as_register());
-  }
-#endif
+  if (VerifyMethodHandles)
+    verify_argslots(_masm, arg_slots, rax_argslot, false,
+                    "deleted argument(s) must fall within current frame");
+  if (VerifyMethodHandles)
+    verify_stack_move(_masm, arg_slots, +1);
 
   BLOCK_COMMENT("remove_arg_slots {");
   // Pull up everything shallower than rax_argslot.
@@ -299,19 +719,249 @@
     __ BIND(loop);
     // pull one word up each time through the loop
     __ movptr(rbx_temp, Address(rdx_temp, 0));
-    __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
+    __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp);
     __ addptr(rdx_temp, -wordSize);
     __ cmpptr(rdx_temp, rsp);
-    __ jccb(Assembler::greaterEqual, loop);
+    __ jcc(Assembler::aboveEqual, loop);
   }
 
   // Now move the argslot up, to point to the just-copied block.
-  __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
+  __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale()));
   // And adjust the argslot address to point at the deletion point.
-  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
+  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale()));
   BLOCK_COMMENT("} remove_arg_slots");
 }
 
+// Helper to copy argument slots to the top of the stack.
+// The sequence starts with rax_argslot and is counted by slot_count
+// slot_count must be a multiple of stack_move_unit() and >= 0
+// This function blows the temps but does not change rax_argslot.
+void MethodHandles::push_arg_slots(MacroAssembler* _masm,
+                                   Register rax_argslot,
+                                   RegisterOrConstant slot_count,
+                                   int skip_words_count,
+                                   Register rbx_temp, Register rdx_temp) {
+  assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
+                             (!slot_count.is_register() ? rbp : slot_count.as_register()),
+                             rsp);
+  assert(Interpreter::stackElementSize == wordSize, "else change this code");
+
+  if (VerifyMethodHandles)
+    verify_stack_move(_masm, slot_count, 0);
+
+  // allow constant zero
+  if (slot_count.is_constant() && slot_count.as_constant() == 0)
+    return;
+
+  BLOCK_COMMENT("push_arg_slots {");
+
+  Register rbx_top = rbx_temp;
+
+  // There is at most 1 word to carry down with the TOS.
+  switch (skip_words_count) {
+  case 1: __ pop(rdx_temp); break;
+  case 0:                   break;
+  default: ShouldNotReachHere();
+  }
+
+  if (slot_count.is_constant()) {
+    for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
+      __ pushptr(Address(rax_argslot, i * wordSize));
+    }
+  } else {
+    Label L_plural, L_loop, L_break;
+    // Emit code to dynamically check for the common cases, zero and one slot.
+    __ cmpl(slot_count.as_register(), (int32_t) 1);
+    __ jccb(Assembler::greater, L_plural);
+    __ jccb(Assembler::less, L_break);
+    __ pushptr(Address(rax_argslot, 0));
+    __ jmpb(L_break);
+    __ BIND(L_plural);
+
+    // Loop for 2 or more:
+    //   rbx = &rax[slot_count]
+    //   while (rbx > rax)  *(--rsp) = *(--rbx)
+    __ lea(rbx_top, Address(rax_argslot, slot_count, Address::times_ptr));
+    __ BIND(L_loop);
+    __ subptr(rbx_top, wordSize);
+    __ pushptr(Address(rbx_top, 0));
+    __ cmpptr(rbx_top, rax_argslot);
+    __ jcc(Assembler::above, L_loop);
+    __ bind(L_break);
+  }
+  switch (skip_words_count) {
+  case 1: __ push(rdx_temp); break;
+  case 0:                    break;
+  default: ShouldNotReachHere();
+  }
+  BLOCK_COMMENT("} push_arg_slots");
+}
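
Since push_arg_slots is easiest to read as plain data movement, here is a minimal C++ model of what it does to the stack (a vector whose back() plays the role of TOS; this only illustrates the slot order, not the register-level code): slots are pushed from the highest index down, so argslot[0] ends up on top, and at most one already-pushed word (skip_words_count) is carried across the copy.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Model of push_arg_slots: back() of the vector plays the role of TOS.
    static void push_arg_slots_model(std::vector<intptr_t>& stack,
                                     const intptr_t* argslot, int slot_count,
                                     int skip_words) {
      assert(skip_words == 0 || skip_words == 1);       // "at most 1 word to carry down"
      intptr_t carried = 0;
      if (skip_words == 1) { carried = stack.back(); stack.pop_back(); }   // pop(rdx_temp)
      for (int i = slot_count - 1; i >= 0; i--)         // pushptr(Address(rax_argslot, i*wordSize))
        stack.push_back(argslot[i]);                    // so argslot[0] lands on top
      if (skip_words == 1)  stack.push_back(carried);   // push(rdx_temp)
    }

    int main() {
      intptr_t args[3] = { 10, 20, 30 };
      std::vector<intptr_t> stack;
      stack.push_back(999);                             // pretend a return PC is on top
      push_arg_slots_model(stack, args, 3, 1);
      assert(stack.back() == 999);                      // carried word is back on top
      assert(stack[stack.size() - 2] == 10);            // argslot[0] right under it
      return 0;
    }
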
+
+// in-place movement; no change to rsp
+// blows rax_temp, rdx_temp
+void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
+                                      Register rbx_bottom,  // invariant
+                                      Address  top_addr,     // can use rax_temp
+                                      RegisterOrConstant positive_distance_in_slots,
+                                      Register rax_temp, Register rdx_temp) {
+  BLOCK_COMMENT("move_arg_slots_up {");
+  assert_different_registers(rbx_bottom,
+                             rax_temp, rdx_temp,
+                             positive_distance_in_slots.register_or_noreg());
+  Label L_loop, L_break;
+  Register rax_top = rax_temp;
+  if (!top_addr.is_same_address(Address(rax_top, 0)))
+    __ lea(rax_top, top_addr);
+  // Detect empty (or broken) loop:
+#ifdef ASSERT
+  if (VerifyMethodHandles) {
+    // Verify that &bottom < &top (non-empty interval)
+    Label L_ok, L_bad;
+    if (positive_distance_in_slots.is_register()) {
+      __ cmpptr(positive_distance_in_slots.as_register(), (int32_t) 0);
+      __ jcc(Assembler::lessEqual, L_bad);
+    }
+    __ cmpptr(rbx_bottom, rax_top);
+    __ jcc(Assembler::below, L_ok);
+    __ bind(L_bad);
+    __ stop("valid bounds (copy up)");
+    __ BIND(L_ok);
+  }
+#endif
+  __ cmpptr(rbx_bottom, rax_top);
+  __ jccb(Assembler::aboveEqual, L_break);
+  // work rax down to rbx, copying contiguous data upwards
+  // In pseudo-code:
+  //   [rbx, rax) = &[bottom, top)
+  //   while (--rax >= rbx) *(rax + distance) = *(rax + 0);
+  __ BIND(L_loop);
+  __ subptr(rax_top, wordSize);
+  __ movptr(rdx_temp, Address(rax_top, 0));
+  __ movptr(          Address(rax_top, positive_distance_in_slots, Address::times_ptr), rdx_temp);
+  __ cmpptr(rax_top, rbx_bottom);
+  __ jcc(Assembler::above, L_loop);
+  assert(Interpreter::stackElementSize == wordSize, "else change loop");
+  __ bind(L_break);
+  BLOCK_COMMENT("} move_arg_slots_up");
+}
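
move_arg_slots_up (and its mirror move_arg_slots_down just below) is an in-place overlapping copy, so the direction of traversal matters: walking from the top of the interval downward guarantees each word is read before the region it slides into is overwritten, exactly as memmove must do when the destination is above the source. A small hypothetical C++ model of the upward case:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Model of move_arg_slots_up: slide the words in [bottom, top) upward by
    // 'distance' slots, in place, walking from the top of the interval down.
    static void move_slots_up_model(intptr_t* bottom, intptr_t* top, int distance) {
      assert(distance > 0);
      for (intptr_t* p = top; p > bottom; ) {
        --p;                    // subptr(rax_top, wordSize)
        p[distance] = p[0];     // *(rax_top + distance) = *(rax_top + 0)
      }                         // loop while rax_top > rbx_bottom
    }

    int main() {
      intptr_t slots[6]  = { 1, 2, 3, 4, 0, 0 };
      intptr_t expect[6] = { 1, 2, 3, 4, 0, 0 };
      move_slots_up_model(slots, slots + 4, 2);          // slide 4 words up by 2 slots
      std::memmove(expect + 2, expect, 4 * sizeof(intptr_t));
      assert(std::memcmp(slots, expect, sizeof slots) == 0);
      return 0;
    }
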
+
+// in-place movement; no change to rsp
+// blows rax_temp, rdx_temp
+void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
+                                        Address  bottom_addr,  // can use rax_temp
+                                        Register rbx_top,      // invariant
+                                        RegisterOrConstant negative_distance_in_slots,
+                                        Register rax_temp, Register rdx_temp) {
+  BLOCK_COMMENT("move_arg_slots_down {");
+  assert_different_registers(rbx_top,
+                             negative_distance_in_slots.register_or_noreg(),
+                             rax_temp, rdx_temp);
+  Label L_loop, L_break;
+  Register rax_bottom = rax_temp;
+  if (!bottom_addr.is_same_address(Address(rax_bottom, 0)))
+    __ lea(rax_bottom, bottom_addr);
+  // Detect empty (or broken) loop:
+#ifdef ASSERT
+  assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, "");
+  if (VerifyMethodHandles) {
+    // Verify that &bottom < &top (non-empty interval)
+    Label L_ok, L_bad;
+    if (negative_distance_in_slots.is_register()) {
+      __ cmpptr(negative_distance_in_slots.as_register(), (int32_t) 0);
+      __ jcc(Assembler::greaterEqual, L_bad);
+    }
+    __ cmpptr(rax_bottom, rbx_top);
+    __ jcc(Assembler::below, L_ok);
+    __ bind(L_bad);
+    __ stop("valid bounds (copy down)");
+    __ BIND(L_ok);
+  }
+#endif
+  __ cmpptr(rax_bottom, rbx_top);
+  __ jccb(Assembler::aboveEqual, L_break);
+  // work rax up to rbx, copying contiguous data downwards
+  // In pseudo-code:
+  //   [rax, rbx) = &[bottom, top)
+  //   while (rax < rbx) *(rax - distance) = *(rax + 0), rax++;
+  __ BIND(L_loop);
+  __ movptr(rdx_temp, Address(rax_bottom, 0));
+  __ movptr(          Address(rax_bottom, negative_distance_in_slots, Address::times_ptr), rdx_temp);
+  __ addptr(rax_bottom, wordSize);
+  __ cmpptr(rax_bottom, rbx_top);
+  __ jcc(Assembler::below, L_loop);
+  assert(Interpreter::stackElementSize == wordSize, "else change loop");
+  __ bind(L_break);
+  BLOCK_COMMENT("} move_arg_slots_down");
+}
+
+// Copy from a field or array element to a stacked argument slot.
+// is_element (ignored) says whether the caller is loading an array element instead of an instance field.
+void MethodHandles::move_typed_arg(MacroAssembler* _masm,
+                                   BasicType type, bool is_element,
+                                   Address slot_dest, Address value_src,
+                                   Register rbx_temp, Register rdx_temp) {
+  BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)");
+  if (type == T_OBJECT || type == T_ARRAY) {
+    __ load_heap_oop(rbx_temp, value_src);
+    __ movptr(slot_dest, rbx_temp);
+  } else if (type != T_VOID) {
+    int  arg_size      = type2aelembytes(type);
+    bool arg_is_signed = is_signed_subword_type(type);
+    int  slot_size     = (arg_size > wordSize) ? arg_size : wordSize;
+    __ load_sized_value(  rdx_temp,  value_src, arg_size, arg_is_signed, rbx_temp);
+    __ store_sized_value( slot_dest, rdx_temp,  slot_size,               rbx_temp);
+  }
+  BLOCK_COMMENT("} move_typed_arg");
+}
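
For primitives, move_typed_arg loads the value at its natural size (sign-extending sub-word types) and then stores it into a full interpreter stack slot, since slot_size is rounded up to at least wordSize. A toy C++ illustration of that widening (the short field here is purely hypothetical):

    #include <cassert>
    #include <cstdint>

    // Toy model of the primitive path: a 16-bit field is loaded at arg_size = 2
    // with sign extension, then stored into a word-sized slot (slot_size = wordSize).
    static void move_short_arg_model(intptr_t* slot_dest, const int16_t* value_src) {
      intptr_t widened = (intptr_t) *value_src;   // load_sized_value(..., 2, /*signed*/ true)
      *slot_dest = widened;                       // store_sized_value(slot_dest, ..., wordSize)
    }

    int main() {
      int16_t field = -7;
      intptr_t slot = 0;
      move_short_arg_model(&slot, &field);
      assert(slot == -7);                         // sign preserved, value fills the whole slot
      return 0;
    }
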
+
+void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
+                                      Address return_slot) {
+  BLOCK_COMMENT("move_return_value {");
+  // Old versions of the JVM must clean the FPU stack after every return.
+#ifndef _LP64
+#ifdef COMPILER2
+  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
+  if ((type == T_FLOAT && UseSSE < 1) || (type == T_DOUBLE && UseSSE < 2)) {
+    for (int i = 1; i < 8; i++) {
+        __ ffree(i);
+    }
+  } else if (UseSSE < 2) {
+    __ empty_FPU_stack();
+  }
+#endif //COMPILER2
+#endif //!_LP64
+
+  // Look at the type and pull the value out of the corresponding register.
+  if (type == T_VOID) {
+    // nothing to do
+  } else if (type == T_OBJECT) {
+    __ movptr(return_slot, rax);
+  } else if (type == T_INT || is_subword_type(type)) {
+    // write the whole word, even if only 32 bits is significant
+    __ movptr(return_slot, rax);
+  } else if (type == T_LONG) {
+    // store the value by parts
+    // Note: We assume longs are contiguous (if misaligned) on the interpreter stack.
+    __ store_sized_value(return_slot, rax, BytesPerLong, rdx);
+  } else if (NOT_LP64((type == T_FLOAT  && UseSSE < 1) ||
+                      (type == T_DOUBLE && UseSSE < 2) ||)
+             false) {
+    // Use old x86 FPU registers:
+    if (type == T_FLOAT)
+      __ fstp_s(return_slot);
+    else
+      __ fstp_d(return_slot);
+  } else if (type == T_FLOAT) {
+    __ movflt(return_slot, xmm0);
+  } else if (type == T_DOUBLE) {
+    __ movdbl(return_slot, xmm0);
+  } else {
+    ShouldNotReachHere();
+  }
+  BLOCK_COMMENT("} move_return_value");
+}
+
+
 #ifndef PRODUCT
 extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
@@ -321,48 +971,90 @@
                               intptr_t* saved_sp,
                               intptr_t* saved_bp) {
   // called as a leaf from native code: do not block the JVM!
+  bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have rcx_mh
   intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
-  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
-  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
-         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
-  if (last_sp != saved_sp && last_sp != NULL)
-    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
+  intptr_t* base_sp = last_sp;
+  typedef MethodHandles::RicochetFrame RicochetFrame;
+  RicochetFrame* rfp = (RicochetFrame*)((address)saved_bp - RicochetFrame::sender_link_offset_in_bytes());
+  if (!UseRicochetFrames || Universe::heap()->is_in((address) rfp->saved_args_base())) {
+    // Probably an interpreter frame.
+    base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
+  }
+  intptr_t    mh_reg = (intptr_t)mh;
+  const char* mh_reg_name = "rcx_mh";
+  if (!has_mh)  mh_reg_name = "rcx";
+  tty->print_cr("MH %s %s="PTR_FORMAT" sp=("PTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="PTR_FORMAT,
+                adaptername, mh_reg_name, mh_reg,
+                (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
   if (Verbose) {
-    printf(" reg dump: ");
+    tty->print(" reg dump: ");
     int saved_regs_count = (entry_sp-1) - saved_regs;
     // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
     int i;
     for (i = 0; i <= saved_regs_count; i++) {
-      if (i > 0 && i % 4 == 0 && i != saved_regs_count)
-        printf("\n   + dump: ");
-      printf(" %d: "INTPTR_FORMAT, i, saved_regs[i]);
+      if (i > 0 && i % 4 == 0 && i != saved_regs_count) {
+        tty->cr();
+        tty->print("   + dump: ");
+      }
+      tty->print(" %d: "PTR_FORMAT, i, saved_regs[i]);
     }
-    printf("\n");
+    tty->cr();
+    if (last_sp != saved_sp && last_sp != NULL)
+      tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp);
     int stack_dump_count = 16;
     if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
       stack_dump_count = (int)(saved_bp + 2 - saved_sp);
     if (stack_dump_count > 64)  stack_dump_count = 48;
     for (i = 0; i < stack_dump_count; i += 4) {
-      printf(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT"\n",
-             i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
+      tty->print_cr(" dump at SP[%d] "PTR_FORMAT": "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT,
+                    i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
     }
-    print_method_handle(mh);
+    if (has_mh)
+      print_method_handle(mh);
   }
 }
+
+// The stub wraps the arguments in a struct on the stack to avoid
+// dealing with the different calling conventions for passing 6
+// arguments.
+struct MethodHandleStubArguments {
+  const char* adaptername;
+  oopDesc* mh;
+  intptr_t* saved_regs;
+  intptr_t* entry_sp;
+  intptr_t* saved_sp;
+  intptr_t* saved_bp;
+};
+void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
+  trace_method_handle_stub(args->adaptername,
+                           args->mh,
+                           args->saved_regs,
+                           args->entry_sp,
+                           args->saved_sp,
+                           args->saved_bp);
+}
+
 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
   if (!TraceMethodHandles)  return;
   BLOCK_COMMENT("trace_method_handle {");
   __ push(rax);
-  __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
+  __ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp
   __ pusha();
-  // arguments:
-  __ push(rbp);               // interpreter frame pointer
+  __ mov(rbx, rsp);
+  __ enter();
+  // incoming state:
+  // rcx: method handle
+  // r13 or rsi: saved sp
+  // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
+  __ push(rbp);               // saved_bp
   __ push(rsi);               // saved_sp
   __ push(rax);               // entry_sp
+  __ push(rbx);               // pusha saved_regs
   __ push(rcx);               // mh
-  __ push(rcx);
+  __ push(rcx);               // adaptername
   __ movptr(Address(rsp, 0), (intptr_t) adaptername);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
+  __ leave();
   __ popa();
   __ pop(rax);
   BLOCK_COMMENT("} trace_method_handle");
@@ -376,13 +1068,20 @@
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
+          //OP_PRIM_TO_REF is below...
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
-         //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
+          //OP_COLLECT_ARGS is below...
+         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
+         |(!UseRicochetFrames ? 0 :
+           java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
+           ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
+           |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
+           |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS)
+            ))
          );
-  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
 }
 
 //------------------------------------------------------------------------------
@@ -391,6 +1090,8 @@
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
+  MethodHandles::EntryKind ek_orig = ek_original_kind(ek);
+
   // Here is the register state during an interpreted call,
   // as set up by generate_method_handle_interpreter_entry():
   // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
@@ -403,10 +1104,11 @@
   const Register rax_argslot = rax;
   const Register rbx_temp    = rbx;
   const Register rdx_temp    = rdx;
+  const Register rdi_temp    = rdi;
 
   // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
   // and gen_c2i_adapter (from compiled calls):
-  const Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);
+  const Register saved_last_sp = saved_last_sp_register();
 
   // Argument registers for _raise_exception.
   // 32-bit: Pass first two oop/int args in registers ECX and EDX.
@@ -439,11 +1141,18 @@
     return;
   }
 
+#ifdef ASSERT
+  __ push((int32_t) 0xEEEEEEEE);
+  __ push((int32_t) (intptr_t) entry_name(ek));
+  LP64_ONLY(__ push((int32_t) high((intptr_t) entry_name(ek))));
+  __ push((int32_t) 0x33333333);
+#endif //ASSERT
+
   address interp_entry = __ pc();
 
   trace_method_handle(_masm, entry_name(ek));
 
-  BLOCK_COMMENT(entry_name(ek));
+  BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));
 
   switch ((int) ek) {
   case _raise_exception:
@@ -459,27 +1168,15 @@
       __ mov(rsp, saved_last_sp);  // cut the stack back to where the caller started
 
       Register rbx_method = rbx_temp;
-      Label L_no_method;
-      // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
       __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
-      __ testptr(rbx_method, rbx_method);
-      __ jccb(Assembler::zero, L_no_method);
 
       const int jobject_oop_offset = 0;
       __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
-      __ testptr(rbx_method, rbx_method);
-      __ jccb(Assembler::zero, L_no_method);
       __ verify_oop(rbx_method);
 
       NOT_LP64(__ push(rarg2_required));
       __ push(rdi_pc);         // restore caller PC
       __ jmp(rbx_method_fce);  // jump to compiled entry
-
-      // Do something that is at least causes a valid throw from the interpreter.
-      __ bind(L_no_method);
-      __ push(rarg2_required);
-      __ push(rarg1_actual);
-      __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
     }
     break;
 
@@ -554,7 +1251,6 @@
       __ load_klass(rax_klass, rcx_recv);
       __ verify_oop(rax_klass);
 
-      Register rdi_temp   = rdi;
       Register rbx_method = rbx_index;
 
       // get interface klass
@@ -589,17 +1285,15 @@
   case _bound_int_direct_mh:
   case _bound_long_direct_mh:
     {
-      bool direct_to_method = (ek >= _bound_ref_direct_mh);
-      BasicType arg_type  = T_ILLEGAL;
-      int       arg_mask  = _INSERT_NO_MASK;
-      int       arg_slots = -1;
-      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
+      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
+      BasicType arg_type  = ek_bound_mh_arg_type(ek);
+      int       arg_slots = type2size[arg_type];
 
       // make room for the new argument:
       __ movl(rax_argslot, rcx_bmh_vmargslot);
       __ lea(rax_argslot, __ argument_address(rax_argslot));
 
-      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, rax_argslot, rbx_temp, rdx_temp);
+      insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp);
 
       // store bound argument into the new stack slot:
       __ load_heap_oop(rbx_temp, rcx_bmh_argument);
@@ -607,9 +1301,10 @@
         __ movptr(Address(rax_argslot, 0), rbx_temp);
       } else {
         Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
-        const int arg_size = type2aelembytes(arg_type);
-        __ load_sized_value(rdx_temp, prim_value_addr, arg_size, is_signed_subword_type(arg_type), rbx_temp);
-        __ store_sized_value(Address(rax_argslot, 0), rdx_temp, arg_size, rbx_temp);
+        move_typed_arg(_masm, arg_type, false,
+                       Address(rax_argslot, 0),
+                       prim_value_addr,
+                       rbx_temp, rdx_temp);
       }
 
       if (direct_to_method) {
@@ -646,7 +1341,7 @@
 
       // What class are we casting to?
       __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
-      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
+      load_klass_from_Class(_masm, rbx_klass);
 
       Label done;
       __ movptr(rdx_temp, vmarg);
@@ -681,6 +1376,7 @@
 
   case _adapter_prim_to_prim:
   case _adapter_ref_to_prim:
+  case _adapter_prim_to_ref:
     // handled completely by optimized cases
     __ stop("init_AdapterMethodHandle should not issue this");
     break;
@@ -732,8 +1428,7 @@
 
       // Do the requested conversion and store the value.
       Register rbx_vminfo = rbx_temp;
-      __ movl(rbx_vminfo, rcx_amh_conversion);
-      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
+      load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
 
       // get the new MH:
       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
@@ -771,7 +1466,7 @@
 
       // on a little-endian machine we keep the first slot and add another after
       __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
-      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
+      insert_arg_slots(_masm, stack_move_unit(),
                        rax_argslot, rbx_temp, rdx_temp);
       Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
       Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);
@@ -823,7 +1518,7 @@
       __ movl(rax_argslot, rcx_amh_vmargslot);
       __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
       if (ek == _adapter_opt_f2d) {
-        insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
+        insert_arg_slots(_masm, stack_move_unit(),
                          rax_argslot, rbx_temp, rdx_temp);
       }
       Address vmarg(rax_argslot, -Interpreter::stackElementSize);
@@ -841,7 +1536,7 @@
 #else //_LP64
       if (ek == _adapter_opt_f2d) {
         __ fld_s(vmarg);        // load float to ST0
-        __ fstp_s(vmarg);       // store single
+        __ fstp_d(vmarg);       // store double
       } else {
         __ fld_d(vmarg);        // load double to ST0
         __ fstp_s(vmarg);       // store single
@@ -858,10 +1553,6 @@
     }
     break;
 
-  case _adapter_prim_to_ref:
-    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
-    break;
-
   case _adapter_swap_args:
   case _adapter_rot_args:
     // handled completely by optimized cases
@@ -875,8 +1566,8 @@
   case _adapter_opt_rot_2_up:
   case _adapter_opt_rot_2_down:
     {
-      int swap_bytes = 0, rotate = 0;
-      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
+      int swap_slots = ek_adapter_opt_swap_slots(ek);
+      int rotate     = ek_adapter_opt_swap_mode(ek);
 
       // 'argslot' is the position of the first argument to swap
       __ movl(rax_argslot, rcx_amh_vmargslot);
@@ -884,83 +1575,71 @@
 
       // 'vminfo' is the second
       Register rbx_destslot = rbx_temp;
-      __ movl(rbx_destslot, rcx_amh_conversion);
-      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
-      __ andl(rbx_destslot, CONV_VMINFO_MASK);
+      load_conversion_vminfo(_masm, rbx_destslot, rcx_amh_conversion);
       __ lea(rbx_destslot, __ argument_address(rbx_destslot));
-      DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"));
+      if (VerifyMethodHandles)
+        verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame");
 
+      assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here");
       if (!rotate) {
-        for (int i = 0; i < swap_bytes; i += wordSize) {
-          __ movptr(rdx_temp, Address(rax_argslot , i));
-          __ push(rdx_temp);
-          __ movptr(rdx_temp, Address(rbx_destslot, i));
-          __ movptr(Address(rax_argslot, i), rdx_temp);
-          __ pop(rdx_temp);
-          __ movptr(Address(rbx_destslot, i), rdx_temp);
+        // simple swap
+        for (int i = 0; i < swap_slots; i++) {
+          __ movptr(rdi_temp, Address(rax_argslot,  i * wordSize));
+          __ movptr(rdx_temp, Address(rbx_destslot, i * wordSize));
+          __ movptr(Address(rax_argslot,  i * wordSize), rdx_temp);
+          __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp);
         }
       } else {
-        // push the first chunk, which is going to get overwritten
-        for (int i = swap_bytes; (i -= wordSize) >= 0; ) {
-          __ movptr(rdx_temp, Address(rax_argslot, i));
-          __ push(rdx_temp);
+        // A rotate is actually a pair of moves, with an "odd slot" (or pair)
+        // changing place with a series of other slots.
+        // First, push the "odd slot", which is going to get overwritten
+        for (int i = swap_slots - 1; i >= 0; i--) {
+          // handle one with rdi_temp instead of a push:
+          if (i == 0)  __ movptr(rdi_temp, Address(rax_argslot, i * wordSize));
+          else         __ pushptr(         Address(rax_argslot, i * wordSize));
         }
-
         if (rotate > 0) {
-          // rotate upward
-          __ subptr(rax_argslot, swap_bytes);
-#ifdef ASSERT
-          {
-            // Verify that argslot > destslot, by at least swap_bytes.
-            Label L_ok;
-            __ cmpptr(rax_argslot, rbx_destslot);
-            __ jccb(Assembler::aboveEqual, L_ok);
-            __ stop("source must be above destination (upward rotation)");
-            __ bind(L_ok);
-          }
-#endif
+          // Here is rotate > 0:
+          // (low mem)                                          (high mem)
+          //     | dest:     more_slots...     | arg: odd_slot :arg+1 |
+          // =>
+          //     | dest: odd_slot | dest+1: more_slots...      :arg+1 |
           // work argslot down to destslot, copying contiguous data upwards
           // pseudo-code:
           //   rax = src_addr - swap_bytes
           //   rbx = dest_addr
           //   while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
-          Label loop;
-          __ bind(loop);
-          __ movptr(rdx_temp, Address(rax_argslot, 0));
-          __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
-          __ addptr(rax_argslot, -wordSize);
-          __ cmpptr(rax_argslot, rbx_destslot);
-          __ jccb(Assembler::aboveEqual, loop);
+          move_arg_slots_up(_masm,
+                            rbx_destslot,
+                            Address(rax_argslot, 0),
+                            swap_slots,
+                            rax_argslot, rdx_temp);
         } else {
-          __ addptr(rax_argslot, swap_bytes);
-#ifdef ASSERT
-          {
-            // Verify that argslot < destslot, by at least swap_bytes.
-            Label L_ok;
-            __ cmpptr(rax_argslot, rbx_destslot);
-            __ jccb(Assembler::belowEqual, L_ok);
-            __ stop("source must be below destination (downward rotation)");
-            __ bind(L_ok);
-          }
-#endif
+          // Here is the other direction, rotate < 0:
+          // (low mem)                                          (high mem)
+          //     | arg: odd_slot | arg+1: more_slots...       :dest+1 |
+          // =>
+          //     | arg:    more_slots...     | dest: odd_slot :dest+1 |
           // work argslot up to destslot, copying contiguous data downwards
           // pseudo-code:
           //   rax = src_addr + swap_bytes
           //   rbx = dest_addr
           //   while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
-          Label loop;
-          __ bind(loop);
-          __ movptr(rdx_temp, Address(rax_argslot, 0));
-          __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
-          __ addptr(rax_argslot, wordSize);
-          __ cmpptr(rax_argslot, rbx_destslot);
-          __ jccb(Assembler::belowEqual, loop);
+          // dest_slot denotes an exclusive upper limit
+          int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS;
+          if (limit_bias != 0)
+            __ addptr(rbx_destslot, - limit_bias * wordSize);
+          move_arg_slots_down(_masm,
+                              Address(rax_argslot, swap_slots * wordSize),
+                              rbx_destslot,
+                              -swap_slots,
+                              rax_argslot, rdx_temp);
+          __ subptr(rbx_destslot, swap_slots * wordSize);
         }
-
         // pop the original first chunk into the destination slot, now free
-        for (int i = 0; i < swap_bytes; i += wordSize) {
-          __ pop(rdx_temp);
-          __ movptr(Address(rbx_destslot, i), rdx_temp);
+        for (int i = 0; i < swap_slots; i++) {
+          if (i == 0)  __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp);
+          else         __ popptr(Address(rbx_destslot, i * wordSize));
         }
       }
 
@@ -976,53 +1655,22 @@
       __ lea(rax_argslot, __ argument_address(rax_argslot));
 
       // 'stack_move' is negative number of words to duplicate
-      Register rdx_stack_move = rdx_temp;
-      __ movl2ptr(rdx_stack_move, rcx_amh_conversion);
-      __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
-
-      int argslot0_num = 0;
-      Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
-      assert(argslot0.base() == rsp, "");
-      int pre_arg_size = argslot0.disp();
-      assert(pre_arg_size % wordSize == 0, "");
-      assert(pre_arg_size > 0, "must include PC");
-
-      // remember the old rsp+1 (argslot[0])
-      Register rbx_oldarg = rbx_temp;
-      __ lea(rbx_oldarg, argslot0);
+      Register rdi_stack_move = rdi_temp;
+      load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
 
-      // move rsp down to make room for dups
-      __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr));
-
-      // compute the new rsp+1 (argslot[0])
-      Register rdx_newarg = rdx_temp;
-      __ lea(rdx_newarg, argslot0);
-
-      __ push(rdi);             // need a temp
-      // (preceding push must be done after arg addresses are taken!)
-
-      // pull down the pre_arg_size data (PC)
-      for (int i = -pre_arg_size; i < 0; i += wordSize) {
-        __ movptr(rdi, Address(rbx_oldarg, i));
-        __ movptr(Address(rdx_newarg, i), rdi);
+      if (VerifyMethodHandles) {
+        verify_argslots(_masm, rdi_stack_move, rax_argslot, true,
+                        "copied argument(s) must fall within current frame");
       }
 
-      // copy from rax_argslot[0...] down to new_rsp[1...]
-      // pseudo-code:
-      //   rbx = old_rsp+1
-      //   rdx = new_rsp+1
-      //   rax = argslot
-      //   while (rdx < rbx) *rdx++ = *rax++
-      Label loop;
-      __ bind(loop);
-      __ movptr(rdi, Address(rax_argslot, 0));
-      __ movptr(Address(rdx_newarg, 0), rdi);
-      __ addptr(rax_argslot, wordSize);
-      __ addptr(rdx_newarg, wordSize);
-      __ cmpptr(rdx_newarg, rbx_oldarg);
-      __ jccb(Assembler::less, loop);
+      // insert location is always the bottom of the argument list:
+      Address insert_location = __ argument_address(constant(0));
+      int pre_arg_words = insert_location.disp() / wordSize;   // return PC is pushed
+      assert(insert_location.base() == rsp, "");
 
-      __ pop(rdi);              // restore temp
+      __ negl(rdi_stack_move);
+      push_arg_slots(_masm, rax_argslot, rdi_stack_move,
+                     pre_arg_words, rbx_temp, rdx_temp);
 
       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
@@ -1035,63 +1683,583 @@
       __ movl(rax_argslot, rcx_amh_vmargslot);
       __ lea(rax_argslot, __ argument_address(rax_argslot));
 
-      __ push(rdi);             // need a temp
       // (must do previous push after argslot address is taken)
 
       // 'stack_move' is number of words to drop
-      Register rdi_stack_move = rdi;
-      __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
-      __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
+      Register rdi_stack_move = rdi_temp;
+      load_stack_move(_masm, rdi_stack_move, rcx_recv, false);
       remove_arg_slots(_masm, rdi_stack_move,
                        rax_argslot, rbx_temp, rdx_temp);
 
-      __ pop(rdi);              // restore temp
-
       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     }
     break;
 
   case _adapter_collect_args:
-    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
-    break;
-
+  case _adapter_fold_args:
   case _adapter_spread_args:
     // handled completely by optimized cases
     __ stop("init_AdapterMethodHandle should not issue this");
     break;
 
+  case _adapter_opt_collect_ref:
+  case _adapter_opt_collect_int:
+  case _adapter_opt_collect_long:
+  case _adapter_opt_collect_float:
+  case _adapter_opt_collect_double:
+  case _adapter_opt_collect_void:
+  case _adapter_opt_collect_0_ref:
+  case _adapter_opt_collect_1_ref:
+  case _adapter_opt_collect_2_ref:
+  case _adapter_opt_collect_3_ref:
+  case _adapter_opt_collect_4_ref:
+  case _adapter_opt_collect_5_ref:
+  case _adapter_opt_filter_S0_ref:
+  case _adapter_opt_filter_S1_ref:
+  case _adapter_opt_filter_S2_ref:
+  case _adapter_opt_filter_S3_ref:
+  case _adapter_opt_filter_S4_ref:
+  case _adapter_opt_filter_S5_ref:
+  case _adapter_opt_collect_2_S0_ref:
+  case _adapter_opt_collect_2_S1_ref:
+  case _adapter_opt_collect_2_S2_ref:
+  case _adapter_opt_collect_2_S3_ref:
+  case _adapter_opt_collect_2_S4_ref:
+  case _adapter_opt_collect_2_S5_ref:
+  case _adapter_opt_fold_ref:
+  case _adapter_opt_fold_int:
+  case _adapter_opt_fold_long:
+  case _adapter_opt_fold_float:
+  case _adapter_opt_fold_double:
+  case _adapter_opt_fold_void:
+  case _adapter_opt_fold_1_ref:
+  case _adapter_opt_fold_2_ref:
+  case _adapter_opt_fold_3_ref:
+  case _adapter_opt_fold_4_ref:
+  case _adapter_opt_fold_5_ref:
+    {
+      // Given a fresh incoming stack frame, build a new ricochet frame.
+      // On entry, TOS points at a return PC, and RBP is the caller's frame ptr.
+      // RSI/R13 has the caller's exact stack pointer, which we must also preserve.
+      // RCX contains an AdapterMethodHandle of the indicated kind.
+
+      // Relevant AMH fields:
+      // amh.vmargslot:
+      //   points to the trailing edge of the arguments
+      //   to filter, collect, or fold.  For a boxing operation,
+      //   it points just after the single primitive value.
+      // amh.argument:
+      //   recursively called MH, on |collect| arguments
+      // amh.vmtarget:
+      //   final destination MH, on return value, etc.
+      // amh.conversion.dest:
+      //   tells what is the type of the return value
+      //   (not needed here, since dest is also derived from ek)
+      // amh.conversion.vminfo:
+      //   points to the trailing edge of the return value
+      //   when the vmtarget is to be called; this is
+      //   equal to vmargslot + (retained ? |collect| : 0)
+
+      // Pass 0 or more argument slots to the recursive target.
+      int collect_count_constant = ek_adapter_opt_collect_count(ek);
+
+      // The collected arguments are copied from the saved argument list:
+      int collect_slot_constant = ek_adapter_opt_collect_slot(ek);
+
+      assert(ek_orig == _adapter_collect_args ||
+             ek_orig == _adapter_fold_args, "");
+      bool retain_original_args = (ek_orig == _adapter_fold_args);
+
+      // The return value is replaced (or inserted) at the 'vminfo' argslot.
+      // Sometimes we can compute this statically.
+      int dest_slot_constant = -1;
+      if (!retain_original_args)
+        dest_slot_constant = collect_slot_constant;
+      else if (collect_slot_constant >= 0 && collect_count_constant >= 0)
+        // We are preserving all the arguments, and the return value is prepended,
+        // so the return slot is to the left (above) the |collect| sequence.
+        dest_slot_constant = collect_slot_constant + collect_count_constant;
+
+      // Replace all those slots by the result of the recursive call.
+      // The result type can be one of ref, int, long, float, double, void.
+      // In the case of void, nothing is pushed on the stack after return.
+      BasicType dest = ek_adapter_opt_collect_type(ek);
+      assert(dest == type2wfield[dest], "dest is a stack slot type");
+      int dest_count = type2size[dest];
+      assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size");
+
+      // Choose a return continuation.
+      EntryKind ek_ret = _adapter_opt_return_any;
+      if (dest != T_CONFLICT && OptimizeMethodHandles) {
+        switch (dest) {
+        case T_INT    : ek_ret = _adapter_opt_return_int;     break;
+        case T_LONG   : ek_ret = _adapter_opt_return_long;    break;
+        case T_FLOAT  : ek_ret = _adapter_opt_return_float;   break;
+        case T_DOUBLE : ek_ret = _adapter_opt_return_double;  break;
+        case T_OBJECT : ek_ret = _adapter_opt_return_ref;     break;
+        case T_VOID   : ek_ret = _adapter_opt_return_void;    break;
+        default       : ShouldNotReachHere();
+        }
+        if (dest == T_OBJECT && dest_slot_constant >= 0) {
+          EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant);
+          if (ek_try <= _adapter_opt_return_LAST &&
+              ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) {
+            ek_ret = ek_try;
+          }
+        }
+        assert(ek_adapter_opt_return_type(ek_ret) == dest, "");
+      }
+
+      // Already pushed:  ... keep1 | collect | keep2 | sender_pc |
+      // push(sender_pc);
+
+      // Compute argument base:
+      Register rax_argv = rax_argslot;
+      __ lea(rax_argv, __ argument_address(constant(0)));
+
+      // Push a few extra argument words, if we need them to store the return value.
+      {
+        int extra_slots = 0;
+        if (retain_original_args) {
+          extra_slots = dest_count;
+        } else if (collect_count_constant == -1) {
+          extra_slots = dest_count;  // collect_count might be zero; be generous
+        } else if (dest_count > collect_count_constant) {
+          extra_slots = (dest_count - collect_count_constant);
+        } else {
+          // else we know we have enough dead space in |collect| to repurpose for return values
+        }
+        DEBUG_ONLY(extra_slots += 1);
+        if (extra_slots > 0) {
+          __ pop(rbx_temp);   // return value
+          __ subptr(rsp, (extra_slots * Interpreter::stackElementSize));
+          // Push guard word #2 in debug mode.
+          DEBUG_ONLY(__ movptr(Address(rsp, 0), (int32_t) RicochetFrame::MAGIC_NUMBER_2));
+          __ push(rbx_temp);
+        }
+      }
+
+      RicochetFrame::enter_ricochet_frame(_masm, rcx_recv, rax_argv,
+                                          entry(ek_ret)->from_interpreted_entry(), rbx_temp);
+
+      // Now pushed:  ... keep1 | collect | keep2 | RF |
+      // some handy frame slots:
+      Address exact_sender_sp_addr = RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes());
+      Address conversion_addr      = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
+      Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
+
+#ifdef ASSERT
+      if (VerifyMethodHandles && dest != T_CONFLICT) {
+        BLOCK_COMMENT("verify AMH.conv.dest");
+        load_conversion_dest_type(_masm, rbx_temp, conversion_addr);
+        Label L_dest_ok;
+        __ cmpl(rbx_temp, (int) dest);
+        __ jcc(Assembler::equal, L_dest_ok);
+        if (dest == T_INT) {
+          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
+            if (is_subword_type(BasicType(bt))) {
+              __ cmpl(rbx_temp, (int) bt);
+              __ jcc(Assembler::equal, L_dest_ok);
+            }
+          }
+        }
+        __ stop("bad dest in AMH.conv");
+        __ BIND(L_dest_ok);
+      }
+#endif //ASSERT
+
+      // Find out where the original copy of the recursive argument sequence begins.
+      Register rax_coll = rax_argv;
+      {
+        RegisterOrConstant collect_slot = collect_slot_constant;
+        if (collect_slot_constant == -1) {
+          __ movl(rdi_temp, rcx_amh_vmargslot);
+          collect_slot = rdi_temp;
+        }
+        if (collect_slot_constant != 0)
+          __ lea(rax_coll, Address(rax_argv, collect_slot, Interpreter::stackElementScale()));
+        // rax_coll now points at the trailing edge of |collect| and leading edge of |keep2|
+      }
+
+      // Replace the old AMH with the recursive MH.  (No going back now.)
+      // In the case of a boxing call, the recursive call is to a 'boxer' method,
+      // such as Integer.valueOf or Long.valueOf.  In the case of a filter
+      // or collect call, it will take one or more arguments, transform them,
+      // and return some result, to store back into argument_base[vminfo].
+      __ load_heap_oop(rcx_recv, rcx_amh_argument);
+      if (VerifyMethodHandles)  verify_method_handle(_masm, rcx_recv);
+
+      // Push a space for the recursively called MH first:
+      __ push((int32_t)NULL_WORD);
+
+      // Calculate |collect|, the number of arguments we are collecting.
+      Register rdi_collect_count = rdi_temp;
+      RegisterOrConstant collect_count;
+      if (collect_count_constant >= 0) {
+        collect_count = collect_count_constant;
+      } else {
+        __ load_method_handle_vmslots(rdi_collect_count, rcx_recv, rdx_temp);
+        collect_count = rdi_collect_count;
+      }
+#ifdef ASSERT
+      if (VerifyMethodHandles && collect_count_constant >= 0) {
+        __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp);
+        Label L_count_ok;
+        __ cmpl(rbx_temp, collect_count_constant);
+        __ jcc(Assembler::equal, L_count_ok);
+        __ stop("bad vminfo in AMH.conv");
+        __ BIND(L_count_ok);
+      }
+#endif //ASSERT
+
+      // copy |collect| slots directly to TOS:
+      push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp);
+      // Now pushed:  ... keep1 | collect | keep2 | RF... | collect |
+      // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2|
+
+      // If necessary, adjust the saved arguments to make room for the eventual return value.
+      // Normal adjustment:  ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
+      // If retaining args:  ... keep1 | +dest+ |  collect  | keep2 | RF... | collect |
+      // In the non-retaining case, this might move keep2 either up or down.
+      // We don't have to copy the whole | RF... collect | complex,
+      // but we must adjust RF.saved_args_base.
+      // Also, from now on, we will forget about the original copy of |collect|.
+      // If we are retaining it, we will treat it as part of |keep2|.
+      // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
+
+      BLOCK_COMMENT("adjust trailing arguments {");
+      // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
+      int                open_count  = dest_count;
+      RegisterOrConstant close_count = collect_count_constant;
+      Register rdi_close_count = rdi_collect_count;
+      if (retain_original_args) {
+        close_count = constant(0);
+      } else if (collect_count_constant == -1) {
+        close_count = rdi_collect_count;
+      }
+
+      // How many slots need moving?  This is simply dest_slot (0 => no |keep3|).
+      RegisterOrConstant keep3_count;
+      Register rsi_keep3_count = rsi;  // can repair from RF.exact_sender_sp
+      if (dest_slot_constant >= 0) {
+        keep3_count = dest_slot_constant;
+      } else  {
+        load_conversion_vminfo(_masm, rsi_keep3_count, conversion_addr);
+        keep3_count = rsi_keep3_count;
+      }
+#ifdef ASSERT
+      if (VerifyMethodHandles && dest_slot_constant >= 0) {
+        load_conversion_vminfo(_masm, rbx_temp, conversion_addr);
+        Label L_vminfo_ok;
+        __ cmpl(rbx_temp, dest_slot_constant);
+        __ jcc(Assembler::equal, L_vminfo_ok);
+        __ stop("bad vminfo in AMH.conv");
+        __ BIND(L_vminfo_ok);
+      }
+#endif //ASSERT
+
+      // tasks remaining:
+      bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
+      bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
+      bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
+
+      if (stomp_dest | fix_arg_base) {
+        // we will probably need an updated rax_argv value
+        if (collect_slot_constant >= 0) {
+          // rax_coll already holds the leading edge of |keep2|, so tweak it
+          assert(rax_coll == rax_argv, "elided a move");
+          if (collect_slot_constant != 0)
+            __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize);
+        } else {
+          // Just reload from RF.saved_args_base.
+          __ movptr(rax_argv, saved_args_base_addr);
+        }
+      }
+
+      // Old and new argument locations (based at slot 0).
+      // Net shift (&new_argv - &old_argv) is (close_count - open_count).
+      bool zero_open_count = (open_count == 0);  // remember this bit of info
+      if (move_keep3 && fix_arg_base) {
+        // It will be easier to have everything in one register:
+        if (close_count.is_register()) {
+          // Deduct open_count from close_count register to get a clean +/- value.
+          __ subptr(close_count.as_register(), open_count);
+        } else {
+          close_count = close_count.as_constant() - open_count;
+        }
+        open_count = 0;
+      }
+      Address old_argv(rax_argv, 0);
+      Address new_argv(rax_argv, close_count,  Interpreter::stackElementScale(),
+                                - open_count * Interpreter::stackElementSize);
+
+      // First decide if any actual data are to be moved.
+      // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
+      // (As it happens, all movements involve an argument list size change.)
+
+      // If there are variable parameters, use dynamic checks to skip around the whole mess.
+      Label L_done;
+      if (!keep3_count.is_constant()) {
+        __ testl(keep3_count.as_register(), keep3_count.as_register());
+        __ jcc(Assembler::zero, L_done);
+      }
+      if (!close_count.is_constant()) {
+        __ cmpl(close_count.as_register(), open_count);
+        __ jcc(Assembler::equal, L_done);
+      }
+
+      if (move_keep3 && fix_arg_base) {
+        bool emit_move_down = false, emit_move_up = false, emit_guard = false;
+        if (!close_count.is_constant()) {
+          emit_move_down = emit_guard = !zero_open_count;
+          emit_move_up   = true;
+        } else if (open_count != close_count.as_constant()) {
+          emit_move_down = (open_count > close_count.as_constant());
+          emit_move_up   = !emit_move_down;
+        }
+        Label L_move_up;
+        if (emit_guard) {
+          __ cmpl(close_count.as_register(), open_count);
+          __ jcc(Assembler::greater, L_move_up);
+        }
+
+        if (emit_move_down) {
+          // Move arguments down if |+dest+| > |-collect-|
+          // (This is rare, except when arguments are retained.)
+          // This opens space for the return value.
+          if (keep3_count.is_constant()) {
+            for (int i = 0; i < keep3_count.as_constant(); i++) {
+              __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize));
+              __ movptr(          new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp);
+            }
+          } else {
+            Register rbx_argv_top = rbx_temp;
+            __ lea(rbx_argv_top, old_argv.plus_disp(keep3_count, Interpreter::stackElementScale()));
+            move_arg_slots_down(_masm,
+                                old_argv,     // beginning of old argv
+                                rbx_argv_top, // end of old argv
+                                close_count,  // distance to move down (must be negative)
+                                rax_argv, rdx_temp);
+            // Used argv as an iteration variable; reload from RF.saved_args_base.
+            __ movptr(rax_argv, saved_args_base_addr);
+          }
+        }
+
+        if (emit_guard) {
+          __ jmp(L_done);  // assumes emit_move_up is true also
+          __ BIND(L_move_up);
+        }
+
+        if (emit_move_up) {
+
+          // Move arguments up if |+dest+| < |-collect-|
+          // (This is usual, except when |keep3| is empty.)
+          // This closes up the space occupied by the now-deleted collect values.
+          if (keep3_count.is_constant()) {
+            for (int i = keep3_count.as_constant() - 1; i >= 0; i--) {
+              __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize));
+              __ movptr(          new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp);
+            }
+          } else {
+            Address argv_top = old_argv.plus_disp(keep3_count, Interpreter::stackElementScale());
+            move_arg_slots_up(_masm,
+                              rax_argv,     // beginning of old argv
+                              argv_top,     // end of old argv
+                              close_count,  // distance to move up (must be positive)
+                              rbx_temp, rdx_temp);
+          }
+        }
+      }
+      __ BIND(L_done);
+
+      if (fix_arg_base) {
+        // adjust RF.saved_args_base by adding (close_count - open_count)
+        if (!new_argv.is_same_address(Address(rax_argv, 0)))
+          __ lea(rax_argv, new_argv);
+        __ movptr(saved_args_base_addr, rax_argv);
+      }
+
+      if (stomp_dest) {
+        // Stomp the return slot, so it doesn't hold garbage.
+        // This isn't strictly necessary, but it may help detect bugs.
+        int forty_two = RicochetFrame::RETURN_VALUE_PLACEHOLDER;
+        __ movptr(Address(rax_argv, keep3_count, Address::times_ptr),
+                  (int32_t) forty_two);
+        // uses rsi_keep3_count
+      }
+      BLOCK_COMMENT("} adjust trailing arguments");
+
+      BLOCK_COMMENT("do_recursive_call");
+      __ mov(saved_last_sp, rsp);    // set rsi/r13 for callee
+      __ pushptr(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr());
+      // The globally unique bounce address has two purposes:
+      // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
+      // 2. When returned to, it cuts back the stack and redirects control flow
+      //    to the return handler.
+      // The return handler will further cut back the stack when it takes
+      // down the RF.  Perhaps there is a way to streamline this further.
+
+      // State during recursive call:
+      // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
+      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
+
+      break;
+    }
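
A minimal host-side sketch of the slot arithmetic used by the collect/fold case above (not part of the patch; the struct and function names are illustrative, and only the formulas already visible in the generated-code path -- dest_slot, open_count, close_count, and the net shift of RF.saved_args_base -- are used):

    #include <cassert>

    struct CollectPlan {
      int dest_slot;    // slot that receives the recursive return value
      int open_count;   // slots opened for the return value (|+dest+|)
      int close_count;  // slots closed because |collect| is consumed (|-collect-|)
      int net_shift;    // adjustment of RF.saved_args_base, in slots (close - open)
    };

    static CollectPlan plan_collect(int collect_slot, int collect_count,
                                    int dest_count, bool retain_original_args) {
      CollectPlan p;
      p.dest_slot   = retain_original_args ? collect_slot + collect_count : collect_slot;
      p.open_count  = dest_count;
      p.close_count = retain_original_args ? 0 : collect_count;
      p.net_shift   = p.close_count - p.open_count;
      return p;
    }

    int main() {
      // _adapter_collect_args: three collected slots replaced by a one-slot int result.
      CollectPlan c = plan_collect(/*collect_slot*/ 2, /*collect_count*/ 3, /*dest_count*/ 1, false);
      assert(c.dest_slot == 2 && c.net_shift == +2);   // |keep3| moves up by two slots
      // _adapter_fold_args: all three slots retained, result prepended above them.
      CollectPlan f = plan_collect(2, 3, 1, true);
      assert(f.dest_slot == 5 && f.net_shift == -1);   // |keep3| moves down by one slot
      return 0;
    }
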
+
+  case _adapter_opt_return_ref:
+  case _adapter_opt_return_int:
+  case _adapter_opt_return_long:
+  case _adapter_opt_return_float:
+  case _adapter_opt_return_double:
+  case _adapter_opt_return_void:
+  case _adapter_opt_return_S0_ref:
+  case _adapter_opt_return_S1_ref:
+  case _adapter_opt_return_S2_ref:
+  case _adapter_opt_return_S3_ref:
+  case _adapter_opt_return_S4_ref:
+  case _adapter_opt_return_S5_ref:
+    {
+      BasicType dest_type_constant = ek_adapter_opt_return_type(ek);
+      int       dest_slot_constant = ek_adapter_opt_return_slot(ek);
+
+      if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
+
+      if (dest_slot_constant == -1) {
+        // The current stub is a general handler for this dest_type.
+        // It can be called from _adapter_opt_return_any below.
+        // Stash the address in a little table.
+        assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob");
+        address return_handler = __ pc();
+        _adapter_return_handlers[dest_type_constant] = return_handler;
+        if (dest_type_constant == T_INT) {
+          // do the subword types too
+          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
+            if (is_subword_type(BasicType(bt)) &&
+                _adapter_return_handlers[bt] == NULL) {
+              _adapter_return_handlers[bt] = return_handler;
+            }
+          }
+        }
+      }
+
+      Register rbx_arg_base = rbx_temp;
+      assert_different_registers(rax, rdx,  // possibly live return value registers
+                                 rdi_temp, rbx_arg_base);
+
+      Address conversion_addr      = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
+      Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
+
+      __ movptr(rbx_arg_base, saved_args_base_addr);
+      RegisterOrConstant dest_slot = dest_slot_constant;
+      if (dest_slot_constant == -1) {
+        load_conversion_vminfo(_masm, rdi_temp, conversion_addr);
+        dest_slot = rdi_temp;
+      }
+      // Store the result back into the argslot.
+      // This code uses the interpreter calling sequence, in which the return value
+      // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
+      // There are certain irregularities with floating point values, which can be seen
+      // in TemplateInterpreterGenerator::generate_return_entry_for.
+      move_return_value(_masm, dest_type_constant, Address(rbx_arg_base, dest_slot, Interpreter::stackElementScale()));
+
+      RicochetFrame::leave_ricochet_frame(_masm, rcx_recv, rbx_arg_base, rdx_temp);
+      __ push(rdx_temp);  // repush the return PC
+
+      // Load the final target and go.
+      if (VerifyMethodHandles)  verify_method_handle(_masm, rcx_recv);
+      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
+      __ hlt(); // --------------------
+      break;
+    }
+
+  case _adapter_opt_return_any:
+    {
+      if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
+      Register rdi_conv = rdi_temp;
+      assert_different_registers(rax, rdx,  // possibly live return value registers
+                                 rdi_conv, rbx_temp);
+
+      Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
+      load_conversion_dest_type(_masm, rdi_conv, conversion_addr);
+      __ lea(rbx_temp, ExternalAddress((address) &_adapter_return_handlers[0]));
+      __ movptr(rbx_temp, Address(rbx_temp, rdi_conv, Address::times_ptr));
+
+#ifdef ASSERT
+      { Label L_badconv;
+        __ testptr(rbx_temp, rbx_temp);
+        __ jccb(Assembler::zero, L_badconv);
+        __ jmp(rbx_temp);
+        __ bind(L_badconv);
+        __ stop("bad method handle return");
+      }
+#else //ASSERT
+      __ jmp(rbx_temp);
+#endif //ASSERT
+      break;
+    }
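
The return continuations above cooperate through _adapter_return_handlers: each general (slot == -1) _adapter_opt_return stub registers itself under its BasicType, with the T_INT handler also serving the subword types, and _adapter_opt_return_any loads the conversion's dest type at runtime and indexes that table. A rough standalone sketch of the registration/dispatch idea; the enum ordering and names below are assumptions of the sketch, not HotSpot's:

    #include <cassert>
    #include <cstddef>

    // Sketch-only stand-ins; placing all subword types below INT is an
    // assumption made so the registration loop stays simple.
    enum SketchType { S_BOOLEAN, S_CHAR, S_BYTE, S_SHORT, S_INT,
                      S_LONG, S_FLOAT, S_DOUBLE, S_OBJECT, S_VOID, S_LIMIT };

    typedef const void* stub_pc;          // plays the role of a generated entry point
    static stub_pc handlers[S_LIMIT];     // plays the role of _adapter_return_handlers

    // Called once per general return stub, mirroring the generator:
    static void register_return_handler(SketchType dest, stub_pc pc) {
      handlers[dest] = pc;
      if (dest == S_INT) {                // the int handler also serves the subword types
        for (int bt = S_BOOLEAN; bt < S_INT; bt++)
          if (handlers[bt] == NULL)  handlers[bt] = pc;
      }
    }

    int main() {
      static const char int_stub = 0, ref_stub = 0;   // dummy, distinct addresses
      register_return_handler(S_INT,    &int_stub);
      register_return_handler(S_OBJECT, &ref_stub);
      // _adapter_opt_return_any: load conversion.dest, index the table, jump there.
      assert(handlers[S_BYTE]   == &int_stub);
      assert(handlers[S_OBJECT] == &ref_stub);
      return 0;
    }
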
+
   case _adapter_opt_spread_0:
-  case _adapter_opt_spread_1:
-  case _adapter_opt_spread_more:
+  case _adapter_opt_spread_1_ref:
+  case _adapter_opt_spread_2_ref:
+  case _adapter_opt_spread_3_ref:
+  case _adapter_opt_spread_4_ref:
+  case _adapter_opt_spread_5_ref:
+  case _adapter_opt_spread_ref:
+  case _adapter_opt_spread_byte:
+  case _adapter_opt_spread_char:
+  case _adapter_opt_spread_short:
+  case _adapter_opt_spread_int:
+  case _adapter_opt_spread_long:
+  case _adapter_opt_spread_float:
+  case _adapter_opt_spread_double:
     {
       // spread an array out into a group of arguments
-      int length_constant = get_ek_adapter_opt_spread_info(ek);
+      int length_constant = ek_adapter_opt_spread_count(ek);
+      bool length_can_be_zero = (length_constant == 0);
+      if (length_constant < 0) {
+        // some adapters with variable length must handle the zero case
+        if (!OptimizeMethodHandles ||
+            ek_adapter_opt_spread_type(ek) != T_OBJECT)
+          length_can_be_zero = true;
+      }
 
       // find the address of the array argument
       __ movl(rax_argslot, rcx_amh_vmargslot);
       __ lea(rax_argslot, __ argument_address(rax_argslot));
 
-      // grab some temps
-      { __ push(rsi); __ push(rdi); }
-      // (preceding pushes must be done after argslot address is taken!)
-#define UNPUSH_RSI_RDI \
-      { __ pop(rdi); __ pop(rsi); }
+      // grab another temp
+      Register rsi_temp = rsi;
+      { if (rsi_temp == saved_last_sp)  __ push(saved_last_sp); }
+      // (preceding push must be done after argslot address is taken!)
+#define UNPUSH_RSI \
+      { if (rsi_temp == saved_last_sp)  __ pop(saved_last_sp); }
 
       // rax_argslot points both to the array and to the first output arg
       vmarg = Address(rax_argslot, 0);
 
       // Get the array value.
-      Register  rsi_array       = rsi;
+      Register  rsi_array       = rsi_temp;
       Register  rdx_array_klass = rdx_temp;
-      BasicType elem_type       = T_OBJECT;
+      BasicType elem_type = ek_adapter_opt_spread_type(ek);
+      int       elem_slots = type2size[elem_type];  // 1 or 2
+      int       array_slots = 1;  // array is always a T_OBJECT
       int       length_offset   = arrayOopDesc::length_offset_in_bytes();
       int       elem0_offset    = arrayOopDesc::base_offset_in_bytes(elem_type);
       __ movptr(rsi_array, vmarg);
-      Label skip_array_check;
-      if (length_constant == 0) {
+
+      Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
+      if (length_can_be_zero) {
+        // handle the null pointer case, if zero is allowed
+        Label L_skip;
+        if (length_constant < 0) {
+          load_conversion_vminfo(_masm, rbx_temp, rcx_amh_conversion);
+          __ testl(rbx_temp, rbx_temp);
+          __ jcc(Assembler::notZero, L_skip);
+        }
         __ testptr(rsi_array, rsi_array);
-        __ jcc(Assembler::zero, skip_array_check);
+        __ jcc(Assembler::zero, L_array_is_empty);
+        __ bind(L_skip);
       }
       __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
       __ load_klass(rdx_array_klass, rsi_array);
@@ -1099,22 +2267,20 @@
       // Check the array type.
       Register rbx_klass = rbx_temp;
       __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
-      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
+      load_klass_from_Class(_masm, rbx_klass);
 
       Label ok_array_klass, bad_array_klass, bad_array_length;
-      __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass);
+      __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi_temp, ok_array_klass);
       // If we get here, the type check failed!
       __ jmp(bad_array_klass);
-      __ bind(ok_array_klass);
+      __ BIND(ok_array_klass);
 
       // Check length.
       if (length_constant >= 0) {
         __ cmpl(Address(rsi_array, length_offset), length_constant);
       } else {
         Register rbx_vminfo = rbx_temp;
-        __ movl(rbx_vminfo, rcx_amh_conversion);
-        assert(CONV_VMINFO_SHIFT == 0, "preshifted");
-        __ andl(rbx_vminfo, CONV_VMINFO_MASK);
+        load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
         __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
       }
       __ jcc(Assembler::notEqual, bad_array_length);
@@ -1126,91 +2292,106 @@
         // Form a pointer to the end of the affected region.
         __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
         // 'stack_move' is negative number of words to insert
-        Register rdi_stack_move = rdi;
-        __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
-        __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
+        // This number already accounts for elem_slots.
+        Register rdi_stack_move = rdi_temp;
+        load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
+        __ cmpptr(rdi_stack_move, 0);
+        assert(stack_move_unit() < 0, "else change this comparison");
+        __ jcc(Assembler::less, L_insert_arg_space);
+        __ jcc(Assembler::equal, L_copy_args);
+        // single argument case, with no array movement
+        __ BIND(L_array_is_empty);
+        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
+                         rax_argslot, rbx_temp, rdx_temp);
+        __ jmp(L_args_done);  // no spreading to do
+        __ BIND(L_insert_arg_space);
+        // come here in the usual case, stack_move < 0 (2 or more spread arguments)
         Register rsi_temp = rsi_array;  // spill this
-        insert_arg_slots(_masm, rdi_stack_move, -1,
+        insert_arg_slots(_masm, rdi_stack_move,
                          rax_argslot, rbx_temp, rsi_temp);
-        // reload the array (since rsi was killed)
-        __ movptr(rsi_array, vmarg);
-      } else if (length_constant > 1) {
-        int arg_mask = 0;
-        int new_slots = (length_constant - 1);
-        for (int i = 0; i < new_slots; i++) {
-          arg_mask <<= 1;
-          arg_mask |= _INSERT_REF_MASK;
-        }
-        insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask,
+        // reload the array since rsi was killed
+        // reload from rdx_argslot_limit since rax_argslot is now decremented
+        __ movptr(rsi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
+      } else if (length_constant >= 1) {
+        int new_slots = (length_constant * elem_slots) - array_slots;
+        insert_arg_slots(_masm, new_slots * stack_move_unit(),
                          rax_argslot, rbx_temp, rdx_temp);
-      } else if (length_constant == 1) {
-        // no stack resizing required
       } else if (length_constant == 0) {
-        remove_arg_slots(_masm, -stack_move_unit(),
+        __ BIND(L_array_is_empty);
+        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
                          rax_argslot, rbx_temp, rdx_temp);
+      } else {
+        ShouldNotReachHere();
       }
 
       // Copy from the array to the new slots.
       // Note: Stack change code preserves integrity of rax_argslot pointer.
       // So even after slot insertions, rax_argslot still points to first argument.
+      // Beware:  Arguments that are shallow on the stack are deep in the array,
+      // and vice versa.  So a downward-growing stack (the usual) has to be copied
+      // elementwise in reverse order from the source array.
+      __ BIND(L_copy_args);
       if (length_constant == -1) {
         // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
+        // Array element [0] goes at rdx_argslot_limit[-wordSize].
         Register rsi_source = rsi_array;
         __ lea(rsi_source, Address(rsi_array, elem0_offset));
+        Register rdx_fill_ptr = rdx_argslot_limit;
         Label loop;
-        __ bind(loop);
-        __ movptr(rbx_temp, Address(rsi_source, 0));
-        __ movptr(Address(rax_argslot, 0), rbx_temp);
+        __ BIND(loop);
+        __ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots);
+        move_typed_arg(_masm, elem_type, true,
+                       Address(rdx_fill_ptr, 0), Address(rsi_source, 0),
+                       rbx_temp, rdi_temp);
         __ addptr(rsi_source, type2aelembytes(elem_type));
-        __ addptr(rax_argslot, Interpreter::stackElementSize);
-        __ cmpptr(rax_argslot, rdx_argslot_limit);
-        __ jccb(Assembler::less, loop);
+        __ cmpptr(rdx_fill_ptr, rax_argslot);
+        __ jcc(Assembler::above, loop);
       } else if (length_constant == 0) {
-        __ bind(skip_array_check);
         // nothing to copy
       } else {
         int elem_offset = elem0_offset;
-        int slot_offset = 0;
+        int slot_offset = length_constant * Interpreter::stackElementSize;
         for (int index = 0; index < length_constant; index++) {
-          __ movptr(rbx_temp, Address(rsi_array, elem_offset));
-          __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
+          slot_offset -= Interpreter::stackElementSize * elem_slots;  // fill backward
+          move_typed_arg(_masm, elem_type, true,
+                         Address(rax_argslot, slot_offset), Address(rsi_array, elem_offset),
+                         rbx_temp, rdi_temp);
           elem_offset += type2aelembytes(elem_type);
-           slot_offset += Interpreter::stackElementSize;
         }
       }
+      __ BIND(L_args_done);
 
       // Arguments are spread.  Move to next method handle.
-      UNPUSH_RSI_RDI;
+      UNPUSH_RSI;
       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
 
       __ bind(bad_array_klass);
-      UNPUSH_RSI_RDI;
+      UNPUSH_RSI;
       assert(!vmarg.uses(rarg2_required), "must be different registers");
-      __ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset));  // required type
-      __ movptr(rarg1_actual,   vmarg);                                         // bad array
-      __ movl(  rarg0_code,     (int) Bytecodes::_aaload);                      // who is complaining?
+      __ load_heap_oop( rarg2_required, Address(rdx_array_klass, java_mirror_offset));  // required type
+      __ movptr(        rarg1_actual,   vmarg);                                         // bad array
+      __ movl(          rarg0_code,     (int) Bytecodes::_aaload);                      // who is complaining?
       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
 
       __ bind(bad_array_length);
-      UNPUSH_RSI_RDI;
+      UNPUSH_RSI;
       assert(!vmarg.uses(rarg2_required), "must be different registers");
-      __ mov   (rarg2_required, rcx_recv);                       // AMH requiring a certain length
-      __ movptr(rarg1_actual,   vmarg);                          // bad array
-      __ movl(  rarg0_code,     (int) Bytecodes::_arraylength);  // who is complaining?
+      __ mov(    rarg2_required, rcx_recv);                       // AMH requiring a certain length
+      __ movptr( rarg1_actual,   vmarg);                          // bad array
+      __ movl(   rarg0_code,     (int) Bytecodes::_arraylength);  // who is complaining?
       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
+#undef UNPUSH_RSI
 
-#undef UNPUSH_RSI_RDI
+      break;
     }
-    break;
 
-  case _adapter_flyby:
-  case _adapter_ricochet:
-    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
-    break;
-
-  default:  ShouldNotReachHere();
+  default:
+    // do not require all platforms to recognize all adapter types
+    __ nop();
+    return;
   }
+  BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
   __ hlt();
 
   address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
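
The spread cases above copy "fill backward": array element [0] is lowest in memory but must land in the highest of the newly opened argument slots, at rdx_argslot_limit[-wordSize], while the last element lands at rax_argslot. A tiny host-side sketch of the constant-length path, with one word per element and purely hypothetical buffers:

    #include <cassert>
    #include <stdint.h>

    int main() {
      const int length = 3;
      intptr_t array[length] = { 10, 20, 30 };  // Java array: element [0] first in memory
      intptr_t slots[length] = { 0, 0, 0 };     // expansion area; slots[0] is Address(rax_argslot, 0)

      // Arguments that are shallow on the stack are deep in the array, and vice
      // versa, so the destination index counts down while the source index counts up.
      int slot_index = length;
      for (int index = 0; index < length; index++) {
        slot_index -= 1;                        // one slot per element in this sketch
        slots[slot_index] = array[index];
      }
      assert(slots[length - 1] == 10);          // element [0] lands at the limit end
      assert(slots[0]          == 30);          // the last element lands at rax_argslot
      return 0;
    }
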
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/methodHandles_x86.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Platform-specific definitions for method handles.
+// These definitions are inlined into class MethodHandles.
+
+// Adapters
+enum /* platform_dependent_constants */ {
+  adapter_code_size = NOT_LP64(30000 DEBUG_ONLY(+ 10000)) LP64_ONLY(80000 DEBUG_ONLY(+ 120000))
+};
+
+public:
+
+// The stack just after the recursive call from a ricochet frame
+// looks something like this.  Offsets are marked in words, not bytes.
+// rsi (r13 on LP64) is part of the interpreter calling sequence
+// which tells the callee where my real rsp is (for frame walking).
+// (...lower memory addresses)
+// rsp:     [ return pc                 ]   always the global RicochetBlob::bounce_addr
+// rsp+1:   [ recursive arg N           ]
+// rsp+2:   [ recursive arg N-1         ]
+// ...
+// rsp+N:   [ recursive arg 1           ]
+// rsp+N+1: [ recursive method handle   ]
+// ...
+// rbp-6:   [ cleanup continuation pc   ]   <-- (struct RicochetFrame)
+// rbp-5:   [ saved target MH           ]   the MH we will call on the saved args
+// rbp-4:   [ saved args layout oop     ]   an int[] array which describes argument layout
+// rbp-3:   [ saved args pointer        ]   address of transformed adapter arg M (slot 0)
+// rbp-2:   [ conversion                ]   information about how the return value is used
+// rbp-1:   [ exact sender sp           ]   exact TOS (rsi/r13) of original sender frame
+// rbp+0:   [ saved sender fp           ]   (for original sender of AMH)
+// rbp+1:   [ saved sender pc           ]   (back to original sender of AMH)
+// rbp+2:   [ transformed adapter arg M ]   <-- (extended TOS of original sender)
+// rbp+3:   [ transformed adapter arg M-1 ]
+// ...
+// rbp+M+1: [ transformed adapter arg 1 ]
+// rbp+M+2: [ padding                   ] <-- (rbp + saved args base offset)
+// ...      [ optional padding ]
+// (higher memory addresses...)
+//
+// The arguments originally passed by the original sender
+// are lost, and arbitrary amounts of stack motion might have
+// happened due to argument transformation.
+// (This is done by C2I/I2C adapters and non-direct method handles.)
+// This is why there is an unpredictable amount of memory between
+// the extended and exact TOS of the sender.
+// The ricochet adapter itself will also (in general) perform
+// transformations before the recursive call.
+//
+// The transformed and saved arguments, immediately above the saved
+// return PC, are a well-formed method handle invocation ready to execute.
+// When the GC needs to walk the stack, these arguments are described
+// via the saved arg types oop, an int[] array with a private format.
+// This array is derived from the type of the transformed adapter
+// method handle, which also sits at the base of the saved argument
+// bundle.  Since the GC may not be able to fish out the int[]
+// array, it is pushed explicitly on the stack.  This may be
+// an unnecessary expense.
+//
+// The following register conventions are significant at this point:
+// rsp       the thread stack, as always; preserved by caller
+// rsi/r13   exact TOS of recursive frame (contents of [rbp-2])
+// rcx       recursive method handle (contents of [rsp+N+1])
+// rbp       preserved by caller (not used by caller)
+// Unless otherwise specified, all registers can be blown by the call.
+//
+// If this frame must be walked, the transformed adapter arguments
+// will be found with the help of the saved arguments descriptor.
+//
+// Therefore, the descriptor must match the referenced arguments.
+// The arguments must be followed by at least one word of padding,
+// which will be necessary to complete the final method handle call.
+// That word is not treated as holding an oop.
+//
+// The word pointed to by the return argument pointer is not
+// treated as an oop, even if it points to a saved argument.
+// This allows the saved argument list to have a "hole" in it
+// to receive an oop from the recursive call.
+// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.)
+//
+// When the recursive callee returns, RicochetBlob::bounce_addr will
+// immediately jump to the continuation stored in the RF.
+// This continuation will merge the recursive return value
+// into the saved argument list.  At that point, the original
+// rsi, rbp, and rsp will be reloaded, the ricochet frame will
+// disappear, and the final target of the adapter method handle
+// will be invoked on the transformed argument list.
+
+class RicochetFrame {
+  friend class MethodHandles;
+
+ private:
+  intptr_t* _continuation;          // what to do when control gets back here
+  oopDesc*  _saved_target;          // target method handle to invoke on saved_args
+  oopDesc*  _saved_args_layout;     // caching point for MethodTypeForm.vmlayout cookie
+  intptr_t* _saved_args_base;       // base of pushed arguments (slot 0, arg N) (-3)
+  intptr_t  _conversion;            // misc. information from original AdapterMethodHandle (-2)
+  intptr_t* _exact_sender_sp;       // parallel to interpreter_frame_sender_sp (-1)
+  intptr_t* _sender_link;           // *must* coincide with frame::link_offset (0)
+  address   _sender_pc;             // *must* coincide with frame::return_addr_offset (1)
+
+ public:
+  intptr_t* continuation() const        { return _continuation; }
+  oop       saved_target() const        { return _saved_target; }
+  oop       saved_args_layout() const   { return _saved_args_layout; }
+  intptr_t* saved_args_base() const     { return _saved_args_base; }
+  intptr_t  conversion() const          { return _conversion; }
+  intptr_t* exact_sender_sp() const     { return _exact_sender_sp; }
+  intptr_t* sender_link() const         { return _sender_link; }
+  address   sender_pc() const           { return _sender_pc; }
+
+  intptr_t* extended_sender_sp() const  { return saved_args_base(); }
+
+  intptr_t  return_value_slot_number() const {
+    return adapter_conversion_vminfo(conversion());
+  }
+  BasicType return_value_type() const {
+    return adapter_conversion_dest_type(conversion());
+  }
+  bool has_return_value_slot() const {
+    return return_value_type() != T_VOID;
+  }
+  intptr_t* return_value_slot_addr() const {
+    assert(has_return_value_slot(), "");
+    return saved_arg_slot_addr(return_value_slot_number());
+  }
+  intptr_t* saved_target_slot_addr() const {
+    return saved_arg_slot_addr(saved_args_length());
+  }
+  intptr_t* saved_arg_slot_addr(int slot) const {
+    assert(slot >= 0, "");
+    return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
+  }
+
+  jint      saved_args_length() const;
+  jint      saved_arg_offset(int arg) const;
+
+  // GC interface
+  oop*  saved_target_addr()                     { return (oop*)&_saved_target; }
+  oop*  saved_args_layout_addr()                { return (oop*)&_saved_args_layout; }
+
+  oop  compute_saved_args_layout(bool read_cache, bool write_cache);
+
+  // Compiler/assembler interface.
+  static int continuation_offset_in_bytes()     { return offset_of(RicochetFrame, _continuation); }
+  static int saved_target_offset_in_bytes()     { return offset_of(RicochetFrame, _saved_target); }
+  static int saved_args_layout_offset_in_bytes(){ return offset_of(RicochetFrame, _saved_args_layout); }
+  static int saved_args_base_offset_in_bytes()  { return offset_of(RicochetFrame, _saved_args_base); }
+  static int conversion_offset_in_bytes()       { return offset_of(RicochetFrame, _conversion); }
+  static int exact_sender_sp_offset_in_bytes()  { return offset_of(RicochetFrame, _exact_sender_sp); }
+  static int sender_link_offset_in_bytes()      { return offset_of(RicochetFrame, _sender_link); }
+  static int sender_pc_offset_in_bytes()        { return offset_of(RicochetFrame, _sender_pc); }
+
+  // This value is not used for much, but it apparently must be nonzero.
+  static int frame_size_in_bytes()              { return sender_link_offset_in_bytes(); }
+
+#ifdef ASSERT
+  // The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
+  enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
+  static int magic_number_1_offset_in_bytes()   { return -wordSize; }
+  static int magic_number_2_offset_in_bytes()   { return sizeof(RicochetFrame); }
+  intptr_t magic_number_1() const               { return *(intptr_t*)((address)this + magic_number_1_offset_in_bytes()); }
+  intptr_t magic_number_2() const               { return *(intptr_t*)((address)this + magic_number_2_offset_in_bytes()); }
+#endif //ASSERT
+
+  enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };
+
+  static void verify_offsets() NOT_DEBUG_RETURN;
+  void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
+  void zap_arguments() NOT_DEBUG_RETURN;
+
+  static void generate_ricochet_blob(MacroAssembler* _masm,
+                                     // output params:
+                                     int* bounce_offset,
+                                     int* exception_offset,
+                                     int* frame_size_in_words);
+
+  static void enter_ricochet_frame(MacroAssembler* _masm,
+                                   Register rcx_recv,
+                                   Register rax_argv,
+                                   address return_handler,
+                                   Register rbx_temp);
+  static void leave_ricochet_frame(MacroAssembler* _masm,
+                                   Register rcx_recv,
+                                   Register new_sp_reg,
+                                   Register sender_pc_reg);
+
+  static Address frame_address(int offset = 0) {
+    // The RicochetFrame is found by subtracting a constant offset from rbp.
+    return Address(rbp, - sender_link_offset_in_bytes() + offset);
+  }
+
+  static RicochetFrame* from_frame(const frame& fr) {
+    address bp = (address) fr.fp();
+    RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());
+    rf->verify();
+    return rf;
+  }
+
+  static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
+};
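
The field declarations above are meant to line up with the rbp-relative picture in the comment block (rbp-6 through rbp+1). Below is a standalone mirror of that layout which checks the arithmetic frame_address() relies on; the mirror struct is hypothetical and assumes word-sized members with no padding:

    #include <cassert>
    #include <cstddef>
    #include <stdint.h>

    struct MirrorRF {                  // same member order as RicochetFrame
      intptr_t* _continuation;         // expected at rbp-6
      intptr_t* _saved_target;         // rbp-5
      intptr_t* _saved_args_layout;    // rbp-4
      intptr_t* _saved_args_base;      // rbp-3
      intptr_t  _conversion;           // rbp-2
      intptr_t* _exact_sender_sp;      // rbp-1
      intptr_t* _sender_link;          // rbp+0  (frame_address() anchors here)
      void*     _sender_pc;            // rbp+1
    };

    int main() {
      const ptrdiff_t w      = (ptrdiff_t) sizeof(intptr_t);
      const ptrdiff_t anchor = (ptrdiff_t) offsetof(MirrorRF, _sender_link);
      // frame_address(off) == Address(rbp, -sender_link_offset_in_bytes() + off),
      // so a field's rbp-relative slot is (field_offset - anchor) / wordSize.
      assert(((ptrdiff_t) offsetof(MirrorRF, _continuation)    - anchor) / w == -6);
      assert(((ptrdiff_t) offsetof(MirrorRF, _conversion)      - anchor) / w == -2);
      assert(((ptrdiff_t) offsetof(MirrorRF, _exact_sender_sp) - anchor) / w == -1);
      assert(((ptrdiff_t) offsetof(MirrorRF, _sender_pc)       - anchor) / w == +1);
      return 0;
    }
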
+
+// Additional helper methods for MethodHandles code generation:
+public:
+  static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);
+  static void load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr);
+  static void load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr);
+
+  static void load_stack_move(MacroAssembler* _masm,
+                              Register rdi_stack_move,
+                              Register rcx_amh,
+                              bool might_be_negative);
+
+  static void insert_arg_slots(MacroAssembler* _masm,
+                               RegisterOrConstant arg_slots,
+                               Register rax_argslot,
+                               Register rbx_temp, Register rdx_temp);
+
+  static void remove_arg_slots(MacroAssembler* _masm,
+                               RegisterOrConstant arg_slots,
+                               Register rax_argslot,
+                               Register rbx_temp, Register rdx_temp);
+
+  static void push_arg_slots(MacroAssembler* _masm,
+                             Register rax_argslot,
+                             RegisterOrConstant slot_count,
+                             int skip_words_count,
+                             Register rbx_temp, Register rdx_temp);
+
+  static void move_arg_slots_up(MacroAssembler* _masm,
+                                Register rbx_bottom,  // invariant
+                                Address  top_addr,    // can use rax_temp
+                                RegisterOrConstant positive_distance_in_slots,
+                                Register rax_temp, Register rdx_temp);
+
+  static void move_arg_slots_down(MacroAssembler* _masm,
+                                  Address  bottom_addr,  // can use rax_temp
+                                  Register rbx_top,      // invariant
+                                  RegisterOrConstant negative_distance_in_slots,
+                                  Register rax_temp, Register rdx_temp);
+
+  static void move_typed_arg(MacroAssembler* _masm,
+                             BasicType type, bool is_element,
+                             Address slot_dest, Address value_src,
+                             Register rbx_temp, Register rdx_temp);
+
+  static void move_return_value(MacroAssembler* _masm, BasicType type,
+                                Address return_slot);
+
+  static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
+                             const char* error_message) NOT_DEBUG_RETURN;
+
+  static void verify_argslots(MacroAssembler* _masm,
+                              RegisterOrConstant argslot_count,
+                              Register argslot_reg,
+                              bool negate_argslot,
+                              const char* error_message) NOT_DEBUG_RETURN;
+
+  static void verify_stack_move(MacroAssembler* _masm,
+                                RegisterOrConstant arg_slots,
+                                int direction) NOT_DEBUG_RETURN;
+
+  static void verify_klass(MacroAssembler* _masm,
+                           Register obj, KlassHandle klass,
+                           const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
+
+  static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) {
+    verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(),
+                 "reference is a MH");
+  }
+
+  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
+
+  static Register saved_last_sp_register() {
+    // Should be in sharedRuntime, not here.
+    return LP64_ONLY(r13) NOT_LP64(rsi);
+  }
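
In debug builds the two magic numbers declared in RicochetFrame bracket the frame: MAGIC_NUMBER_1 sits one word below the struct and MAGIC_NUMBER_2 immediately past its end (the same value is also pushed as a second guard word below any extra return-value slots in the .cpp change above). A toy illustration, assuming an eight-word, padding-free RicochetFrame; the array and pointer below are hypothetical:

    #include <cassert>
    #include <stdint.h>

    enum { MAGIC_1 = 0xFEED03E, MAGIC_2 = 0xBEEF03E };  // values from the header above

    int main() {
      // Flat stack image: [ magic1 ][ eight RF words ][ magic2 ].
      intptr_t words[1 + 8 + 1] = { 0 };
      intptr_t* rf_base = &words[1];                    // plays the role of 'this'
      words[0] = MAGIC_1;                               // magic_number_1_offset_in_bytes() == -wordSize
      words[9] = MAGIC_2;                               // magic_number_2_offset_in_bytes() == sizeof(RicochetFrame)

      // The checks RicochetFrame::verify() is expected to make, in sketch form:
      assert(*(rf_base - 1) == MAGIC_1);
      assert(*(rf_base + 8) == MAGIC_2);
      return 0;
    }
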
--- a/src/cpu/x86/vm/runtime_x86_32.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/runtime_x86_32.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,6 @@
 
 #define __ masm->
 
-ExceptionBlob*     OptoRuntime::_exception_blob;
-
 //------------------------------generate_exception_blob---------------------------
 // creates exception blob at the end
 // Using exception blob, this code is jumped from a compiled method.
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -42,18 +42,6 @@
 #endif
 
 #define __ masm->
-#ifdef COMPILER2
-UncommonTrapBlob   *SharedRuntime::_uncommon_trap_blob;
-#endif // COMPILER2
-
-DeoptimizationBlob *SharedRuntime::_deopt_blob;
-SafepointBlob      *SharedRuntime::_polling_page_safepoint_handler_blob;
-SafepointBlob      *SharedRuntime::_polling_page_return_handler_blob;
-RuntimeStub*       SharedRuntime::_wrong_method_blob;
-RuntimeStub*       SharedRuntime::_ic_miss_blob;
-RuntimeStub*       SharedRuntime::_resolve_opt_virtual_call_blob;
-RuntimeStub*       SharedRuntime::_resolve_virtual_call_blob;
-RuntimeStub*       SharedRuntime::_resolve_static_call_blob;
 
 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
 
@@ -2791,7 +2779,7 @@
 // setup oopmap, and calls safepoint code to stop the compiled code for
 // a safepoint.
 //
-static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
 
   // Account for thread arg in our frame
   const int additional_words = 1;
@@ -2888,7 +2876,7 @@
 // but since this is generic code we don't know what they are and the caller
 // must do any gc of the args.
 //
-static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
+RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
 
   // allocate space for the code
@@ -2970,34 +2958,3 @@
   // frame_size_words or bytes??
   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
 }
-
-void SharedRuntime::generate_stubs() {
-
-  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
-                                        "wrong_method_stub");
-
-  _ic_miss_blob      = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
-                                        "ic_miss_stub");
-
-  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
-                                        "resolve_opt_virtual_call");
-
-  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
-                                        "resolve_virtual_call");
-
-  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
-                                        "resolve_static_call");
-
-  _polling_page_safepoint_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), false);
-
-  _polling_page_return_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), true);
-
-  generate_deopt_blob();
-#ifdef COMPILER2
-  generate_uncommon_trap_blob();
-#endif // COMPILER2
-}
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -41,24 +41,10 @@
 #include "opto/runtime.hpp"
 #endif
 
-DeoptimizationBlob *SharedRuntime::_deopt_blob;
-#ifdef COMPILER2
-UncommonTrapBlob   *SharedRuntime::_uncommon_trap_blob;
-ExceptionBlob      *OptoRuntime::_exception_blob;
-#endif // COMPILER2
-
-SafepointBlob      *SharedRuntime::_polling_page_safepoint_handler_blob;
-SafepointBlob      *SharedRuntime::_polling_page_return_handler_blob;
-RuntimeStub*       SharedRuntime::_wrong_method_blob;
-RuntimeStub*       SharedRuntime::_ic_miss_blob;
-RuntimeStub*       SharedRuntime::_resolve_opt_virtual_call_blob;
-RuntimeStub*       SharedRuntime::_resolve_virtual_call_blob;
-RuntimeStub*       SharedRuntime::_resolve_static_call_blob;
+#define __ masm->
 
 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
 
-#define __ masm->
-
 class SimpleRuntimeFrame {
 
   public:
@@ -3070,7 +3056,7 @@
 // Generate a special Compile2Runtime blob that saves all registers,
 // and setup oopmap.
 //
-static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
   assert(StubRoutines::forward_exception_entry() != NULL,
          "must be generated before");
 
@@ -3156,7 +3142,7 @@
 // but since this is generic code we don't know what they are and the caller
 // must do any gc of the args.
 //
-static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
+RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
 
   // allocate space for the code
@@ -3233,36 +3219,6 @@
 }
 
 
-void SharedRuntime::generate_stubs() {
-
-  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
-                                        "wrong_method_stub");
-  _ic_miss_blob =      generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
-                                        "ic_miss_stub");
-  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
-                                        "resolve_opt_virtual_call");
-
-  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
-                                        "resolve_virtual_call");
-
-  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
-                                        "resolve_static_call");
-  _polling_page_safepoint_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), false);
-
-  _polling_page_return_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), true);
-
-  generate_deopt_blob();
-
-#ifdef COMPILER2
-  generate_uncommon_trap_blob();
-#endif // COMPILER2
-}
-
-
 #ifdef COMPILER2
 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
 //
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2151,6 +2151,8 @@
   // if they expect all registers to be preserved.
   enum layout {
     thread_off,    // last_java_sp
+    arg1_off,
+    arg2_off,
     rbp_off,       // callee saved register
     ret_pc,
     framesize
@@ -2185,7 +2187,7 @@
   // either at call sites or otherwise assume that stack unwinding will be initiated,
   // so caller saved registers were assumed volatile in the compiler.
   address generate_throw_exception(const char* name, address runtime_entry,
-                                   bool restore_saved_exception_pc) {
+                                   bool restore_saved_exception_pc, Register arg1 = noreg, Register arg2 = noreg) {
 
     int insts_size = 256;
     int locs_size  = 32;
@@ -2218,6 +2220,13 @@
 
     // push java thread (becomes first argument of C function)
     __ movptr(Address(rsp, thread_off * wordSize), java_thread);
+    if (arg1 != noreg) {
+      __ movptr(Address(rsp, arg1_off * wordSize), arg1);
+    }
+    if (arg2 != noreg) {
+      assert(arg1 != noreg, "missing reg arg");
+      __ movptr(Address(rsp, arg2_off * wordSize), arg2);
+    }
 
     // Set up last_Java_sp and last_Java_fp
     __ set_last_Java_frame(java_thread, rsp, rbp, NULL);
@@ -2309,6 +2318,12 @@
                                                                                    CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
     StubRoutines::_d2l_wrapper                              = generate_d2i_wrapper(T_LONG,
                                                                                    CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
+
+    // Build this early so it's available for the interpreter
+    StubRoutines::_throw_WrongMethodTypeException_entry =
+      generate_throw_exception("WrongMethodTypeException throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
+                               false, rax, rcx);
   }
 
 
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2934,7 +2934,9 @@
   // caller saved registers were assumed volatile in the compiler.
   address generate_throw_exception(const char* name,
                                    address runtime_entry,
-                                   bool restore_saved_exception_pc) {
+                                   bool restore_saved_exception_pc,
+                                   Register arg1 = noreg,
+                                   Register arg2 = noreg) {
     // Information about frame layout at time of blocking runtime call.
     // Note that we only have to preserve callee-saved registers since
     // the compilers are responsible for supplying a continuation point
@@ -2980,6 +2982,13 @@
     __ set_last_Java_frame(rsp, rbp, NULL);
 
     // Call runtime
+    if (arg1 != noreg) {
+      assert(arg2 != c_rarg1, "clobbered");
+      __ movptr(c_rarg1, arg1);
+    }
+    if (arg2 != noreg) {
+      __ movptr(c_rarg2, arg2);
+    }
     __ movptr(c_rarg0, r15_thread);
     BLOCK_COMMENT("call runtime_entry");
     __ call(RuntimeAddress(runtime_entry));
@@ -3052,6 +3061,14 @@
     StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
 
     StubRoutines::x86::_verify_mxcsr_entry    = generate_verify_mxcsr();
+
+    // Build this early so it's available for the interpreter.  Stub
+    // expects the required and actual types as register arguments in
+    // j_rarg0 and j_rarg1 respectively.
+    StubRoutines::_throw_WrongMethodTypeException_entry =
+      generate_throw_exception("WrongMethodTypeException throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
+                               false, rax, rcx);
   }
 
   void generate_all() {
--- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -34,11 +34,6 @@
   code_size2 = 22000            // simply increase if too small (assembler will crash if too small)
 };
 
-// MethodHandles adapters
-enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 10000
-};
-
 class x86 {
  friend class StubGenerator;
  friend class VMStructs;
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,11 +36,6 @@
   code_size2 = 22000           // simply increase if too small (assembler will crash if too small)
 };
 
-// MethodHandles adapters
-enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 40000
-};
-
 class x86 {
  friend class StubGenerator;
 
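
The method_handles_adapters_code_size constants removed from the two stubRoutines headers appear to be superseded by the larger adapter_code_size enum added in methodHandles_x86.hpp above. Assuming the usual meaning of the NOT_LP64 / LP64_ONLY / DEBUG_ONLY macros, that expression works out as:

    32-bit product:  30000
    32-bit debug:    30000 + 10000  =  40000
    64-bit product:  80000
    64-bit debug:    80000 + 120000 = 200000
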
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -112,32 +112,6 @@
   return entry;
 }
 
-// Arguments are: required type at TOS+4, failing object (or NULL) at TOS.
-address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
-  address entry = __ pc();
-
-  __ pop(rbx);                  // actual failing object is at TOS
-  __ pop(rax);                  // required type is at TOS+4
-
-  __ verify_oop(rbx);
-  __ verify_oop(rax);
-
-  // Various method handle types use interpreter registers as temps.
-  __ restore_bcp();
-  __ restore_locals();
-
-  // Expression stack must be empty before entering the VM for an exception.
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_WrongMethodTypeException),
-             // pass required type, failing object (or NULL)
-             rax, rbx);
-  return entry;
-}
-
-
 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
   assert(!pass_oop || message == NULL, "either oop or message but not both");
   address entry = __ pc();
@@ -776,6 +750,98 @@
 
 }
 
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#ifndef SERIALGC
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+  //    2. The slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:-
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code below can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+  // This code is based on generate_accessor_entry.
+
+  // rbx,: methodOop
+  // rcx: receiver (preserve for slow entry into asm interpreter)
+
+  // rsi: senderSP must be preserved for slow path, set SP to it on fast path
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+
+    // Check if local 0 != NULL
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ movptr(rax, Address(rsp, wordSize));
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, slow_path);
+
+    // rax: local 0 (must be preserved across the G1 barrier call)
+    //
+    // rbx: method (at this point it's scratch)
+    // rcx: receiver (at this point it's scratch)
+    // rdx: scratch
+    // rdi: scratch
+    //
+    // rsi: sender sp
+
+    // Preserve the sender sp in case the pre-barrier
+    // calls the runtime
+    __ push(rsi);
+
+    // Load the value of the referent field.
+    const Address field_address(rax, referent_offset);
+    __ movptr(rax, field_address);
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer.
+    __ get_thread(rcx);
+    __ g1_write_barrier_pre(noreg /* obj */,
+                            rax /* pre_val */,
+                            rcx /* thread */,
+                            rbx /* tmp */,
+                            true /* tosca_live */,
+                            true /* expand_call */);
+
+    // _areturn
+    __ pop(rsi);                // get sender sp
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    __ bind(slow_path);
+    (void) generate_normal_entry(false);
+
+    return entry;
+  }
+#endif // SERIALGC
+
+  // If G1 is not enabled then attempt to go through the accessor entry point
+  // Reference.get is an accessor
+  return generate_accessor_entry();
+}
+
 //
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the native method
@@ -1444,6 +1510,8 @@
     case Interpreter::java_lang_math_log     : // fall thru
     case Interpreter::java_lang_math_log10   : // fall thru
     case Interpreter::java_lang_math_sqrt    : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);     break;
+    case Interpreter::java_lang_ref_reference_get
+                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
     default                                  : ShouldNotReachHere();                                                       break;
   }
 
@@ -1495,6 +1563,7 @@
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
+                                           int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
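
The generate_Reference_get_entry comments above describe a fast path that null-checks the receiver, loads the referent, runs the G1 SATB pre-barrier, and returns, falling back to the normal method entry otherwise. A minimal C++ sketch of that control flow, with a hypothetical satb_log_pre_barrier standing in for the real barrier; this is illustration only, not interpreter code:

#include <cstdio>

struct Reference { void* referent; };

// Hypothetical stand-in for the G1 SATB pre-barrier: log the value that is
// about to escape so concurrent marking keeps it live.
static void satb_log_pre_barrier(void* pre_val) {
  if (pre_val != nullptr) std::printf("SATB enqueue %p\n", pre_val);
}

// Shape of the intrinsified entry: a null receiver falls back to the normal
// method entry (which raises the NullPointerException); otherwise load the
// referent, run the pre-barrier, and return it.
void* reference_get(Reference* receiver) {
  if (receiver == nullptr) {
    return nullptr;                      // placeholder for the vanilla slow path
  }
  void* referent = receiver->referent;   // _aload_0, _getfield
  satb_log_pre_barrier(referent);
  return referent;                       // _areturn
}
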
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -120,31 +120,6 @@
   return entry;
 }
 
-// Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4.
-address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
-  address entry = __ pc();
-
-  __ pop(c_rarg2);              // failing object is at TOS
-  __ pop(c_rarg1);              // required type is at TOS+8
-
-  __ verify_oop(c_rarg1);
-  __ verify_oop(c_rarg2);
-
-  // Various method handle types use interpreter registers as temps.
-  __ restore_bcp();
-  __ restore_locals();
-
-  // Expression stack must be empty before entering the VM for an exception.
-  __ empty_expression_stack();
-
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_WrongMethodTypeException),
-             // pass required type, failing object (or NULL)
-             c_rarg1, c_rarg2);
-  return entry;
-}
-
 address TemplateInterpreterGenerator::generate_exception_handler_common(
         const char* name, const char* message, bool pass_oop) {
   assert(!pass_oop || message == NULL, "either oop or message but not both");
@@ -757,6 +732,95 @@
   return entry_point;
 }
 
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#ifndef SERIALGC
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+  //    2. The slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:-
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+  // This code is based on generate_accessor_entry.
+  //
+  // rbx: methodOop
+
+  // r13: senderSP must be preserved for slow path, set SP to it on fast path
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+    // rbx: method
+
+    // Check if local 0 != NULL
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ movptr(rax, Address(rsp, wordSize));
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, slow_path);
+
+    // rax: local 0
+    // rbx: method (but can be used as scratch now)
+    // rdx: scratch
+    // rdi: scratch
+
+    // Load the value of the referent field.
+    const Address field_address(rax, referent_offset);
+    __ load_heap_oop(rax, field_address);
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer.
+    __ g1_write_barrier_pre(noreg /* obj */,
+                            rax /* pre_val */,
+                            r15_thread /* thread */,
+                            rbx /* tmp */,
+                            true /* tosca_live */,
+                            true /* expand_call */);
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, r13);           // set sp to sender sp
+    __ jmp(rdi);
+    __ ret(0);
+
+    // generate a vanilla interpreter entry as the slow path
+    __ bind(slow_path);
+    (void) generate_normal_entry(false);
+
+    return entry;
+  }
+#endif // SERIALGC
+
+  // If G1 is not enabled then attempt to go through the accessor entry point
+  // Reference.get is an accessor
+  return generate_accessor_entry();
+}
+
+
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the
 // native method than the typical interpreter frame setup.
@@ -1463,6 +1527,8 @@
   case Interpreter::java_lang_math_log     : // fall thru
   case Interpreter::java_lang_math_log10   : // fall thru
   case Interpreter::java_lang_math_sqrt    : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
+  case Interpreter::java_lang_ref_reference_get
+                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
   default                                  : ShouldNotReachHere();                                                       break;
   }
 
@@ -1512,6 +1578,7 @@
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
+                                           int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -140,7 +140,12 @@
         }
         __ get_thread(rcx);
         __ save_bcp();
-        __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);
+        __ g1_write_barrier_pre(rdx /* obj */,
+                                rbx /* pre_val */,
+                                rcx /* thread */,
+                                rsi /* tmp */,
+                                val != noreg /* tosca_live */,
+                                false /* expand_call */);
 
         // Do the actual store
         // noreg means NULL
@@ -149,7 +154,11 @@
           // No post barrier for NULL
         } else {
           __ movl(Address(rdx, 0), val);
-          __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
+          __ g1_write_barrier_post(rdx /* store_adr */,
+                                   val /* new_val */,
+                                   rcx /* thread */,
+                                   rbx /* tmp */,
+                                   rsi /* tmp2 */);
         }
         __ restore_bcp();
 
@@ -364,15 +373,17 @@
     __ jcc(Assembler::equal, L);
     __ cmpl(rdx, JVM_CONSTANT_String);
     __ jcc(Assembler::equal, L);
+    __ cmpl(rdx, JVM_CONSTANT_Object);
+    __ jcc(Assembler::equal, L);
     __ stop("unexpected tag type in ldc");
     __ bind(L);
   }
 #endif
   Label isOop;
   // atos and itos
-  // String is only oop type we will see here
-  __ cmpl(rdx, JVM_CONSTANT_String);
-  __ jccb(Assembler::equal, isOop);
+  // Integer is only non-oop type we will see here
+  __ cmpl(rdx, JVM_CONSTANT_Integer);
+  __ jccb(Assembler::notEqual, isOop);
   __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
   __ push(itos);
   __ jmp(Done);
@@ -413,7 +424,7 @@
 
   Label L_done, L_throw_exception;
   const Register con_klass_temp = rcx;  // same as Rcache
-  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(con_klass_temp, rax);
   __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
   __ jcc(Assembler::notEqual, L_done);
   __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
@@ -423,7 +434,7 @@
 
   // Load the exception from the system-array which wraps it:
   __ bind(L_throw_exception);
-  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
 
   __ bind(L_done);
@@ -937,9 +948,9 @@
   __ jcc(Assembler::zero, is_null);
 
   // Move subklass into EBX
-  __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rbx, rax);
   // Move superklass into EAX
-  __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rax, rdx);
   __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
   // Compress array+index*wordSize+12 into a single register.  Frees ECX.
   __ lea(rdx, element_address);
@@ -1992,7 +2003,7 @@
   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
     assert(state == vtos, "only valid state");
     __ movptr(rax, aaddress(0));
-    __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
+    __ load_klass(rdi, rax);
     __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
     Label skip_register_finalizer;
@@ -2939,7 +2950,7 @@
   // get receiver klass
   __ null_check(recv, oopDesc::klass_offset_in_bytes());
   // Keep recv in rcx because the callee expects it there
-  __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rax, recv);
   __ verify_oop(rax);
 
   // profile this call
@@ -3019,7 +3030,7 @@
 
   // Get receiver klass into rdx - also a null check
   __ restore_locals();  // restore rdi
-  __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rdx, rcx);
   __ verify_oop(rdx);
 
   // profile this call
@@ -3074,6 +3085,7 @@
 
 void TemplateTable::invokedynamic(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_oop, "use this argument");
 
   if (!EnableInvokeDynamic) {
     // We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3086,7 +3098,6 @@
     return;
   }
 
-  assert(byte_no == f1_oop, "use this argument");
   prepare_invoke(rax, rbx, byte_no);
 
   // rax: CallSite object (f1)
@@ -3097,14 +3108,14 @@
   Register rax_callsite      = rax;
   Register rcx_method_handle = rcx;
 
-  if (ProfileInterpreter) {
-    // %%% should make a type profile for any invokedynamic that takes a ref argument
-    // profile this call
-    __ profile_call(rsi);
-  }
-
-  __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
+  // %%% should make a type profile for any invokedynamic that takes a ref argument
+  // profile this call
+  __ profile_call(rsi);
+
+  __ verify_oop(rax_callsite);
+  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
   __ null_check(rcx_method_handle);
+  __ verify_oop(rcx_method_handle);
   __ prepare_to_jump_from_interpreted();
   __ jump_to_method_handle_entry(rcx_method_handle, rdx);
 }
@@ -3249,7 +3260,7 @@
                 (int32_t)markOopDesc::prototype()); // header
       __ pop(rcx);   // get saved klass back in the register.
     }
-    __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx);  // klass
+    __ store_klass(rax, rcx);  // klass
 
     {
       SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
@@ -3324,7 +3335,7 @@
   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
 
   __ bind(resolved);
-  __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rbx, rdx);
 
   // Generate subtype check.  Blows ECX.  Resets EDI.  Object in EDX.
   // Superklass in EAX.  Subklass in EBX.
@@ -3367,12 +3378,12 @@
   __ push(atos);
   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
   __ pop_ptr(rdx);
-  __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rdx, rdx);
   __ jmp(resolved);
 
   // Get superklass in EAX and subklass in EDX
   __ bind(quicked);
-  __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rdx, rax);
   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
 
   __ bind(resolved);
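
The templateTable changes above (and the x86_64 ones that follow) spell out the g1_write_barrier_pre/g1_write_barrier_post arguments around each oop store. A standalone sketch of that pre/store/post ordering, with invented logging in place of the real G1 queues:

#include <cstdio>

using oop = void*;

static void g1_pre_barrier(oop old_val) {               // SATB: old value must stay reachable
  if (old_val != nullptr) std::printf("SATB enqueue %p\n", old_val);
}

static void g1_post_barrier(oop* field, oop new_val) {  // remembered-set update
  std::printf("dirty card for %p (new value %p)\n", (void*)field, new_val);
}

// Ordering the generated code follows: pre-barrier on the old value, then the
// store, then a post-barrier only for non-null new values.
void oop_store(oop* field, oop new_val) {
  g1_pre_barrier(*field);
  *field = new_val;
  if (new_val != nullptr) {
    g1_post_barrier(field, new_val);
  }
}
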
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -147,12 +147,21 @@
         } else {
           __ leaq(rdx, obj);
         }
-        __ g1_write_barrier_pre(rdx, r8, rbx, val != noreg);
+        __ g1_write_barrier_pre(rdx /* obj */,
+                                rbx /* pre_val */,
+                                r15_thread /* thread */,
+                                r8  /* tmp */,
+                                val != noreg /* tosca_live */,
+                                false /* expand_call */);
         if (val == noreg) {
           __ store_heap_oop_null(Address(rdx, 0));
         } else {
           __ store_heap_oop(Address(rdx, 0), val);
-          __ g1_write_barrier_post(rdx, val, r8, rbx);
+          __ g1_write_barrier_post(rdx /* store_adr */,
+                                   val /* new_val */,
+                                   r15_thread /* thread */,
+                                   r8 /* tmp */,
+                                   rbx /* tmp2 */);
         }
 
       }
@@ -376,6 +385,8 @@
     __ jcc(Assembler::equal, L);
     __ cmpl(rdx, JVM_CONSTANT_String);
     __ jcc(Assembler::equal, L);
+    __ cmpl(rdx, JVM_CONSTANT_Object);
+    __ jcc(Assembler::equal, L);
     __ stop("unexpected tag type in ldc");
     __ bind(L);
   }
@@ -427,7 +438,7 @@
   Label L_done, L_throw_exception;
   const Register con_klass_temp = rcx;  // same as cache
   const Register array_klass_temp = rdx;  // same as index
-  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(con_klass_temp, rax);
   __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
   __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
   __ jcc(Assembler::notEqual, L_done);
@@ -438,7 +449,7 @@
 
   // Load the exception from the system-array which wraps it:
   __ bind(L_throw_exception);
-  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
 
   __ bind(L_done);
@@ -3128,7 +3139,6 @@
     return;
   }
 
-  assert(byte_no == f1_oop, "use this argument");
   prepare_invoke(rax, rbx, byte_no);
 
   // rax: CallSite object (f1)
@@ -3139,14 +3149,14 @@
   Register rax_callsite      = rax;
   Register rcx_method_handle = rcx;
 
-  if (ProfileInterpreter) {
-    // %%% should make a type profile for any invokedynamic that takes a ref argument
-    // profile this call
-    __ profile_call(r13);
-  }
-
-  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
+  // %%% should make a type profile for any invokedynamic that takes a ref argument
+  // profile this call
+  __ profile_call(r13);
+
+  __ verify_oop(rax_callsite);
+  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
   __ null_check(rcx_method_handle);
+  __ verify_oop(rcx_method_handle);
   __ prepare_to_jump_from_interpreted();
   __ jump_to_method_handle_entry(rcx_method_handle, rdx);
 }
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates.  All Rights Reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -441,12 +441,25 @@
       }
     }
 
-    // On family 21 processors default is no sw prefetch
-    if ( cpu_family() == 21 ) {
+    // some defaults for AMD family 15h
+    if ( cpu_family() == 0x15 ) {
+      // On family 15h processors default is no sw prefetch
       if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
         AllocatePrefetchStyle = 0;
       }
+      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
+      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
+        AllocatePrefetchInstr = 3;
+      }
+      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
+      if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
+        UseXMMForArrayCopy = true;
+      }
+      if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
+        UseUnalignedLoadStores = true;
+      }
     }
+
   }
 
   if( is_intel() ) { // Intel cpus specific settings
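
The family 15h block above only overrides tunables the user left at their defaults (FLAG_IS_DEFAULT). A small sketch of that pattern with an invented flag type, not the actual HotSpot flag machinery:

struct IntFlag {
  int  value;
  bool is_default;   // stands in for FLAG_IS_DEFAULT(...)
};

// Only touch tunables the user has not set explicitly on the command line.
void apply_family_15h_defaults(IntFlag& prefetch_style, IntFlag& prefetch_instr) {
  if (prefetch_style.is_default) {
    prefetch_style.value = 0;   // no software prefetch by default
  }
  if (prefetch_instr.is_default) {
    prefetch_instr.value = 3;   // PREFETCHW, per the comment in the hunk above
  }
}
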
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates.  All Rights Reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/x86_32.ad	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/x86_32.ad	Wed Jul 27 17:32:44 2011 -0700
@@ -12989,6 +12989,53 @@
 %}
 
 // ============================================================================
+// Counted Loop limit node which represents the exact final iterator value.
+// Note: the resulting value should fit into the integer range since
+// counted loops have a limit check on overflow.
+instruct loopLimit_eReg(eAXRegI limit, nadxRegI init, immI stride, eDXRegI limit_hi, nadxRegI tmp, eFlagsReg flags) %{
+  match(Set limit (LoopLimit (Binary init limit) stride));
+  effect(TEMP limit_hi, TEMP tmp, KILL flags);
+  ins_cost(300);
+
+  format %{ "loopLimit $init,$limit,$stride  # $limit = $init + $stride *( $limit - $init + $stride -1)/ $stride, kills $limit_hi" %}
+  ins_encode %{
+    int strd = (int)$stride$$constant;
+    assert(strd != 1 && strd != -1, "sanity");
+    int m1 = (strd > 0) ? 1 : -1;
+    // Convert limit to long (EAX:EDX)
+    __ cdql();
+    // Convert init to long (init:tmp)
+    __ movl($tmp$$Register, $init$$Register);
+    __ sarl($tmp$$Register, 31);
+    // $limit - $init
+    __ subl($limit$$Register, $init$$Register);
+    __ sbbl($limit_hi$$Register, $tmp$$Register);
+    // + ($stride - 1)
+    if (strd > 0) {
+      __ addl($limit$$Register, (strd - 1));
+      __ adcl($limit_hi$$Register, 0);
+      __ movl($tmp$$Register, strd);
+    } else {
+      __ addl($limit$$Register, (strd + 1));
+      __ adcl($limit_hi$$Register, -1);
+      __ lneg($limit_hi$$Register, $limit$$Register);
+      __ movl($tmp$$Register, -strd);
+    }
+    // signed division: (EAX:EDX) / pos_stride
+    __ idivl($tmp$$Register);
+    if (strd < 0) {
+      // restore sign
+      __ negl($tmp$$Register);
+    }
+    // (EAX) * stride
+    __ mull($tmp$$Register);
+    // + init (ignore upper bits)
+    __ addl($limit$$Register, $init$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// ============================================================================
 // Branch Instructions
 // Jump Table
 instruct jumpXtnd(eRegI switch_val) %{
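
loopLimit_eReg above computes the exact final iterator value as $init + $stride * (($limit - $init + $stride - 1) / $stride) using 64-bit intermediates so the subtraction cannot overflow. A standalone arithmetic sketch of the same computation, assuming span and stride have matching signs as the counted-loop limit check guarantees:

#include <cassert>
#include <cstdint>

// Exact final iterator value: init + stride * ceil((limit - init) / stride),
// computed with 64-bit intermediates and truncated to 32 bits as the stub does.
int32_t loop_limit(int32_t init, int32_t limit, int32_t stride) {
  assert(stride != 0 && stride != 1 && stride != -1 && "trivial strides handled elsewhere");
  int64_t span  = (int64_t)limit - (int64_t)init;           // $limit - $init
  int64_t bias  = (stride > 0) ? stride - 1 : stride + 1;   // round the trip count up
  int64_t trips = (span + bias) / stride;                   // signed division
  return (int32_t)(init + stride * trips);                  // low 32 bits, + $init
}
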
--- a/src/cpu/x86/vm/x86_64.ad	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/x86/vm/x86_64.ad	Wed Jul 27 17:32:44 2011 -0700
@@ -830,6 +830,17 @@
   }
 }
 
+// This could be in MacroAssembler but it's fairly C2 specific
+void emit_cmpfp_fixup(MacroAssembler& _masm) {
+  Label exit;
+  __ jccb(Assembler::noParity, exit);
+  __ pushf();
+  __ andq(Address(rsp, 0), 0xffffff2b);
+  __ popf();
+  __ bind(exit);
+  __ nop(); // (target for branch to avoid branch to branch)
+}
+
 
 //=============================================================================
 const bool Matcher::constant_table_absolute_addressing = true;
@@ -2173,27 +2184,9 @@
     emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
   %}
 
-  enc_class cmpfp_fixup()
-  %{
-    // jnp,s exit
-    emit_opcode(cbuf, 0x7B);
-    emit_d8(cbuf, 0x0A);
-
-    // pushfq
-    emit_opcode(cbuf, 0x9C);
-
-    // andq $0xffffff2b, (%rsp)
-    emit_opcode(cbuf, Assembler::REX_W);
-    emit_opcode(cbuf, 0x81);
-    emit_opcode(cbuf, 0x24);
-    emit_opcode(cbuf, 0x24);
-    emit_d32(cbuf, 0xffffff2b);
-
-    // popfq
-    emit_opcode(cbuf, 0x9D);
-
-    // nop (target for branch to avoid branch to branch)
-    emit_opcode(cbuf, 0x90);
+  enc_class cmpfp_fixup() %{
+      MacroAssembler _masm(&cbuf);
+      emit_cmpfp_fixup(_masm);
   %}
 
   enc_class cmpfp3(rRegI dst)
@@ -3179,50 +3172,6 @@
     emit_rm(cbuf, 0x3, 0x0, dstenc);
   %}
 
-  enc_class enc_cmpLTP(no_rcx_RegI p, no_rcx_RegI q, no_rcx_RegI y,
-                       rcx_RegI tmp)
-  %{
-    // cadd_cmpLT
-
-    int tmpReg = $tmp$$reg;
-
-    int penc = $p$$reg;
-    int qenc = $q$$reg;
-    int yenc = $y$$reg;
-
-    // subl $p,$q
-    if (penc < 8) {
-      if (qenc >= 8) {
-        emit_opcode(cbuf, Assembler::REX_B);
-      }
-    } else {
-      if (qenc < 8) {
-        emit_opcode(cbuf, Assembler::REX_R);
-      } else {
-        emit_opcode(cbuf, Assembler::REX_RB);
-      }
-    }
-    emit_opcode(cbuf, 0x2B);
-    emit_rm(cbuf, 0x3, penc & 7, qenc & 7);
-
-    // sbbl $tmp, $tmp
-    emit_opcode(cbuf, 0x1B);
-    emit_rm(cbuf, 0x3, tmpReg, tmpReg);
-
-    // andl $tmp, $y
-    if (yenc >= 8) {
-      emit_opcode(cbuf, Assembler::REX_B);
-    }
-    emit_opcode(cbuf, 0x23);
-    emit_rm(cbuf, 0x3, tmpReg, yenc & 7);
-
-    // addl $p,$tmp
-    if (penc >= 8) {
-        emit_opcode(cbuf, Assembler::REX_R);
-    }
-    emit_opcode(cbuf, 0x03);
-    emit_rm(cbuf, 0x3, penc & 7, tmpReg);
-  %}
 
   // Compare the longs and set -1, 0, or 1 into dst
   enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
@@ -10206,9 +10155,7 @@
 %}
 
 
-instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y,
-                         rRegI tmp,
-                         rFlagsReg cr)
+instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rRegI tmp, rFlagsReg cr)
 %{
   match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
   effect(TEMP tmp, KILL cr);
@@ -10218,25 +10165,19 @@
             "sbbl    $tmp, $tmp\n\t"
             "andl    $tmp, $y\n\t"
             "addl    $p, $tmp" %}
-  ins_encode(enc_cmpLTP(p, q, y, tmp));
+  ins_encode %{
+    Register Rp = $p$$Register;
+    Register Rq = $q$$Register;
+    Register Ry = $y$$Register;
+    Register Rt = $tmp$$Register;
+    __ subl(Rp, Rq);
+    __ sbbl(Rt, Rt);
+    __ andl(Rt, Ry);
+    __ addl(Rp, Rt);
+  %}
   ins_pipe(pipe_cmplt);
 %}
 
-/* If I enable this, I encourage spilling in the inner loop of compress.
-instruct cadd_cmpLTMask_mem( rRegI p, rRegI q, memory y, rRegI tmp, rFlagsReg cr )
-%{
-  match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
-  effect( TEMP tmp, KILL cr );
-  ins_cost(400);
-
-  format %{ "SUB    $p,$q\n\t"
-            "SBB    RCX,RCX\n\t"
-            "AND    RCX,$y\n\t"
-            "ADD    $p,RCX" %}
-  ins_encode( enc_cmpLTP_mem(p,q,y,tmp) );
-%}
-*/
-
 //---------- FP Instructions------------------------------------------------
 
 instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
@@ -10305,14 +10246,8 @@
             "popfq\n"
     "exit:   nop\t# avoid branch to branch" %}
   ins_encode %{
-    Label L_exit;
     __ ucomiss($src$$XMMRegister, $constantaddress($con));
-    __ jcc(Assembler::noParity, L_exit);
-    __ pushf();
-    __ andq(rsp, 0xffffff2b);
-    __ popf();
-    __ bind(L_exit);
-    __ nop();
+    emit_cmpfp_fixup(_masm);
   %}
   ins_pipe(pipe_slow);
 %}
@@ -10393,14 +10328,8 @@
             "popfq\n"
     "exit:   nop\t# avoid branch to branch" %}
   ins_encode %{
-    Label L_exit;
     __ ucomisd($src$$XMMRegister, $constantaddress($con));
-    __ jcc(Assembler::noParity, L_exit);
-    __ pushf();
-    __ andq(rsp, 0xffffff2b);
-    __ popf();
-    __ bind(L_exit);
-    __ nop();
+    emit_cmpfp_fixup(_masm);
   %}
   ins_pipe(pipe_slow);
 %}
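
cadd_cmpLTMask above is now encoded inline as sub/sbb/and/add. A standalone sketch of that branch-free idiom, mirroring the unsigned-borrow behaviour of the emitted instructions (names invented):

#include <cstdint>

// p' = (p - q) + ((p borrows below q) ? y : 0), with no branch:
// subl produces the borrow, sbbl materializes it as an all-ones mask,
// andl selects y or zero, addl applies it.
int32_t cadd_cmplt_mask(uint32_t p, uint32_t q, int32_t y) {
  uint32_t diff = p - q;              // subl $p, $q
  int32_t  mask = (p < q) ? -1 : 0;   // sbbl $tmp, $tmp
  return (int32_t)diff + (y & mask);  // andl $tmp, $y ; addl $p, $tmp
}
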
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -657,7 +657,7 @@
   if (!is_exact) {
     if (method->intrinsic_id() == vmIntrinsics::_invokeExact) {
       CALL_VM_NOCHECK_NOFIX(
-        InterpreterRuntime::throw_WrongMethodTypeException(
+        SharedRuntime::throw_WrongMethodTypeException(
           thread, method_type, mhtype));
       // NB all oops trashed!
       assert(HAS_PENDING_EXCEPTION, "should do");
@@ -673,7 +673,7 @@
     oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form);
     if (adapter == NULL) {
       CALL_VM_NOCHECK_NOFIX(
-        InterpreterRuntime::throw_WrongMethodTypeException(
+        SharedRuntime::throw_WrongMethodTypeException(
           thread, method_type, mhtype));
       // NB all oops trashed!
       assert(HAS_PENDING_EXCEPTION, "should do");
@@ -1302,6 +1302,26 @@
   return generate_entry((address) CppInterpreter::accessor_entry);
 }
 
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#ifndef SERIALGC
+  if (UseG1GC) {
+    // We need a routine that generates code to:
+    //   * load the value in the referent field, and
+    //   * pass that value to the pre-barrier.
+    //
+    // In the case of G1 this will record the value of the
+    // referent in an SATB buffer if marking is active.
+    // This will cause concurrent marking to mark the referent
+    // field as live.
+    Unimplemented();
+  }
+#endif // SERIALGC
+
+  // If G1 is not enabled then attempt to go through the accessor entry point
+  // Reference.get is an accessor
+  return generate_accessor_entry();
+}
+
 address InterpreterGenerator::generate_native_entry(bool synchronized) {
   assert(synchronized == false, "should be");
 
@@ -1357,6 +1377,10 @@
     entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
     break;
 
+  case Interpreter::java_lang_ref_reference_get:
+    entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
+    break;
+
   default:
     ShouldNotReachHere();
   }
@@ -1403,6 +1427,7 @@
                                            int       tempcount,
                                            int       popframe_extra_args,
                                            int       moncount,
+                                           int       caller_actual_parameters,
                                            int       callee_param_count,
                                            int       callee_locals,
                                            frame*    caller,
--- a/src/cpu/zero/vm/interpreterGenerator_zero.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/zero/vm/interpreterGenerator_zero.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -37,6 +37,7 @@
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
   address generate_empty_entry();
   address generate_accessor_entry();
+  address generate_Reference_get_entry();
   address generate_method_handle_entry();
 
 #endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
--- a/src/cpu/zero/vm/interpreter_zero.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/cpu/zero/vm/interpreter_zero.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -82,24 +82,6 @@
   return true;
 }
 
-int AbstractInterpreter::size_activation(methodOop method,
-                                         int tempcount,
-                                         int popframe_extra_args,
-                                         int moncount,
-                                         int callee_param_count,
-                                         int callee_locals,
-                                         bool is_top_frame) {
-  return layout_activation(method,
-                           tempcount,
-                           popframe_extra_args,
-                           moncount,
-                           callee_param_count,
-                           callee_locals,
-                           (frame*) NULL,
-                           (frame*) NULL,
-                           is_top_frame);
-}
-
 void Deoptimization::unwind_callee_save_values(frame* f,
                                                vframeArray* vframe_array) {
 }
--- a/src/os/linux/vm/globals_linux.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os/linux/vm/globals_linux.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,19 +29,25 @@
 // Defines Linux specific flags. They are not available on other platforms.
 //
 #define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
-  product(bool, UseOprofile, false,                                 \
-        "enable support for Oprofile profiler")                     \
-                                                                    \
-  product(bool, UseLinuxPosixThreadCPUClocks, true,                 \
-          "enable fast Linux Posix clocks where available")
-// NB: The default value of UseLinuxPosixThreadCPUClocks may be
-// overridden in Arguments::parse_each_vm_init_arg.
+  product(bool, UseOprofile, false,                                     \
+        "enable support for Oprofile profiler")                         \
+                                                                        \
+  product(bool, UseLinuxPosixThreadCPUClocks, true,                     \
+          "enable fast Linux Posix clocks where available")             \
+/*  NB: The default value of UseLinuxPosixThreadCPUClocks may be        \
+    overridden in Arguments::parse_each_vm_init_arg.  */                \
+                                                                        \
+  product(bool, UseHugeTLBFS, false,                                    \
+          "Use MAP_HUGETLB for large pages")                            \
+                                                                        \
+  product(bool, UseSHM, false,                                          \
+          "Use SYSV shared memory for large pages")
 
 //
 // Defines Linux-specific default values. The flags are available on all
 // platforms, but they may have different default values on other platforms.
 //
-define_pd_global(bool, UseLargePages, false);
+define_pd_global(bool, UseLargePages, true);
 define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
 define_pd_global(bool, UseThreadPriorities, true) ;
--- a/src/os/linux/vm/os_linux.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2465,16 +2465,40 @@
   return res != (uintptr_t) MAP_FAILED;
 }
 
+// Define MAP_HUGETLB here so we can build HotSpot on old systems.
+#ifndef MAP_HUGETLB
+#define MAP_HUGETLB 0x40000
+#endif
+
+// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
+#ifndef MADV_HUGEPAGE
+#define MADV_HUGEPAGE 14
+#endif
+
 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool exec) {
+  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+    uintptr_t res =
+      (uintptr_t) ::mmap(addr, size, prot,
+                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
+                         -1, 0);
+    return res != (uintptr_t) MAP_FAILED;
+  }
+
   return commit_memory(addr, size, exec);
 }
 
-void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
+void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
+  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
+    // be supported or the memory may already be backed by huge pages.
+    ::madvise(addr, bytes, MADV_HUGEPAGE);
+  }
+}
 
 void os::free_memory(char *addr, size_t bytes) {
-  ::mmap(addr, bytes, PROT_READ | PROT_WRITE,
-         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
+  ::madvise(addr, bytes, MADV_DONTNEED);
 }
 
 void os::numa_make_global(char *addr, size_t bytes) {
@@ -2812,6 +2836,43 @@
   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
 }
 
+bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
+  bool result = false;
+  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
+                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
+                  -1, 0);
+
+  if (p != (void *) -1) {
+    // We don't know if this really is a huge page or not.
+    FILE *fp = fopen("/proc/self/maps", "r");
+    if (fp) {
+      while (!feof(fp)) {
+        char chars[257];
+        long x = 0;
+        if (fgets(chars, sizeof(chars), fp)) {
+          if (sscanf(chars, "%lx-%*x", &x) == 1
+              && x == (long)p) {
+            if (strstr (chars, "hugepage")) {
+              result = true;
+              break;
+            }
+          }
+        }
+      }
+      fclose(fp);
+    }
+    munmap (p, page_size);
+    if (result)
+      return true;
+  }
+
+  if (warn) {
+    warning("HugeTLBFS is not supported by the operating system.");
+  }
+
+  return result;
+}
+
 /*
 * Set the coredump_filter bits to include largepages in core dump (bit 6)
 *
@@ -2853,8 +2914,22 @@
 
 static size_t _large_page_size = 0;
 
-bool os::large_page_init() {
-  if (!UseLargePages) return false;
+void os::large_page_init() {
+  if (!UseLargePages) {
+    UseHugeTLBFS = false;
+    UseSHM = false;
+    return;
+  }
+
+  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
+    // If UseLargePages is specified on the command line, try both methods;
+    // if it is left at its default, try only HugeTLBFS.
+    if (FLAG_IS_DEFAULT(UseLargePages)) {
+      UseHugeTLBFS = true;
+    } else {
+      UseHugeTLBFS = UseSHM = true;
+    }
+  }
 
   if (LargePageSizeInBytes) {
     _large_page_size = LargePageSizeInBytes;
@@ -2899,20 +2974,24 @@
     }
   }
 
+  // print a warning if any large page related flag is specified on command line
+  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+
   const size_t default_page_size = (size_t)Linux::page_size();
   if (_large_page_size > default_page_size) {
     _page_sizes[0] = _large_page_size;
     _page_sizes[1] = default_page_size;
     _page_sizes[2] = 0;
   }
+  UseHugeTLBFS = UseHugeTLBFS &&
+                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
+
+  if (UseHugeTLBFS)
+    UseSHM = false;
+
+  UseLargePages = UseHugeTLBFS || UseSHM;
 
   set_coredump_filter();
-
-  // Large page support is available on 2.6 or newer kernel, some vendors
-  // (e.g. Redhat) have backported it to their 2.4 based distributions.
-  // We optimistically assume the support is available. If later it turns out
-  // not true, VM will automatically switch to use regular page size.
-  return true;
 }
 
 #ifndef SHM_HUGETLB
@@ -2922,7 +3001,7 @@
 char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
-  assert(UseLargePages, "only for large pages");
+  assert(UseLargePages && UseSHM, "only for SHM large pages");
 
   key_t key = IPC_PRIVATE;
   char *addr;
@@ -2989,16 +3068,15 @@
   return _large_page_size;
 }
 
-// Linux does not support anonymous mmap with large page memory. The only way
-// to reserve large page memory without file backing is through SysV shared
-// memory API. The entire memory region is committed and pinned upfront.
-// Hopefully this will change in the future...
+// HugeTLBFS allows the application to commit large page memory on demand;
+// with SysV SHM the entire memory region must be allocated as shared
+// memory.
 bool os::can_commit_large_page_memory() {
-  return false;
+  return UseHugeTLBFS;
 }
 
 bool os::can_execute_large_page_memory() {
-  return false;
+  return UseHugeTLBFS;
 }
 
 // Reserve memory at an arbitrary address, only if that area is
@@ -4045,7 +4123,7 @@
 #endif
   }
 
-  FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
+  os::large_page_init();
 
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
@@ -4097,6 +4175,23 @@
         UseNUMA = false;
       }
     }
+    // With SHM large pages we cannot uncommit a page, so there's no way
+    // we can make the adaptive lgrp chunk resizing work. If the user specified
+    // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+    // disable adaptive resizing.
+    if (UseNUMA && UseLargePages && UseSHM) {
+      if (!FLAG_IS_DEFAULT(UseNUMA)) {
+        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
+          UseLargePages = false;
+        } else {
+          warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
+          UseAdaptiveSizePolicy = false;
+          UseAdaptiveNUMAChunkSizing = false;
+        }
+      } else {
+        UseNUMA = false;
+      }
+    }
     if (!UseNUMA && ForceNUMA) {
       UseNUMA = true;
     }
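
hugetlbfs_sanity_check above probes whether MAP_HUGETLB mappings actually work by mapping one page and scanning /proc/self/maps for a hugepage entry at that address. A standalone sketch of the same probe (Linux only; the MAP_HUGETLB fallback definition is the same trick the hunk above uses):

#include <cstdio>
#include <cstring>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000   // not defined in old headers
#endif

// Try to map one large page anonymously, then confirm via /proc/self/maps
// that the mapping really is backed by huge pages. page_size would be the
// configured large page size (e.g. 2 MB).
bool hugetlb_mapping_works(size_t page_size) {
  void* p = ::mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
  if (p == MAP_FAILED) return false;

  bool is_huge = false;
  if (FILE* fp = std::fopen("/proc/self/maps", "r")) {
    char line[257];
    while (std::fgets(line, sizeof(line), fp)) {
      unsigned long start = 0;
      if (std::sscanf(line, "%lx-%*x", &start) == 1 &&
          start == (unsigned long)p &&
          std::strstr(line, "hugepage")) {   // hugetlb mappings carry this marker
        is_huge = true;
        break;
      }
    }
    std::fclose(fp);
  }
  ::munmap(p, page_size);
  return is_huge;
}
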
--- a/src/os/linux/vm/os_linux.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os/linux/vm/os_linux.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -86,6 +86,9 @@
 
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
+
+  static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
+
  public:
   static void init_thread_fpu_state();
   static int  get_fpu_control_word();
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2826,7 +2826,9 @@
 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
-  Solaris::set_mpss_range(addr, bytes, alignment_hint);
+  if (UseLargePages && UseMPSS) {
+    Solaris::set_mpss_range(addr, bytes, alignment_hint);
+  }
 }
 
 // Tell the OS to make the range local to the first-touching LWP
@@ -3334,11 +3336,11 @@
   return true;
 }
 
-bool os::large_page_init() {
+void os::large_page_init() {
   if (!UseLargePages) {
     UseISM = false;
     UseMPSS = false;
-    return false;
+    return;
   }
 
   // print a warning if any large page related flag is specified on command line
@@ -3359,7 +3361,6 @@
             Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
 
   UseLargePages = UseISM || UseMPSS;
-  return UseLargePages;
 }
 
 bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
@@ -4990,7 +4991,7 @@
 #endif
 }
 
-  FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
+  os::large_page_init();
 
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
@@ -5044,6 +5045,20 @@
         UseNUMA = false;
       }
     }
+    // ISM is not compatible with the NUMA allocator - it always allocates
+    // pages round-robin across the lgroups.
+    if (UseNUMA && UseLargePages && UseISM) {
+      if (!FLAG_IS_DEFAULT(UseNUMA)) {
+        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
+          UseLargePages = false;
+        } else {
+          warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
+          UseNUMA = false;
+        }
+      } else {
+        UseNUMA = false;
+      }
+    }
     if (!UseNUMA && ForceNUMA) {
       UseNUMA = true;
     }
--- a/src/os/windows/vm/decoder_windows.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os/windows/vm/decoder_windows.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "prims/jvm.h"
+#include "runtime/os.hpp"
 #include "utilities/decoder.hpp"
 
 HMODULE                   Decoder::_dbghelp_handle = NULL;
@@ -35,7 +36,7 @@
   if (!_initialized) {
     _initialized = true;
 
-    HMODULE handle = ::LoadLibrary("dbghelp.dll");
+    HINSTANCE handle = os::win32::load_Windows_dll("dbghelp.dll", NULL, 0);
     if (!handle) {
       _decoder_status = helper_not_found;
         return;
--- a/src/os/windows/vm/jvm_windows.h	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os/windows/vm/jvm_windows.h	Wed Jul 27 17:32:44 2011 -0700
@@ -30,10 +30,33 @@
  * JNI conversion, which should be sorted out later.
  */
 
+// JDK7 requires VS2010
+#if _MSC_VER >= 1600
+// JDK7 minimum platform requirement: Windows XP
+#if _WIN32_WINNT < 0x0501
+#undef _WIN32_WINNT
+#define _WIN32_WINNT  0x0501
+#endif
+#endif
+
 #include <windows.h>
-// #include <windef.h>
-// #include <winbase.h>
 
+#if _MSC_VER <= 1200
+// Psapi.h doesn't come with Visual Studio 6; it can be downloaded as part of
+// the Platform SDK from Microsoft.  Here are the definitions copied from Psapi.h
+typedef struct _MODULEINFO {
+    LPVOID lpBaseOfDll;
+    DWORD SizeOfImage;
+    LPVOID EntryPoint;
+} MODULEINFO, *LPMODULEINFO;
+
+#else
+#include <Psapi.h>
+#endif
+
+
+
+#include <Tlhelp32.h>
 
 // #include "jni.h"
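
The os_windows.cpp hunks that follow replace ad-hoc LoadLibrary/GetProcAddress lookups with centralized wrappers such as os::Kernel32Dll and os::PSApiDll. A minimal sketch of that resolve-once-and-test pattern; the function and helper names here are invented, not the actual wrappers:

#include <windows.h>

typedef void (WINAPI *GetNativeSystemInfo_fn)(LPSYSTEM_INFO);

// Resolve the optional entry point once; a NULL result means "not available".
static GetNativeSystemInfo_fn lookup_get_native_system_info() {
  HMODULE k32 = ::GetModuleHandleA("kernel32.dll");
  return k32 ? (GetNativeSystemInfo_fn)::GetProcAddress(k32, "GetNativeSystemInfo")
             : NULL;
}

// Use the native variant when the OS provides it, otherwise fall back.
void query_system_info(SYSTEM_INFO* si) {
  static GetNativeSystemInfo_fn fn = lookup_get_native_system_info();
  if (fn != NULL) {
    fn(si);
  } else {
    ::GetSystemInfo(si);
  }
}
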
 
--- a/src/os/windows/vm/os_windows.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -98,7 +98,6 @@
 #include <imagehlp.h>             // For os::dll_address_to_function_name
 
 /* for enumerating dll libraries */
-#include <tlhelp32.h>
 #include <vdmdbg.h>
 
 // for timer info max values which include all bits
@@ -241,11 +240,11 @@
     /* Win32 library search order (See the documentation for LoadLibrary):
      *
      * 1. The directory from which application is loaded.
-     * 2. The current directory
-     * 3. The system wide Java Extensions directory (Java only)
-     * 4. System directory (GetSystemDirectory)
-     * 5. Windows directory (GetWindowsDirectory)
-     * 6. The PATH environment variable
+     * 2. The system wide Java Extensions directory (Java only)
+     * 3. System directory (GetSystemDirectory)
+     * 4. Windows directory (GetWindowsDirectory)
+     * 5. The PATH environment variable
+     * 6. The current directory
      */
 
     char *library_path;
@@ -261,8 +260,6 @@
     *(strrchr(tmp, '\\')) = '\0';
     strcat(library_path, tmp);
 
-    strcat(library_path, ";.");
-
     GetWindowsDirectory(tmp, sizeof(tmp));
     strcat(library_path, ";");
     strcat(library_path, tmp);
@@ -281,6 +278,8 @@
         strcat(library_path, path_str);
     }
 
+    strcat(library_path, ";.");
+
     Arguments::set_library_path(library_path);
     FREE_C_HEAP_ARRAY(char, library_path);
   }
@@ -920,6 +919,8 @@
   HINSTANCE dbghelp;
   EXCEPTION_POINTERS ep;
   MINIDUMP_EXCEPTION_INFORMATION mei;
+  MINIDUMP_EXCEPTION_INFORMATION* pmei;
+
   HANDLE hProcess = GetCurrentProcess();
   DWORD processId = GetCurrentProcessId();
   HANDLE dumpFile;
@@ -936,7 +937,7 @@
     return;
   }
 
-  dbghelp = LoadLibrary("DBGHELP.DLL");
+  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
 
   if (dbghelp == NULL) {
     VMError::report_coredump_status("Failed to load dbghelp.dll", false);
@@ -970,17 +971,22 @@
     VMError::report_coredump_status("Failed to create file for dumping", false);
     return;
   }
-
-  ep.ContextRecord = (PCONTEXT) contextRecord;
-  ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;
-
-  mei.ThreadId = GetCurrentThreadId();
-  mei.ExceptionPointers = &ep;
+  if (exceptionRecord != NULL && contextRecord != NULL) {
+    ep.ContextRecord = (PCONTEXT) contextRecord;
+    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;
+
+    mei.ThreadId = GetCurrentThreadId();
+    mei.ExceptionPointers = &ep;
+    pmei = &mei;
+  } else {
+    pmei = NULL;
+  }
+
 
   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
   // the dump types we really want. If the first call fails, let's fall back to just using MiniDumpWithFullMemory then.
-  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, &mei, NULL, NULL) == false &&
-      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, &mei, NULL, NULL) == false) {
+  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
+      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
     VMError::report_coredump_status("Call to MiniDumpWriteDump() failed", false);
   } else {
     VMError::report_coredump_status(buffer, true);
@@ -1196,70 +1202,6 @@
 
 //-----------------------------------------------------------
 // Helper functions for fatal error handler
-
-// The following library functions are resolved dynamically at runtime:
-
-// PSAPI functions, for Windows NT, 2000, XP
-
-// psapi.h doesn't come with Visual Studio 6; it can be downloaded as Platform
-// SDK from Microsoft.  Here are the definitions copied from psapi.h
-typedef struct _MODULEINFO {
-    LPVOID lpBaseOfDll;
-    DWORD SizeOfImage;
-    LPVOID EntryPoint;
-} MODULEINFO, *LPMODULEINFO;
-
-static BOOL  (WINAPI *_EnumProcessModules)  ( HANDLE, HMODULE *, DWORD, LPDWORD );
-static DWORD (WINAPI *_GetModuleFileNameEx) ( HANDLE, HMODULE, LPTSTR, DWORD );
-static BOOL  (WINAPI *_GetModuleInformation)( HANDLE, HMODULE, LPMODULEINFO, DWORD );
-
-// ToolHelp Functions, for Windows 95, 98 and ME
-
-static HANDLE(WINAPI *_CreateToolhelp32Snapshot)(DWORD,DWORD) ;
-static BOOL  (WINAPI *_Module32First)           (HANDLE,LPMODULEENTRY32) ;
-static BOOL  (WINAPI *_Module32Next)            (HANDLE,LPMODULEENTRY32) ;
-
-bool _has_psapi;
-bool _psapi_init = false;
-bool _has_toolhelp;
-
-static bool _init_psapi() {
-  HINSTANCE psapi = LoadLibrary( "PSAPI.DLL" ) ;
-  if( psapi == NULL ) return false ;
-
-  _EnumProcessModules = CAST_TO_FN_PTR(
-      BOOL(WINAPI *)(HANDLE, HMODULE *, DWORD, LPDWORD),
-      GetProcAddress(psapi, "EnumProcessModules")) ;
-  _GetModuleFileNameEx = CAST_TO_FN_PTR(
-      DWORD (WINAPI *)(HANDLE, HMODULE, LPTSTR, DWORD),
-      GetProcAddress(psapi, "GetModuleFileNameExA"));
-  _GetModuleInformation = CAST_TO_FN_PTR(
-      BOOL (WINAPI *)(HANDLE, HMODULE, LPMODULEINFO, DWORD),
-      GetProcAddress(psapi, "GetModuleInformation"));
-
-  _has_psapi = (_EnumProcessModules && _GetModuleFileNameEx && _GetModuleInformation);
-  _psapi_init = true;
-  return _has_psapi;
-}
-
-static bool _init_toolhelp() {
-  HINSTANCE kernel32 = LoadLibrary("Kernel32.DLL") ;
-  if (kernel32 == NULL) return false ;
-
-  _CreateToolhelp32Snapshot = CAST_TO_FN_PTR(
-      HANDLE(WINAPI *)(DWORD,DWORD),
-      GetProcAddress(kernel32, "CreateToolhelp32Snapshot"));
-  _Module32First = CAST_TO_FN_PTR(
-      BOOL(WINAPI *)(HANDLE,LPMODULEENTRY32),
-      GetProcAddress(kernel32, "Module32First" ));
-  _Module32Next = CAST_TO_FN_PTR(
-      BOOL(WINAPI *)(HANDLE,LPMODULEENTRY32),
-      GetProcAddress(kernel32, "Module32Next" ));
-
-  _has_toolhelp = (_CreateToolhelp32Snapshot && _Module32First && _Module32Next);
-  return _has_toolhelp;
-}
-
 #ifdef _WIN64
 // Helper routine which returns true if address in
 // within the NTDLL address space.
@@ -1271,7 +1213,7 @@
 
   hmod = GetModuleHandle("NTDLL.DLL");
   if ( hmod == NULL ) return false;
-  if ( !_GetModuleInformation( GetCurrentProcess(), hmod,
+  if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                &minfo, sizeof(MODULEINFO)) )
     return false;
 
@@ -1310,14 +1252,16 @@
   static char filename[ MAX_PATH ];
   int         result = 0;
 
-  if (!_has_psapi && (_psapi_init || !_init_psapi())) return 0;
+  if (!os::PSApiDll::PSApiAvailable()) {
+    return 0;
+  }
 
   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                          FALSE, pid ) ;
   if (hProcess == NULL) return 0;
 
   DWORD size_needed;
-  if (!_EnumProcessModules(hProcess, modules,
+  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                            sizeof(modules), &size_needed)) {
       CloseHandle( hProcess );
       return 0;
@@ -1328,13 +1272,13 @@
 
   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
     // Get Full pathname:
-    if(!_GetModuleFileNameEx(hProcess, modules[i],
+    if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                              filename, sizeof(filename))) {
         filename[0] = '\0';
     }
 
     MODULEINFO modinfo;
-    if (!_GetModuleInformation(hProcess, modules[i],
+    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                &modinfo, sizeof(modinfo))) {
         modinfo.lpBaseOfDll = NULL;
         modinfo.SizeOfImage = 0;
@@ -1358,17 +1302,19 @@
   static MODULEENTRY32  modentry ;
   int                   result = 0;
 
-  if (!_has_toolhelp) return 0;
+  if (!os::Kernel32Dll::HelpToolsAvailable()) {
+    return 0;
+  }
 
   // Get a handle to a Toolhelp snapshot of the system
-  hSnapShot = _CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
+  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
   if( hSnapShot == INVALID_HANDLE_VALUE ) {
       return FALSE ;
   }
 
   // iterate through all modules
   modentry.dwSize = sizeof(MODULEENTRY32) ;
-  bool not_done = _Module32First( hSnapShot, &modentry ) != 0;
+  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;
 
   while( not_done ) {
     // invoke the callback
@@ -1377,7 +1323,7 @@
     if (result) break;
 
     modentry.dwSize = sizeof(MODULEENTRY32) ;
-    not_done = _Module32Next( hSnapShot, &modentry ) != 0;
+    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
   }
 
   CloseHandle(hSnapShot);
@@ -1623,10 +1569,6 @@
    enumerate_modules(pid, _print_module, (void *)st);
 }
 
-// function pointer to Windows API "GetNativeSystemInfo".
-typedef void (WINAPI *GetNativeSystemInfo_func_type)(LPSYSTEM_INFO);
-static GetNativeSystemInfo_func_type _GetNativeSystemInfo;
-
 void os::print_os_info(outputStream* st) {
   st->print("OS:");
 
@@ -1653,17 +1595,10 @@
       // find out whether we are running on 64 bit processor or not.
       SYSTEM_INFO si;
       ZeroMemory(&si, sizeof(SYSTEM_INFO));
-      // Check to see if _GetNativeSystemInfo has been initialized.
-      if (_GetNativeSystemInfo == NULL) {
-        HMODULE hKernel32 = GetModuleHandle(TEXT("kernel32.dll"));
-        _GetNativeSystemInfo =
-            CAST_TO_FN_PTR(GetNativeSystemInfo_func_type,
-                           GetProcAddress(hKernel32,
-                                          "GetNativeSystemInfo"));
-        if (_GetNativeSystemInfo == NULL)
+      if (!os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
           GetSystemInfo(&si);
       } else {
-        _GetNativeSystemInfo(&si);
+        os::Kernel32Dll::GetNativeSystemInfo(&si);
       }
       if (os_vers == 5002) {
         if (osvi.wProductType == VER_NT_WORKSTATION &&
@@ -2671,47 +2606,14 @@
 #define MEM_LARGE_PAGES 0x20000000
 #endif
 
-// GetLargePageMinimum is only available on Windows 2003. The other functions
-// are available on NT but not on Windows 98/Me. We have to resolve them at
-// runtime.
-typedef SIZE_T (WINAPI *GetLargePageMinimum_func_type) (void);
-typedef BOOL (WINAPI *AdjustTokenPrivileges_func_type)
-             (HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
-typedef BOOL (WINAPI *OpenProcessToken_func_type) (HANDLE, DWORD, PHANDLE);
-typedef BOOL (WINAPI *LookupPrivilegeValue_func_type) (LPCTSTR, LPCTSTR, PLUID);
-
-static GetLargePageMinimum_func_type   _GetLargePageMinimum;
-static AdjustTokenPrivileges_func_type _AdjustTokenPrivileges;
-static OpenProcessToken_func_type      _OpenProcessToken;
-static LookupPrivilegeValue_func_type  _LookupPrivilegeValue;
-
-static HINSTANCE _kernel32;
-static HINSTANCE _advapi32;
 static HANDLE    _hProcess;
 static HANDLE    _hToken;
 
 static size_t _large_page_size = 0;
 
 static bool resolve_functions_for_large_page_init() {
-  _kernel32 = LoadLibrary("kernel32.dll");
-  if (_kernel32 == NULL) return false;
-
-  _GetLargePageMinimum   = CAST_TO_FN_PTR(GetLargePageMinimum_func_type,
-                            GetProcAddress(_kernel32, "GetLargePageMinimum"));
-  if (_GetLargePageMinimum == NULL) return false;
-
-  _advapi32 = LoadLibrary("advapi32.dll");
-  if (_advapi32 == NULL) return false;
-
-  _AdjustTokenPrivileges = CAST_TO_FN_PTR(AdjustTokenPrivileges_func_type,
-                            GetProcAddress(_advapi32, "AdjustTokenPrivileges"));
-  _OpenProcessToken      = CAST_TO_FN_PTR(OpenProcessToken_func_type,
-                            GetProcAddress(_advapi32, "OpenProcessToken"));
-  _LookupPrivilegeValue  = CAST_TO_FN_PTR(LookupPrivilegeValue_func_type,
-                            GetProcAddress(_advapi32, "LookupPrivilegeValueA"));
-  return _AdjustTokenPrivileges != NULL &&
-         _OpenProcessToken      != NULL &&
-         _LookupPrivilegeValue  != NULL;
+  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
+    os::Advapi32Dll::AdvapiAvailable();
 }
 
 static bool request_lock_memory_privilege() {
@@ -2720,8 +2622,8 @@
 
   LUID luid;
   if (_hProcess != NULL &&
-      _OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
-      _LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
+      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
+      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
 
     TOKEN_PRIVILEGES tp;
     tp.PrivilegeCount = 1;
@@ -2730,7 +2632,7 @@
 
     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
     // privilege. Check GetLastError() too. See MSDN document.
-    if (_AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
+    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
         (GetLastError() == ERROR_SUCCESS)) {
       return true;
     }
@@ -2740,22 +2642,14 @@
 }
 
 static void cleanup_after_large_page_init() {
-  _GetLargePageMinimum = NULL;
-  _AdjustTokenPrivileges = NULL;
-  _OpenProcessToken = NULL;
-  _LookupPrivilegeValue = NULL;
-  if (_kernel32) FreeLibrary(_kernel32);
-  _kernel32 = NULL;
-  if (_advapi32) FreeLibrary(_advapi32);
-  _advapi32 = NULL;
   if (_hProcess) CloseHandle(_hProcess);
   _hProcess = NULL;
   if (_hToken) CloseHandle(_hToken);
   _hToken = NULL;
 }
 
-bool os::large_page_init() {
-  if (!UseLargePages) return false;
+void os::large_page_init() {
+  if (!UseLargePages) return;
 
   // print a warning if any large page related flag is specified on command line
   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
@@ -2765,7 +2659,7 @@
 # define WARN(msg) if (warn_on_failure) { warning(msg); }
   if (resolve_functions_for_large_page_init()) {
     if (request_lock_memory_privilege()) {
-      size_t s = _GetLargePageMinimum();
+      size_t s = os::Kernel32Dll::GetLargePageMinimum();
       if (s) {
 #if defined(IA32) || defined(AMD64)
         if (s > 4*M || LargePageSizeInBytes > 4*M) {
@@ -2800,7 +2694,7 @@
   }
 
   cleanup_after_large_page_init();
-  return success;
+  UseLargePages = success;
 }
 
 // On win32, one cannot release just a part of reserved memory, it's an
@@ -3178,18 +3072,10 @@
 os::YieldResult os::NakedYield() {
   // Use either SwitchToThread() or Sleep(0)
   // Consider passing back the return value from SwitchToThread().
-  // We use GetProcAddress() as ancient Win9X versions of windows doen't support SwitchToThread.
-  // In that case we revert to Sleep(0).
-  static volatile STTSignature stt = (STTSignature) 1 ;
-
-  if (stt == ((STTSignature) 1)) {
-    stt = (STTSignature) ::GetProcAddress (LoadLibrary ("Kernel32.dll"), "SwitchToThread") ;
-    // It's OK if threads race during initialization as the operation above is idempotent.
-  }
-  if (stt != NULL) {
-    return (*stt)() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
+  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
+    return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
   } else {
-    Sleep (0) ;
+    Sleep(0);
   }
   return os::YIELD_UNKNOWN ;
 }
@@ -3413,6 +3299,44 @@
 }
 
 
+HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
+  char path[MAX_PATH];
+  DWORD size;
+  DWORD pathLen = (DWORD)sizeof(path);
+  HINSTANCE result = NULL;
+
+  // only allow library name without path component
+  assert(strchr(name, '\\') == NULL, "path not allowed");
+  assert(strchr(name, ':') == NULL, "path not allowed");
+  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
+    jio_snprintf(ebuf, ebuflen,
+      "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
+    return NULL;
+  }
+
+  // search system directory
+  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
+    strcat(path, "\\");
+    strcat(path, name);
+    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
+      return result;
+    }
+  }
+
+  // try Windows directory
+  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
+    strcat(path, "\\");
+    strcat(path, name);
+    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
+      return result;
+    }
+  }
+
+  jio_snprintf(ebuf, ebuflen,
+    "os::win32::load_windows_dll() cannot load %s from system directories.", name);
+  return NULL;
+}
+
 void os::win32::setmode_streams() {
   _setmode(_fileno(stdin), _O_BINARY);
   _setmode(_fileno(stdout), _O_BINARY);
@@ -3553,7 +3477,7 @@
 #endif
 }
 
-  FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
+  os::large_page_init();
 
   // Setup Windows Exceptions
 
@@ -3646,10 +3570,6 @@
     }
   }
 
-  // initialize PSAPI or ToolHelp for fatal error handler
-  if (win32::is_nt()) _init_psapi();
-  else _init_toolhelp();
-
 #ifndef _WIN64
   // Print something if NX is enabled (win32 on AMD64)
   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
@@ -4696,12 +4616,6 @@
 // We don't build a headless jre for Windows
 bool os::is_headless_jre() { return false; }
 
-// OS_SocketInterface
-// Not used on Windows
-
-// OS_SocketInterface
-typedef struct hostent * (PASCAL FAR *ws2_ifn_ptr_t)(...);
-ws2_ifn_ptr_t *get_host_by_name_fn = NULL;
 
 typedef CRITICAL_SECTION mutex_t;
 #define mutexInit(m)    InitializeCriticalSection(m)
@@ -4709,58 +4623,36 @@
 #define mutexLock(m)    EnterCriticalSection(m)
 #define mutexUnlock(m)  LeaveCriticalSection(m)
 
-static bool sockfnptrs_initialized = FALSE;
+static bool sock_initialized = FALSE;
 static mutex_t sockFnTableMutex;
 
-/* is Winsock2 loaded? better to be explicit than to rely on sockfnptrs */
-static bool winsock2Available = FALSE;
-
-
-static void initSockFnTable() {
-  int (PASCAL FAR* WSAStartupPtr)(WORD, LPWSADATA);
+static void initSock() {
   WSADATA wsadata;
 
+  if (!os::WinSock2Dll::WinSock2Available()) {
+    jio_fprintf(stderr, "Could not load Winsock 2 (error: %d)\n",
+      ::GetLastError());
+    return;
+  }
+  if (sock_initialized == TRUE) return;
+
   ::mutexInit(&sockFnTableMutex);
   ::mutexLock(&sockFnTableMutex);
-
-  if (sockfnptrs_initialized == FALSE) {
-        HMODULE hWinsock;
-
-          /* try to load Winsock2, and if that fails, load Winsock */
-    hWinsock = ::LoadLibrary("ws2_32.dll");
-
-    if (hWinsock == NULL) {
-      jio_fprintf(stderr, "Could not load Winsock 2 (error: %d)\n",
-      ::GetLastError());
-      return;
-    }
-
-    /* If we loaded a DLL, then we might as well initialize it.  */
-    WSAStartupPtr = (int (PASCAL FAR *)(WORD, LPWSADATA))
-    ::GetProcAddress(hWinsock, "WSAStartup");
-
-    if (WSAStartupPtr(MAKEWORD(1,1), &wsadata) != 0) {
-        jio_fprintf(stderr, "Could not initialize Winsock\n");
-    }
-
-    get_host_by_name_fn
-        = (ws2_ifn_ptr_t*) GetProcAddress(hWinsock, "gethostbyname");
+  if (os::WinSock2Dll::WSAStartup(MAKEWORD(1,1), &wsadata) != 0) {
+      jio_fprintf(stderr, "Could not initialize Winsock\n");
   }
-
-  assert(get_host_by_name_fn != NULL,
-    "gethostbyname function not found");
-  sockfnptrs_initialized = TRUE;
+  sock_initialized = TRUE;
   ::mutexUnlock(&sockFnTableMutex);
 }
 
 struct hostent*  os::get_host_by_name(char* name) {
-  if (!sockfnptrs_initialized) {
-    initSockFnTable();
+  if (!sock_initialized) {
+    initSock();
   }
-
-  assert(sockfnptrs_initialized == TRUE && get_host_by_name_fn != NULL,
-    "sockfnptrs is not initialized or pointer to gethostbyname function is NULL");
-  return (*get_host_by_name_fn)(name);
+  if (!os::WinSock2Dll::WinSock2Available()) {
+    return NULL;
+  }
+  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
 }
 
 
@@ -4857,3 +4749,367 @@
   ShouldNotReachHere();
   return 0;
 }
+
+
+// Kernel32 API
+typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
+GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
+BOOL                        os::Kernel32Dll::initialized = FALSE;
+SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
+  assert(initialized && _GetLargePageMinimum != NULL,
+    "GetLargePageMinimumAvailable() not yet called");
+  return _GetLargePageMinimum();
+}
+
+BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
+  if (!initialized) {
+    initialize();
+  }
+  return _GetLargePageMinimum != NULL;
+}
+
+
+#ifndef JDK6_OR_EARLIER
+
+void os::Kernel32Dll::initialize() {
+  if (!initialized) {
+    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
+    assert(handle != NULL, "Just check");
+    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
+    initialized = TRUE;
+  }
+}
+
+
+// Kernel32 API
+inline BOOL os::Kernel32Dll::SwitchToThread() {
+  return ::SwitchToThread();
+}
+
+inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
+  return true;
+}
+
+  // Help tools
+inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
+  return true;
+}
+
+inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
+  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
+}
+
+inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
+  return ::Module32First(hSnapshot, lpme);
+}
+
+inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
+  return ::Module32Next(hSnapshot, lpme);
+}
+
+
+inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
+  return true;
+}
+
+inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
+  ::GetNativeSystemInfo(lpSystemInfo);
+}
+
+// PSAPI API
+inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
+  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
+}
+
+inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
+  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
+}
+
+inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
+  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
+}
+
+inline BOOL os::PSApiDll::PSApiAvailable() {
+  return true;
+}
+
+
+// WinSock2 API
+inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
+  return ::WSAStartup(wVersionRequested, lpWSAData);
+}
+
+inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
+  return ::gethostbyname(name);
+}
+
+inline BOOL os::WinSock2Dll::WinSock2Available() {
+  return true;
+}
+
+// Advapi API
+inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
+   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
+   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
+     return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
+       BufferLength, PreviousState, ReturnLength);
+}
+
+inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
+  PHANDLE TokenHandle) {
+    return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
+}
+
+inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
+  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
+}
+
+inline BOOL os::Advapi32Dll::AdvapiAvailable() {
+  return true;
+}
+
+#else
+// Kernel32 API
+typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
+typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
+typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
+typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
+typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);
+
+SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
+CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
+Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
+Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
+GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;
+
+void os::Kernel32Dll::initialize() {
+  if (!initialized) {
+    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
+    assert(handle != NULL, "Just check");
+
+    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
+    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
+    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
+      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
+    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
+    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
+    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
+
+    initialized = TRUE;
+  }
+}
+
+BOOL os::Kernel32Dll::SwitchToThread() {
+  assert(initialized && _SwitchToThread != NULL,
+    "SwitchToThreadAvailable() not yet called");
+  return _SwitchToThread();
+}
+
+
+BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
+  if (!initialized) {
+    initialize();
+  }
+  return _SwitchToThread != NULL;
+}
+
+// Help tools
+BOOL os::Kernel32Dll::HelpToolsAvailable() {
+  if (!initialized) {
+    initialize();
+  }
+  return _CreateToolhelp32Snapshot != NULL &&
+         _Module32First != NULL &&
+         _Module32Next != NULL;
+}
+
+HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
+  assert(initialized && _CreateToolhelp32Snapshot != NULL,
+    "HelpToolsAvailable() not yet called");
+
+  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
+}
+
+BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
+  assert(initialized && _Module32First != NULL,
+    "HelpToolsAvailable() not yet called");
+
+  return _Module32First(hSnapshot, lpme);
+}
+
+BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
+  assert(initialized && _Module32Next != NULL,
+    "HelpToolsAvailable() not yet called");
+
+  return _Module32Next(hSnapshot, lpme);
+}
+
+
+BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
+  if (!initialized) {
+    initialize();
+  }
+  return _GetNativeSystemInfo != NULL;
+}
+
+void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
+  assert(initialized && _GetNativeSystemInfo != NULL,
+    "GetNativeSystemInfoAvailable() not yet called");
+
+  _GetNativeSystemInfo(lpSystemInfo);
+}
+
+// PSAPI API
+
+
+typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
+typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);
+typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
+
+EnumProcessModules_Fn   os::PSApiDll::_EnumProcessModules = NULL;
+GetModuleFileNameEx_Fn  os::PSApiDll::_GetModuleFileNameEx = NULL;
+GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
+BOOL                    os::PSApiDll::initialized = FALSE;
+
+void os::PSApiDll::initialize() {
+  if (!initialized) {
+    HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
+    if (handle != NULL) {
+      _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
+        "EnumProcessModules");
+      _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
+        "GetModuleFileNameExA");
+      _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
+        "GetModuleInformation");
+    }
+    initialized = TRUE;
+  }
+}
+
+
+
+BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
+  assert(initialized && _EnumProcessModules != NULL,
+    "PSApiAvailable() not yet called");
+  return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
+}
+
+DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
+  assert(initialized && _GetModuleFileNameEx != NULL,
+    "PSApiAvailable() not yet called");
+  return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
+}
+
+BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
+  assert(initialized && _GetModuleInformation != NULL,
+    "PSApiAvailable() not yet called");
+  return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
+}
+
+BOOL os::PSApiDll::PSApiAvailable() {
+  if (!initialized) {
+    initialize();
+  }
+  return _EnumProcessModules != NULL &&
+    _GetModuleFileNameEx != NULL &&
+    _GetModuleInformation != NULL;
+}
+
+
+// WinSock2 API
+typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
+typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);
+
+WSAStartup_Fn    os::WinSock2Dll::_WSAStartup = NULL;
+gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
+BOOL             os::WinSock2Dll::initialized = FALSE;
+
+void os::WinSock2Dll::initialize() {
+  if (!initialized) {
+    HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
+    if (handle != NULL) {
+      _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
+      _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
+    }
+    initialized = TRUE;
+  }
+}
+
+
+BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
+  assert(initialized && _WSAStartup != NULL,
+    "WinSock2Available() not yet called");
+  return _WSAStartup(wVersionRequested, lpWSAData);
+}
+
+struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
+  assert(initialized && _gethostbyname != NULL,
+    "WinSock2Available() not yet called");
+  return _gethostbyname(name);
+}
+
+BOOL os::WinSock2Dll::WinSock2Available() {
+  if (!initialized) {
+    initialize();
+  }
+  return _WSAStartup != NULL &&
+    _gethostbyname != NULL;
+}
+
+typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
+typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
+typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);
+
+AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
+OpenProcessToken_Fn      os::Advapi32Dll::_OpenProcessToken = NULL;
+LookupPrivilegeValue_Fn  os::Advapi32Dll::_LookupPrivilegeValue = NULL;
+BOOL                     os::Advapi32Dll::initialized = FALSE;
+
+void os::Advapi32Dll::initialize() {
+  if (!initialized) {
+    HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
+    if (handle != NULL) {
+      _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
+        "AdjustTokenPrivileges");
+      _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
+        "OpenProcessToken");
+      _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
+        "LookupPrivilegeValue");
+    }
+    initialized = TRUE;
+  }
+}
+
+BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
+   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
+   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
+   assert(initialized && _AdjustTokenPrivileges != NULL,
+     "AdvapiAvailable() not yet called");
+   return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
+       BufferLength, PreviousState, ReturnLength);
+}
+
+BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
+  PHANDLE TokenHandle) {
+   assert(initialized && _OpenProcessToken != NULL,
+     "AdvapiAvailable() not yet called");
+    return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
+}
+
+BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
+   assert(initialized && _LookupPrivilegeValue != NULL,
+     "AdvapiAvailable() not yet called");
+  return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
+}
+
+BOOL os::Advapi32Dll::AdvapiAvailable() {
+  if (!initialized) {
+    initialize();
+  }
+  return _AdjustTokenPrivileges != NULL &&
+    _OpenProcessToken != NULL &&
+    _LookupPrivilegeValue != NULL;
+}
+
+#endif
+
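The bulk of the os_windows.cpp change above replaces ad-hoc LoadLibrary/GetProcAddress lookups with small wrapper classes that resolve each entry point once, expose an availability query, and assert in the call path that the query ran first. A standalone sketch of that shape, with illustrative names rather than HotSpot's:

    #include <windows.h>
    #include <cassert>

    // Hypothetical single-function wrapper: resolve once, answer Available(),
    // and assert that callers checked availability before calling.
    typedef BOOL (WINAPI* SwitchToThread_Fn)(void);

    class SwitchToThreadShim {
     public:
      static bool Available() {
        if (!_initialized) initialize();
        return _fn != NULL;
      }
      static BOOL Call() {
        assert(_initialized && _fn != NULL && "Available() not yet called");
        return _fn();
      }
     private:
      static void initialize() {
        // kernel32.dll is always mapped into the process, so GetModuleHandle suffices.
        HMODULE handle = ::GetModuleHandleA("kernel32.dll");
        if (handle != NULL) {
          _fn = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
        }
        _initialized = true;
      }
      static SwitchToThread_Fn _fn;
      static bool _initialized;
    };

    SwitchToThread_Fn SwitchToThreadShim::_fn = NULL;
    bool SwitchToThreadShim::_initialized = false;

    // A call site then reads like os::NakedYield() above:
    //   if (SwitchToThreadShim::Available()) SwitchToThreadShim::Call(); else Sleep(0);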
--- a/src/os/windows/vm/os_windows.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os/windows/vm/os_windows.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -24,7 +24,6 @@
 
 #ifndef OS_WINDOWS_VM_OS_WINDOWS_HPP
 #define OS_WINDOWS_VM_OS_WINDOWS_HPP
-
 // Win32_OS defines the interface to windows operating systems
 
 class win32 {
@@ -55,6 +54,9 @@
   static julong available_memory();
   static julong physical_memory() { return _physical_memory; }
 
+  // load a DLL from the Windows system directory or the Windows directory
+  static HINSTANCE load_Windows_dll(const char* name, char *ebuf, int ebuflen);
+
  public:
   // Generic interface:
 
@@ -132,4 +134,100 @@
 
 } ;
 
+// JDK7 requires VS2010
+#if _MSC_VER < 1600
+#define JDK6_OR_EARLIER 1
+#endif
+
+
+
+class WinSock2Dll: AllStatic {
+public:
+  static BOOL WSAStartup(WORD, LPWSADATA);
+  static struct hostent* gethostbyname(const char *name);
+  static BOOL WinSock2Available();
+#ifdef JDK6_OR_EARLIER
+private:
+  static int (PASCAL FAR* _WSAStartup)(WORD, LPWSADATA);
+  static struct hostent *(PASCAL FAR *_gethostbyname)(...);
+  static BOOL initialized;
+
+  static void initialize();
+#endif
+};
+
+class Kernel32Dll: AllStatic {
+public:
+  static BOOL SwitchToThread();
+  static SIZE_T GetLargePageMinimum();
+
+  static BOOL SwitchToThreadAvailable();
+  static BOOL GetLargePageMinimumAvailable();
+
+  // Help tools
+  static BOOL HelpToolsAvailable();
+  static HANDLE CreateToolhelp32Snapshot(DWORD,DWORD);
+  static BOOL Module32First(HANDLE,LPMODULEENTRY32);
+  static BOOL Module32Next(HANDLE,LPMODULEENTRY32);
+
+  static BOOL GetNativeSystemInfoAvailable();
+  static void GetNativeSystemInfo(LPSYSTEM_INFO);
+
+private:
+  // GetLargePageMinimum available on Windows Vista/Windows Server 2003
+  // and later
+  static SIZE_T (WINAPI *_GetLargePageMinimum)(void);
+  static BOOL initialized;
+
+  static void initialize();
+
+#ifdef JDK6_OR_EARLIER
+private:
+  static BOOL (WINAPI *_SwitchToThread)(void);
+  static HANDLE (WINAPI* _CreateToolhelp32Snapshot)(DWORD,DWORD);
+  static BOOL (WINAPI* _Module32First)(HANDLE,LPMODULEENTRY32);
+  static BOOL (WINAPI* _Module32Next)(HANDLE,LPMODULEENTRY32);
+  static void (WINAPI *_GetNativeSystemInfo)(LPSYSTEM_INFO);
+#endif
+
+};
+
+class Advapi32Dll: AllStatic {
+public:
+  static BOOL AdjustTokenPrivileges(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
+  static BOOL OpenProcessToken(HANDLE, DWORD, PHANDLE);
+  static BOOL LookupPrivilegeValue(LPCTSTR, LPCTSTR, PLUID);
+
+  static BOOL AdvapiAvailable();
+
+#ifdef JDK6_OR_EARLIER
+private:
+  static BOOL (WINAPI *_AdjustTokenPrivileges)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
+  static BOOL (WINAPI *_OpenProcessToken)(HANDLE, DWORD, PHANDLE);
+  static BOOL (WINAPI *_LookupPrivilegeValue)(LPCTSTR, LPCTSTR, PLUID);
+  static BOOL initialized;
+
+  static void initialize();
+#endif
+};
+
+class PSApiDll: AllStatic {
+public:
+  static BOOL EnumProcessModules(HANDLE, HMODULE *, DWORD, LPDWORD);
+  static DWORD GetModuleFileNameEx(HANDLE, HMODULE, LPTSTR, DWORD);
+  static BOOL GetModuleInformation(HANDLE, HMODULE, LPMODULEINFO, DWORD);
+
+  static BOOL PSApiAvailable();
+
+#ifdef JDK6_OR_EARLIER
+private:
+  static BOOL (WINAPI *_EnumProcessModules)(HANDLE, HMODULE *, DWORD, LPDWORD);
+  static BOOL (WINAPI *_GetModuleFileNameEx)(HANDLE, HMODULE, LPTSTR, DWORD);
+  static BOOL (WINAPI *_GetModuleInformation)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
+  static BOOL initialized;
+
+  static void initialize();
+#endif
+};
+
 #endif // OS_WINDOWS_VM_OS_WINDOWS_HPP
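The JDK6_OR_EARLIER guard in this header keeps a GetProcAddress fallback for pre-VS2010 toolchains while newer SDKs call the APIs directly. A compressed sketch of that one-interface/two-bodies split, simplified to a single entry point and using illustrative names only:

    #include <windows.h>

    #if defined(_MSC_VER) && _MSC_VER < 1600
    #define OLD_TOOLCHAIN 1
    #endif

    struct GetNativeSystemInfoShim {
    #ifndef OLD_TOOLCHAIN
      // New SDK: the symbol is in the import library, so forward directly.
      static bool Available()              { return true; }
      static void Call(LPSYSTEM_INFO si)   { ::GetNativeSystemInfo(si); }
    #else
      // Old SDK: resolve at runtime and report absence instead of failing to link.
      static bool Available()              { return resolve() != NULL; }
      static void Call(LPSYSTEM_INFO si)   { resolve()(si); }
     private:
      typedef void (WINAPI* Fn)(LPSYSTEM_INFO);
      static Fn resolve() {
        static Fn fn = (Fn)::GetProcAddress(::GetModuleHandleA("kernel32.dll"),
                                            "GetNativeSystemInfo");
        return fn;
      }
    #endif
    };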
--- a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -93,7 +93,7 @@
 
 inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
   __asm__ volatile (  "xchgb (%2),%0"
-                    : "=r" (v)
+                    : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
@@ -155,7 +155,7 @@
 // Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
 inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
   __asm__ volatile (  "xchgb (%2),%0"
-                    : "=r" (v)
+                    : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
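The constraint change above is a correctness fix rather than a cleanup: xchgb needs a register with an 8-bit sub-register, and on 32-bit x86 the "r" constraint may hand the compiler esi or edi, which have none; "q" restricts the choice to a/b/c/d. A standalone illustration in plain GCC inline assembly (not HotSpot code):

    #include <stdint.h>

    // Atomically store v to *p with full-fence semantics via xchgb.
    // "=q" forces an al/bl/cl/dl-capable register so the byte form assembles
    // on 32-bit x86; with "=r" the compiler could legally pick esi/edi.
    static inline void byte_store_fence(volatile int8_t* p, int8_t v) {
      __asm__ volatile("xchgb (%2),%0"
                       : "=q"(v)
                       : "0"(v), "r"(p)
                       : "memory");
    }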
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.s	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.s	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 !!
-!! Copyright (c) 2005, 2008 Oracle and/or its affiliates. All rights reserved.
+!! Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
 !! DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 !!
 !! This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/hsdis/README	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/tools/hsdis/README	Wed Jul 27 17:32:44 2011 -0700
@@ -1,4 +1,4 @@
-Copyright (c) 2008 Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 2008, Oracle and/or its affiliates. All rights reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   
 This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_CodeStubs.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/c1/c1_CodeStubs.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -519,42 +519,126 @@
 // Code stubs for Garbage-First barriers.
 class G1PreBarrierStub: public CodeStub {
  private:
+  bool _do_load;
   LIR_Opr _addr;
   LIR_Opr _pre_val;
   LIR_PatchCode _patch_code;
   CodeEmitInfo* _info;
 
  public:
-  // pre_val (a temporary register) must be a register;
+  // Version that _does_ generate a load of the previous value from addr.
   // addr (the address of the field to be read) must be a LIR_Address
+  // pre_val (a temporary register) must be a register;
   G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
-    _addr(addr), _pre_val(pre_val), _patch_code(patch_code), _info(info)
+    _do_load(true), _addr(addr), _pre_val(pre_val),
+    _patch_code(patch_code), _info(info)
   {
     assert(_pre_val->is_register(), "should be temporary register");
     assert(_addr->is_address(), "should be the address of the field");
   }
 
+  // Version that _does not_ generate load of the previous value; the
+  // previous value is assumed to have already been loaded into pre_val.
+  G1PreBarrierStub(LIR_Opr pre_val) :
+    _do_load(false), _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val),
+    _patch_code(lir_patch_none), _info(NULL)
+  {
+    assert(_pre_val->is_register(), "should be a register");
+  }
+
   LIR_Opr addr() const { return _addr; }
   LIR_Opr pre_val() const { return _pre_val; }
   LIR_PatchCode patch_code() const { return _patch_code; }
   CodeEmitInfo* info() const { return _info; }
+  bool do_load() const { return _do_load; }
 
   virtual void emit_code(LIR_Assembler* e);
   virtual void visit(LIR_OpVisitState* visitor) {
-    // don't pass in the code emit info since it's processed in the fast
-    // path
-    if (_info != NULL)
-      visitor->do_slow_case(_info);
-    else
+    if (_do_load) {
+      // don't pass in the code emit info since it's processed in the fast
+      // path
+      if (_info != NULL)
+        visitor->do_slow_case(_info);
+      else
+        visitor->do_slow_case();
+
+      visitor->do_input(_addr);
+      visitor->do_temp(_pre_val);
+    } else {
       visitor->do_slow_case();
-    visitor->do_input(_addr);
-    visitor->do_temp(_pre_val);
+      visitor->do_input(_pre_val);
+    }
   }
 #ifndef PRODUCT
   virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); }
 #endif // PRODUCT
 };
 
+// This G1 barrier code stub is used in Unsafe.getObject.
+// It generates a sequence of guards around the SATB
+// barrier code that detect whether we are reading
+// the referent field of a Reference object.
+// The first check is assumed to have been generated
+// in the code generated for Unsafe.getObject().
+
+class G1UnsafeGetObjSATBBarrierStub: public CodeStub {
+ private:
+  LIR_Opr _val;
+  LIR_Opr _src;
+
+  LIR_Opr _tmp;
+  LIR_Opr _thread;
+
+  bool _gen_src_check;
+
+ public:
+  // A G1 barrier that is guarded by generated checks which determine whether
+  // val (the result of Unsafe.getObject()) should be recorded in an SATB log
+  // buffer. We could be reading the referent field of a Reference object
+  // using Unsafe.getObject(), in which case we need to record the referent.
+  //
+  // * val is the operand returned by the Unsafe.getObject() routine.
+  // * src is the base object
+  // * tmp is a temporary used to load the klass of src, and then its reference type
+  // * thread is the thread object.
+
+  G1UnsafeGetObjSATBBarrierStub(LIR_Opr val, LIR_Opr src,
+                                LIR_Opr tmp, LIR_Opr thread,
+                                bool gen_src_check) :
+    _val(val), _src(src),
+    _tmp(tmp), _thread(thread),
+    _gen_src_check(gen_src_check)
+  {
+    assert(_val->is_register(), "should have already been loaded");
+    assert(_src->is_register(), "should have already been loaded");
+
+    assert(_tmp->is_register(), "should be a temporary register");
+  }
+
+  LIR_Opr val() const { return _val; }
+  LIR_Opr src() const { return _src; }
+
+  LIR_Opr tmp() const { return _tmp; }
+  LIR_Opr thread() const { return _thread; }
+
+  bool gen_src_check() const { return _gen_src_check; }
+
+  virtual void emit_code(LIR_Assembler* e);
+
+  virtual void visit(LIR_OpVisitState* visitor) {
+    visitor->do_slow_case();
+    visitor->do_input(_val);
+    visitor->do_input(_src);
+    visitor->do_input(_thread);
+
+    visitor->do_temp(_tmp);
+  }
+
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const { out->print("G1UnsafeGetObjSATBBarrierStub"); }
+#endif // PRODUCT
+};
+
 class G1PostBarrierStub: public CodeStub {
  private:
   LIR_Opr _addr;
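The second G1PreBarrierStub constructor exists for callers such as Reference.get() and Unsafe.getObject() that already hold the previous value in a register, so the stub only has to enqueue it rather than load it. A conceptual sketch of the two flavours, using hypothetical helpers (marking_active, satb_enqueue) rather than HotSpot code:

    #include <cstddef>

    typedef void* oop;                 // placeholder for the real oop type
    extern bool marking_active();      // stand-in for the marking-in-progress flag test
    extern void satb_enqueue(oop obj); // stand-in for the SATB queue code

    void satb_pre_barrier(oop* field, oop pre_val, bool do_load) {
      if (!marking_active()) return;   // the inline guard both stub flavours share
      if (do_load) {
        pre_val = *field;              // store path: the stub loads the old value itself
      }                                // otherwise the caller already has it in pre_val
      if (pre_val != NULL) {
        satb_enqueue(pre_val);         // record the previous value in the SATB log
      }
    }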
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2913,6 +2913,46 @@
       block()->set_end(end);
       break;
     }
+
+  case vmIntrinsics::_Reference_get:
+    {
+      if (UseG1GC) {
+        // With java.lang.ref.Reference.get() we must go through the
+        // intrinsic - when G1 is enabled - even when get() is the root
+        // method of the compile so that, if necessary, the value in
+        // the referent field of the reference object gets recorded by
+        // the pre-barrier code.
+        // Specifically, if G1 is enabled, the value in the referent
+        // field is recorded by the G1 SATB pre barrier. This will
+        // result in the referent being marked live and the reference
+        // object removed from the list of discovered references during
+        // reference processing.
+
+        // Set up a stream so that appending instructions works properly.
+        ciBytecodeStream s(scope->method());
+        s.reset_to_bci(0);
+        scope_data()->set_stream(&s);
+        s.next();
+
+        // setup the initial block state
+        _block = start_block;
+        _state = start_block->state()->copy_for_parsing();
+        _last  = start_block;
+        load_local(objectType, 0);
+
+        // Emit the intrinsic node.
+        bool result = try_inline_intrinsics(scope->method());
+        if (!result) BAILOUT("failed to inline intrinsic");
+        method_return(apop());
+
+        // connect the begin and end blocks and we're all done.
+        BlockEnd* end = last()->as_BlockEnd();
+        block()->set_end(end);
+        break;
+      }
+      // Otherwise, fall thru
+    }
+
   default:
     scope_data()->add_to_work_list(start_block);
     iterate_all_blocks();
@@ -3150,6 +3190,15 @@
       append_unsafe_CAS(callee);
       return true;
 
+    case vmIntrinsics::_Reference_get:
+      // It is only when G1 is enabled that we absolutely
+      // need to use the intrinsic version of Reference.get()
+      // so that the value in the referent field, if necessary,
+      // can be registered by the pre-barrier code.
+      if (!UseG1GC) return false;
+      preserves_state = true;
+      break;
+
     default                       : return false; // do not inline
   }
   // create intrinsic node
--- a/src/share/vm/c1/c1_Instruction.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/c1/c1_Instruction.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -596,7 +596,7 @@
 // of the inserted block, without recomputing the values of the other blocks
 // in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless.
 BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) {
-  BlockBegin* new_sux = new BlockBegin(-99);
+  BlockBegin* new_sux = new BlockBegin(end()->state()->bci());
 
   // mark this block (special treatment when block order is computed)
   new_sux->set(critical_edge_split_flag);
--- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -132,17 +132,22 @@
     if (value->is_null_object()) {
       output()->print("null");
     } else if (!value->is_loaded()) {
-      output()->print("<unloaded object 0x%x>", value);
+      output()->print("<unloaded object " PTR_FORMAT ">", value);
     } else if (value->is_method()) {
       ciMethod* m = (ciMethod*)value;
       output()->print("<method %s.%s>", m->holder()->name()->as_utf8(), m->name()->as_utf8());
     } else {
-      output()->print("<object 0x%x>", value->constant_encoding());
+      output()->print("<object " PTR_FORMAT ">", value->constant_encoding());
     }
   } else if (type->as_InstanceConstant() != NULL) {
-    output()->print("<instance 0x%x>", type->as_InstanceConstant()->value()->constant_encoding());
+    ciInstance* value = type->as_InstanceConstant()->value();
+    if (value->is_loaded()) {
+      output()->print("<instance " PTR_FORMAT ">", value->constant_encoding());
+    } else {
+      output()->print("<unloaded instance " PTR_FORMAT ">", value);
+    }
   } else if (type->as_ArrayConstant() != NULL) {
-    output()->print("<array 0x%x>", type->as_ArrayConstant()->value()->constant_encoding());
+    output()->print("<array " PTR_FORMAT ">", type->as_ArrayConstant()->value()->constant_encoding());
   } else if (type->as_ClassConstant() != NULL) {
     ciInstanceKlass* klass = type->as_ClassConstant()->value();
     if (!klass->is_loaded()) {
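The printing change above is a 64-bit correctness fix: "%x" expects an unsigned int and truncates (or mismatches) a 64-bit pointer, whereas PTR_FORMAT expands to a pointer-sized hexadecimal conversion inside HotSpot. Outside HotSpot, the portable equivalent is PRIxPTR from <cinttypes>; a minimal illustration:

    #include <cinttypes>
    #include <cstdio>

    // "%x" would truncate on LP64 targets; PRIxPTR always matches uintptr_t.
    void print_object(const void* obj) {
      std::printf("<object 0x%" PRIxPTR ">\n", (std::uintptr_t)obj);
    }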
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1209,6 +1209,38 @@
   set_no_result(x);
 }
 
+// Example: ref.get()
+// Combination of LoadField and G1 pre-write barrier
+void LIRGenerator::do_Reference_get(Intrinsic* x) {
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  assert(x->number_of_arguments() == 1, "wrong type");
+
+  LIRItem reference(x->argument_at(0), this);
+  reference.load_item();
+
+  // need to perform the null check on the reference object
+  CodeEmitInfo* info = NULL;
+  if (x->needs_null_check()) {
+    info = state_for(x);
+  }
+
+  LIR_Address* referent_field_adr =
+    new LIR_Address(reference.result(), referent_offset, T_OBJECT);
+
+  LIR_Opr result = rlock_result(x);
+
+  __ load(referent_field_adr, result, info);
+
+  // Register the value in the referent field with the pre-barrier
+  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
+              result /* pre_val */,
+              false  /* do_load */,
+              false  /* patch */,
+              NULL   /* info */);
+}
 
 // Example: object.getClass ()
 void LIRGenerator::do_getClass(Intrinsic* x) {
@@ -1351,13 +1383,14 @@
 
 // Various barriers
 
-void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch,  CodeEmitInfo* info) {
+void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
+                               bool do_load, bool patch, CodeEmitInfo* info) {
   // Do the pre-write barrier, if any.
   switch (_bs->kind()) {
 #ifndef SERIALGC
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
-      G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
+      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
       break;
 #endif // SERIALGC
     case BarrierSet::CardTableModRef:
@@ -1398,9 +1431,8 @@
 ////////////////////////////////////////////////////////////////////////
 #ifndef SERIALGC
 
-void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch,  CodeEmitInfo* info) {
-  if (G1DisablePreBarrier) return;
-
+void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
+                                                     bool do_load, bool patch, CodeEmitInfo* info) {
   // First we test whether marking is in progress.
   BasicType flag_type;
   if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
@@ -1419,26 +1451,40 @@
   // Read the marking-in-progress flag.
   LIR_Opr flag_val = new_register(T_INT);
   __ load(mark_active_flag_addr, flag_val);
-
-  LIR_PatchCode pre_val_patch_code =
-    patch ? lir_patch_normal : lir_patch_none;
-
-  LIR_Opr pre_val = new_register(T_OBJECT);
-
   __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
-  if (!addr_opr->is_address()) {
-    assert(addr_opr->is_register(), "must be");
-    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
+
+  LIR_PatchCode pre_val_patch_code = lir_patch_none;
+
+  CodeStub* slow;
+
+  if (do_load) {
+    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
+    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
+
+    if (patch)
+      pre_val_patch_code = lir_patch_normal;
+
+    pre_val = new_register(T_OBJECT);
+
+    if (!addr_opr->is_address()) {
+      assert(addr_opr->is_register(), "must be");
+      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
+    }
+    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
+  } else {
+    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
+    assert(pre_val->is_register(), "must be");
+    assert(pre_val->type() == T_OBJECT, "must be an object");
+    assert(info == NULL, "sanity");
+
+    slow = new G1PreBarrierStub(pre_val);
   }
-  CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
-                                        info);
+
   __ branch(lir_cond_notEqual, T_INT, slow);
   __ branch_destination(slow->continuation());
 }
 
 void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
-  if (G1DisablePostBarrier) return;
-
   // If the "new_val" is a constant NULL, no barrier is necessary.
   if (new_val->is_constant() &&
       new_val->as_constant_ptr()->as_jobject() == NULL) return;
@@ -1662,6 +1708,8 @@
   if (is_oop) {
     // Do the pre-write barrier, if any.
     pre_barrier(LIR_OprFact::address(address),
+                LIR_OprFact::illegalOpr /* pre_val */,
+                true /* do_load*/,
                 needs_patching,
                 (info ? new CodeEmitInfo(info) : NULL));
   }
@@ -2091,9 +2139,144 @@
   off.load_item();
   src.load_item();
 
-  LIR_Opr reg = reg = rlock_result(x, x->basic_type());
+  LIR_Opr reg = rlock_result(x, x->basic_type());
 
   get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
+
+#ifndef SERIALGC
+  // We might be reading the value of the referent field of a
+  // Reference object in order to attach it back to the live
+  // object graph. If G1 is enabled then we need to record
+  // the value that is being returned in an SATB log buffer.
+  //
+  // We need to generate code similar to the following...
+  //
+  // if (offset == java_lang_ref_Reference::referent_offset) {
+  //   if (src != NULL) {
+  //     if (klass(src)->reference_type() != REF_NONE) {
+  //       pre_barrier(..., reg, ...);
+  //     }
+  //   }
+  // }
+  //
+  // The first non-constant check of either the offset or
+  // the src operand will be done here; the remainder
+  // will take place in the generated code stub.
+
+  if (UseG1GC && type == T_OBJECT) {
+    bool gen_code_stub = true;       // Assume we need to generate the slow code stub.
+    bool gen_offset_check = true;       // Assume the code stub has to generate the offset guard.
+    bool gen_source_check = true;       // Assume the code stub has to check the src object for null.
+
+    if (off.is_constant()) {
+      jlong off_con = (off.type()->is_int() ?
+                        (jlong) off.get_jint_constant() :
+                        off.get_jlong_constant());
+
+
+      if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
+        // The constant offset is something other than referent_offset.
+        // We can skip generating/checking the remaining guards and
+        // skip generation of the code stub.
+        gen_code_stub = false;
+      } else {
+        // The constant offset is the same as referent_offset -
+        // we do not need to generate a runtime offset check.
+        gen_offset_check = false;
+      }
+    }
+
+    // We don't need to generate stub if the source object is an array
+    if (gen_code_stub && src.type()->is_array()) {
+      gen_code_stub = false;
+    }
+
+    if (gen_code_stub) {
+      // We still need to continue with the checks.
+      if (src.is_constant()) {
+        ciObject* src_con = src.get_jobject_constant();
+
+        if (src_con->is_null_object()) {
+          // The constant src object is null - We can skip
+          // generating the code stub.
+          gen_code_stub = false;
+        } else {
+          // Non-null constant source object. We still have to generate
+          // the slow stub - but we don't need to generate the runtime
+          // null object check.
+          gen_source_check = false;
+        }
+      }
+    }
+
+    if (gen_code_stub) {
+      // Temporaries.
+      LIR_Opr src_klass = new_register(T_OBJECT);
+
+      // Get the thread pointer for the pre-barrier
+      LIR_Opr thread = getThreadPointer();
+
+      CodeStub* stub;
+
+      // At most one runtime check is generated inline here. Let's start with
+      // the offset check.
+      if (gen_offset_check) {
+        // if (offset == referent_offset) -> slow code stub
+        // If offset is an int then we can do the comparison with the
+        // referent_offset constant; otherwise we need to move
+        // referent_offset into a temporary register and generate
+        // a reg-reg compare.
+
+        LIR_Opr referent_off;
+
+        if (off.type()->is_int()) {
+          referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
+        } else {
+          assert(off.type()->is_long(), "what else?");
+          referent_off = new_register(T_LONG);
+          __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
+        }
+
+        __ cmp(lir_cond_equal, off.result(), referent_off);
+
+        // Optionally generate "src == null" check.
+        stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
+                                                    src_klass, thread,
+                                                    gen_source_check);
+
+        __ branch(lir_cond_equal, as_BasicType(off.type()), stub);
+      } else {
+        if (gen_source_check) {
+          // offset is a const and equals referent offset
+          // if (source != null) -> slow code stub
+          __ cmp(lir_cond_notEqual, src.result(), LIR_OprFact::oopConst(NULL));
+
+          // Since we are generating the "src != null" guard here,
+          // the code stub does not need to check src for null again.
+          stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
+                                                    src_klass, thread,
+                                                    false);
+
+          __ branch(lir_cond_notEqual, T_OBJECT, stub);
+        } else {
+          // We have statically determined that offset == referent_offset
+          // && src != null so we unconditionally branch to code stub
+          // to perform the guards and record reg in the SATB log buffer.
+
+          stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
+                                                    src_klass, thread,
+                                                    false);
+
+          __ branch(lir_cond_always, T_ILLEGAL, stub);
+        }
+      }
+
+      // Continuation point
+      __ branch_destination(stub->continuation());
+    }
+  }
+#endif // SERIALGC
+
   if (x->is_volatile() && os::is_MP()) __ membar_acquire();
 }
 
@@ -2616,7 +2799,7 @@
 
       // Load CallSite object from constant pool cache.
       __ oop2reg(cpcache->constant_encoding(), tmp);
-      __ load(new LIR_Address(tmp, (int)call_site_offset, T_OBJECT), tmp);
+      __ move_wide(new LIR_Address(tmp, (int)call_site_offset, T_OBJECT), tmp);
 
       // Load target MethodHandle from CallSite object.
       __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
@@ -2759,6 +2942,10 @@
     do_AttemptUpdate(x);
     break;
 
+  case vmIntrinsics::_Reference_get:
+    do_Reference_get(x);
+    break;
+
   default: ShouldNotReachHere(); break;
   }
 }
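The block added to the unsafe-get path above prunes the guards from the commented pseudocode at compile time: a constant offset settles the offset check, a constant source settles the null check, and an array-typed or constant-null source removes the stub entirely. The same decisions, restated as a small self-contained C++ sketch with hypothetical names (not part of the patch):

    struct UnsafeGetGuards {
      bool need_stub;          // emit the slow-path SATB stub at all?
      bool need_offset_check;  // runtime "offset == referent_offset" test?
      bool need_source_check;  // runtime "src != NULL" test?
    };

    UnsafeGetGuards plan_guards(bool offset_is_constant, bool offset_is_referent,
                                bool src_is_array, bool src_is_constant,
                                bool src_is_null) {
      UnsafeGetGuards g = { true, true, true };
      if (offset_is_constant) {
        if (!offset_is_referent) g.need_stub = false;  // can never be the referent field
        g.need_offset_check = false;                   // the constant already decided it
      }
      if (src_is_array) g.need_stub = false;           // arrays are never Reference objects
      if (g.need_stub && src_is_constant) {
        if (src_is_null) g.need_stub = false;          // null source: nothing to record
        else             g.need_source_check = false;  // non-null constant: skip the test
      }
      return g;
    }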
--- a/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -246,6 +246,7 @@
   void do_AttemptUpdate(Intrinsic* x);
   void do_NIOCheckIndex(Intrinsic* x);
   void do_FPIntrinsics(Intrinsic* x);
+  void do_Reference_get(Intrinsic* x);
 
   void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store);
 
@@ -260,13 +261,14 @@
 
   // generic interface
 
-  void pre_barrier(LIR_Opr addr_opr, bool patch,  CodeEmitInfo* info);
+  void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
   void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
 
   // specific implementations
   // pre barriers
 
-  void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch,  CodeEmitInfo* info);
+  void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
+                                         bool do_load, bool patch, CodeEmitInfo* info);
 
   // post barriers
 
--- a/src/share/vm/c1/c1_Optimizer.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/c1/c1_Optimizer.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -252,26 +252,28 @@
         Constant::CompareResult t_compare_res = x_tval_const->compare(cond, y_const);
         Constant::CompareResult f_compare_res = x_fval_const->compare(cond, y_const);
 
-        guarantee(t_compare_res != Constant::not_comparable && f_compare_res != Constant::not_comparable, "incomparable constants in IfOp");
-
-        Value new_tval = t_compare_res == Constant::cond_true ? tval : fval;
-        Value new_fval = f_compare_res == Constant::cond_true ? tval : fval;
+        // not_comparable here is a valid return in case we're comparing unloaded oop constants
+        if (t_compare_res != Constant::not_comparable && f_compare_res != Constant::not_comparable) {
+          Value new_tval = t_compare_res == Constant::cond_true ? tval : fval;
+          Value new_fval = f_compare_res == Constant::cond_true ? tval : fval;
 
-        _ifop_count++;
-        if (new_tval == new_fval) {
-          return new_tval;
-        } else {
-          return new IfOp(x_ifop->x(), x_ifop_cond, x_ifop->y(), new_tval, new_fval);
+          _ifop_count++;
+          if (new_tval == new_fval) {
+            return new_tval;
+          } else {
+            return new IfOp(x_ifop->x(), x_ifop_cond, x_ifop->y(), new_tval, new_fval);
+          }
         }
       }
     } else {
       Constant* x_const = x->as_Constant();
       if (x_const != NULL) {         // x and y are constants
         Constant::CompareResult x_compare_res = x_const->compare(cond, y_const);
-        guarantee(x_compare_res != Constant::not_comparable, "incomparable constants in IfOp");
-
-        _ifop_count++;
-        return x_compare_res == Constant::cond_true ? tval : fval;
+        // not_comparable here is a valid return in case we're comparing unloaded oop constants
+        if (x_compare_res != Constant::not_comparable) {
+          _ifop_count++;
+          return x_compare_res == Constant::cond_true ? tval : fval;
+        }
       }
     }
   }
--- a/src/share/vm/c1/c1_Runtime1.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1068,9 +1068,21 @@
           // first replace the tail, then the call
 #ifdef ARM
           if(stub_id == Runtime1::load_klass_patching_id && !VM_Version::supports_movw()) {
+            nmethod* nm = CodeCache::find_nmethod(instr_pc);
+            oop* oop_addr = NULL;
+            assert(nm != NULL, "invalid nmethod_pc");
+            RelocIterator oops(nm, copy_buff, copy_buff + 1);
+            while (oops.next()) {
+              if (oops.type() == relocInfo::oop_type) {
+                oop_Relocation* r = oops.oop_reloc();
+                oop_addr = r->oop_addr();
+                break;
+              }
+            }
+            assert(oop_addr != NULL, "oop relocation must exist");
             copy_buff -= *byte_count;
             NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
-            n_copy2->set_data((intx) (load_klass()), instr_pc);
+            n_copy2->set_pc_relative_offset((address)oop_addr, instr_pc);
           }
 #endif
 
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -232,14 +232,7 @@
   }
 
   // compute size of arguments
-  int arg_size = target->arg_size();
-  if (code == Bytecodes::_invokedynamic) {
-    assert(!target->is_static(), "receiver explicit in method");
-    arg_size--;  // implicit, not really on stack
-  }
-  if (!target->is_loaded() && code == Bytecodes::_invokestatic) {
-    arg_size--;
-  }
+  int arg_size = target->invoke_arg_size(code);
   int arg_base = MAX2(state._stack_height - arg_size, 0);
 
   // direct recursive calls are skipped if they can be bound statically without introducing
--- a/src/share/vm/ci/ciCallProfile.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciCallProfile.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -36,6 +36,7 @@
 private:
   // Fields are initialized directly by ciMethod::call_profile_at_bci.
   friend class ciMethod;
+  friend class ciMethodHandle;
 
   enum { MorphismLimit = 2 }; // Max call site's morphism we care about
   int  _limit;                // number of receivers have been determined
@@ -58,11 +59,11 @@
 
 public:
   // Note:  The following predicates return false for invalid profiles:
-  bool      has_receiver(int i) { return _limit > i; }
-  int       morphism()          { return _morphism; }
-  int       limit()             { return _limit; }
+  bool      has_receiver(int i) const { return _limit > i; }
+  int       morphism() const          { return _morphism; }
+  int       limit() const             { return _limit; }
 
-  int       count()             { return _count; }
+  int       count() const             { return _count; }
   int       receiver_count(int i)  {
     assert(i < _limit, "out of Call Profile MorphismLimit");
     return _receiver_count[i];
--- a/src/share/vm/ci/ciEnv.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -50,6 +50,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "prims/jvmtiExport.hpp"
+#include "prims/methodHandleWalk.hpp"
 #include "runtime/init.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -372,6 +373,7 @@
 // ------------------------------------------------------------------
 // ciEnv::get_klass_by_name_impl
 ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
+                                       constantPoolHandle cpool,
                                        ciSymbol* name,
                                        bool require_local) {
   ASSERT_IN_VM;
@@ -387,7 +389,7 @@
                     sym->utf8_length()-2,
                     KILL_COMPILE_ON_FATAL_(_unloaded_ciinstance_klass));
     ciSymbol* strippedname = get_symbol(strippedsym);
-    return get_klass_by_name_impl(accessing_klass, strippedname, require_local);
+    return get_klass_by_name_impl(accessing_klass, cpool, strippedname, require_local);
   }
 
   // Check for prior unloaded klass.  The SystemDictionary's answers
@@ -444,6 +446,7 @@
     // Get element ciKlass recursively.
     ciKlass* elem_klass =
       get_klass_by_name_impl(accessing_klass,
+                             cpool,
                              get_symbol(elem_sym),
                              require_local);
     if (elem_klass != NULL && elem_klass->is_loaded()) {
@@ -452,6 +455,19 @@
     }
   }
 
+  if (found_klass() == NULL && !cpool.is_null() && cpool->has_preresolution()) {
+    // Look inside the constant pool for pre-resolved class entries.
+    for (int i = cpool->length() - 1; i >= 1; i--) {
+      if (cpool->tag_at(i).is_klass()) {
+        klassOop kls = cpool->resolved_klass_at(i);
+        if (Klass::cast(kls)->name() == sym) {
+          found_klass = KlassHandle(THREAD, kls);
+          break;
+        }
+      }
+    }
+  }
+
   if (found_klass() != NULL) {
     // Found it.  Build a CI handle.
     return get_object(found_klass())->as_klass();
@@ -469,6 +485,7 @@
                                   ciSymbol* klass_name,
                                   bool require_local) {
   GUARDED_VM_ENTRY(return get_klass_by_name_impl(accessing_klass,
+                                                 constantPoolHandle(),
                                                  klass_name,
                                                  require_local);)
 }
@@ -509,13 +526,14 @@
   if (klass.is_null()) {
     // Not found in constant pool.  Use the name to do the lookup.
     ciKlass* k = get_klass_by_name_impl(accessor,
+                                        cpool,
                                         get_symbol(klass_name),
                                         false);
     // Calculate accessibility the hard way.
     if (!k->is_loaded()) {
       is_accessible = false;
     } else if (k->loader() != accessor->loader() &&
-               get_klass_by_name_impl(accessor, k->name(), true) == NULL) {
+               get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
       // Loaded only remotely.  Not linked yet.
       is_accessible = false;
     } else {
@@ -566,7 +584,7 @@
     index = cpc_entry->constant_pool_index();
     oop obj = cpc_entry->f1();
     if (obj != NULL) {
-      assert(obj->is_instance(), "must be an instance");
+      assert(obj->is_instance() || obj->is_array(), "must be a Java reference");
       ciObject* ciobj = get_object(obj);
       return ciConstant(T_OBJECT, ciobj);
     }
@@ -608,7 +626,7 @@
     return ciConstant(T_OBJECT, klass->java_mirror());
   } else if (tag.is_object()) {
     oop obj = cpool->object_at(index);
-    assert(obj->is_instance(), "must be an instance");
+    assert(obj->is_instance() || obj->is_array(), "must be a Java reference");
     ciObject* ciobj = get_object(obj);
     return ciConstant(T_OBJECT, ciobj);
   } else if (tag.is_method_type()) {
@@ -730,9 +748,35 @@
   Symbol* name_sym = cpool->name_ref_at(index);
   Symbol* sig_sym  = cpool->signature_ref_at(index);
 
+  if (cpool->has_preresolution()
+      || (holder == ciEnv::MethodHandle_klass() &&
+          methodOopDesc::is_method_handle_invoke_name(name_sym))) {
+    // Short-circuit lookups for JSR 292-related call sites.
+    // That is, do not rely only on name-based lookups, because they may fail
+    // if the names are not resolvable in the boot class loader (7056328).
+    switch (bc) {
+    case Bytecodes::_invokevirtual:
+    case Bytecodes::_invokeinterface:
+    case Bytecodes::_invokespecial:
+    case Bytecodes::_invokestatic:
+      {
+        methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index, bc);
+        if (m != NULL) {
+          return get_object(m)->as_method();
+        }
+      }
+    }
+  }
+
   if (holder_is_accessible) { // Our declared holder is loaded.
     instanceKlass* lookup = declared_holder->get_instanceKlass();
     methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
+    if (m != NULL &&
+        (bc == Bytecodes::_invokestatic
+         ?  instanceKlass::cast(m->method_holder())->is_not_initialized()
+         : !instanceKlass::cast(m->method_holder())->is_loaded())) {
+      m = NULL;
+    }
     if (m != NULL) {
       // We found the method.
       return get_object(m)->as_method();
@@ -757,7 +801,7 @@
   assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
 
   bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc);
-  if (is_resolved && (oop) cpool->cache()->secondary_entry_at(index)->f1() == NULL)
+  if (is_resolved && cpool->cache()->secondary_entry_at(index)->is_f1_null())
     // FIXME: code generation could allow for null (unlinked) call site
     is_resolved = false;
 
@@ -771,7 +815,7 @@
 
   // Get the invoker methodOop from the constant pool.
   oop f1_value = cpool->cache()->main_entry_at(index)->f1();
-  methodOop signature_invoker = methodOop(f1_value);
+  methodOop signature_invoker = (methodOop) f1_value;
   assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
          "correct result from LinkResolver::resolve_invokedynamic");
 
@@ -1047,7 +1091,7 @@
 // ciEnv::find_system_klass
 ciKlass* ciEnv::find_system_klass(ciSymbol* klass_name) {
   VM_ENTRY_MARK;
-  return get_klass_by_name_impl(NULL, klass_name, false);
+  return get_klass_by_name_impl(NULL, constantPoolHandle(), klass_name, false);
 }
 
 // ------------------------------------------------------------------
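
The pre-resolution branch added to get_klass_by_name_impl above walks the constant pool from the highest index down and returns the first resolved klass entry whose name matches, before falling back to the normal dictionary lookup. A minimal, self-contained sketch of that reverse scan, using hypothetical stand-in types rather than the real constantPoolOop API:

    #include <string>
    #include <vector>

    // Hypothetical stand-ins for the VM types used in the hunk above.
    struct FakeKlass   { std::string name; };
    struct FakeCpEntry { bool is_klass; FakeKlass* klass; };

    // Scan from the highest constant-pool index down to 1, mirroring the loop
    // in ciEnv::get_klass_by_name_impl; entry 0 is unused, as in a real pool.
    FakeKlass* find_preresolved(const std::vector<FakeCpEntry>& cpool,
                                const std::string& wanted) {
      for (int i = (int)cpool.size() - 1; i >= 1; i--) {
        if (cpool[i].is_klass && cpool[i].klass->name == wanted) {
          return cpool[i].klass;   // first match wins
        }
      }
      return nullptr;              // caller falls back to the dictionary lookup
    }
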
--- a/src/share/vm/ci/ciEnv.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciEnv.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -141,6 +141,7 @@
 
   // Implementation methods for loading and constant pool access.
   ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass,
+                                  constantPoolHandle cpool,
                                   ciSymbol* klass_name,
                                   bool require_local);
   ciKlass*   get_klass_by_index_impl(constantPoolHandle cpool,
--- a/src/share/vm/ci/ciField.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciField.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -287,7 +287,7 @@
 }
 
 ciType* ciField::compute_type_impl() {
-  ciKlass* type = CURRENT_ENV->get_klass_by_name_impl(_holder, _signature, false);
+  ciKlass* type = CURRENT_ENV->get_klass_by_name_impl(_holder, constantPoolHandle(), _signature, false);
   if (!type->is_primitive_type() && is_shared()) {
     // We must not cache a pointer to an unshared type, in a shared field.
     bool type_is_also_shared = false;
--- a/src/share/vm/ci/ciMethod.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciMethod.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -125,7 +125,8 @@
   _name = env->get_symbol(h_m()->name());
   _holder = env->get_object(h_m()->method_holder())->as_instance_klass();
   ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
-  _signature = new (env->arena()) ciSignature(_holder, sig_symbol);
+  constantPoolHandle cpool = h_m()->constants();
+  _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
   _method_data = NULL;
   // Take a snapshot of these values, so they will be commensurate with the MDO.
   if (ProfileInterpreter || TieredCompilation) {
@@ -152,7 +153,7 @@
   // These fields are always filled in.
   _name = name;
   _holder = holder;
-  _signature = new (CURRENT_ENV->arena()) ciSignature(_holder, signature);
+  _signature = new (CURRENT_ENV->arena()) ciSignature(_holder, constantPoolHandle(), signature);
   _intrinsic_id = vmIntrinsics::_none;
   _liveness = NULL;
   _can_be_statically_bound = false;
--- a/src/share/vm/ci/ciMethod.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciMethod.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -127,7 +127,24 @@
   ciSignature* signature() const                 { return _signature; }
   ciType*      return_type() const               { return _signature->return_type(); }
   int          arg_size_no_receiver() const      { return _signature->size(); }
-  int          arg_size() const                  { return _signature->size() + (_flags.is_static() ? 0 : 1); }
+  // Can only be used on loaded ciMethods
+  int          arg_size() const                  {
+    check_is_loaded();
+    return _signature->size() + (_flags.is_static() ? 0 : 1);
+  }
+  // Report the number of elements on the stack when invoking this method.
+  // This is different from the regular arg_size because invokedynamic
+  // has an implicit receiver.
+  int invoke_arg_size(Bytecodes::Code code) const {
+    int arg_size = _signature->size();
+    // Add a receiver argument, maybe:
+    if (code != Bytecodes::_invokestatic &&
+        code != Bytecodes::_invokedynamic) {
+      arg_size++;
+    }
+    return arg_size;
+  }
+
 
   // Method code and related information.
   address code()                                 { if (_code == NULL) load_code(); return _code; }
@@ -276,9 +293,9 @@
   void print_short_name(outputStream* st = tty);
 
   methodOop get_method_handle_target() {
-    klassOop receiver_limit_oop = NULL;
-    int flags = 0;
-    return MethodHandles::decode_method(get_oop(), receiver_limit_oop, flags);
+    KlassHandle receiver_limit; int flags = 0;
+    methodHandle m = MethodHandles::decode_method(get_oop(), receiver_limit, flags);
+    return m();
   }
 };
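
The new invoke_arg_size above encodes one rule: add a receiver slot unless the bytecode is invokestatic or invokedynamic, whose receiver (if any) is implicit. A small illustration of the same counting rule, detached from ciMethod; the enum below is a placeholder, not the real Bytecodes::Code values:

    enum class Invoke { Static, Special, Virtual, Interface, Dynamic };

    // Mirrors ciMethod::invoke_arg_size: start from the declared parameter
    // slot count and add one for an explicit receiver, which invokestatic
    // and invokedynamic do not push on the operand stack.
    int invoke_arg_size(int signature_size, Invoke code) {
      int arg_size = signature_size;
      if (code != Invoke::Static && code != Invoke::Dynamic) {
        arg_size++;
      }
      return arg_size;
    }

    // e.g. invoke_arg_size(2, Invoke::Virtual) == 3
    //      invoke_arg_size(2, Invoke::Dynamic) == 2
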
 
--- a/src/share/vm/ci/ciMethodData.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciMethodData.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -233,7 +233,10 @@
 
 public:
   bool is_method_data()  { return true; }
-  bool is_empty() { return _state == empty_state; }
+
+  void set_mature() { _state = mature_state; }
+
+  bool is_empty()  { return _state == empty_state; }
   bool is_mature() { return _state == mature_state; }
 
   int creation_mileage() { return _orig.creation_mileage(); }
--- a/src/share/vm/ci/ciMethodHandle.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciMethodHandle.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "ci/ciClassList.hpp"
 #include "ci/ciInstance.hpp"
+#include "ci/ciMethodData.hpp"
 #include "ci/ciMethodHandle.hpp"
 #include "ci/ciUtilities.hpp"
 #include "prims/methodHandleWalk.hpp"
@@ -36,13 +37,23 @@
 // ciMethodHandle::get_adapter
 //
 // Return an adapter for this MethodHandle.
-ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
+ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const {
   VM_ENTRY_MARK;
   Handle h(get_oop());
   methodHandle callee(_callee->get_methodOop());
+  assert(callee->is_method_handle_invoke(), "");
+  oop mt1 = callee->method_handle_type();
+  oop mt2 = java_lang_invoke_MethodHandle::type(h());
+  if (!java_lang_invoke_MethodType::equals(mt1, mt2)) {
+    if (PrintMiscellaneous && (Verbose || WizardMode)) {
+      tty->print_cr("ciMethodHandle::get_adapter: types not equal");
+      mt1->print(); mt2->print();
+    }
+    return NULL;
+  }
   // We catch all exceptions here that could happen in the method
   // handle compiler and stop the VM.
-  MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD);
+  MethodHandleCompiler mhc(h, callee->name(), callee->signature(), _profile.count(), is_invokedynamic, THREAD);
   if (!HAS_PENDING_EXCEPTION) {
     methodHandle m = mhc.compile(THREAD);
     if (!HAS_PENDING_EXCEPTION) {
@@ -52,12 +63,28 @@
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
     tty->print("*** ciMethodHandle::get_adapter => ");
     PENDING_EXCEPTION->print();
-    tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); //@@
+    tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print();
   }
   CLEAR_PENDING_EXCEPTION;
   return NULL;
 }
 
+// ------------------------------------------------------------------
+// ciMethodHandle::get_adapter
+//
+// Return an adapter for this MethodHandle.
+ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
+  ciMethod* result = get_adapter_impl(is_invokedynamic);
+  if (result) {
+    // Fake up the MDO maturity.
+    ciMethodData* mdo = result->method_data();
+    if (mdo != NULL && _caller->method_data() != NULL && _caller->method_data()->is_mature()) {
+      mdo->set_mature();
+    }
+  }
+  return result;
+}
+
 
 // ------------------------------------------------------------------
 // ciMethodHandle::print_impl
--- a/src/share/vm/ci/ciMethodHandle.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciMethodHandle.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_CI_CIMETHODHANDLE_HPP
 #define SHARE_VM_CI_CIMETHODHANDLE_HPP
 
+#include "ci/ciCallProfile.hpp"
 #include "ci/ciInstance.hpp"
 #include "prims/methodHandles.hpp"
 
@@ -33,32 +34,36 @@
 // The class represents a java.lang.invoke.MethodHandle object.
 class ciMethodHandle : public ciInstance {
 private:
-  ciMethod* _callee;
+  ciMethod*      _callee;
+  ciMethod*      _caller;
+  ciCallProfile  _profile;
 
   // Return an adapter for this MethodHandle.
-  ciMethod* get_adapter(bool is_invokedynamic) const;
+  ciMethod* get_adapter_impl(bool is_invokedynamic) const;
+  ciMethod* get_adapter(     bool is_invokedynamic) const;
 
 protected:
   void print_impl(outputStream* st);
 
 public:
-  ciMethodHandle(instanceHandle h_i) : ciInstance(h_i) {};
+  ciMethodHandle(instanceHandle h_i) :
+    ciInstance(h_i),
+    _callee(NULL),
+    _caller(NULL)
+  {}
 
   // What kind of ciObject is this?
   bool is_method_handle() const { return true; }
 
-  ciMethod* callee() const { return _callee; }
-  void  set_callee(ciMethod* m) { _callee = m; }
+  void set_callee(ciMethod* m)                  { _callee  = m;       }
+  void set_caller(ciMethod* m)                  { _caller  = m;       }
+  void set_call_profile(ciCallProfile profile)  { _profile = profile; }
 
   // Return an adapter for a MethodHandle call.
-  ciMethod* get_method_handle_adapter() const {
-    return get_adapter(false);
-  }
+  ciMethod* get_method_handle_adapter() const { return get_adapter(false); }
 
   // Return an adapter for an invokedynamic call.
-  ciMethod* get_invokedynamic_adapter() const {
-    return get_adapter(true);
-  }
+  ciMethod* get_invokedynamic_adapter() const { return get_adapter(true);  }
 };
 
 #endif // SHARE_VM_CI_CIMETHODHANDLE_HPP
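
With the extra _caller and _profile fields, a caller is expected to populate the handle before requesting an adapter; get_adapter then fakes up MDO maturity from the caller's profile. A hypothetical call sequence showing the intended order (the helper name and locals are illustrative only):

    // Hypothetical helper; assumes the ciMethodHandle plus the callee, caller
    // and call profile are already known to the compiler front end.
    ciMethod* adapter_for(ciMethodHandle* mh,
                          ciMethod*       callee,
                          ciMethod*       caller,
                          ciCallProfile   profile) {
      mh->set_callee(callee);
      mh->set_caller(caller);
      mh->set_call_profile(profile);
      // Returns NULL when the MethodType check in get_adapter_impl fails.
      return mh->get_method_handle_adapter();
    }
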
--- a/src/share/vm/ci/ciObjArrayKlass.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciObjArrayKlass.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -93,6 +93,7 @@
       // element klass by name.
       _element_klass = CURRENT_THREAD_ENV->get_klass_by_name_impl(
                           this,
+                          constantPoolHandle(),
                           construct_array_name(base_element_klass()->name(),
                                                dimension() - 1),
                           false);
--- a/src/share/vm/ci/ciObject.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciObject.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -201,14 +201,24 @@
 // ciObject::can_be_constant
 bool ciObject::can_be_constant() {
   if (ScavengeRootsInCode >= 1)  return true;  // now everybody can encode as a constant
-  return handle() == NULL || !is_scavengable();
+  return handle() == NULL || is_perm();
 }
 
 // ------------------------------------------------------------------
 // ciObject::should_be_constant()
 bool ciObject::should_be_constant() {
   if (ScavengeRootsInCode >= 2)  return true;  // force everybody to be a constant
-  return handle() == NULL || !is_scavengable();
+  if (!JavaObjectsInPerm && !is_null_object()) {
+    // We want Strings and Classes to be embeddable by default since
+    // they used to be in the perm world.  Not all Strings used to be
+    // embeddable but there's no easy way to distinguish the interned
+    // from the regular ones, so just treat them all that way.
+    ciEnv* env = CURRENT_ENV;
+    if (klass() == env->String_klass() || klass() == env->Class_klass()) {
+      return true;
+    }
+  }
+  return handle() == NULL || is_perm();
 }
 
 
--- a/src/share/vm/ci/ciObject.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciObject.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -116,7 +116,7 @@
   int hash();
 
   // Tells if this oop has an encoding as a constant.
-  // True if is_scavengable is false.
+  // True if is_perm is true.
   // Also true if ScavengeRootsInCode is non-zero.
   // If it does not have an encoding, the compiler is responsible for
   // making other arrangements for dealing with the oop.
@@ -124,7 +124,7 @@
   bool can_be_constant();
 
   // Tells if this oop should be made a constant.
-  // True if is_scavengable is false or ScavengeRootsInCode > 1.
+  // True if is_perm is true or ScavengeRootsInCode > 1.
   bool should_be_constant();
 
   // Is this object guaranteed to be in the permanent part of the heap?
--- a/src/share/vm/ci/ciSignature.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciSignature.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -35,7 +35,7 @@
 
 // ------------------------------------------------------------------
 // ciSignature::ciSignature
-ciSignature::ciSignature(ciKlass* accessing_klass, ciSymbol* symbol) {
+ciSignature::ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* symbol) {
   ASSERT_IN_VM;
   EXCEPTION_CONTEXT;
   _accessing_klass = accessing_klass;
@@ -64,7 +64,7 @@
         CLEAR_PENDING_EXCEPTION;
       } else {
         ciSymbol* klass_name = env->get_symbol(name);
-        type = env->get_klass_by_name_impl(_accessing_klass, klass_name, false);
+        type = env->get_klass_by_name_impl(_accessing_klass, cpool, klass_name, false);
       }
     }
     _types->append(type);
--- a/src/share/vm/ci/ciSignature.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/ci/ciSignature.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -44,7 +44,7 @@
 
   friend class ciMethod;
 
-  ciSignature(ciKlass* accessing_klass, ciSymbol* signature);
+  ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature);
 
   void get_all_klasses();
 
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2196,11 +2196,12 @@
                                               TRAPS) {
   typeArrayHandle nullHandle;
   int length = methods()->length();
-  // If JVMTI original method ordering is enabled we have to
+  // If JVMTI original method ordering or sharing is enabled, we have to
   // remember the original class file ordering.
   // We temporarily use the vtable_index field in the methodOop to store the
   // class file index, so we can read in after calling qsort.
-  if (JvmtiExport::can_maintain_original_method_order()) {
+  // Put the method ordering in the shared archive.
+  if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
     for (int index = 0; index < length; index++) {
       methodOop m = methodOop(methods->obj_at(index));
       assert(!m->valid_vtable_index(), "vtable index should not be set");
@@ -2214,8 +2215,9 @@
                               methods_parameter_annotations(),
                               methods_default_annotations());
 
-  // If JVMTI original method ordering is enabled construct int array remembering the original ordering
-  if (JvmtiExport::can_maintain_original_method_order()) {
+  // If JVMTI original method ordering or sharing is enabled, construct an int
+  // array remembering the original ordering.
+  if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
     typeArrayOop new_ordering = oopFactory::new_permanent_intArray(length, CHECK_(nullHandle));
     typeArrayHandle method_ordering(THREAD, new_ordering);
     for (int index = 0; index < length; index++) {
--- a/src/share/vm/classfile/javaClasses.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1357,7 +1357,7 @@
 };
 
 
-void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) {
+void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS) {
   if (!StackTraceInThrowable) return;
   ResourceMark rm(THREAD);
 
@@ -1374,6 +1374,16 @@
   JavaThread* thread = (JavaThread*)THREAD;
   BacktraceBuilder bt(CHECK);
 
+  // If there is no Java frame, just return the method that was being called
+  // with bci 0.
+  if (!thread->has_last_Java_frame()) {
+    if (max_depth >= 1 && method() != NULL) {
+      bt.push(method(), 0, CHECK);
+      set_backtrace(throwable(), bt.backtrace());
+    }
+    return;
+  }
+
   // Instead of using vframe directly, this version of fill_in_stack_trace
   // basically handles everything by hand. This significantly improved the
   // speed of this method call up to 28.5% on Solaris sparc. 27.1% on Windows.
@@ -1477,7 +1487,7 @@
   set_backtrace(throwable(), bt.backtrace());
 }
 
-void java_lang_Throwable::fill_in_stack_trace(Handle throwable) {
+void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle method) {
   // No-op if stack trace is disabled
   if (!StackTraceInThrowable) {
     return;
@@ -1491,7 +1501,7 @@
   PRESERVE_EXCEPTION_MARK;
 
   JavaThread* thread = JavaThread::active();
-  fill_in_stack_trace(throwable, thread);
+  fill_in_stack_trace(throwable, method, thread);
   // ignore exceptions thrown during stack trace filling
   CLEAR_PENDING_EXCEPTION;
 }
@@ -2564,6 +2574,18 @@
   return name;
 }
 
+bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) {
+  if (rtype(mt1) != rtype(mt2))
+    return false;
+  if (ptype_count(mt1) != ptype_count(mt2))
+    return false;
+  for (int i = ptype_count(mt1) - 1; i >= 0; i--) {
+    if (ptype(mt1, i) != ptype(mt2, i))
+      return false;
+  }
+  return true;
+}
+
 oop java_lang_invoke_MethodType::rtype(oop mt) {
   assert(is_instance(mt), "must be a MethodType");
   return mt->obj_field(_rtype_offset);
@@ -2592,6 +2614,7 @@
 // Support for java_lang_invoke_MethodTypeForm
 
 int java_lang_invoke_MethodTypeForm::_vmslots_offset;
+int java_lang_invoke_MethodTypeForm::_vmlayout_offset;
 int java_lang_invoke_MethodTypeForm::_erasedType_offset;
 int java_lang_invoke_MethodTypeForm::_genericInvoker_offset;
 
@@ -2599,6 +2622,7 @@
   klassOop k = SystemDictionary::MethodTypeForm_klass();
   if (k != NULL) {
     compute_optional_offset(_vmslots_offset,    k, vmSymbols::vmslots_name(),    vmSymbols::int_signature(), true);
+    compute_optional_offset(_vmlayout_offset,   k, vmSymbols::vmlayout_name(),   vmSymbols::object_signature());
     compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_lang_invoke_MethodType_signature(), true);
     compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_lang_invoke_MethodHandle_signature(), true);
     if (_genericInvoker_offset == 0)  _genericInvoker_offset = -1;  // set to explicit "empty" value
@@ -2607,9 +2631,31 @@
 
 int java_lang_invoke_MethodTypeForm::vmslots(oop mtform) {
   assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
+  assert(_vmslots_offset > 0, "");
   return mtform->int_field(_vmslots_offset);
 }
 
+oop java_lang_invoke_MethodTypeForm::vmlayout(oop mtform) {
+  assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
+  assert(_vmlayout_offset > 0, "");
+  return mtform->obj_field(_vmlayout_offset);
+}
+
+oop java_lang_invoke_MethodTypeForm::init_vmlayout(oop mtform, oop cookie) {
+  assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
+  oop previous = vmlayout(mtform);
+  if (previous != NULL) {
+    return previous;  // someone else beat us to it
+  }
+  HeapWord* cookie_addr = (HeapWord*) mtform->obj_field_addr<oop>(_vmlayout_offset);
+  OrderAccess::storestore();  // make sure our copy is fully committed
+  previous = oopDesc::atomic_compare_exchange_oop(cookie, cookie_addr, previous);
+  if (previous != NULL) {
+    return previous;  // someone else beat us to it
+  }
+  return cookie;
+}
+
 oop java_lang_invoke_MethodTypeForm::erasedType(oop mtform) {
   assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
   return mtform->obj_field(_erasedType_offset);
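
init_vmlayout above is a publish-once slot: a store-store barrier makes the cookie's contents visible, then an oop compare-and-swap installs it, and whichever value wins the race is what every thread returns. A generic sketch of that idiom in portable C++, with std::atomic standing in for OrderAccess and atomic_compare_exchange_oop:

    #include <atomic>

    // Publish-once: install `candidate` only if the slot is still empty and
    // always return the value that ended up being visible to all threads.
    template <typename T>
    T* publish_once(std::atomic<T*>& slot, T* candidate) {
      T* expected = nullptr;
      // Release ordering on success plays the role of the storestore barrier
      // that precedes the CAS in init_vmlayout.
      if (slot.compare_exchange_strong(expected, candidate,
                                       std::memory_order_release,
                                       std::memory_order_acquire)) {
        return candidate;   // we won the race
      }
      return expected;      // someone else beat us to it
    }
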
--- a/src/share/vm/classfile/javaClasses.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -440,8 +440,8 @@
   static void fill_in_stack_trace_of_preallocated_backtrace(Handle throwable);
 
   // Fill in current stack trace, can cause GC
-  static void fill_in_stack_trace(Handle throwable, TRAPS);
-  static void fill_in_stack_trace(Handle throwable);
+  static void fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS);
+  static void fill_in_stack_trace(Handle throwable, methodHandle method = methodHandle());
   // Programmatic access to stack trace
   static oop  get_stack_trace_element(oop throwable, int index, TRAPS);
   static int  get_stack_trace_depth(oop throwable, TRAPS);
@@ -949,18 +949,19 @@
     OP_CHECK_CAST    = 0x2, // ref-to-ref conversion; requires a Class argument
     OP_PRIM_TO_PRIM  = 0x3, // converts from one primitive to another
     OP_REF_TO_PRIM   = 0x4, // unboxes a wrapper to produce a primitive
-    OP_PRIM_TO_REF   = 0x5, // boxes a primitive into a wrapper (NYI)
+    OP_PRIM_TO_REF   = 0x5, // boxes a primitive into a wrapper
     OP_SWAP_ARGS     = 0x6, // swap arguments (vminfo is 2nd arg)
     OP_ROT_ARGS      = 0x7, // rotate arguments (vminfo is displaced arg)
     OP_DUP_ARGS      = 0x8, // duplicates one or more arguments (at TOS)
     OP_DROP_ARGS     = 0x9, // remove one or more argument slots
-    OP_COLLECT_ARGS  = 0xA, // combine one or more arguments into a varargs (NYI)
+    OP_COLLECT_ARGS  = 0xA, // combine arguments using an auxiliary function
     OP_SPREAD_ARGS   = 0xB, // expand in place a varargs array (of known size)
-    OP_FLYBY         = 0xC, // operate first on reified argument list (NYI)
-    OP_RICOCHET      = 0xD, // run an adapter chain on the return value (NYI)
+    OP_FOLD_ARGS     = 0xC, // combine but do not remove arguments; prepend result
+    //OP_UNUSED_13   = 0xD, // unused code, perhaps for reified argument lists
     CONV_OP_LIMIT    = 0xE, // limit of CONV_OP enumeration
 
     CONV_OP_MASK     = 0xF00, // this nybble contains the conversion op field
+    CONV_TYPE_MASK   = 0x0F,  // fits T_ADDRESS and below
     CONV_VMINFO_MASK = 0x0FF, // LSB is reserved for JVM use
     CONV_VMINFO_SHIFT     =  0, // position of bits in CONV_VMINFO_MASK
     CONV_OP_SHIFT         =  8, // position of bits in CONV_OP_MASK
@@ -1078,6 +1079,8 @@
     return obj != NULL && obj->klass() == SystemDictionary::MethodType_klass();
   }
 
+  static bool equals(oop mt1, oop mt2);
+
   // Accessors for code generation:
   static int rtype_offset_in_bytes()            { return _rtype_offset; }
   static int ptypes_offset_in_bytes()           { return _ptypes_offset; }
@@ -1089,6 +1092,7 @@
 
  private:
   static int _vmslots_offset;           // number of argument slots needed
+  static int _vmlayout_offset;          // object describing internal calling sequence
   static int _erasedType_offset;        // erasedType = canonical MethodType
   static int _genericInvoker_offset;    // genericInvoker = adapter for invokeGeneric
 
@@ -1100,8 +1104,12 @@
   static oop            erasedType(oop mtform);
   static oop            genericInvoker(oop mtform);
 
+  static oop            vmlayout(oop mtform);
+  static oop       init_vmlayout(oop mtform, oop cookie);
+
   // Accessors for code generation:
   static int vmslots_offset_in_bytes()          { return _vmslots_offset; }
+  static int vmlayout_offset_in_bytes()         { return _vmlayout_offset; }
   static int erasedType_offset_in_bytes()       { return _erasedType_offset; }
   static int genericInvoker_offset_in_bytes()   { return _genericInvoker_offset; }
 };
--- a/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1256,6 +1256,16 @@
         methodHandle m(THREAD, methodOop(methods->obj_at(index2)));
         m()->link_method(m, CHECK_(nh));
       }
+      if (JvmtiExport::has_redefined_a_class()) {
+        // Reinitialize the vtable because RedefineClasses may have changed some
+        // entries in this vtable for super classes, so the CDS vtable might
+        // point to old or obsolete entries.  RedefineClasses doesn't fix up
+        // vtables in the shared system dictionary, only the main one.
+        // It also redefines the itable, so fix that too.
+        ResourceMark rm(THREAD);
+        ik->vtable()->initialize_vtable(false, CHECK_(nh));
+        ik->itable()->initialize_itable(false, CHECK_(nh));
+      }
     }
 
     if (TraceClassLoading) {
@@ -2354,8 +2364,17 @@
       spe = invoke_method_table()->find_entry(index, hash, signature, name_id);
       if (spe == NULL)
         spe = invoke_method_table()->add_entry(index, hash, signature, name_id);
-      if (spe->property_oop() == NULL)
+      if (spe->property_oop() == NULL) {
         spe->set_property_oop(m());
+        // Link m to its method type, if it is suitably generic.
+        oop mtform = java_lang_invoke_MethodType::form(mt());
+        if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform)
+            // vmlayout must be an invokeExact:
+            && name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name)
+            && java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) {
+          java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m());
+        }
+      }
     } else {
       non_cached_result = m;
     }
--- a/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -152,6 +152,7 @@
   template(DirectMethodHandle_klass,     java_lang_invoke_DirectMethodHandle, Pre_JSR292) \
   template(MethodType_klass,             java_lang_invoke_MethodType,       Pre_JSR292) \
   template(MethodTypeForm_klass,         java_lang_invoke_MethodTypeForm,   Pre_JSR292) \
+  template(BootstrapMethodError_klass,   java_lang_BootstrapMethodError, Pre_JSR292) \
   template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
   template(CallSite_klass,               java_lang_invoke_CallSite,         Pre_JSR292) \
   /* Note: MethodHandle must be first, and CallSite last in group */          \
--- a/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -148,6 +148,7 @@
   template(java_lang_InstantiationException,          "java/lang/InstantiationException")         \
   template(java_lang_InstantiationError,              "java/lang/InstantiationError")             \
   template(java_lang_InterruptedException,            "java/lang/InterruptedException")           \
+  template(java_lang_BootstrapMethodError,            "java/lang/BootstrapMethodError")           \
   template(java_lang_LinkageError,                    "java/lang/LinkageError")                   \
   template(java_lang_NegativeArraySizeException,      "java/lang/NegativeArraySizeException")     \
   template(java_lang_NoSuchFieldException,            "java/lang/NoSuchFieldException")           \
@@ -420,6 +421,7 @@
   template(vmtarget_name,                             "vmtarget")                                 \
   template(vmentry_name,                              "vmentry")                                  \
   template(vmslots_name,                              "vmslots")                                  \
+  template(vmlayout_name,                             "vmlayout")                                 \
   template(vmindex_name,                              "vmindex")                                  \
   template(vmargslot_name,                            "vmargslot")                                \
   template(flags_name,                                "flags")                                    \
@@ -472,6 +474,7 @@
   template(void_signature,                            "V")                                        \
   template(byte_array_signature,                      "[B")                                       \
   template(char_array_signature,                      "[C")                                       \
+  template(int_array_signature,                       "[I")                                       \
   template(object_void_signature,                     "(Ljava/lang/Object;)V")                    \
   template(object_int_signature,                      "(Ljava/lang/Object;)I")                    \
   template(object_boolean_signature,                  "(Ljava/lang/Object;)Z")                    \
@@ -550,6 +553,13 @@
   template(sun_management_ManagementFactory,           "sun/management/ManagementFactory")                        \
   template(sun_management_Sensor,                      "sun/management/Sensor")                                   \
   template(sun_management_Agent,                       "sun/management/Agent")                                    \
+  template(sun_management_GarbageCollectorImpl,        "sun/management/GarbageCollectorImpl")                     \
+  template(getGcInfoBuilder_name,                      "getGcInfoBuilder")                                        \
+  template(getGcInfoBuilder_signature,                 "()Lsun/management/GcInfoBuilder;")                        \
+  template(com_sun_management_GcInfo,                  "com/sun/management/GcInfo")                               \
+  template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \
+  template(createGCNotification_name,                  "createGCNotification")                                    \
+  template(createGCNotification_signature,             "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \
   template(createMemoryPoolMBean_name,                 "createMemoryPoolMBean")                                   \
   template(createMemoryManagerMBean_name,              "createMemoryManagerMBean")                                \
   template(createGarbageCollectorMBean_name,           "createGarbageCollectorMBean")                             \
@@ -567,6 +577,7 @@
   template(java_lang_management_MemoryPoolMXBean,      "java/lang/management/MemoryPoolMXBean")                   \
   template(java_lang_management_MemoryManagerMXBean,   "java/lang/management/MemoryManagerMXBean")                \
   template(java_lang_management_GarbageCollectorMXBean,"java/lang/management/GarbageCollectorMXBean")             \
+  template(gcInfoBuilder_name,                         "gcInfoBuilder")                                           \
   template(createMemoryPool_name,                      "createMemoryPool")                                        \
   template(createMemoryManager_name,                   "createMemoryManager")                                     \
   template(createGarbageCollector_name,                "createGarbageCollector")                                  \
@@ -757,6 +768,10 @@
   do_intrinsic(_checkIndex,               java_nio_Buffer,        checkIndex_name, int_int_signature,            F_R)   \
    do_name(     checkIndex_name,                                 "checkIndex")                                          \
                                                                                                                         \
+  /* java/lang/ref/Reference */                                                                                         \
+  do_intrinsic(_Reference_get,            java_lang_ref_Reference, get_name,    void_object_signature, F_R)             \
+                                                                                                                        \
+                                                                                                                        \
   do_class(sun_misc_AtomicLongCSImpl,     "sun/misc/AtomicLongCSImpl")                                                  \
   do_intrinsic(_get_AtomicLong,           sun_misc_AtomicLongCSImpl, get_name, void_long_signature,              F_R)   \
   /*   (symbols get_name and void_long_signature defined above) */                                                      \
--- a/src/share/vm/code/codeBlob.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/code/codeBlob.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -152,6 +152,32 @@
 }
 
 
+void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
+  // Do not hold the CodeCache lock during name formatting.
+  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
+
+  if (stub != NULL) {
+    char stub_id[256];
+    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
+    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
+    if (PrintStubCode) {
+      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
+      Disassembler::decode(stub->code_begin(), stub->code_end());
+    }
+    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
+
+    if (JvmtiExport::should_post_dynamic_code_generated()) {
+      const char* stub_name = name2;
+      if (name2[0] == '\0')  stub_name = name1;
+      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
+    }
+  }
+
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+}
+
+
 void CodeBlob::flush() {
   if (_oop_maps) {
     FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
@@ -312,23 +338,7 @@
     stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
   }
 
-  // Do not hold the CodeCache lock during name formatting.
-  if (stub != NULL) {
-    char stub_id[256];
-    jio_snprintf(stub_id, sizeof(stub_id), "RuntimeStub - %s", stub_name);
-    if (PrintStubCode) {
-      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub);
-      Disassembler::decode(stub->code_begin(), stub->code_end());
-    }
-    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
-
-    if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
-    }
-  }
-
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
+  trace_new_stub(stub, "RuntimeStub - ", stub_name);
 
   return stub;
 }
@@ -340,6 +350,50 @@
   return p;
 }
 
+// operator new shared by all singletons:
+void* SingletonBlob::operator new(size_t s, unsigned size) {
+  void* p = CodeCache::allocate(size);
+  if (!p) fatal("Initial size of CodeCache is too small");
+  return p;
+}
+
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of RicochetBlob
+
+RicochetBlob::RicochetBlob(
+  CodeBuffer* cb,
+  int         size,
+  int         bounce_offset,
+  int         exception_offset,
+  int         frame_size
+)
+: SingletonBlob("RicochetBlob", cb, sizeof(RicochetBlob), size, frame_size, (OopMapSet*) NULL)
+{
+  _bounce_offset = bounce_offset;
+  _exception_offset = exception_offset;
+}
+
+
+RicochetBlob* RicochetBlob::create(
+  CodeBuffer* cb,
+  int         bounce_offset,
+  int         exception_offset,
+  int         frame_size)
+{
+  RicochetBlob* blob = NULL;
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    unsigned int size = allocation_size(cb, sizeof(RicochetBlob));
+    blob = new (size) RicochetBlob(cb, size, bounce_offset, exception_offset, frame_size);
+  }
+
+  trace_new_stub(blob, "RicochetBlob");
+
+  return blob;
+}
+
 
 //----------------------------------------------------------------------------------------------------
 // Implementation of DeoptimizationBlob
@@ -386,34 +440,12 @@
                                          frame_size);
   }
 
-  // Do not hold the CodeCache lock during name formatting.
-  if (blob != NULL) {
-    char blob_id[256];
-    jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->code_begin());
-    if (PrintStubCode) {
-      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
-      Disassembler::decode(blob->code_begin(), blob->code_end());
-    }
-    Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
-
-    if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob", blob->code_begin(), blob->code_end());
-    }
-  }
-
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
+  trace_new_stub(blob, "DeoptimizationBlob");
 
   return blob;
 }
 
 
-void* DeoptimizationBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
-  if (!p) fatal("Initial size of CodeCache is too small");
-  return p;
-}
-
 //----------------------------------------------------------------------------------------------------
 // Implementation of UncommonTrapBlob
 
@@ -441,33 +473,12 @@
     blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
   }
 
-  // Do not hold the CodeCache lock during name formatting.
-  if (blob != NULL) {
-    char blob_id[256];
-    jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->code_begin());
-    if (PrintStubCode) {
-      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
-      Disassembler::decode(blob->code_begin(), blob->code_end());
-    }
-    Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
-
-    if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob", blob->code_begin(), blob->code_end());
-    }
-  }
-
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
+  trace_new_stub(blob, "UncommonTrapBlob");
 
   return blob;
 }
 
 
-void* UncommonTrapBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
-  if (!p) fatal("Initial size of CodeCache is too small");
-  return p;
-}
 #endif // COMPILER2
 
 
@@ -498,33 +509,12 @@
     blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
   }
 
-  // We do not need to hold the CodeCache lock during name formatting
-  if (blob != NULL) {
-    char blob_id[256];
-    jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->code_begin());
-    if (PrintStubCode) {
-      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
-      Disassembler::decode(blob->code_begin(), blob->code_end());
-    }
-    Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
-
-    if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated("ExceptionBlob", blob->code_begin(), blob->code_end());
-    }
-  }
-
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
+  trace_new_stub(blob, "ExceptionBlob");
 
   return blob;
 }
 
 
-void* ExceptionBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
-  if (!p) fatal("Initial size of CodeCache is too small");
-  return p;
-}
 #endif // COMPILER2
 
 
@@ -554,35 +544,12 @@
     blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
   }
 
-  // We do not need to hold the CodeCache lock during name formatting.
-  if (blob != NULL) {
-    char blob_id[256];
-    jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->code_begin());
-    if (PrintStubCode) {
-      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
-      Disassembler::decode(blob->code_begin(), blob->code_end());
-    }
-    Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
-
-    if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated("SafepointBlob", blob->code_begin(), blob->code_end());
-    }
-  }
-
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
+  trace_new_stub(blob, "SafepointBlob");
 
   return blob;
 }
 
 
-void* SafepointBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
-  if (!p) fatal("Initial size of CodeCache is too small");
-  return p;
-}
-
-
 //----------------------------------------------------------------------------------------------------
 // Verification and printing
 
--- a/src/share/vm/code/codeBlob.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/code/codeBlob.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -35,6 +35,7 @@
 // Suptypes are:
 //   nmethod            : Compiled Java methods (include method that calls to native code)
 //   RuntimeStub        : Call to VM runtime methods
+//   RicochetBlob       : Used for blocking MethodHandle adapters
 //   DeoptimizationBlob : Used for deoptimizatation
 //   ExceptionBlob      : Used for stack unrolling
 //   SafepointBlob      : Used to handle illegal instruction exceptions
@@ -95,12 +96,13 @@
   void flush();
 
   // Typing
-  virtual bool is_buffer_blob() const                 { return false; }
-  virtual bool is_nmethod() const                     { return false; }
-  virtual bool is_runtime_stub() const                { return false; }
-  virtual bool is_deoptimization_stub() const         { return false; }
-  virtual bool is_uncommon_trap_stub() const          { return false; }
-  virtual bool is_exception_stub() const              { return false; }
+  virtual bool is_buffer_blob() const            { return false; }
+  virtual bool is_nmethod() const                { return false; }
+  virtual bool is_runtime_stub() const           { return false; }
+  virtual bool is_ricochet_stub() const          { return false; }
+  virtual bool is_deoptimization_stub() const    { return false; }
+  virtual bool is_uncommon_trap_stub() const     { return false; }
+  virtual bool is_exception_stub() const         { return false; }
   virtual bool is_safepoint_stub() const              { return false; }
   virtual bool is_adapter_blob() const                { return false; }
   virtual bool is_method_handles_adapter_blob() const { return false; }
@@ -182,6 +184,9 @@
   virtual void print_on(outputStream* st) const;
   virtual void print_value_on(outputStream* st) const;
 
+  // Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService.
+  static void trace_new_stub(CodeBlob* blob, const char* name1, const char* name2 = "");
+
   // Print the comment associated with offset on stream, if there is one
   virtual void print_block_comment(outputStream* stream, address block_begin) {
     intptr_t offset = (intptr_t)(block_begin - code_begin());
@@ -318,7 +323,11 @@
 
 class SingletonBlob: public CodeBlob {
   friend class VMStructs;
-  public:
+
+ protected:
+  void* operator new(size_t s, unsigned size);
+
+ public:
    SingletonBlob(
      const char* name,
      CodeBuffer* cb,
@@ -341,6 +350,50 @@
 
 
 //----------------------------------------------------------------------------------------------------
+// RicochetBlob
+// Holds an arbitrary argument list indefinitely while Java code executes recursively.
+
+class RicochetBlob: public SingletonBlob {
+  friend class VMStructs;
+ private:
+
+  int _bounce_offset;
+  int _exception_offset;
+
+  // Creation support
+  RicochetBlob(
+    CodeBuffer* cb,
+    int         size,
+    int         bounce_offset,
+    int         exception_offset,
+    int         frame_size
+  );
+
+ public:
+  // Creation
+  static RicochetBlob* create(
+    CodeBuffer* cb,
+    int         bounce_offset,
+    int         exception_offset,
+    int         frame_size
+  );
+
+  // Typing
+  bool is_ricochet_stub() const { return true; }
+
+  // GC for args
+  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
+
+  address bounce_addr() const           { return code_begin() + _bounce_offset; }
+  address exception_addr() const        { return code_begin() + _exception_offset; }
+  bool returns_to_bounce_addr(address pc) const {
+    address bounce_pc = bounce_addr();
+    return (pc == bounce_pc || (pc + frame::pc_return_offset) == bounce_pc);
+  }
+};
+
+
+//----------------------------------------------------------------------------------------------------
 // DeoptimizationBlob
 
 class DeoptimizationBlob: public SingletonBlob {
@@ -368,8 +421,6 @@
     int         frame_size
   );
 
-  void* operator new(size_t s, unsigned size);
-
  public:
   // Creation
   static DeoptimizationBlob* create(
@@ -383,7 +434,6 @@
 
   // Typing
   bool is_deoptimization_stub() const { return true; }
-  const DeoptimizationBlob *as_deoptimization_stub() const { return this; }
   bool exception_address_is_unpack_entry(address pc) const {
     address unpack_pc = unpack();
     return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc);
@@ -444,8 +494,6 @@
     int         frame_size
   );
 
-  void* operator new(size_t s, unsigned size);
-
  public:
   // Creation
   static UncommonTrapBlob* create(
@@ -476,8 +524,6 @@
     int         frame_size
   );
 
-  void* operator new(size_t s, unsigned size);
-
  public:
   // Creation
   static ExceptionBlob* create(
@@ -509,8 +555,6 @@
     int         frame_size
   );
 
-  void* operator new(size_t s, unsigned size);
-
  public:
   // Creation
   static SafepointBlob* create(
--- a/src/share/vm/code/codeCache.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/code/codeCache.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -796,6 +796,7 @@
   int nmethodCount = 0;
   int runtimeStubCount = 0;
   int adapterCount = 0;
+  int ricochetStubCount = 0;
   int deoptimizationStubCount = 0;
   int uncommonTrapStubCount = 0;
   int bufferBlobCount = 0;
@@ -840,6 +841,8 @@
       }
     } else if (cb->is_runtime_stub()) {
       runtimeStubCount++;
+    } else if (cb->is_ricochet_stub()) {
+      ricochetStubCount++;
     } else if (cb->is_deoptimization_stub()) {
       deoptimizationStubCount++;
     } else if (cb->is_uncommon_trap_stub()) {
@@ -876,6 +879,7 @@
   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
   tty->print_cr("adapters: %d",adapterCount);
   tty->print_cr("buffer blobs: %d",bufferBlobCount);
+  tty->print_cr("ricochet_stubs: %d",ricochetStubCount);
   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
   tty->print_cr("\nnmethod size distribution (non-zombie java)");
--- a/src/share/vm/code/nmethod.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/code/nmethod.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1821,7 +1821,7 @@
   void maybe_print(oop* p) {
     if (_print_nm == NULL)  return;
     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
+    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
                   (intptr_t)(*p), (intptr_t)p);
     (*p)->print();
@@ -1843,7 +1843,9 @@
   if (!method()->is_native()) {
     SimpleScopeDesc ssd(this, fr.pc());
     Bytecode_invoke call(ssd.method(), ssd.bci());
-    bool has_receiver = call.has_receiver();
+    // compiled invokedynamic call sites have an implicit receiver at
+    // resolution time, so make sure it gets GC'ed.
+    bool has_receiver = !call.is_invokestatic();
     Symbol* signature = call.signature();
     fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
   }
@@ -2322,7 +2324,7 @@
       _nm->print_nmethod(true);
       _ok = false;
     }
-    tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
                   (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
     (*p)->print();
   }
@@ -2335,7 +2337,7 @@
     DebugScavengeRoot debug_scavenge_root(this);
     oops_do(&debug_scavenge_root);
     if (!debug_scavenge_root.ok())
-      fatal("found an unadvertised bad non-perm oop in the code cache");
+      fatal("found an unadvertised bad scavengable oop in the code cache");
   }
   assert(scavenge_root_not_marked(), "");
 }
--- a/src/share/vm/code/nmethod.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/code/nmethod.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -109,7 +109,7 @@
 class nmethod : public CodeBlob {
   friend class VMStructs;
   friend class NMethodSweeper;
-  friend class CodeCache;  // non-perm oops
+  friend class CodeCache;  // scavengable oops
  private:
   // Shared fields for all nmethod's
   methodOop _method;
@@ -466,17 +466,17 @@
   bool is_at_poll_return(address pc);
   bool is_at_poll_or_poll_return(address pc);
 
-  // Non-perm oop support
+  // Scavengable oop support
   bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
  protected:
-  enum { npl_on_list = 0x01, npl_marked = 0x10 };
-  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
+  enum { sl_on_list = 0x01, sl_marked = 0x10 };
+  void  set_on_scavenge_root_list()                    { _scavenge_root_state = sl_on_list; }
   void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
   // assertion-checking and pruning logic uses the bits of _scavenge_root_state
 #ifndef PRODUCT
-  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
-  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
-  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ npl_on_list) == 0; }
+  void  set_scavenge_root_marked()                     { _scavenge_root_state |= sl_marked; }
+  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~sl_marked; }
+  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ sl_on_list) == 0; }
   // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
 #endif //PRODUCT
   nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
--- a/src/share/vm/code/pcDesc.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/code/pcDesc.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -44,7 +44,7 @@
 void PcDesc::print(nmethod* code) {
 #ifndef PRODUCT
   ResourceMark rm;
-  tty->print_cr("PcDesc(pc=0x%lx offset=%x):", real_pc(code), pc_offset());
+  tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags.bits);
 
   if (scope_decode_offset() == DebugInformationRecorder::serialized_null) {
     return;
--- a/src/share/vm/compiler/compileBroker.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -301,12 +301,23 @@
   st->print("%7d ", (int) st->time_stamp().milliseconds());  // print timestamp
   st->print("%4d ", compile_id);    // print compilation number
 
+  // For unloaded methods the transition to zombie occurs after the
+  // method is cleared so it's impossible to report accurate
+  // information for that case.
+  bool is_synchronized = false;
+  bool has_exception_handler = false;
+  bool is_native = false;
+  if (method != NULL) {
+    is_synchronized       = method->is_synchronized();
+    has_exception_handler = method->has_exception_handler();
+    is_native             = method->is_native();
+  }
   // method attributes
   const char compile_type   = is_osr_method                   ? '%' : ' ';
-  const char sync_char      = method->is_synchronized()       ? 's' : ' ';
-  const char exception_char = method->has_exception_handler() ? '!' : ' ';
+  const char sync_char      = is_synchronized                 ? 's' : ' ';
+  const char exception_char = has_exception_handler           ? '!' : ' ';
   const char blocking_char  = is_blocking                     ? 'b' : ' ';
-  const char native_char    = method->is_native()             ? 'n' : ' ';
+  const char native_char    = is_native                       ? 'n' : ' ';
 
   // print method attributes
   st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
@@ -317,11 +328,15 @@
   }
   st->print("     ");  // more indent
 
-  method->print_short_name(st);
-  if (is_osr_method) {
-    st->print(" @ %d", osr_bci);
+  if (method == NULL) {
+    st->print("(method)");
+  } else {
+    method->print_short_name(st);
+    if (is_osr_method) {
+      st->print(" @ %d", osr_bci);
+    }
+    st->print(" (%d bytes)", method->code_size());
   }
-  st->print(" (%d bytes)", method->code_size());
 
   if (msg != NULL) {
     st->print("   %s", msg);
@@ -1053,6 +1068,15 @@
     return;
   }
 
+  // If the requesting thread is holding the pending list lock
+  // then we just return. We can't risk blocking while holding
+  // the pending list lock or a 3-way deadlock may occur
+  // between the reference handler thread, a GC (instigated
+  // by a compiler thread), and compiled method registration.
+  if (instanceRefKlass::owns_pending_list_lock(JavaThread::current())) {
+    return;
+  }
+
   // Outputs from the following MutexLocker block:
   CompileTask* task     = NULL;
   bool         blocking = false;
@@ -1388,17 +1412,8 @@
 // Should the current thread be blocked until this compilation request
 // has been fulfilled?
 bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
-  if (!BackgroundCompilation) {
-    Symbol* class_name = method->method_holder()->klass_part()->name();
-    if (class_name->starts_with("java/lang/ref/Reference", 23)) {
-      // The reference handler thread can dead lock with the GC if compilation is blocking,
-      // so we avoid blocking compiles for anything in the java.lang.ref.Reference class,
-      // including inner classes such as ReferenceHandler.
-      return false;
-    }
-    return true;
-  }
-  return false;
+  assert(!instanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
+  return !BackgroundCompilation;
 }
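
The two compileBroker.cpp hunks above replace the old name-based special case
(no blocking compiles for java/lang/ref/Reference and its inner classes) with a
direct ownership test: if the requesting thread holds the pending-list lock the
compile request is simply dropped, so the thread can never block inside the
compiler while the reference handler and a GC might be waiting on that same
lock. Below is a minimal standalone sketch of this guard pattern, using
hypothetical names (OwnershipCheck, request_compile) rather than the HotSpot
types; it is an illustration of the idea, not the broker code itself.

    #include <functional>
    #include <iostream>

    // Hypothetical predicate: does the calling thread currently hold the
    // critical lock (the pending-list lock in the patch)?
    using OwnershipCheck = std::function<bool()>;

    // Returns true if the (possibly blocking) request was submitted, false if
    // it was skipped to avoid the 3-way deadlock described in the hunk above.
    bool request_compile(bool blocking, const OwnershipCheck& owns_critical_lock) {
      if (owns_critical_lock()) {
        // Blocking here could deadlock: the lock holder waits on the compiler,
        // the compiler triggers a GC, and the GC waits on the lock holder.
        return false;
      }
      // ... enqueue the compilation task; may block if 'blocking' is set ...
      std::cout << (blocking ? "blocking" : "non-blocking") << " compile queued\n";
      return true;
    }

    int main() {
      request_compile(true, [] { return true;  });   // lock holder: skipped
      request_compile(true, [] { return false; });   // ordinary thread: queued
      return 0;
    }

The choice made by the patch is to refuse the request outright rather than
downgrade it to a non-blocking one; presumably the method will simply be
requested again on a later invocation, once the lock has been released.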
 
 
--- a/src/share/vm/compiler/disassembler.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/compiler/disassembler.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -283,10 +283,10 @@
         st->print("Stub::%s", desc->name());
         if (desc->begin() != adr)
           st->print("%+d 0x%p",adr - desc->begin(), adr);
-        else if (WizardMode) st->print(" " INTPTR_FORMAT, adr);
+        else if (WizardMode) st->print(" " PTR_FORMAT, adr);
         return;
       }
-      st->print("Stub::<unknown> " INTPTR_FORMAT, adr);
+      st->print("Stub::<unknown> " PTR_FORMAT, adr);
       return;
     }
 
@@ -314,8 +314,8 @@
     }
   }
 
-  // Fall through to a simple numeral.
-  st->print(INTPTR_FORMAT, (intptr_t)adr);
+  // Fall through to a simple (hexadecimal) numeral.
+  st->print(PTR_FORMAT, adr);
 }
 
 void decode_env::print_insn_labels() {
@@ -326,7 +326,7 @@
     cb->print_block_comment(st, p);
   }
   if (_print_pc) {
-    st->print("  " INTPTR_FORMAT ": ", (intptr_t) p);
+    st->print("  " PTR_FORMAT ": ", p);
   }
 }
 
@@ -432,7 +432,7 @@
 void Disassembler::decode(CodeBlob* cb, outputStream* st) {
   if (!load_library())  return;
   decode_env env(cb, st);
-  env.output()->print_cr("Decoding CodeBlob " INTPTR_FORMAT, cb);
+  env.output()->print_cr("Decoding CodeBlob " PTR_FORMAT, cb);
   env.decode_instructions(cb->code_begin(), cb->code_end());
 }
 
@@ -446,7 +446,7 @@
 void Disassembler::decode(nmethod* nm, outputStream* st) {
   if (!load_library())  return;
   decode_env env(nm, st);
-  env.output()->print_cr("Decoding compiled method " INTPTR_FORMAT ":", nm);
+  env.output()->print_cr("Decoding compiled method " PTR_FORMAT ":", nm);
   env.output()->print_cr("Code:");
 
 #ifdef SHARK
@@ -478,9 +478,9 @@
     int offset = 0;
     for (address p = nm->consts_begin(); p < nm->consts_end(); p += 4, offset += 4) {
       if ((offset % 8) == 0) {
-        env.output()->print_cr("  " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT "   " PTR64_FORMAT, (intptr_t) p, offset, *((int32_t*) p), *((int64_t*) p));
+        env.output()->print_cr("  " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT "   " PTR64_FORMAT, p, offset, *((int32_t*) p), *((int64_t*) p));
       } else {
-        env.output()->print_cr("  " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT,                    (intptr_t) p, offset, *((int32_t*) p));
+        env.output()->print_cr("  " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT,                    p, offset, *((int32_t*) p));
       }
     }
   }
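
The disassembler.cpp hunks above switch address output from INTPTR_FORMAT plus
an explicit (intptr_t) cast to PTR_FORMAT, so code addresses are printed
directly as hexadecimal pointers. Outside HotSpot the analogous portable tools
are %p or the <cinttypes> width macros; the following small sketch is only an
illustration of that formatting idea, not HotSpot code.

    #include <cinttypes>
    #include <cstdio>

    static void print_addr(const void* adr) {
      // %p: the implementation-defined pointer rendering.
      std::printf("%p\n", adr);
      // Explicit fixed-width hexadecimal, closer in spirit to PTR_FORMAT output.
      std::printf("0x%016" PRIxPTR "\n", (std::uintptr_t) adr);
    }

    int main() {
      int x = 0;
      print_addr(&x);
      return 0;
    }
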
--- a/src/share/vm/compiler/oopMap.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/compiler/oopMap.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -638,7 +638,9 @@
     assert(*derived_loc != (oop)base_loc, "location already added");
     assert(_list != NULL, "list must exist");
     intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
-    assert(offset >= -1000000, "wrong derived pointer info");
+    // This assert is invalid because derived pointers can be
+    // arbitrarily far away from their base.
+    // assert(offset >= -1000000, "wrong derived pointer info");
 
     if (TraceDerivedPointers) {
       tty->print_cr(
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1963,10 +1963,21 @@
 // Iteration support, mostly delegated from a CMS generation
 
 void CompactibleFreeListSpace::save_marks() {
-  // mark the "end" of the used space at the time of this call;
+  assert(Thread::current()->is_VM_thread(),
+         "Global variable should only be set when single-threaded");
+  // Mark the "end" of the used space at the time of this call;
   // note, however, that promoted objects from this point
   // on are tracked in the _promoInfo below.
   set_saved_mark_word(unallocated_block());
+#ifdef ASSERT
+  // Check the sanity of save_marks() etc.
+  MemRegion ur    = used_region();
+  MemRegion urasm = used_region_at_save_marks();
+  assert(ur.contains(urasm),
+         err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
+                 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
+                 ur.start(), ur.end(), urasm.start(), urasm.end()));
+#endif
   // inform allocator that promotions should be tracked.
   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
   _promoInfo.startTrackingPromotions();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -407,6 +407,11 @@
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
+                             "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
+                             _sweep_limit, bottom(), end());
+    }
   }
   NOT_PRODUCT(
     void clear_sweep_limit() { _sweep_limit = NULL; }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2026,7 +2026,7 @@
   }
 
   {
-    TraceCMSMemoryManagerStats();
+    TraceCMSMemoryManagerStats tmms(gch->gc_cause());
   }
   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
     ref_processor(), clear_all_soft_refs);
@@ -3189,10 +3189,9 @@
 }
 
 void CMSCollector::setup_cms_unloading_and_verification_state() {
-  const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
+  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                              || VerifyBeforeExit;
-  const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
-                             |   SharedHeap::SO_CodeCache;
+  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
 
   if (should_unload_classes()) {   // Should unload classes this cycle
     remove_root_scanning_option(rso);  // Shrink the root set appropriately
@@ -3480,7 +3479,7 @@
 void CMSCollector::checkpointRootsInitial(bool asynch) {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
-  TraceCMSMemoryManagerStats tms(_collectorState);
+  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
 
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
@@ -4859,7 +4858,8 @@
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
-  TraceCMSMemoryManagerStats tms(_collectorState);
+  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
+
   verify_work_stacks_empty();
   verify_overflow_empty();
 
@@ -5994,7 +5994,7 @@
   verify_work_stacks_empty();
   verify_overflow_empty();
   increment_sweep_count();
-  TraceCMSMemoryManagerStats tms(_collectorState);
+  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
 
   _inter_sweep_timer.stop();
   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
@@ -7888,60 +7888,64 @@
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("\n====================\nStarting new sweep\n");
-  }
-}
-
-// We need this destructor to reclaim any space at the end
-// of the space, which do_blk below may not yet have added back to
-// the free lists.
+    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
+                        _limit);
+  }
+}
+
+void SweepClosure::print_on(outputStream* st) const {
+  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                _sp->bottom(), _sp->end());
+  tty->print_cr("_limit = " PTR_FORMAT, _limit);
+  tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
+  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
+  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
+                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
+}
+
+#ifndef PRODUCT
+// Assertion checking only:  no useful work in product mode --
+// however, if any of the flags below become product flags,
+// you may need to review this code to see if it needs to be
+// enabled in product mode.
 SweepClosure::~SweepClosure() {
   assert_lock_strong(_freelistLock);
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
-  // Flush any remaining coterminal free run as a single
-  // coalesced chunk to the appropriate free list.
   if (inFreeRange()) {
-    assert(freeFinger() < _limit, "freeFinger points too high");
-    flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print("Sweep: last chunk: ");
-      gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
-                          freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
-    }
-  } // else nothing to flush
-  NOT_PRODUCT(
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
-                          SIZE_FORMAT " bytes",
-                 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
-      gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
-                             SIZE_FORMAT" bytes  "
-        "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
-        _numObjectsLive, _numWordsLive*sizeof(HeapWord),
-        _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
-      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
-        sizeof(HeapWord);
-      gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
-
-      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
-        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
-        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
-        gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
-        gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
-          indexListReturnedBytes);
-        gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
-          dictReturnedBytes);
-      }
-    }
-  )
-  // Now, in debug mode, just null out the sweep_limit
-  NOT_PRODUCT(_sp->clear_sweep_limit();)
+    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
+    print();
+    ShouldNotReachHere();
+  }
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
+                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
+    gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
+                           SIZE_FORMAT" bytes  "
+      "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
+      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
+      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
+    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
+                        * sizeof(HeapWord);
+    gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
+
+    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
+      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
+      size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
+      size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
+      gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
+      gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
+        indexListReturnedBytes);
+      gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
+        dictReturnedBytes);
+    }
+  }
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("end of sweep\n================\n");
-  }
-}
+    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
+                           _limit);
+  }
+}
+#endif  // PRODUCT
 
 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
     bool freeRangeInFreeLists) {
@@ -8001,15 +8005,17 @@
   // we started the sweep, it may no longer be one because heap expansion
   // may have caused us to coalesce the block ending at the address _limit
   // with a newly expanded chunk (this happens when _limit was set to the
-  // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
+  // previous _end of the space), so we may have stepped past _limit:
+  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
   if (addr >= _limit) { // we have swept up to or past the limit: finish up
     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
            "sweep _limit out of bounds");
     assert(addr < _sp->end(), "addr out of bounds");
-    // Flush any remaining coterminal free run as a single
+    // Flush any free range we might be holding as a single
     // coalesced chunk to the appropriate free list.
     if (inFreeRange()) {
-      assert(freeFinger() < _limit, "finger points too high");
+      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
+             err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
       flush_cur_free_chunk(freeFinger(),
                            pointer_delta(addr, freeFinger()));
       if (CMSTraceSweeper) {
@@ -8033,7 +8039,16 @@
     res = fc->size();
     do_already_free_chunk(fc);
     debug_only(_sp->verifyFreeLists());
-    assert(res == fc->size(), "Don't expect the size to change");
+    // If we flush the chunk at hand in lookahead_and_flush()
+    // and it's coalesced with a preceding chunk, then the
+    // process of "mangling" the payload of the coalesced block
+    // will cause erasure of the size information from the
+    // (erstwhile) header of all the coalesced blocks but the
+    // first, so the first disjunct in the assert will not hold
+    // in that specific case (in which case the second disjunct
+    // will hold).
+    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
+           "Otherwise the size info doesn't change at this step");
     NOT_PRODUCT(
       _numObjectsAlreadyFree++;
       _numWordsAlreadyFree += res;
@@ -8103,7 +8118,7 @@
 //
 
 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
-  size_t size = fc->size();
+  const size_t size = fc->size();
   // Chunks that cannot be coalesced are not in the
   // free lists.
   if (CMSTestInFreeList && !fc->cantCoalesce()) {
@@ -8112,7 +8127,7 @@
   }
   // a chunk that is already free, should not have been
   // marked in the bit map
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const addr = (HeapWord*) fc;
   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
   // Verify that the bit map has no bits marked between
   // addr and purported end of this block.
@@ -8149,7 +8164,7 @@
         }
       } else {
         // the midst of a free range, we are coalescing
-        debug_only(record_free_block_coalesced(fc);)
+        print_free_block_coalesced(fc);
         if (CMSTraceSweeper) {
           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
         }
@@ -8173,6 +8188,10 @@
         }
       }
     }
+    // Note that if the chunk is not coalescable (the else arm
+    // below), we unconditionally flush, without needing to do
+    // a "lookahead," as we do below.
+    if (inFreeRange()) lookahead_and_flush(fc, size);
   } else {
     // Code path common to both original and adaptive free lists.
 
@@ -8191,8 +8210,8 @@
   // This is a chunk of garbage.  It is not in any free list.
   // Add it to a free list or let it possibly be coalesced into
   // a larger chunk.
-  HeapWord* addr = (HeapWord*) fc;
-  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
+  HeapWord* const addr = (HeapWord*) fc;
+  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
 
   if (_sp->adaptive_freelists()) {
     // Verify that the bit map has no bits marked between
@@ -8205,7 +8224,6 @@
       // start of a new free range
       assert(size > 0, "A free range should have a size");
       initialize_free_range(addr, false);
-
     } else {
       // this will be swept up when we hit the end of the
       // free range
@@ -8235,6 +8253,9 @@
     // addr and purported end of just dead object.
     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
   }
+  assert(_limit >= addr + size,
+         "A freshly garbage chunk can't possibly straddle over _limit");
+  if (inFreeRange()) lookahead_and_flush(fc, size);
   return size;
 }
 
@@ -8284,8 +8305,8 @@
            (!_collector->should_unload_classes()
             || oop(addr)->is_parsable()),
            "Should be an initialized object");
-    // Note that there are objects used during class redefinition
-    // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
+    // Note that there are objects used during class redefinition,
+    // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
     // which are discarded with their is_conc_safe state still
     // false.  These object may be floating garbage so may be
     // seen here.  If they are floating garbage their size
@@ -8307,7 +8328,7 @@
                                                  size_t chunkSize) {
   // do_post_free_or_garbage_chunk() should only be called in the case
   // of the adaptive free list allocator.
-  bool fcInFreeLists = fc->isFree();
+  const bool fcInFreeLists = fc->isFree();
   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
   if (CMSTestInFreeList && fcInFreeLists) {
@@ -8318,11 +8339,11 @@
     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
   }
 
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const fc_addr = (HeapWord*) fc;
 
   bool coalesce;
-  size_t left  = pointer_delta(addr, freeFinger());
-  size_t right = chunkSize;
+  const size_t left  = pointer_delta(fc_addr, freeFinger());
+  const size_t right = chunkSize;
   switch (FLSCoalescePolicy) {
     // numeric value forms a coalition aggressiveness metric
     case 0:  { // never coalesce
@@ -8355,15 +8376,15 @@
   // If the chunk is in a free range and either we decided to coalesce above
   // or the chunk is near the large block at the end of the heap
   // (isNearLargestChunk() returns true), then coalesce this chunk.
-  bool doCoalesce = inFreeRange() &&
-    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
+  const bool doCoalesce = inFreeRange()
+                          && (coalesce || _g->isNearLargestChunk(fc_addr));
   if (doCoalesce) {
     // Coalesce the current free range on the left with the new
     // chunk on the right.  If either is on a free list,
     // it must be removed from the list and stashed in the closure.
     if (freeRangeInFreeLists()) {
-      FreeChunk* ffc = (FreeChunk*)freeFinger();
-      assert(ffc->size() == pointer_delta(addr, freeFinger()),
+      FreeChunk* const ffc = (FreeChunk*)freeFinger();
+      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
         "Size of free range is inconsistent with chunk size.");
       if (CMSTestInFreeList) {
         assert(_sp->verifyChunkInFreeLists(ffc),
@@ -8380,13 +8401,14 @@
       _sp->removeFreeChunkFromFreeLists(fc);
     }
     set_lastFreeRangeCoalesced(true);
+    print_free_block_coalesced(fc);
   } else {  // not in a free range and/or should not coalesce
     // Return the current free range and start a new one.
     if (inFreeRange()) {
       // In a free range but cannot coalesce with the right hand chunk.
       // Put the current free range into the free lists.
       flush_cur_free_chunk(freeFinger(),
-                           pointer_delta(addr, freeFinger()));
+                           pointer_delta(fc_addr, freeFinger()));
     }
     // Set up for new free range.  Pass along whether the right hand
     // chunk is in the free lists.
@@ -8394,6 +8416,42 @@
   }
 }
 
+// Lookahead flush:
+// If we are tracking a free range, and this is the last chunk that
+// we'll look at because its end crosses past _limit, we'll preemptively
+// flush it along with any free range we may be holding on to. Note that
+// this can be the case only for an already free or freshly garbage
+// chunk. If this block is an object, it can never straddle
+// over _limit. The "straddling" occurs when _limit is set at
+// the previous end of the space when this cycle started, and
+// a subsequent heap expansion caused the previously co-terminal
+// free block to be coalesced with the newly expanded portion,
+// thus rendering _limit a non-block-boundary making it dangerous
+// for the sweeper to step over and examine.
+void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
+  assert(inFreeRange(), "Should only be called if currently in a free range.");
+  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
+  assert(_sp->used_region().contains(eob - 1),
+         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
+                 eob, _sp->bottom(), _sp->end(), fc, chunk_size));
+  if (eob >= _limit) {
+    assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
+                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
+                             "[" PTR_FORMAT "," PTR_FORMAT ")",
+                             _limit, fc, eob, _sp->bottom(), _sp->end());
+    }
+    // Return the storage we are tracking back into the free lists.
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("Flushing ... ");
+    }
+    assert(freeFinger() < eob, "Error");
+    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
+  }
+}
+
 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   assert(size > 0,
@@ -8419,6 +8477,8 @@
     }
     _sp->addChunkAndRepairOffsetTable(chunk, size,
             lastFreeRangeCoalesced());
+  } else if (CMSTraceSweeper) {
+    gclog_or_tty->print_cr("Already in free list: nothing to flush");
   }
   set_inFreeRange(false);
   set_freeRangeInFreeLists(false);
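
The lookahead_and_flush() hunk above implements the comment that precedes it:
while the sweeper is accumulating a free range, any already-free or freshly
garbage block whose end reaches or crosses _limit is the last block it may
examine, so the pending range is flushed to the free lists immediately instead
of stepping past a _limit that may no longer be a block boundary. The sketch
below is a simplified standalone rendering of that control flow, with
illustrative names (Sweeper, flush) standing in for SweepClosure and the CMS
free lists.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    struct Sweeper {
      size_t limit;           // sweeping must not examine blocks at/after this index
      bool   in_free_range;   // are we accumulating a run of free space?
      size_t free_finger;     // start of the free range being accumulated

      void start_free_range(size_t at) { in_free_range = true; free_finger = at; }

      // Return the accumulated run [free_finger, end) to the "free lists".
      void flush(size_t end) {
        assert(in_free_range && free_finger < end);
        std::printf("flush free range [%zu, %zu)\n", free_finger, end);
        in_free_range = false;
      }

      // Called for each already-free or garbage block [begin, begin + size).
      void lookahead_and_flush(size_t begin, size_t size) {
        size_t end_of_block = begin + size;
        if (in_free_range && end_of_block >= limit) {
          // Last block we may look at: flush now instead of stepping past limit.
          flush(end_of_block);
        }
      }
    };

    int main() {
      Sweeper s = { /*limit=*/100, /*in_free_range=*/false, /*free_finger=*/0 };
      s.start_free_range(80);
      s.lookahead_and_flush(80, 10);   // ends at 90  < 100: keep accumulating
      s.lookahead_and_flush(90, 20);   // ends at 110 >= 100: flush [80, 110)
      return 0;
    }
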
@@ -8477,13 +8537,14 @@
 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
   return debug_cms_space->verifyChunkInFreeLists(fc);
 }
-
-void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
+#endif
+
+void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
-  }
-}
-#endif
+    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
+                           fc, fc->size());
+  }
+}
 
 // CMSIsAliveClosure
 bool CMSIsAliveClosure::do_object_b(oop obj) {
@@ -9236,11 +9297,12 @@
   return res;
 }
 
-TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase): TraceMemoryManagerStats() {
+TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
 
   switch (phase) {
     case CMSCollector::InitialMarking:
       initialize(true  /* fullGC */ ,
+                 cause /* cause of the GC */,
                  true  /* recordGCBeginTime */,
                  true  /* recordPreGCUsage */,
                  false /* recordPeakUsage */,
@@ -9252,6 +9314,7 @@
 
     case CMSCollector::FinalMarking:
       initialize(true  /* fullGC */ ,
+                 cause /* cause of the GC */,
                  false /* recordGCBeginTime */,
                  false /* recordPreGCUsage */,
                  false /* recordPeakUsage */,
@@ -9263,6 +9326,7 @@
 
     case CMSCollector::Sweeping:
       initialize(true  /* fullGC */ ,
+                 cause /* cause of the GC */,
                  false /* recordGCBeginTime */,
                  false /* recordPreGCUsage */,
                  true  /* recordPeakUsage */,
@@ -9278,8 +9342,9 @@
 }
 
 // when bailing out of cms in concurrent mode failure
-TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(): TraceMemoryManagerStats() {
+TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(GCCause::Cause cause): TraceMemoryManagerStats() {
   initialize(true /* fullGC */ ,
+             cause /* cause of the GC */,
              true /* recordGCBeginTime */,
              true /* recordPreGCUsage */,
              true /* recordPeakUsage */,
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1701,9 +1701,9 @@
   CMSCollector*                  _collector;  // collector doing the work
   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
   CompactibleFreeListSpace*      _sp;   // Space being swept
-  HeapWord*                      _limit;// the address at which the sweep should stop because
-                                        // we do not expect blocks eligible for sweeping past
-                                        // that address.
+  HeapWord*                      _limit;// the address at or above which the sweep should stop
+                                        // because we do not expect newly garbage blocks
+                                        // eligible for sweeping past that address.
   Mutex*                         _freelistLock; // Free list lock (in space)
   CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                 // generation)
@@ -1750,6 +1750,10 @@
   void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
   // Process a free chunk during sweeping.
   void do_already_free_chunk(FreeChunk *fc);
+  // Work method called when processing an already free or a
+  // freshly garbage chunk to do a lookahead and possibly a
+  // preemptive flush if crossing over _limit.
+  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
   // Process a garbage chunk during sweeping.
   size_t do_garbage_chunk(FreeChunk *fc);
   // Process a live chunk during sweeping.
@@ -1758,8 +1762,6 @@
   // Accessors.
   HeapWord* freeFinger() const          { return _freeFinger; }
   void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
-  size_t freeRangeSize() const          { return _freeRangeSize; }
-  void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
   bool inFreeRange()    const           { return _inFreeRange; }
   void set_inFreeRange(bool v)          { _inFreeRange = v; }
   bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
@@ -1779,14 +1781,16 @@
   void do_yield_work(HeapWord* addr);
 
   // Debugging/Printing
-  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
+  void print_free_block_coalesced(FreeChunk* fc) const;
 
  public:
   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
                CMSBitMap* bitMap, bool should_yield);
-  ~SweepClosure();
+  ~SweepClosure() PRODUCT_RETURN;
 
   size_t       do_blk_careful(HeapWord* addr);
+  void         print() const { print_on(tty); }
+  void         print_on(outputStream *st) const;
 };
 
 // Closures related to weak references processing
@@ -1895,8 +1899,8 @@
 class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
 
  public:
-  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase);
-  TraceCMSMemoryManagerStats();
+  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
+  TraceCMSMemoryManagerStats(GCCause::Cause cause);
 };
 
 
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -172,7 +172,7 @@
 
   // hash a given key (index of card_ptr) with the specified size
   static unsigned int hash(size_t key, size_t size) {
-    return (unsigned int) key % size;
+    return (unsigned int) (key % size);
   }
 
   // hash a given key (index of card_ptr)
@@ -180,11 +180,11 @@
     return hash(key, _n_card_counts);
   }
 
-  unsigned ptr_2_card_num(jbyte* card_ptr) {
-    return (unsigned) (card_ptr - _ct_bot);
+  unsigned int ptr_2_card_num(jbyte* card_ptr) {
+    return (unsigned int) (card_ptr - _ct_bot);
   }
 
-  jbyte* card_num_2_ptr(unsigned card_num) {
+  jbyte* card_num_2_ptr(unsigned int card_num) {
     return (jbyte*) (_ct_bot + card_num);
   }
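
The hash() change above adds parentheses because a C-style cast binds more
tightly than %: the old form truncated the 64-bit key to 32 bits and then took
the modulus, while the new form reduces modulo size first and only then narrows
the result. The two only differ when the key exceeds 32 bits and the table size
is not a power of two, but the parenthesized form matches the stated intent of
hashing the card index into the table. A standalone illustration (assuming a
64-bit key; not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      std::uint64_t key  = UINT64_C(0x100000000);  // bit 32 set: lost by 32-bit truncation
      std::uint64_t size = 1000;                   // not a power of two, so it matters

      unsigned int cast_first = (unsigned int) key % size;    // old form: truncate, then %
      unsigned int mod_first  = (unsigned int) (key % size);  // new form: %, then narrow

      std::printf("cast first: %u, modulus first: %u\n", cast_first, mod_first);
      return 0;
    }

Run as written this prints 0 for the cast-first form and 296 for the
modulus-first form, which is why the explicit grouping is the safer spelling.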
 
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -826,6 +826,14 @@
 void ConcurrentMark::checkpointRootsInitialPost() {
   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 
+  // If we force an overflow during remark, the remark operation will
+  // actually abort and we'll restart concurrent marking. If we always
+  // force an overflow during remark we'll never actually complete the
+  // marking phase. So, we initialize this here, at the start of the
+  // cycle, so that the remaining overflow count will decrease at
+  // every remark and we'll eventually not need to cause one.
+  force_overflow_stw()->init();
+
   // For each region note start of marking.
   NoteStartOfMarkHRClosure startcl;
   g1h->heap_region_iterate(&startcl);
@@ -893,27 +901,37 @@
 }
 
 /*
-   Notice that in the next two methods, we actually leave the STS
-   during the barrier sync and join it immediately afterwards. If we
-   do not do this, this then the following deadlock can occur: one
-   thread could be in the barrier sync code, waiting for the other
-   thread to also sync up, whereas another one could be trying to
-   yield, while also waiting for the other threads to sync up too.
-
-   Because the thread that does the sync barrier has left the STS, it
-   is possible to be suspended for a Full GC or an evacuation pause
-   could occur. This is actually safe, since the entering the sync
-   barrier is one of the last things do_marking_step() does, and it
-   doesn't manipulate any data structures afterwards.
-*/
+ * Notice that in the next two methods, we actually leave the STS
+ * during the barrier sync and join it immediately afterwards. If we
+ * do not do this, the following deadlock can occur: one thread could
+ * be in the barrier sync code, waiting for the other thread to also
+ * sync up, whereas another one could be trying to yield, while also
+ * waiting for the other threads to sync up too.
+ *
+ * Note, however, that this code is also used during remark and in
+ * this case we should not attempt to leave / enter the STS, otherwise
+ * we'll either hit an assert (debug / fastdebug) or deadlock
+ * (product). So we should only leave / enter the STS if we are
+ * operating concurrently.
+ *
+ * Because the thread that does the sync barrier has left the STS, it
+ * is possible for it to be suspended for a Full GC or for an evacuation
+ * pause to occur. This is actually safe, since entering the sync
+ * barrier is one of the last things do_marking_step() does, and it
+ * doesn't manipulate any data structures afterwards.
+ */
 
 void ConcurrentMark::enter_first_sync_barrier(int task_num) {
   if (verbose_low())
     gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
 
-  ConcurrentGCThread::stsLeave();
+  if (concurrent()) {
+    ConcurrentGCThread::stsLeave();
+  }
   _first_overflow_barrier_sync.enter();
-  ConcurrentGCThread::stsJoin();
+  if (concurrent()) {
+    ConcurrentGCThread::stsJoin();
+  }
   // at this point everyone should have synced up and not be doing any
   // more work
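
The barrier-sync hunk above guards the stsLeave()/stsJoin() calls with
concurrent(): during a stop-the-world remark the marking threads are not part
of the suspendible thread set, so leaving or joining it would assert (debug) or
deadlock (product). The patch uses explicit if blocks; the sketch below shows
the same idea as a scoped helper with made-up names (SuspendibleSet,
ConditionalStsLeaver), purely for illustration.

    #include <cstdio>

    struct SuspendibleSet {
      static void leave() { std::puts("stsLeave"); }
      static void join()  { std::puts("stsJoin");  }
    };

    struct Barrier {
      void enter() { std::puts("barrier sync"); }  // the real code blocks here
    };

    // Leaves the suspendible set for the lifetime of the scope, but only when
    // marking is running concurrently with the mutator threads.
    struct ConditionalStsLeaver {
      bool _active;
      explicit ConditionalStsLeaver(bool concurrent) : _active(concurrent) {
        if (_active) SuspendibleSet::leave();
      }
      ~ConditionalStsLeaver() {
        if (_active) SuspendibleSet::join();
      }
    };

    void enter_sync_barrier(bool concurrent, Barrier& barrier) {
      ConditionalStsLeaver scope(concurrent);
      barrier.enter();  // safe to block: we left the STS only if we were in it
    }

    int main() {
      Barrier b;
      enter_sync_barrier(true,  b);  // concurrent phase: leave/join around the wait
      enter_sync_barrier(false, b);  // STW remark: no STS transitions
      return 0;
    }
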
 
@@ -923,7 +941,12 @@
   // let task 0 do this
   if (task_num == 0) {
     // task 0 is responsible for clearing the global data structures
-    clear_marking_state();
+    // We should be here because of an overflow. During STW we should
+    // not clear the overflow flag since we rely on it being true when
+    // we exit this method to abort the pause and restart concurrent
+    // marking.
+    clear_marking_state(concurrent() /* clear_overflow */);
+    force_overflow()->update();
 
     if (PrintGC) {
       gclog_or_tty->date_stamp(PrintGCDateStamps);
@@ -940,15 +963,45 @@
   if (verbose_low())
     gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
 
-  ConcurrentGCThread::stsLeave();
+  if (concurrent()) {
+    ConcurrentGCThread::stsLeave();
+  }
   _second_overflow_barrier_sync.enter();
-  ConcurrentGCThread::stsJoin();
+  if (concurrent()) {
+    ConcurrentGCThread::stsJoin();
+  }
   // at this point everything should be re-initialised and ready to go
 
   if (verbose_low())
     gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
 }
 
+#ifndef PRODUCT
+void ForceOverflowSettings::init() {
+  _num_remaining = G1ConcMarkForceOverflow;
+  _force = false;
+  update();
+}
+
+void ForceOverflowSettings::update() {
+  if (_num_remaining > 0) {
+    _num_remaining -= 1;
+    _force = true;
+  } else {
+    _force = false;
+  }
+}
+
+bool ForceOverflowSettings::should_force() {
+  if (_force) {
+    _force = false;
+    return true;
+  } else {
+    return false;
+  }
+}
+#endif // !PRODUCT
+
 void ConcurrentMark::grayRoot(oop p) {
   HeapWord* addr = (HeapWord*) p;
   // We can't really check against _heap_start and _heap_end, since it
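
ForceOverflowSettings, defined in the hunks just above and declared in the
concurrentMark.hpp hunk further below, is a debug-only testing hook: init()
seeds a countdown from G1ConcMarkForceOverflow, update() arms the flag while
the budget lasts, and should_force() consumes it, so tests can force a bounded
number of artificial marking-stack overflows and still terminate. The
standalone sketch below reproduces only that countdown behaviour, with
simplified names and no HotSpot types.

    #include <cstdio>

    class ForceOverflow {
      unsigned _num_remaining;
      bool     _force;
     public:
      ForceOverflow() : _num_remaining(0), _force(false) {}

      void init(unsigned budget) {   // budget plays the role of G1ConcMarkForceOverflow
        _num_remaining = budget;
        _force = false;
        update();
      }
      void update() {                // called at cycle start and after each forced overflow
        if (_num_remaining > 0) {
          _num_remaining -= 1;
          _force = true;
        } else {
          _force = false;
        }
      }
      bool should_force() {          // one-shot: consumes the armed state
        if (_force) {
          _force = false;
          return true;
        }
        return false;
      }
    };

    int main() {
      ForceOverflow fo;
      fo.init(2);                                   // force at most two overflows
      for (int cycle = 0; cycle < 4; ++cycle) {
        std::printf("cycle %d: force=%d\n", cycle, (int) fo.should_force());
        fo.update();
      }
      return 0;
    }
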
@@ -1117,6 +1170,7 @@
   _restart_for_overflow = false;
 
   size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
+  force_overflow_conc()->init();
   set_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
@@ -1845,7 +1899,7 @@
   while (!_cleanup_list.is_empty()) {
     HeapRegion* hr = _cleanup_list.remove_head();
     assert(hr != NULL, "the list was not empty");
-    hr->rem_set()->clear();
+    hr->par_clear();
     tmp_free_list.add_as_tail(hr);
 
     // Instead of adding one region at a time to the secondary_free_list,
@@ -2703,12 +2757,16 @@
 
 }
 
-void ConcurrentMark::clear_marking_state() {
+void ConcurrentMark::clear_marking_state(bool clear_overflow) {
   _markStack.setEmpty();
   _markStack.clear_overflow();
   _regionStack.setEmpty();
   _regionStack.clear_overflow();
-  clear_has_overflown();
+  if (clear_overflow) {
+    clear_has_overflown();
+  } else {
+    assert(has_overflown(), "pre-condition");
+  }
   _finger = _heap_start;
 
   for (int i = 0; i < (int)_max_task_num; ++i) {
@@ -2996,6 +3054,28 @@
     _should_gray_objects = true;
 }
 
+// Resets the region fields of active CMTasks whose values point
+// into the collection set.
+void ConcurrentMark::reset_active_task_region_fields_in_cset() {
+  assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
+  assert(parallel_marking_threads() <= _max_task_num, "sanity");
+
+  for (int i = 0; i < (int)parallel_marking_threads(); i += 1) {
+    CMTask* task = _tasks[i];
+    HeapWord* task_finger = task->finger();
+    if (task_finger != NULL) {
+      assert(_g1h->is_in_g1_reserved(task_finger), "not in heap");
+      HeapRegion* finger_region = _g1h->heap_region_containing(task_finger);
+      if (finger_region->in_collection_set()) {
+        // The task's current region is in the collection set.
+        // This region will be evacuated in the current GC and
+        // the region fields in the task will be stale.
+        task->giveup_current_region();
+      }
+    }
+  }
+}
+
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
   // Clear all marks to force marking thread to do nothing
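
reset_active_task_region_fields_in_cset() above walks the parallel marking
tasks at a safepoint and makes any task whose finger lies in a collection-set
region give up that region, since the region is about to be evacuated and the
task's cached fields would otherwise go stale. A simplified standalone sketch
of that walk, with Task and Region as stand-ins for CMTask and HeapRegion:

    #include <cstdio>
    #include <vector>

    struct Region {
      bool in_collection_set;
    };

    struct Task {
      Region* claimed;   // region currently being scanned (the task's "finger")
      void giveup_current_region() { claimed = nullptr; }  // a fresh one is claimed later
    };

    // Before evacuating the collection set, clear any task whose claimed region
    // is about to be evacuated, since its cached region fields would go stale.
    void reset_tasks_in_cset(std::vector<Task>& tasks) {
      for (Task& t : tasks) {
        if (t.claimed != nullptr && t.claimed->in_collection_set) {
          t.giveup_current_region();
        }
      }
    }

    int main() {
      Region evacuated = { true };
      Region untouched = { false };
      std::vector<Task> tasks = { Task{ &evacuated }, Task{ &untouched } };
      reset_tasks_in_cset(tasks);
      std::printf("task0 claimed=%p, task1 claimed=%p\n",
                  (void*) tasks[0].claimed, (void*) tasks[1].claimed);
      return 0;
    }
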
@@ -4279,6 +4359,15 @@
     }
   }
 
+  // If we are about to wrap up and go into termination, check if we
+  // should raise the overflow flag.
+  if (do_termination && !has_aborted()) {
+    if (_cm->force_overflow()->should_force()) {
+      _cm->set_has_overflown();
+      regular_clock_call();
+    }
+  }
+
   // We still haven't aborted. Now, let's try to get into the
   // termination protocol.
   if (do_termination && !has_aborted()) {
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -316,6 +316,19 @@
   void setEmpty()   { _index = 0; clear_overflow(); }
 };
 
+class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
+private:
+#ifndef PRODUCT
+  uintx _num_remaining;
+  bool _force;
+#endif // !defined(PRODUCT)
+
+public:
+  void init() PRODUCT_RETURN;
+  void update() PRODUCT_RETURN;
+  bool should_force() PRODUCT_RETURN_( return false; );
+};
+
 // this will enable a variety of different statistics per GC task
 #define _MARKING_STATS_       0
 // this will enable the higher verbose levels
@@ -462,6 +475,9 @@
 
   WorkGang* _parallel_workers;
 
+  ForceOverflowSettings _force_overflow_conc;
+  ForceOverflowSettings _force_overflow_stw;
+
   void weakRefsWork(bool clear_all_soft_refs);
 
   void swapMarkBitMaps();
@@ -470,7 +486,7 @@
   // task local ones; should be called during initial mark.
   void reset();
   // It resets all the marking data structures.
-  void clear_marking_state();
+  void clear_marking_state(bool clear_overflow = true);
 
   // It should be called to indicate which phase we're in (concurrent
   // mark or remark) and how many threads are currently active.
@@ -547,6 +563,22 @@
   void enter_first_sync_barrier(int task_num);
   void enter_second_sync_barrier(int task_num);
 
+  ForceOverflowSettings* force_overflow_conc() {
+    return &_force_overflow_conc;
+  }
+
+  ForceOverflowSettings* force_overflow_stw() {
+    return &_force_overflow_stw;
+  }
+
+  ForceOverflowSettings* force_overflow() {
+    if (concurrent()) {
+      return force_overflow_conc();
+    } else {
+      return force_overflow_stw();
+    }
+  }
+
 public:
   // Manipulation of the global mark stack.
   // Notice that the first mark_stack_push is CAS-based, whereas the
@@ -777,10 +809,19 @@
 
   // It indicates that a new collection set is being chosen.
   void newCSet();
+
   // It registers a collection set heap region with CM. This is used
   // to determine whether any heap regions are located above the finger.
   void registerCSetRegion(HeapRegion* hr);
 
+  // Resets the region fields of any active CMTask whose region fields
+  // are in the collection set (i.e. the region currently claimed by
+  // the CMTask will be evacuated and may be used, subsequently, as
+  // an alloc region). When this happens the region fields in the CMTask
+  // are stale and, hence, should be cleared causing the worker thread
+  // to claim a new region.
+  void reset_active_task_region_fields_in_cset();
+
   // Registers the maximum region-end associated with a set of
   // regions with CM. Again this is used to determine whether any
   // heap regions are located above the finger.
@@ -1007,9 +1048,6 @@
   void setup_for_region(HeapRegion* hr);
   // it brings up-to-date the limit of the region
   void update_region_limit();
-  // it resets the local fields after a task has finished scanning a
-  // region
-  void giveup_current_region();
 
   // called when either the words scanned or the refs visited limit
   // has been reached
@@ -1062,6 +1100,11 @@
   // exit the termination protocol after it's entered it.
   virtual bool should_exit_termination();
 
+  // Resets the local region fields after a task has finished scanning a
+  // region; or when they have become stale as a result of the region
+  // being evacuated.
+  void giveup_current_region();
+
   HeapWord* finger()            { return _finger; }
 
   bool has_aborted()            { return _has_aborted; }
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -428,6 +428,37 @@
   _cmThread->stop();
 }
 
+#ifdef ASSERT
+// A region is added to the collection set as it is retired
+// so an address p can point to a region which will be in the
+// collection set but has not yet been retired.  This method
+// therefore is only accurate during a GC pause after all
+// regions have been retired.  It is used for debugging
+// to check if an nmethod has references to objects that can
+// be moved during a partial collection.  Though it can be
+// inaccurate, it is sufficient for G1 because the conservative
+// implementation of is_scavengable() for G1 will indicate that
+// all nmethods must be scanned during a partial collection.
+bool G1CollectedHeap::is_in_partial_collection(const void* p) {
+  HeapRegion* hr = heap_region_containing(p);
+  return hr != NULL && hr->in_collection_set();
+}
+#endif
+
+// Returns true if the reference points to an object that
+// can move in an incremental collection.
+bool G1CollectedHeap::is_scavengable(const void* p) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  HeapRegion* hr = heap_region_containing(p);
+  if (hr == NULL) {
+     // perm gen (or null)
+     return false;
+  } else {
+    return !hr->isHumongous();
+  }
+}
+
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
@@ -1161,7 +1192,8 @@
     TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                 PrintGC, true, gclog_or_tty);
 
-    TraceMemoryManagerStats tms(true /* fullGC */);
+    TraceCollectorStats tcs(g1mm()->full_collection_counters());
+    TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
 
     double start = os::elapsedTime();
     g1_policy()->record_full_collection_start();
@@ -1339,6 +1371,7 @@
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
+  g1mm()->update_counters();
 
   return true;
 }
@@ -1971,6 +2004,10 @@
 
   init_mutator_alloc_region();
 
+  // Do create of the monitoring and management support so that
+  // values in the heap have been properly initialized.
+  _g1mm = new G1MonitoringSupport(this, &_g1_storage);
+
   return JNI_OK;
 }
 
@@ -2113,6 +2150,28 @@
      (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
 }
 
+#ifndef PRODUCT
+void G1CollectedHeap::allocate_dummy_regions() {
+  // Let's fill up most of the region
+  size_t word_size = HeapRegion::GrainWords - 1024;
+  // And as a result the region we'll allocate will be humongous.
+  guarantee(isHumongous(word_size), "sanity");
+
+  for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
+    // Let's use the existing mechanism for the allocation
+    HeapWord* dummy_obj = humongous_obj_allocate(word_size);
+    if (dummy_obj != NULL) {
+      MemRegion mr(dummy_obj, word_size);
+      CollectedHeap::fill_with_object(mr);
+    } else {
+      // If we can't allocate once, we probably cannot allocate
+      // again. Let's get out of the loop.
+      break;
+    }
+  }
+}
+#endif // !PRODUCT
+
 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 
@@ -2777,17 +2836,26 @@
                              bool silent,
                              bool use_prev_marking) {
   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
-    if (!silent) { gclog_or_tty->print("roots "); }
+    if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
     VerifyRootsClosure rootsCl(use_prev_marking);
     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
-    process_strong_roots(true,  // activate StrongRootsScope
-                         false,
-                         SharedHeap::SO_AllClasses,
+    // We apply the relevant closures to all the oops in the
+    // system dictionary, the string table and the code cache.
+    const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+    process_strong_roots(true,      // activate StrongRootsScope
+                         true,      // we set "collecting perm gen" to true,
+                                    // so we don't reset the dirty cards in the perm gen.
+                         SharedHeap::ScanningOption(so),  // roots scanning options
                          &rootsCl,
                          &blobsCl,
                          &rootsCl);
+    // Since we used "collecting_perm_gen" == true above, we will not have
+    // checked the refs from perm into the G1-collected heap. We check those
+    // references explicitly below. Whether the relevant cards are dirty
+    // is checked further below in the rem set verification.
+    if (!silent) { gclog_or_tty->print("Permgen roots "); }
+    perm_gen()->oop_iterate(&rootsCl);
     bool failures = rootsCl.failures();
-    rem_set()->invalidate(perm_gen()->used_region(), false);
     if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
     verify_region_sets();
     if (!silent) { gclog_or_tty->print("HeapRegions "); }
@@ -3164,7 +3232,8 @@
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
 
-    TraceMemoryManagerStats tms(false /* fullGC */);
+    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
+    TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
 
     // If the secondary_free_list is not empty, append it to the
     // free_list. No need to wait for the cleanup operation to finish;
@@ -3254,8 +3323,9 @@
       // progress, this will be zero.
       _cm->set_oops_do_bound();
 
-      if (mark_in_progress())
+      if (mark_in_progress()) {
         concurrent_mark()->newCSet();
+      }
 
 #if YOUNG_LIST_VERBOSE
       gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
@@ -3265,6 +3335,16 @@
 
       g1_policy()->choose_collection_set(target_pause_time_ms);
 
+      // We have chosen the complete collection set. If marking is
+      // active then we clear the region fields of any of the
+      // concurrent marking tasks whose region fields point into
+      // the collection set as these values will become stale. This
+      // will cause the owning marking threads to claim a new region
+      // when marking restarts.
+      if (mark_in_progress()) {
+        concurrent_mark()->reset_active_task_region_fields_in_cset();
+      }
+
       // Nothing to do if we were unable to choose a collection set.
 #if G1_REM_SET_LOGGING
       gclog_or_tty->print_cr("\nAfter pause, heap:");
@@ -3338,6 +3418,8 @@
         doConcurrentMark();
       }
 
+      allocate_dummy_regions();
+
 #if YOUNG_LIST_VERBOSE
       gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
       _young_list->print();
@@ -3401,6 +3483,8 @@
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
+  g1mm()->update_counters();
+
   if (G1SummarizeRSetStats &&
       (G1SummarizeRSetStatsPeriod > 0) &&
       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
@@ -3933,6 +4017,9 @@
 oop
 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
                                                oop old) {
+  assert(obj_in_cs(old),
+         err_msg("obj: "PTR_FORMAT" should still be in the CSet",
+                 (HeapWord*) old));
   markOop m = old->mark();
   oop forward_ptr = old->forward_to_atomic(old);
   if (forward_ptr == NULL) {
@@ -3955,7 +4042,13 @@
     }
     return old;
   } else {
-    // Someone else had a place to copy it.
+    // Forward-to-self failed. Either someone else managed to allocate
+    // space for this object (old != forward_ptr) or they beat us in
+    // self-forwarding it (old == forward_ptr).
+    assert(old == forward_ptr || !obj_in_cs(forward_ptr),
+           err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
+                   "should not be in the CSet",
+                   (HeapWord*) old, (HeapWord*) forward_ptr));
     return forward_ptr;
   }
 }
@@ -4266,11 +4359,10 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop(heap_oop);
-    assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
-           "shouldn't still be in the CSet if evacuation didn't fail.");
     HeapWord* addr = (HeapWord*)obj;
-    if (_g1->is_in_g1_reserved(addr))
+    if (_g1->is_in_g1_reserved(addr)) {
       _cm->grayRoot(oop(addr));
+    }
   }
 }
 
@@ -4919,36 +5011,45 @@
 
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   CardTableModRefBS* _ct_bs;
 public:
-  G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
-    : _ct_bs(ct_bs) { }
+  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
+    : _g1h(g1h), _ct_bs(ct_bs) { }
   virtual bool doHeapRegion(HeapRegion* r) {
-    MemRegion mr(r->bottom(), r->end());
     if (r->is_survivor()) {
-      _ct_bs->verify_dirty_region(mr);
+      _g1h->verify_dirty_region(r);
     } else {
-      _ct_bs->verify_clean_region(mr);
+      _g1h->verify_not_dirty_region(r);
     }
     return false;
   }
 };
 
+void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
+  // All of the region should be clean.
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  MemRegion mr(hr->bottom(), hr->end());
+  ct_bs->verify_not_dirty_region(mr);
+}
+
+void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
+  // We cannot guarantee that [bottom(),end()] is dirty.  Threads
+  // dirty allocated blocks as they allocate them. The thread that
+  // retires each region and replaces it with a new one will do a
+  // maximal allocation to fill in [pre_dummy_top(),end()] but will
+  // not dirty that area (one less thing to have to do while holding
+  // a lock). So we can only verify that [bottom(),pre_dummy_top()]
+  // is dirty.
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  MemRegion mr(hr->bottom(), hr->pre_dummy_top());
+  ct_bs->verify_dirty_region(mr);
+}
+
 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
-    // We cannot guarantee that [bottom(),end()] is dirty.  Threads
-    // dirty allocated blocks as they allocate them. The thread that
-    // retires each region and replaces it with a new one will do a
-    // maximal allocation to fill in [pre_dummy_top(),end()] but will
-    // not dirty that area (one less thing to have to do while holding
-    // a lock). So we can only verify that [bottom(),pre_dummy_top()]
-    // is dirty. Also note that verify_dirty_region() requires
-    // mr.start() and mr.end() to be card aligned and pre_dummy_top()
-    // is not guaranteed to be.
-    MemRegion mr(hr->bottom(),
-                 ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
-    ct_bs->verify_dirty_region(mr);
+    verify_dirty_region(hr);
   }
 }
 
@@ -4991,7 +5092,7 @@
   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
 #ifndef PRODUCT
   if (G1VerifyCTCleanup || VerifyAfterGC) {
-    G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
+    G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
     heap_region_iterate(&cleanup_verifier);
   }
 #endif
@@ -5314,6 +5415,7 @@
     if (new_alloc_region != NULL) {
       g1_policy()->update_region_num(true /* next_is_young */);
       set_region_short_lived_locked(new_alloc_region);
+      g1mm()->update_eden_counters();
       return new_alloc_region;
     }
   }
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -28,7 +28,9 @@
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
+#include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
 #include "memory/barrierSet.hpp"
 #include "memory/memRegion.hpp"
@@ -57,6 +59,7 @@
 class ConcurrentMark;
 class ConcurrentMarkThread;
 class ConcurrentG1Refine;
+class GenerationCounters;
 
 typedef OverflowTaskQueue<StarTask>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
@@ -236,6 +239,9 @@
   // current collection.
   HeapRegion* _gc_alloc_region_list;
 
+  // Helper for monitoring and management support.
+  G1MonitoringSupport* _g1mm;
+
   // Determines PLAB size for a particular allocation purpose.
   static size_t desired_plab_sz(GCAllocPurpose purpose);
 
@@ -298,6 +304,14 @@
   // started is maintained in _total_full_collections in CollectedHeap.
   volatile unsigned int _full_collections_completed;
 
+  // This is a non-product method that is helpful for testing. It is
+  // called at the end of a GC and artificially expands the heap by
+  // allocating a number of dead regions. This way we can induce very
+  // frequent marking cycles and stress the cleanup / concurrent
+  // cleanup code more (as all the regions that will be allocated by
+  // this method will be found dead by the marking cycle).
+  void allocate_dummy_regions() PRODUCT_RETURN;
+
   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.
 
@@ -542,6 +556,9 @@
   HeapWord* expand_and_allocate(size_t word_size);
 
 public:
+
+  G1MonitoringSupport* g1mm() { return _g1mm; }
+
   // Expand the garbage-first heap by at least the given size (in bytes!).
   // Returns true if the heap was expanded by the requested amount;
   // false otherwise.
@@ -953,6 +970,8 @@
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
+  void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
+  void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
   void verify_dirty_young_regions() PRODUCT_RETURN;
 
@@ -1235,6 +1254,12 @@
     return hr != NULL && hr->is_young();
   }
 
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr);
+
   // We don't need barriers for initializing stores to objects
   // in the young gen: for the SATB pre-barrier, there is no
   // pre-value that needs to be remembered; for the remembered-set
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+
+G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
+                                         VirtualSpace* g1_storage_addr) :
+  _g1h(g1h),
+  _incremental_collection_counters(NULL),
+  _full_collection_counters(NULL),
+  _non_young_collection_counters(NULL),
+  _old_space_counters(NULL),
+  _young_collection_counters(NULL),
+  _eden_counters(NULL),
+  _from_counters(NULL),
+  _to_counters(NULL),
+  _g1_storage_addr(g1_storage_addr)
+{
+  // Counters for GC collections
+  //
+  //  name "collector.0".  In a generational collector this would be the
+  // young generation collection.
+  _incremental_collection_counters =
+    new CollectorCounters("G1 incremental collections", 0);
+  //   name "collector.1".  In a generational collector this would be the
+  // old generation collection.
+  _full_collection_counters =
+    new CollectorCounters("G1 stop-the-world full collections", 1);
+
+  // Timer sampling for all counters that support sampling only updates the
+  // used value.  See the take_sample() method.  G1 requires both used and
+  // capacity to be updated, so sampling is not currently used.  It might
+  // be sufficient to update all counters in take_sample() even though
+  // take_sample() only returns "used".  When sampling was used, some
+  // anomalous values were emitted, which may have been the consequence
+  // of not updating all values simultaneously (e.g., see the calculation
+  // done in eden_space_used(): is it possible that the values used to
+  // calculate either eden_used or survivor_used are being updated by
+  // the collector while the sample is being taken?).
+  const bool sampled = false;
+
+  // "Generation" and "Space" counters.
+  //
+  //  name "generation.1".  This is logically the old generation in
+  // generational GC terms.  The "1, 1" parameters are for
+  // the n-th generation (=1) with 1 space.
+  // Counters are created from minCapacity, maxCapacity, and capacity
+  _non_young_collection_counters =
+    new GenerationCounters("whole heap", 1, 1, _g1_storage_addr);
+
+  //  name  "generation.1.space.0"
+  // Counters are created from maxCapacity, capacity, initCapacity,
+  // and used.
+  _old_space_counters = new HSpaceCounters("space", 0,
+    _g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters);
+
+  //   Young collection set
+  //  name "generation.0".  This is logically the young generation.
+  //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
+  // See  _non_young_collection_counters for additional counters
+  _young_collection_counters = new GenerationCounters("young", 0, 3, NULL);
+
+  // Replace max_heap_byte_size() with the maximum young gen size for
+  // G1CollectedHeap.
+  //  name "generation.0.space.0"
+  // See _old_space_counters for additional counters
+  _eden_counters = new HSpaceCounters("eden", 0,
+    _g1h->max_capacity(), eden_space_committed(),
+    _young_collection_counters);
+
+  //  name "generation.0.space.1"
+  // See _old_space_counters for additional counters
+  // Set the arguments to indicate that this survivor space is not used.
+  _from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0,
+    _young_collection_counters);
+
+  //  name "generation.0.space.2"
+  // See _old_space_counters for additional counters
+  _to_counters = new HSpaceCounters("s1", 2,
+    _g1h->max_capacity(),
+    survivor_space_committed(),
+    _young_collection_counters);
+}
+
+size_t G1MonitoringSupport::overall_committed() {
+  return g1h()->capacity();
+}
+
+size_t G1MonitoringSupport::overall_used() {
+  return g1h()->used_unlocked();
+}
+
+size_t G1MonitoringSupport::eden_space_committed() {
+  return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes);
+}
+
+size_t G1MonitoringSupport::eden_space_used() {
+  size_t young_list_length = g1h()->young_list()->length();
+  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
+  size_t survivor_used = survivor_space_used();
+  eden_used = subtract_up_to_zero(eden_used, survivor_used);
+  return eden_used;
+}
+
+size_t G1MonitoringSupport::survivor_space_committed() {
+  return MAX2(survivor_space_used(),
+              (size_t) HeapRegion::GrainBytes);
+}
+
+size_t G1MonitoringSupport::survivor_space_used() {
+  size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions();
+  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
+  return survivor_used;
+}
+
+size_t G1MonitoringSupport::old_space_committed() {
+  size_t committed = overall_committed();
+  size_t eden_committed = eden_space_committed();
+  size_t survivor_committed = survivor_space_committed();
+  committed = subtract_up_to_zero(committed, eden_committed);
+  committed = subtract_up_to_zero(committed, survivor_committed);
+  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
+  return committed;
+}
+
+// See the comment near the top of g1MonitoringSupport.hpp for
+// an explanation of these calculations for "used" and "capacity".
+size_t G1MonitoringSupport::old_space_used() {
+  size_t used = overall_used();
+  size_t eden_used = eden_space_used();
+  size_t survivor_used = survivor_space_used();
+  used = subtract_up_to_zero(used, eden_used);
+  used = subtract_up_to_zero(used, survivor_used);
+  return used;
+}
+
+void G1MonitoringSupport::update_counters() {
+  if (UsePerfData) {
+    eden_counters()->update_capacity(eden_space_committed());
+    eden_counters()->update_used(eden_space_used());
+    to_counters()->update_capacity(survivor_space_committed());
+    to_counters()->update_used(survivor_space_used());
+    old_space_counters()->update_capacity(old_space_committed());
+    old_space_counters()->update_used(old_space_used());
+    non_young_collection_counters()->update_all();
+  }
+}
+
+void G1MonitoringSupport::update_eden_counters() {
+  if (UsePerfData) {
+    eden_counters()->update_capacity(eden_space_committed());
+    eden_counters()->update_used(eden_space_used());
+  }
+}
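
The arithmetic above derives all per-pool figures from three G1 quantities: overall used space, the young region count, and the recorded survivor region count. A minimal standalone sketch of the same calculation, using hypothetical region counts and sizes (none of these numbers come from the patch):

  #include <cstddef>
  #include <cstdio>

  // Defensive subtraction, mirroring G1MonitoringSupport::subtract_up_to_zero().
  static size_t subtract_up_to_zero(size_t x, size_t y) {
    return x > y ? x - y : 0;
  }

  int main() {
    // Hypothetical example values.
    const size_t region_size      = 1 * 1024 * 1024;   // 1 MB regions
    const size_t young_regions    = 10;                // eden + survivors
    const size_t survivor_regions = 2;
    const size_t overall_used     = 25 * 1024 * 1024;  // whole-heap used

    size_t survivor_used = survivor_regions * region_size;              // 2 MB
    size_t eden_used     = subtract_up_to_zero(young_regions * region_size,
                                               survivor_used);          // 8 MB
    size_t old_used      = subtract_up_to_zero(
                             subtract_up_to_zero(overall_used, eden_used),
                             survivor_used);                            // 15 MB

    std::printf("eden=%zu survivor=%zu old=%zu\n",
                eden_used, survivor_used, old_used);
    return 0;
  }

The committed figures are then clamped up to at least one region (HeapRegion::GrainBytes) via MAX2(), so no pool ever reports zero capacity.
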
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
+
+#include "gc_implementation/shared/hSpaceCounters.hpp"
+
+class G1CollectedHeap;
+class G1SpaceMonitoringSupport;
+
+// Class for monitoring logical spaces in G1.
+// G1 defines a set of regions as a young
+// collection (analogous to a young generation).
+// The young collection is a logical generation
+// with no fixed chunk (see space.hpp) reflecting
+// the address space for the generation.  In addition
+// to the young collection there is its complement,
+// the non-young collection, which is simply the regions
+// not in the young collection.  The non-young collection
+// is treated here as a logical old generation only
+// because the monitoring tools expect a generational
+// heap.  The monitoring tools expect that a Space
+// (see space.hpp) exists that describes the
+// address space of the young collection and the non-young
+// collection, and such a view is provided here.
+//
+// This class provides interfaces to access
+// the value of variables for the young collection
+// that include the "capacity" and "used" of the
+// young collection along with constant values
+// for the minimum and maximum capacities for
+// the logical spaces.  Similarly for the non-young
+// collection.
+//
+// Also provided are counters for G1 concurrent collections
+// and stop-the-world full heap collections.
+//
+// Below is a description of how "used" and "capacity"
+// (or committed) are calculated for the logical spaces.
+//
+// 1) The used space calculation for a pool is not necessarily
+// independent of the others. We can easily get from G1 the overall
+// used space in the entire heap, the number of regions in the young
+// generation (includes both eden and survivors), and the number of
+// survivor regions. So, from that we calculate:
+//
+//  survivor_used = survivor_num * region_size
+//  eden_used     = young_region_num * region_size - survivor_used
+//  old_gen_used  = overall_used - eden_used - survivor_used
+//
+// Note that survivor_used and eden_used are upper bounds. To get the
+// actual value we would have to iterate over the regions and add up
+// ->used(). But that'd be expensive. So, we'll accept some lack of
+// accuracy for those two. But, we have to be careful when calculating
+// old_gen_used, in case we subtract from overall_used more than the
+// actual number and our result goes negative.
+//
+// 2) Calculating the used space is straightforward, as described
+// above. However, how do we calculate the committed space, given that
+// we allocate space for the eden, survivor, and old gen out of the
+// same pool of regions? One way to do this is to use the used value
+// as also the committed value for the eden and survivor spaces and
+// then calculate the old gen committed space as follows:
+//
+//  old_gen_committed = overall_committed - eden_committed - survivor_committed
+//
+// Maybe a better way to do that would be to calculate used for eden
+// and survivor as a sum of ->used() over their regions and then
+// calculate committed as region_num * region_size (i.e., what we use
+// to calculate the used space now). This is something to consider
+// in the future.
+//
+// 3) Another decision that is again not straightforward is what is
+// the max size that each memory pool can grow to. One way to do this
+// would be to use the committed size for the max for the eden and
+// survivors and calculate the old gen max as follows (basically, it's
+// a similar pattern to what we use for the committed space, as
+// described above):
+//
+//  old_gen_max = overall_max - eden_max - survivor_max
+//
+// Unfortunately, the above makes the max of each pool fluctuate over
+// time and, even though this is allowed according to the spec, it
+// broke several assumptions in the M&M framework (there were cases
+// where used would reach a value greater than max). So, for max we
+// use -1, which means "undefined" according to the spec.
+//
+// 4) Now, there is a very subtle issue with all the above. The
+// framework will call get_memory_usage() on the three pools
+// asynchronously. As a result, each call might get a different value
+// for, say, survivor_num which will yield inconsistent values for
+// eden_used, survivor_used, and old_gen_used (as survivor_num is used
+// in the calculation of all three). This would normally be
+// ok. However, it's possible that this might cause the sum of
+// eden_used, survivor_used, and old_gen_used to go over the max heap
+// size and this seems to sometimes cause JConsole (and maybe other
+// clients) to get confused. There's not really an easy / clean
+// solution to this problem, due to the asynchronous nature of the
+// framework.
+
+class G1MonitoringSupport : public CHeapObj {
+  G1CollectedHeap* _g1h;
+  VirtualSpace* _g1_storage_addr;
+
+  // jstat performance counters
+  //  incremental collections both fully and partially young
+  CollectorCounters*   _incremental_collection_counters;
+  //  full stop-the-world collections
+  CollectorCounters*   _full_collection_counters;
+  //  young collection set counters.  The _eden_counters,
+  // _from_counters, and _to_counters are associated with
+  // this "generational" counter.
+  GenerationCounters*  _young_collection_counters;
+  //  non-young collection set counters. The _old_space_counters
+  // below are associated with this "generational" counter.
+  GenerationCounters*  _non_young_collection_counters;
+  // Counters for the capacity and used for
+  //   the whole heap
+  HSpaceCounters*      _old_space_counters;
+  //   the young collection
+  HSpaceCounters*      _eden_counters;
+  //   the survivor collection (only one, _to_counters, is actively used)
+  HSpaceCounters*      _from_counters;
+  HSpaceCounters*      _to_counters;
+
+  // It returns x - y if x > y, 0 otherwise.
+  // As described in the comment above, some of the inputs to the
+  // calculations we have to do are obtained concurrently and hence
+  // may be inconsistent with each other. So, this provides a
+  // defensive way of performing the subtraction and avoids the value
+  // going negative (which would mean a very large result, given that
+  // the parameters are size_t).
+  static size_t subtract_up_to_zero(size_t x, size_t y) {
+    if (x > y) {
+      return x - y;
+    } else {
+      return 0;
+    }
+  }
+
+ public:
+  G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);
+
+  G1CollectedHeap* g1h() { return _g1h; }
+  VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }
+
+  // Performance Counter accessors
+  void update_counters();
+  void update_eden_counters();
+
+  CollectorCounters* incremental_collection_counters() {
+    return _incremental_collection_counters;
+  }
+  CollectorCounters* full_collection_counters() {
+    return _full_collection_counters;
+  }
+  GenerationCounters* non_young_collection_counters() {
+    return _non_young_collection_counters;
+  }
+  HSpaceCounters*      old_space_counters() { return _old_space_counters; }
+  HSpaceCounters*      eden_counters() { return _eden_counters; }
+  HSpaceCounters*      from_counters() { return _from_counters; }
+  HSpaceCounters*      to_counters() { return _to_counters; }
+
+  // Monitoring support used by
+  //   MemoryService
+  //   jstat counters
+  size_t overall_committed();
+  size_t overall_used();
+
+  size_t eden_space_committed();
+  size_t eden_space_used();
+
+  size_t survivor_space_committed();
+  size_t survivor_space_used();
+
+  size_t old_space_committed();
+  size_t old_space_used();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
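
Point 4 of the comment above is the reason subtract_up_to_zero() exists: the inputs are re-read asynchronously, so a plain subtraction can underflow when the collector changes a count between reads. A small sketch of the failure mode being guarded against, again with made-up numbers:

  #include <cstddef>
  #include <cstdio>

  int main() {
    const size_t region_size = 1 * 1024 * 1024;

    // Inconsistent snapshot: the survivor count was re-read after the
    // collector recorded more survivors than the young count seen earlier.
    size_t young_regions    = 3;   // read first
    size_t survivor_regions = 5;   // read later

    size_t survivor_used = survivor_regions * region_size;             // 5 MB
    size_t eden_naive    = young_regions * region_size - survivor_used;
    std::printf("naive eden = %zu (underflowed)\n", eden_naive);

    // What subtract_up_to_zero() does instead: clamp at zero.
    size_t young_used     = young_regions * region_size;
    size_t eden_defensive = young_used > survivor_used
                                ? young_used - survivor_used
                                : 0;
    std::printf("defensive eden = %zu\n", eden_defensive);
    return 0;
  }
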
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -157,7 +157,6 @@
   void set_try_claimed() { _try_claimed = true; }
 
   void scanCard(size_t index, HeapRegion *r) {
-    _cards_done++;
     DirtyCardToOopClosure* cl =
       r->new_dcto_closure(_oc,
                          CardTableModRefBS::Precise,
@@ -168,17 +167,14 @@
     HeapWord* card_start = _bot_shared->address_for_index(index);
     HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
     Space *sp = SharedHeap::heap()->space_containing(card_start);
-    MemRegion sm_region;
-    if (ParallelGCThreads > 0) {
-      // first find the used area
-      sm_region = sp->used_region_at_save_marks();
-    } else {
-      // The closure is not idempotent.  We shouldn't look at objects
-      // allocated during the GC.
-      sm_region = sp->used_region_at_save_marks();
-    }
+    MemRegion sm_region = sp->used_region_at_save_marks();
     MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
-    if (!mr.is_empty()) {
+    if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
+      // We make the card as "claimed" lazily (so races are possible
+      // but they're benign), which reduces the number of duplicate
+      // scans (the rsets of the regions in the cset can intersect).
+      _ct_bs->set_card_claimed(index);
+      _cards_done++;
       cl->do_MemRegion(mr);
     }
   }
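
The reworked scanCard() claims a card lazily just before scanning it. Because the claim is a plain write, two workers can still occasionally scan the same card, but later workers usually see the claim and skip; per the comment above, such races are treated as benign (extra work, not incorrect results). A simplified standalone sketch of that pattern, with invented names used only for illustration:

  #include <cstdint>
  #include <vector>

  enum CardValue : uint8_t { kClean = 0, kDirty = 1, kClaimed = 2 };

  // 'cards' stands in for the card table, 'scan' for the closure applied
  // to the card's memory region.
  template <typename ScanFn>
  void maybe_scan_card(std::vector<uint8_t>& cards, size_t index, ScanFn scan) {
    if (cards[index] != kClaimed) {
      // Lazy, unsynchronized claim: a racing worker that has already passed
      // the check above may scan this card as well, which only costs time.
      cards[index] = kClaimed;
      scan(index);
    }
  }
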
@@ -199,6 +195,9 @@
     HeapRegionRemSet* hrrs = r->rem_set();
     if (hrrs->iter_is_complete()) return false; // All done.
     if (!_try_claimed && !hrrs->claim_iter()) return false;
+    // If we ever free the collection set concurrently, we should also
+    // clear the card table concurrently therefore we won't need to
+    // add regions of the collection set to the dirty cards region.
     _g1h->push_dirty_cards_region(r);
     // If we didn't return above, then
     //   _try_claimed || r->claim_iter()
@@ -230,15 +229,10 @@
         _g1h->push_dirty_cards_region(card_region);
       }
 
-       // If the card is dirty, then we will scan it during updateRS.
-      if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
-        // We make the card as "claimed" lazily (so races are possible but they're benign),
-        // which reduces the number of duplicate scans (the rsets of the regions in the cset
-        // can intersect).
-        if (!_ct_bs->is_card_claimed(card_index)) {
-          _ct_bs->set_card_claimed(card_index);
-          scanCard(card_index, card_region);
-        }
+      // If the card is dirty, then we will scan it during updateRS.
+      if (!card_region->in_collection_set() &&
+          !_ct_bs->is_card_dirty(card_index)) {
+        scanCard(card_index, card_region);
       }
     }
     if (!_try_claimed) {
@@ -246,8 +240,6 @@
     }
     return false;
   }
-  // Set all cards back to clean.
-  void cleanup() {_g1h->cleanUpCardTable();}
   size_t cards_done() { return _cards_done;}
   size_t cards_looked_up() { return _cards;}
 };
@@ -566,8 +558,9 @@
     update_rs_cl.set_region(r);
     HeapWord* stop_point =
       r->oops_on_card_seq_iterate_careful(scanRegion,
-                                        &filter_then_update_rs_cset_oop_cl,
-                                        false /* filter_young */);
+                                          &filter_then_update_rs_cset_oop_cl,
+                                          false /* filter_young */,
+                                          NULL  /* card_ptr */);
 
    // Since this is performed in the event of an evacuation failure,
    // we shouldn't see a non-null stop point
@@ -735,12 +728,6 @@
                                 (OopClosure*)&mux :
                                 (OopClosure*)&update_rs_oop_cl));
 
-  // Undirty the card.
-  *card_ptr = CardTableModRefBS::clean_card_val();
-  // We must complete this write before we do any of the reads below.
-  OrderAccess::storeload();
-  // And process it, being careful of unallocated portions of TLAB's.
-
   // The region for the current card may be a young region. The
   // current card may have been a card that was evicted from the
   // card cache. When the card was inserted into the cache, we had
@@ -749,7 +736,7 @@
   // and tagged as young.
   //
   // We wish to filter out cards for such a region but the current
-  // thread, if we're running conucrrently, may "see" the young type
+  // thread, if we're running concurrently, may "see" the young type
   // change at any time (so an earlier "is_young" check may pass or
   // fail arbitrarily). We tell the iteration code to perform this
   // filtering when it has been determined that there has been an actual
@@ -759,7 +746,8 @@
   HeapWord* stop_point =
     r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                         &filter_then_update_rs_oop_cl,
-                                        filter_young);
+                                        filter_young,
+                                        card_ptr);
 
   // If stop_point is non-null, then we encountered an unallocated region
   // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,9 @@
 
 
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
-  assert(pre_val->is_oop_or_null(true), "Error");
+  // Nulls should have been already filtered.
+  assert(pre_val->is_oop(true), "Error");
+
   if (!JavaThread::satb_mark_queue_set().is_active()) return;
   Thread* thr = Thread::current();
   if (thr->is_Java_thread()) {
@@ -59,20 +61,6 @@
   }
 }
 
-// When we know the current java thread:
-template <class T> void
-G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
-                                                    oop new_val,
-                                                    JavaThread* jt) {
-  if (!JavaThread::satb_mark_queue_set().is_active()) return;
-  T heap_oop = oopDesc::load_heap_oop(field);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(pre_val->is_oop(true /* ignore mark word */), "Error");
-    jt->satb_mark_queue().enqueue(pre_val);
-  }
-}
-
 template <class T> void
 G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
   if (!JavaThread::satb_mark_queue_set().is_active()) return;
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -37,12 +37,11 @@
 // snapshot-at-the-beginning marking.
 
 class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
-private:
+public:
   // Add "pre_val" to a set of objects that may have been disconnected from the
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-public:
   G1SATBCardTableModRefBS(MemRegion whole_heap,
                           int max_covered_regions);
 
@@ -61,10 +60,6 @@
     }
   }
 
-  // When we know the current java thread:
-  template <class T> static void write_ref_field_pre_static(T* field, oop newVal,
-                                                            JavaThread* jt);
-
   // We export this to make it available in cases where the static
   // type of the barrier set is known.  Note that it is non-virtual.
   template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
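
With write_ref_field_pre_static() removed and enqueue() now asserting a non-null oop, filtering null pre-values is the caller's job. A standalone analogue of that caller-side shape (the types and queue here are placeholders for illustration, not the HotSpot ones):

  #include <deque>

  struct Obj {};                        // placeholder for oop
  static std::deque<Obj*> satb_queue;   // placeholder for the SATB queue

  void enqueue(Obj* pre_val) {
    // Mirrors the strengthened assert: only non-null values arrive here.
    satb_queue.push_back(pre_val);
  }

  void write_ref_field_pre(Obj** field) {
    Obj* pre_val = *field;              // read the old value before the store
    if (pre_val != nullptr) {           // null filtering at the call site
      enqueue(pre_val);
    }
  }
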
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -89,13 +89,9 @@
           "The number of discovered reference objects to process before "   \
           "draining concurrent marking work queues.")                       \
                                                                             \
-  experimental(bool, G1UseConcMarkReferenceProcessing, false,               \
+  experimental(bool, G1UseConcMarkReferenceProcessing, true,                \
           "If true, enable reference discovery during concurrent "          \
-          "marking and reference processing at the end of remark "          \
-          "(unsafe).")                                                      \
-                                                                            \
-  develop(bool, G1SATBBarrierPrintNullPreVals, false,                       \
-          "If true, count frac of ptr writes with null pre-vals.")          \
+          "marking and reference processing at the end of remark.")         \
                                                                             \
   product(intx, G1SATBBufferSize, 1*K,                                      \
           "Number of entries in an SATB log buffer.")                       \
@@ -150,12 +146,6 @@
   develop(bool, G1PrintParCleanupStats, false,                              \
           "When true, print extra stats about parallel cleanup.")           \
                                                                             \
-  develop(bool, G1DisablePreBarrier, false,                                 \
-          "Disable generation of pre-barrier (i.e., marking barrier)   ")   \
-                                                                            \
-  develop(bool, G1DisablePostBarrier, false,                                \
-          "Disable generation of post-barrier (i.e., RS barrier)   ")       \
-                                                                            \
   product(intx, G1UpdateBufferSize, 256,                                    \
           "Size of an update buffer")                                       \
                                                                             \
@@ -310,13 +300,22 @@
   develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                   \
           "Artificial delay during concurrent region freeing")              \
                                                                             \
+  develop(uintx, G1DummyRegionsPerGC, 0,                                    \
+          "The number of dummy regions G1 will allocate at the end of "     \
+          "each evacuation pause in order to artificially fill up the "     \
+          "heap and stress the marking implementation.")                    \
+                                                                            \
   develop(bool, ReduceInitialCardMarksForG1, false,                         \
           "When ReduceInitialCardMarks is true, this flag setting "         \
           " controls whether G1 allows the RICM optimization")              \
                                                                             \
   develop(bool, G1ExitOnExpansionFailure, false,                            \
           "Raise a fatal VM exit out of memory failure in the event "       \
-          " that heap expansion fails due to running out of swap.")
+          " that heap expansion fails due to running out of swap.")         \
+                                                                            \
+  develop(uintx, G1ConcMarkForceOverflow, 0,                                \
+          "The number of times we'll force an overflow during "             \
+          "concurrent marking")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
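
As a usage note rather than something this patch documents: experimental flags such as G1UseConcMarkReferenceProcessing can only be changed when -XX:+UnlockExperimentalVMOptions is given (so -XX:+UnlockExperimentalVMOptions -XX:-G1UseConcMarkReferenceProcessing would restore the previous default), while develop flags such as G1DummyRegionsPerGC and G1ConcMarkForceOverflow are compile-time constants in product builds and can only be set on the command line of a debug build.
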
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -376,6 +376,17 @@
   if (clear_space) clear(SpaceDecorator::Mangle);
 }
 
+void HeapRegion::par_clear() {
+  assert(used() == 0, "the region should have been already cleared");
+  assert(capacity() == (size_t) HeapRegion::GrainBytes,
+         "should be back to normal");
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->clear();
+  CardTableModRefBS* ct_bs =
+                   (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
+  ct_bs->clear(MemRegion(bottom(), end()));
+}
+
 // <PREDICTION>
 void HeapRegion::calc_gc_efficiency() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -600,7 +611,15 @@
 HeapRegion::
 oops_on_card_seq_iterate_careful(MemRegion mr,
                                  FilterOutOfRegionClosure* cl,
-                                 bool filter_young) {
+                                 bool filter_young,
+                                 jbyte* card_ptr) {
+  // Currently, we should only have to clean the card if filter_young
+  // is true and vice versa.
+  if (filter_young) {
+    assert(card_ptr != NULL, "pre-condition");
+  } else {
+    assert(card_ptr == NULL, "pre-condition");
+  }
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If we're within a stop-world GC, then we might look at a card in a
@@ -626,6 +645,15 @@
 
   assert(!is_young(), "check value of filter_young");
 
+  // We can only clean the card here, after we make the decision that
+  // the card is not young. And we only clean the card if we have been
+  // asked to (i.e., card_ptr != NULL).
+  if (card_ptr != NULL) {
+    *card_ptr = CardTableModRefBS::clean_card_val();
+    // We must complete this write before we do any of the reads below.
+    OrderAccess::storeload();
+  }
+
   // We used to use "block_start_careful" here.  But we're actually happy
   // to update the BOT while we do this...
   HeapWord* cur = block_start(mr.start());
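
The card is now cleaned inside the iteration, only after the region is known not to be young, and the clean is ordered before the subsequent heap reads by OrderAccess::storeload(). A standalone analogue of that clean-then-fence-then-read ordering, using std::atomic purely for illustration (the constant is illustrative, not the card table's actual value):

  #include <atomic>
  #include <cstdint>

  constexpr uint8_t kCleanCard = 0xff;   // illustrative "clean" value

  void refine_card(std::atomic<uint8_t>* card_ptr) {
    // 1. Clean the card first, so any mutator store after this point that
    //    re-dirties it is guaranteed to be picked up by a later pass.
    card_ptr->store(kCleanCard, std::memory_order_relaxed);

    // 2. StoreLoad barrier (OrderAccess::storeload() in HotSpot): the clean
    //    must be visible before we read heap state below, otherwise an
    //    update could slip in between and be lost.
    std::atomic_thread_fence(std::memory_order_seq_cst);

    // 3. ...now walk the objects covered by the card and update
    //    remembered sets.
  }
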
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -584,6 +584,7 @@
 
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space);
+  void par_clear();
 
   void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 
@@ -802,12 +803,16 @@
   HeapWord*
   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 
-  // In this version - if filter_young is true and the region
-  // is a young region then we skip the iteration.
+  // filter_young: if true and the region is a young region then we
+  // skip the iteration.
+  // card_ptr: if not NULL, and we decide that the card is not young
+  // and we iterate over it, we'll clean the card before we start the
+  // iteration.
   HeapWord*
   oops_on_card_seq_iterate_careful(MemRegion mr,
                                    FilterOutOfRegionClosure* cl,
-                                   bool filter_young);
+                                   bool filter_young,
+                                   jbyte* card_ptr);
 
   // A version of block start that is guaranteed to find *some* block
   // boundary at or before "p", but does not object iteration, and may
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,49 +29,48 @@
 #include "memory/sharedHeap.hpp"
 #include "memory/space.inline.hpp"
 #include "memory/universe.hpp"
+#include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/virtualspace.hpp"
 
-void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
-                                                        DirtyCardToOopClosure* dcto_cl,
-                                                        MemRegionClosure* cl,
-                                                        bool clear,
-                                                        int n_threads) {
-  if (n_threads > 0) {
-    assert((n_threads == 1 && ParallelGCThreads == 0) ||
-           n_threads <= (int)ParallelGCThreads,
-           "# worker threads != # requested!");
-    // Make sure the LNC array is valid for the space.
-    jbyte**   lowest_non_clean;
-    uintptr_t lowest_non_clean_base_chunk_index;
-    size_t    lowest_non_clean_chunk_size;
-    get_LNC_array_for_space(sp, lowest_non_clean,
-                            lowest_non_clean_base_chunk_index,
-                            lowest_non_clean_chunk_size);
+void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
+                                                             OopsInGenClosure* cl,
+                                                             CardTableRS* ct,
+                                                             int n_threads) {
+  assert(n_threads > 0, "Error: expected n_threads > 0");
+  assert((n_threads == 1 && ParallelGCThreads == 0) ||
+         n_threads <= (int)ParallelGCThreads,
+         "# worker threads != # requested!");
+  // Make sure the LNC array is valid for the space.
+  jbyte**   lowest_non_clean;
+  uintptr_t lowest_non_clean_base_chunk_index;
+  size_t    lowest_non_clean_chunk_size;
+  get_LNC_array_for_space(sp, lowest_non_clean,
+                          lowest_non_clean_base_chunk_index,
+                          lowest_non_clean_chunk_size);
 
-    int n_strides = n_threads * StridesPerThread;
-    SequentialSubTasksDone* pst = sp->par_seq_tasks();
-    pst->set_n_threads(n_threads);
-    pst->set_n_tasks(n_strides);
+  int n_strides = n_threads * ParGCStridesPerThread;
+  SequentialSubTasksDone* pst = sp->par_seq_tasks();
+  pst->set_n_threads(n_threads);
+  pst->set_n_tasks(n_strides);
 
-    int stride = 0;
-    while (!pst->is_task_claimed(/* reference */ stride)) {
-      process_stride(sp, mr, stride, n_strides, dcto_cl, cl, clear,
-                     lowest_non_clean,
-                     lowest_non_clean_base_chunk_index,
-                     lowest_non_clean_chunk_size);
-    }
-    if (pst->all_tasks_completed()) {
-      // Clear lowest_non_clean array for next time.
-      intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
-      uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
-      for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
-        intptr_t ind = ch - lowest_non_clean_base_chunk_index;
-        assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
-               "Bounds error");
-        lowest_non_clean[ind] = NULL;
-      }
+  int stride = 0;
+  while (!pst->is_task_claimed(/* reference */ stride)) {
+    process_stride(sp, mr, stride, n_strides, cl, ct,
+                   lowest_non_clean,
+                   lowest_non_clean_base_chunk_index,
+                   lowest_non_clean_chunk_size);
+  }
+  if (pst->all_tasks_completed()) {
+    // Clear lowest_non_clean array for next time.
+    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
+    uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
+    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
+      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
+      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
+             "Bounds error");
+      lowest_non_clean[ind] = NULL;
     }
   }
 }
@@ -81,14 +80,13 @@
 process_stride(Space* sp,
                MemRegion used,
                jint stride, int n_strides,
-               DirtyCardToOopClosure* dcto_cl,
-               MemRegionClosure* cl,
-               bool clear,
+               OopsInGenClosure* cl,
+               CardTableRS* ct,
                jbyte** lowest_non_clean,
                uintptr_t lowest_non_clean_base_chunk_index,
                size_t    lowest_non_clean_chunk_size) {
-  // We don't have to go downwards here; it wouldn't help anyway,
-  // because of parallelism.
+  // We go from higher to lower addresses here; it wouldn't help that much
+  // because of the strided parallelism pattern used here.
 
   // Find the first card address of the first chunk in the stride that is
   // at least "bottom" of the used region.
@@ -101,25 +99,35 @@
   if ((uintptr_t)stride >= start_chunk_stride_num) {
     chunk_card_start = (jbyte*)(start_card +
                                 (stride - start_chunk_stride_num) *
-                                CardsPerStrideChunk);
+                                ParGCCardsPerStrideChunk);
   } else {
     // Go ahead to the next chunk group boundary, then to the requested stride.
     chunk_card_start = (jbyte*)(start_card +
                                 (n_strides - start_chunk_stride_num + stride) *
-                                CardsPerStrideChunk);
+                                ParGCCardsPerStrideChunk);
   }
 
   while (chunk_card_start < end_card) {
-    // We don't have to go downwards here; it wouldn't help anyway,
-    // because of parallelism.  (We take care with "min_done"; see below.)
+    // Even though we go from lower to higher addresses below, the
+    // strided parallelism can interleave the actual processing of the
+    // dirty pages in various ways. For a specific chunk within this
+    // stride, we take care to avoid double scanning or missing a card
+    // by suitably initializing the "min_done" field in process_chunk_boundaries()
+    // below, together with the dirty region extension accomplished in
+    // DirtyCardToOopClosure::do_MemRegion().
+    jbyte*    chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
     // Invariant: chunk_mr should be fully contained within the "used" region.
-    jbyte*    chunk_card_end = chunk_card_start + CardsPerStrideChunk;
     MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                          chunk_card_end >= end_card ?
                                            used.end() : addr_for(chunk_card_end));
     assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
     assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
 
+    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
+                                                     cl->gen_boundary());
+    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
+
+
     // Process the chunk.
     process_chunk_boundaries(sp,
                              dcto_cl,
@@ -129,13 +137,30 @@
                              lowest_non_clean_base_chunk_index,
                              lowest_non_clean_chunk_size);
 
-    non_clean_card_iterate_work(chunk_mr, cl, clear);
+    // We want the LNC array updates above in process_chunk_boundaries
+    // to be visible before any of the card table value changes as a
+    // result of the dirty card iteration below.
+    OrderAccess::storestore();
+
+    // We do not call the non_clean_card_iterate_serial() version because
+    // we want to clear the cards: clear_cl here does the work of finding
+    // contiguous dirty ranges of cards to process and clear.
+    clear_cl.do_MemRegion(chunk_mr);
 
     // Find the next chunk of the stride.
-    chunk_card_start += CardsPerStrideChunk * n_strides;
+    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
   }
 }
 
+
+// If you want a talkative process_chunk_boundaries,
+// then #define NOISY(x) x
+#ifdef NOISY
+#error "Encountered a global preprocessor flag, NOISY, which might clash with local definition to follow"
+#else
+#define NOISY(x)
+#endif
+
 void
 CardTableModRefBS::
 process_chunk_boundaries(Space* sp,
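
The renamed ParGCCardsPerStrideChunk and ParGCStridesPerThread define how the card table is striped across workers: stride s of n_strides handles chunks s, s + n_strides, s + 2*n_strides, and so on, each ParGCCardsPerStrideChunk cards wide. A small sketch with illustrative numbers (not necessarily the VM defaults):

  #include <cstdio>

  int main() {
    const int cards_per_chunk    = 256;   // stands in for ParGCCardsPerStrideChunk
    const int n_threads          = 4;
    const int strides_per_thread = 2;     // stands in for ParGCStridesPerThread
    const int n_strides          = n_threads * strides_per_thread;
    const int total_cards        = 8192;  // cards covering the space
    const int stride             = 3;     // the stride this worker claimed

    for (int chunk_start = stride * cards_per_chunk;
         chunk_start < total_cards;
         chunk_start += cards_per_chunk * n_strides) {
      std::printf("stride %d scans cards [%d, %d)\n",
                  stride, chunk_start, chunk_start + cards_per_chunk);
    }
    return 0;
  }
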
@@ -146,126 +171,232 @@
                          uintptr_t lowest_non_clean_base_chunk_index,
                          size_t    lowest_non_clean_chunk_size)
 {
-  // We must worry about the chunk boundaries.
+  // We must worry about non-array objects that cross chunk boundaries,
+  // because such objects are both precisely and imprecisely marked:
+  // .. if the head of such an object is dirty, the entire object
+  //    needs to be scanned, under the interpretation that this
+  //    was an imprecise mark
+  // .. if the head of such an object is not dirty, we can assume
+  //    precise marking and it's efficient to scan just the dirty
+  //    cards.
+  // In either case, each scanned reference must be scanned precisely
+  // once so as to avoid cloning of a young referent. For efficiency,
+  // our closures depend on this property and do not protect against
+  // double scans.
 
-  // First, set our max_to_do:
-  HeapWord* max_to_do = NULL;
   uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
   cur_chunk_index           = cur_chunk_index - lowest_non_clean_base_chunk_index;
 
+  NOISY(tty->print_cr("===========================================================================");)
+  NOISY(tty->print_cr(" process_chunk_boundary: Called with [" PTR_FORMAT "," PTR_FORMAT ")",
+                      chunk_mr.start(), chunk_mr.end());)
+
+  // First, set "our" lowest_non_clean entry, which would be
+  // used by the thread scanning an adjoining left chunk with
+  // a non-array object straddling the mutual boundary.
+  // Find the object that spans our boundary, if one exists.
+  // first_block is the block possibly straddling our left boundary.
+  HeapWord* first_block = sp->block_start(chunk_mr.start());
+  assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
+         "First chunk should always have a co-initial block");
+  // Does the block straddle the chunk's left boundary, and is it
+  // a non-array object?
+  if (first_block < chunk_mr.start()        // first block straddles left bdry
+      && sp->block_is_obj(first_block)      // first block is an object
+      && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
+           || oop(first_block)->is_typeArray())) {
+    // Find our least non-clean card, so that a left neighbour
+    // does not scan an object straddling the mutual boundary
+    // too far to the right, and attempt to scan a portion of
+    // that object twice.
+    jbyte* first_dirty_card = NULL;
+    jbyte* last_card_of_first_obj =
+        byte_for(first_block + sp->block_size(first_block) - 1);
+    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
+    jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
+    jbyte* last_card_to_check =
+      (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
+                    (intptr_t) last_card_of_first_obj);
+    // Note that this does not need to go beyond our last card
+    // if our first object completely straddles this chunk.
+    for (jbyte* cur = first_card_of_cur_chunk;
+         cur <= last_card_to_check; cur++) {
+      jbyte val = *cur;
+      if (card_will_be_scanned(val)) {
+        first_dirty_card = cur; break;
+      } else {
+        assert(!card_may_have_been_dirty(val), "Error");
+      }
+    }
+    if (first_dirty_card != NULL) {
+      NOISY(tty->print_cr(" LNC: Found a dirty card at " PTR_FORMAT " in current chunk",
+                    first_dirty_card);)
+      assert(0 <= cur_chunk_index && cur_chunk_index < lowest_non_clean_chunk_size,
+             "Bounds error.");
+      assert(lowest_non_clean[cur_chunk_index] == NULL,
+             "Write exactly once : value should be stable hereafter for this round");
+      lowest_non_clean[cur_chunk_index] = first_dirty_card;
+    } NOISY(else {
+      tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL");
+      // In the future, we could have this thread look for a non-NULL value to copy from its
+      // right neighbour (up to the end of the first object).
+      if (last_card_of_cur_chunk < last_card_of_first_obj) {
+        tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n"
+                      "   might be efficient to get value from right neighbour?");
+      }
+    })
+  } else {
+    // In this case we can help our neighbour by just asking them
+    // to stop at our first card (even though it may not be dirty).
+    NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");)
+    assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
+    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
+    lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
+  }
+  NOISY(tty->print_cr(" process_chunk_boundary: lowest_non_clean[" INTPTR_FORMAT "] = " PTR_FORMAT
+                "   which corresponds to the heap address " PTR_FORMAT,
+                cur_chunk_index, lowest_non_clean[cur_chunk_index],
+                (lowest_non_clean[cur_chunk_index] != NULL)
+                ? addr_for(lowest_non_clean[cur_chunk_index])
+                : NULL);)
+  NOISY(tty->print_cr("---------------------------------------------------------------------------");)
+
+  // Next, set our own max_to_do, which will strictly/exclusively bound
+  // the highest address that we will scan past the right end of our chunk.
+  HeapWord* max_to_do = NULL;
   if (chunk_mr.end() < used.end()) {
-    // This is not the last chunk in the used region.  What is the last
-    // object?
-    HeapWord* last_block = sp->block_start(chunk_mr.end());
+    // This is not the last chunk in the used region.
+    // What is our last block? We check the first block of
+    // the next (right) chunk rather than strictly check our last block
+    // because it's potentially more efficient to do so.
+    HeapWord* const last_block = sp->block_start(chunk_mr.end());
     assert(last_block <= chunk_mr.end(), "In case this property changes.");
-    if (last_block == chunk_mr.end()
-        || !sp->block_is_obj(last_block)) {
+    if ((last_block == chunk_mr.end())     // our last block does not straddle boundary
+        || !sp->block_is_obj(last_block)   // last_block isn't an object
+        || oop(last_block)->is_objArray()  // last_block is an array (precisely marked)
+        || oop(last_block)->is_typeArray()) {
       max_to_do = chunk_mr.end();
-
+      NOISY(tty->print_cr(" process_chunk_boundary: Last block on this card is not a non-array object;\n"
+                         "   max_to_do left at " PTR_FORMAT, max_to_do);)
     } else {
-      // It is an object and starts before the end of the current chunk.
+      assert(last_block < chunk_mr.end(), "Tautology");
+      // It is a non-array object that straddles the right boundary of this chunk.
       // last_obj_card is the card corresponding to the start of the last object
       // in the chunk.  Note that the last object may not start in
       // the chunk.
-      jbyte* last_obj_card = byte_for(last_block);
-      if (!card_may_have_been_dirty(*last_obj_card)) {
-        // The card containing the head is not dirty.  Any marks in
+      jbyte* const last_obj_card = byte_for(last_block);
+      const jbyte val = *last_obj_card;
+      if (!card_will_be_scanned(val)) {
+        assert(!card_may_have_been_dirty(val), "Error");
+        // The card containing the head is not dirty.  Any marks on
         // subsequent cards still in this chunk must have been made
-        // precisely; we can cap processing at the end.
+        // precisely; we can cap processing at the end of our chunk.
         max_to_do = chunk_mr.end();
+        NOISY(tty->print_cr(" process_chunk_boundary: Head of last object on this card is not dirty;\n"
+                            "   max_to_do left at " PTR_FORMAT,
+                            max_to_do);)
       } else {
         // The last object must be considered dirty, and extends onto the
         // following chunk.  Look for a dirty card in that chunk that will
         // bound our processing.
         jbyte* limit_card = NULL;
-        size_t last_block_size = sp->block_size(last_block);
-        jbyte* last_card_of_last_obj =
+        const size_t last_block_size = sp->block_size(last_block);
+        jbyte* const last_card_of_last_obj =
           byte_for(last_block + last_block_size - 1);
-        jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end());
+        jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
         // This search potentially goes a long distance looking
-        // for the next card that will be scanned.  For example,
-        // an object that is an array of primitives will not
-        // have any cards covering regions interior to the array
-        // that will need to be scanned. The scan can be terminated
-        // at the last card of the next chunk.  That would leave
-        // limit_card as NULL and would result in "max_to_do"
-        // being set with the LNC value or with the end
-        // of the last block.
-        jbyte* last_card_of_next_chunk = first_card_of_next_chunk +
-          CardsPerStrideChunk;
-        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start())
-          == CardsPerStrideChunk, "last card of next chunk may be wrong");
-        jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj,
-                                                  last_card_of_next_chunk);
+        // for the next card that will be scanned, terminating
+        // at the end of the last_block, if no earlier dirty card
+        // is found.
+        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
+               "last card of next chunk may be wrong");
         for (jbyte* cur = first_card_of_next_chunk;
-             cur <= last_card_to_check; cur++) {
-          if (card_will_be_scanned(*cur)) {
+             cur <= last_card_of_last_obj; cur++) {
+          const jbyte val = *cur;
+          if (card_will_be_scanned(val)) {
+            NOISY(tty->print_cr(" Found a non-clean card " PTR_FORMAT " with value 0x%x",
+                                cur, (int)val);)
             limit_card = cur; break;
+          } else {
+            assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
           }
         }
-        assert(0 <= cur_chunk_index+1 &&
-               cur_chunk_index+1 < lowest_non_clean_chunk_size,
+        if (limit_card != NULL) {
+          max_to_do = addr_for(limit_card);
+          assert(limit_card != NULL && max_to_do != NULL, "Error");
+          NOISY(tty->print_cr(" process_chunk_boundary: Found a dirty card at " PTR_FORMAT
+                        "   max_to_do set at " PTR_FORMAT " which is before end of last block in chunk: "
+                        PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,
+                        limit_card, max_to_do, last_block, last_block_size, (last_block+last_block_size));)
+        } else {
+          // The following is a pessimistic value, because it's possible
+          // that a dirty card on a subsequent chunk has been cleared by
+          // the time we get to look at it; we'll correct for that further below,
+          // using the LNC array which records the least non-clean card
+          // before cards were cleared in a particular chunk.
+          limit_card = last_card_of_last_obj;
+          max_to_do = last_block + last_block_size;
+          assert(limit_card != NULL && max_to_do != NULL, "Error");
+          NOISY(tty->print_cr(" process_chunk_boundary: Found no dirty card before end of last block in chunk\n"
+                              "   Setting limit_card to " PTR_FORMAT
+                              " and max_to_do " PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,
+                              limit_card, last_block, last_block_size, max_to_do);)
+        }
+        assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
                "Bounds error.");
-        // LNC for the next chunk
-        jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1];
-        if (limit_card == NULL) {
-          limit_card = lnc_card;
-        }
-        if (limit_card != NULL) {
+        // It is possible that a dirty card for the last object may have been
+        // cleared before we had a chance to examine it. In that case, the value
+        // will have been logged in the LNC for that chunk.
+        // We need to examine as many chunks to the right as this object
+        // covers.
+        const uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
+                                                    - lowest_non_clean_base_chunk_index;
+        DEBUG_ONLY(const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
+                                                      - lowest_non_clean_base_chunk_index;)
+        assert(last_chunk_index_to_check <= last_chunk_index,
+               err_msg("Out of bounds: last_chunk_index_to_check " INTPTR_FORMAT
+                       " exceeds last_chunk_index " INTPTR_FORMAT,
+                       last_chunk_index_to_check, last_chunk_index));
+        for (uintptr_t lnc_index = cur_chunk_index + 1;
+             lnc_index <= last_chunk_index_to_check;
+             lnc_index++) {
+          jbyte* lnc_card = lowest_non_clean[lnc_index];
           if (lnc_card != NULL) {
-            limit_card = (jbyte*)MIN2((intptr_t)limit_card,
-                                      (intptr_t)lnc_card);
-          }
-          max_to_do = addr_for(limit_card);
-        } else {
-          max_to_do = last_block + last_block_size;
+            // we can stop at the first non-NULL entry we find
+            if (lnc_card <= limit_card) {
+              NOISY(tty->print_cr(" process_chunk_boundary: LNC card " PTR_FORMAT " is lower than limit_card " PTR_FORMAT "\n"
+                                  "   max_to_do will be lowered to " PTR_FORMAT " from " PTR_FORMAT,
+                                  lnc_card, limit_card, addr_for(lnc_card), max_to_do);)
+              limit_card = lnc_card;
+              max_to_do = addr_for(limit_card);
+              assert(limit_card != NULL && max_to_do != NULL, "Error");
+            }
+            // In any case, we break now
+            break;
+          }  // else continue to look for a non-NULL entry if any
         }
+        assert(limit_card != NULL && max_to_do != NULL, "Error");
       }
+      assert(max_to_do != NULL, "OOPS 1 !");
     }
-    assert(max_to_do != NULL, "OOPS!");
+    assert(max_to_do != NULL, "OOPS 2!");
   } else {
     max_to_do = used.end();
+    NOISY(tty->print_cr(" process_chunk_boundary: Last chunk of this space;\n"
+                  "   max_to_do left at " PTR_FORMAT,
+                  max_to_do);)
   }
+  assert(max_to_do != NULL, "OOPS 3!");
   // Now we can set the closure we're using so it doesn't go beyond
   // max_to_do.
   dcto_cl->set_min_done(max_to_do);
 #ifndef PRODUCT
   dcto_cl->set_last_bottom(max_to_do);
 #endif
+  NOISY(tty->print_cr("===========================================================================\n");)
+}
 
-  // Now we set *our" lowest_non_clean entry.
-  // Find the object that spans our boundary, if one exists.
-  // Nothing to do on the first chunk.
-  if (chunk_mr.start() > used.start()) {
-    // first_block is the block possibly spanning the chunk start
-    HeapWord* first_block = sp->block_start(chunk_mr.start());
-    // Does the block span the start of the chunk and is it
-    // an object?
-    if (first_block < chunk_mr.start() &&
-        sp->block_is_obj(first_block)) {
-      jbyte* first_dirty_card = NULL;
-      jbyte* last_card_of_first_obj =
-          byte_for(first_block + sp->block_size(first_block) - 1);
-      jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
-      jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
-      jbyte* last_card_to_check =
-        (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
-                      (intptr_t) last_card_of_first_obj);
-      for (jbyte* cur = first_card_of_cur_chunk;
-           cur <= last_card_to_check; cur++) {
-        if (card_will_be_scanned(*cur)) {
-          first_dirty_card = cur; break;
-        }
-      }
-      if (first_dirty_card != NULL) {
-        assert(0 <= cur_chunk_index &&
-                 cur_chunk_index < lowest_non_clean_chunk_size,
-               "Bounds error.");
-        lowest_non_clean[cur_chunk_index] = first_dirty_card;
-      }
-    }
-  }
-}
+#undef NOISY
 
 void
 CardTableModRefBS::
@@ -282,8 +413,8 @@
   // LNC array for the covered region.  Any later expansion can't affect
   // the used_at_save_marks region.
   // (I observed a bug in which the first thread to execute this would
-  // resize, and then it would cause "expand_and_allocates" that would
-  // Increase the number of chunks in the covered region.  Then a second
+  // resize, and then it would cause "expand_and_allocate" that would
+  // increase the number of chunks in the covered region.  Then a second
   // thread would come and execute this, see that the size didn't match,
   // and free and allocate again.  So the first thread would be using a
   // freed "_lowest_non_clean" array.)
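
A minimal standalone sketch of the LNC clamping step in the process_chunk_boundary() hunk above, assuming a plain array stands in for the lowest_non_clean card records; all names and values here are illustrative, not HotSpot's own:

#include <cstdio>
#include <cstdint>

int main() {
  // One "lowest non-clean" record per chunk; 0 means "no dirty card was logged".
  // Values are toy card addresses; a lower value means a lower card address.
  const int num_chunks = 6;
  uintptr_t lowest_non_clean[num_chunks] = { 0, 0, 0, 42, 0, 57 };

  uintptr_t limit_card = 50;          // pessimistic limit found in the current chunk
  int cur_chunk_index = 1;            // chunk being processed
  int last_chunk_index_to_check = 5;  // rightmost chunk the straddling object covers

  // Walk right until the first recorded entry; clamp the limit if it is lower.
  for (int i = cur_chunk_index + 1; i <= last_chunk_index_to_check; i++) {
    if (lowest_non_clean[i] != 0) {
      if (lowest_non_clean[i] <= limit_card) {
        limit_card = lowest_non_clean[i];  // a cleared dirty card was remembered here
      }
      break;  // the first non-empty entry decides, as in the loop above
    }
  }
  printf("clamped limit_card = %lu\n", (unsigned long)limit_card);  // prints 42
  return 0;
}
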
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -77,7 +77,23 @@
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
-      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
+#ifndef PRODUCT
+      if (_g->to()->is_in_reserved(obj)) {
+        tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p);
+        GenCollectedHeap* gch =  (GenCollectedHeap*)Universe::heap();
+        Space* sp = gch->space_containing(p);
+        oop obj = oop(sp->block_start(p));
+        assert((HeapWord*)obj < (HeapWord*)p, "Error");
+        tty->print_cr("Object: " PTR_FORMAT, obj);
+        tty->print_cr("-------");
+        obj->print();
+        tty->print_cr("-----");
+        tty->print_cr("Heap:");
+        tty->print_cr("-----");
+        gch->print();
+        ShouldNotReachHere();
+      }
+#endif
       // OK, we need to ensure that it is copied.
       // We read the klass and mark in this order, so that we can reliably
       // get the size of the object: if the mark we read is not a
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -339,6 +339,21 @@
   return false;
 }
 
+bool ParallelScavengeHeap::is_scavengable(const void* addr) {
+  return is_in_young((oop)addr);
+}
+
+#ifdef ASSERT
+// Don't implement this by using is_in_young().  This method is used
+// in some cases to check that is_in_young() is correct.
+bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
+  assert(is_in_reserved(p) || p == NULL,
+    "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is perm (low addr), old, young (high addr)
+  return p >= old_gen()->reserved().end();
+}
+#endif
+
 // There are two levels of allocation policy here.
 //
 // When an allocation request fails, the requesting thread must invoke a VM
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -127,6 +127,12 @@
   // collection.
   virtual bool is_maximal_no_gc() const;
 
+  // Return true if the reference points to an object that
+  // can be moved in a partial collection.  For currently implemented
+  // generational collectors that means during a collection of
+  // the young gen.
+  virtual bool is_scavengable(const void* addr);
+
   // Does this heap support heap inspection? (+PrintClassHistogram)
   bool supports_heap_inspection() const { return true; }
 
@@ -143,6 +149,10 @@
     return perm_gen()->reserved().contains(p);
   }
 
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void *p);
+#endif
+
   bool is_permanent(const void *p) const {    // committed part
     return perm_gen()->is_in(p);
   }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -51,7 +51,12 @@
 }
 
 inline bool ParallelScavengeHeap::is_in_young(oop p) {
-  return young_gen()->is_in_reserved(p);
+  // Assumes the old gen address range is lower than that of the young gen.
+  const void* loc = (void*) p;
+  bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
+  assert(result == young_gen()->is_in_reserved(p),
+        err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
 }
 
 inline bool ParallelScavengeHeap::is_in_old_or_perm(oop p) {
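
The simplified is_in_young() above relies on the old gen being reserved below the young gen, so membership reduces to a single pointer comparison against the young gen's start. A self-contained sketch of that idea, with made-up addresses and a toy struct in place of the VM's generation types:

#include <cstdio>
#include <cstdint>

struct ToyGen { uintptr_t start; uintptr_t end; };   // reserved range [start, end)

// Valid only under the assumed layout: old gen below young gen, young gen on top.
static bool is_in_young(uintptr_t addr, const ToyGen& young) {
  return addr >= young.start;
}

int main() {
  ToyGen old_gen   = { 0x1000, 0x8000 };
  ToyGen young_gen = { 0x8000, 0xC000 };   // sits above the old gen
  uintptr_t young_obj = 0x9000;
  uintptr_t old_obj   = 0x2000;
  printf("%d %d\n", is_in_young(young_obj, young_gen),   // 1
                    is_in_young(old_obj, young_gen));    // 0
  (void)old_gen;
  return 0;
}
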
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -173,7 +173,7 @@
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
     TraceCollectorStats tcs(counters());
-    TraceMemoryManagerStats tms(true /* Full GC */);
+    TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
     if (TraceGen1Time) accumulated_time()->start();
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -176,10 +176,6 @@
   object_mark_sweep()->compact(ZapUnusedHeapArea);
 }
 
-void PSOldGen::move_and_update(ParCompactionManager* cm) {
-  PSParallelCompact::move_and_update(cm, PSParallelCompact::old_space_id);
-}
-
 size_t PSOldGen::contiguous_available() const {
   return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
 }
@@ -228,6 +224,12 @@
   const size_t alignment = virtual_space()->alignment();
   size_t aligned_bytes  = align_size_up(bytes, alignment);
   size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
+
+  if (UseNUMA) {
+    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
+    // providing a page per lgroup. Alignment is larger or equal to the page size.
+    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
+  }
   if (aligned_bytes == 0){
     // The alignment caused the number of bytes to wrap.  An expand_by(0) will
     // return true with the implication that an expansion was done when it
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -143,9 +143,6 @@
   void adjust_pointers();
   void compact();
 
-  // Parallel old
-  virtual void move_and_update(ParCompactionManager* cm);
-
   // Size info
   size_t capacity_in_bytes() const        { return object_space()->capacity_in_bytes(); }
   size_t used_in_bytes() const            { return object_space()->used_in_bytes(); }
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2057,7 +2057,7 @@
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
     TraceCollectorStats tcs(counters());
-    TraceMemoryManagerStats tms(true /* Full GC */);
+    TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
     if (TraceGen1Time) accumulated_time()->start();
 
@@ -2104,11 +2104,7 @@
     // klasses are used in the update of an object?
     compact_perm(vmthread_cm);
 
-    if (UseParallelOldGCCompacting) {
-      compact();
-    } else {
-      compact_serial(vmthread_cm);
-    }
+    compact();
 
     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
     // done before resizing.
@@ -2582,18 +2578,16 @@
     // each thread?
     if (total_dense_prefix_regions > 0) {
       uint tasks_for_dense_prefix = 1;
-      if (UseParallelDensePrefixUpdate) {
-        if (total_dense_prefix_regions <=
-            (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
-          // Don't over partition.  This assumes that
-          // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
-          // so there are not many regions to process.
-          tasks_for_dense_prefix = parallel_gc_threads;
-        } else {
-          // Over partition
-          tasks_for_dense_prefix = parallel_gc_threads *
-            PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
-        }
+      if (total_dense_prefix_regions <=
+          (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
+        // Don't over partition.  This assumes that
+        // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
+        // so there are not many regions to process.
+        tasks_for_dense_prefix = parallel_gc_threads;
+      } else {
+        // Over partition
+        tasks_for_dense_prefix = parallel_gc_threads *
+          PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
       }
       size_t regions_per_thread = total_dense_prefix_regions /
         tasks_for_dense_prefix;
@@ -2733,21 +2727,6 @@
 }
 #endif  // #ifdef ASSERT
 
-void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
-  EventMark m("5 compact serial");
-  TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
-
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  old_gen->start_array()->reset();
-  old_gen->move_and_update(cm);
-  young_gen->move_and_update(cm);
-}
-
 void
 PSParallelCompact::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
@@ -3530,11 +3509,8 @@
            "Object liveness is wrong.");
     return ParMarkBitMap::incomplete;
   }
-  assert(UseParallelOldGCDensePrefix ||
-         (HeapMaximumCompactionInterval > 1) ||
-         (MarkSweepAlwaysCompactCount > 1) ||
-         (forwarding_ptr == new_pointer),
-    "Calculation of new location is incorrect");
+  assert(HeapMaximumCompactionInterval > 1 || MarkSweepAlwaysCompactCount > 1 ||
+         forwarding_ptr == new_pointer, "new location is incorrect");
   return ParMarkBitMap::incomplete;
 }
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1027,9 +1027,6 @@
                                        ParallelTaskTerminator* terminator_ptr,
                                        uint parallel_gc_threads);
 
-  // For debugging only - compacts the old gen serially
-  static void compact_serial(ParCompactionManager* cm);
-
   // If objects are left in eden after a collection, try to move the boundary
   // and absorb them into the old gen.  Returns true if eden was emptied.
   static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
--- a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,12 +121,6 @@
   }
 }
 
-
-
-void PSPermGen::move_and_update(ParCompactionManager* cm) {
-  PSParallelCompact::move_and_update(cm, PSParallelCompact::perm_space_id);
-}
-
 void PSPermGen::precompact() {
   // Reset start array first.
   _start_array.reset();
--- a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,9 +51,6 @@
   // MarkSweep code
   virtual void precompact();
 
-  // Parallel old
-  virtual void move_and_update(ParCompactionManager* cm);
-
   virtual const char* name() const { return "PSPermGen"; }
 };
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -322,7 +322,7 @@
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
     TraceCollectorStats tcs(counters());
-    TraceMemoryManagerStats tms(false /* not full GC */);
+    TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
 
     if (TraceGen0Time) accumulated_time()->start();
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -792,12 +792,6 @@
   to_mark_sweep()->compact(false);
 }
 
-void PSYoungGen::move_and_update(ParCompactionManager* cm) {
-  PSParallelCompact::move_and_update(cm, PSParallelCompact::eden_space_id);
-  PSParallelCompact::move_and_update(cm, PSParallelCompact::from_space_id);
-  PSParallelCompact::move_and_update(cm, PSParallelCompact::to_space_id);
-}
-
 void PSYoungGen::print() const { print_on(tty); }
 void PSYoungGen::print_on(outputStream* st) const {
   st->print(" %-15s", "PSYoungGen");
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -127,9 +127,6 @@
   void adjust_pointers();
   void compact();
 
-  // Parallel Old
-  void move_and_update(ParCompactionManager* cm);
-
   // Called during/after gc
   void swap_spaces();
 
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -76,7 +76,7 @@
     _beforeSweep = 0;
     _coalBirths = 0;
     _coalDeaths = 0;
-    _splitBirths = split_birth? 1 : 0;
+    _splitBirths = (split_birth ? 1 : 0);
     _splitDeaths = 0;
     _returnedBytes = 0;
   }
--- a/src/share/vm/gc_implementation/shared/generationCounters.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/shared/generationCounters.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,15 +51,18 @@
 
     cname = PerfDataManager::counter_name(_name_space, "minCapacity");
     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
+                                     _virtual_space == NULL ? 0 :
                                      _virtual_space->committed_size(), CHECK);
 
     cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
+                                     _virtual_space == NULL ? 0 :
                                      _virtual_space->reserved_size(), CHECK);
 
     cname = PerfDataManager::counter_name(_name_space, "capacity");
     _current_size = PerfDataManager::create_variable(SUN_GC, cname,
-                                      PerfData::U_Bytes,
+                                     PerfData::U_Bytes,
+                                     _virtual_space == NULL ? 0 :
                                      _virtual_space->committed_size(), CHECK);
   }
 }
--- a/src/share/vm/gc_implementation/shared/generationCounters.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_implementation/shared/generationCounters.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,10 +61,11 @@
   }
 
   virtual void update_all() {
-    _current_size->set_value(_virtual_space->committed_size());
+    _current_size->set_value(_virtual_space == NULL ? 0 :
+                             _virtual_space->committed_size());
   }
 
   const char* name_space() const        { return _name_space; }
+
 };
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GENERATIONCOUNTERS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/hSpaceCounters.hpp"
+#include "memory/generation.hpp"
+#include "memory/resourceArea.hpp"
+
+HSpaceCounters::HSpaceCounters(const char* name,
+                               int ordinal,
+                               size_t max_size,
+                               size_t initial_capacity,
+                               GenerationCounters* gc) {
+
+  if (UsePerfData) {
+    EXCEPTION_MARK;
+    ResourceMark rm;
+
+    const char* cns =
+      PerfDataManager::name_space(gc->name_space(), "space", ordinal);
+
+    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
+    strcpy(_name_space, cns);
+
+    const char* cname = PerfDataManager::counter_name(_name_space, "name");
+    PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
+
+    cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
+    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
+                                     (jlong)max_size, CHECK);
+
+    cname = PerfDataManager::counter_name(_name_space, "capacity");
+    _capacity = PerfDataManager::create_variable(SUN_GC, cname,
+                                                 PerfData::U_Bytes,
+                                                 initial_capacity, CHECK);
+
+    cname = PerfDataManager::counter_name(_name_space, "used");
+    _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+                                             (jlong) 0, CHECK);
+
+    cname = PerfDataManager::counter_name(_name_space, "initCapacity");
+    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
+                                     initial_capacity, CHECK);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
+
+#ifndef SERIALGC
+#include "gc_implementation/shared/generationCounters.hpp"
+#include "memory/generation.hpp"
+#include "runtime/perfData.hpp"
+#endif
+
+// HSpaceCounters is a holder class for performance counters
+// that track the collections (logical spaces) in a heap.
+
+class HeapSpaceUsedHelper;
+class G1SpaceMonitoringSupport;
+
+class HSpaceCounters: public CHeapObj {
+  friend class VMStructs;
+
+ private:
+  PerfVariable*        _capacity;
+  PerfVariable*        _used;
+
+  // Constant PerfData types don't need to retain a reference.
+  // However, it's a good idea to document them here.
+
+  char*             _name_space;
+
+ public:
+
+  HSpaceCounters(const char* name, int ordinal, size_t max_size,
+                 size_t initial_capacity, GenerationCounters* gc);
+
+  ~HSpaceCounters() {
+    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+  }
+
+  inline void update_capacity(size_t v) {
+    _capacity->set_value(v);
+  }
+
+  inline void update_used(size_t v) {
+    _used->set_value(v);
+  }
+
+  debug_only(
+    // for security reasons, we do not allow arbitrary reads from
+    // the counters as they may live in shared memory.
+    jlong used() {
+      return _used->get_value();
+    }
+    jlong capacity() {
+      return _capacity->get_value();
+    }
+  )
+
+  inline void update_all(size_t capacity, size_t used) {
+    update_capacity(capacity);
+    update_used(used);
+  }
+
+  const char* name_space() const        { return _name_space; }
+};
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
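
A hedged usage sketch of the update pattern the new HSpaceCounters class exposes. The real class writes jvmstat PerfVariables, so a tiny stand-in class is used here to keep the example self-contained; everything except the update_* signatures is invented for illustration:

#include <cstdio>
#include <cstddef>

// Stand-in for HSpaceCounters: stores values instead of updating PerfVariables.
class ToySpaceCounters {
  size_t _capacity;
  size_t _used;
 public:
  ToySpaceCounters() : _capacity(0), _used(0) {}
  void update_capacity(size_t v) { _capacity = v; }
  void update_used(size_t v)     { _used = v; }
  void update_all(size_t capacity, size_t used) {  // same shape as the real API
    update_capacity(capacity);
    update_used(used);
  }
  void print() const { printf("capacity=%zu used=%zu\n", _capacity, _used); }
};

int main() {
  ToySpaceCounters eden;
  eden.update_all(64u * 1024 * 1024, 13u * 1024 * 1024);  // e.g. after a young GC
  eden.print();
  return 0;
}
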
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -269,6 +269,13 @@
   // space). If you need the more conservative answer use is_permanent().
   virtual bool is_in_permanent(const void *p) const = 0;
 
+
+#ifdef ASSERT
+  // Returns true if "p" is in the part of the
+  // heap being collected.
+  virtual bool is_in_partial_collection(const void *p) = 0;
+#endif
+
   bool is_in_permanent_or_null(const void *p) const {
     return p == NULL || is_in_permanent(p);
   }
@@ -284,11 +291,7 @@
 
   // An object is scavengable if its location may move during a scavenge.
   // (A scavenge is a GC which is not a full GC.)
-  // Currently, this just means it is not perm (and not null).
-  // This could change if we rethink what's in perm-gen.
-  bool is_scavengable(const void *p) const {
-    return !is_in_permanent_or_null(p);
-  }
+  virtual bool is_scavengable(const void *p) = 0;
 
   // Returns "TRUE" if "p" is a method oop in the
   // current heap, with high probability. This predicate
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -104,6 +104,7 @@
     java_lang_math_sqrt,                                        // implementation of java.lang.Math.sqrt  (x)
     java_lang_math_log,                                         // implementation of java.lang.Math.log   (x)
     java_lang_math_log10,                                       // implementation of java.lang.Math.log10 (x)
+    java_lang_ref_reference_get,                                // implementation of java.lang.ref.Reference.get()
     number_of_method_entries,
     invalid = -1
   };
@@ -140,7 +141,7 @@
   // Method activation
   static MethodKind method_kind(methodHandle m);
   static address    entry_for_kind(MethodKind k)                { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; }
-  static address    entry_for_method(methodHandle m)            { return _entry_table[method_kind(m)]; }
+  static address    entry_for_method(methodHandle m)            { return entry_for_kind(method_kind(m)); }
 
   static void       print_method_kind(MethodKind kind)          PRODUCT_RETURN;
 
@@ -174,19 +175,32 @@
                                     int temps,
                                     int popframe_args,
                                     int monitors,
+                                    int caller_actual_parameters,
                                     int callee_params,
                                     int callee_locals,
-                                    bool is_top_frame);
+                                    bool is_top_frame) {
+    return layout_activation(method,
+                             temps,
+                             popframe_args,
+                             monitors,
+                             caller_actual_parameters,
+                             callee_params,
+                             callee_locals,
+                             (frame*)NULL,
+                             (frame*)NULL,
+                             is_top_frame);
+  }
 
   static int       layout_activation(methodOop method,
-                                      int temps,
-                                      int popframe_args,
-                                      int monitors,
-                                      int callee_params,
-                                      int callee_locals,
-                                      frame* caller,
-                                      frame* interpreter_frame,
-                                      bool is_top_frame);
+                                     int temps,
+                                     int popframe_args,
+                                     int monitors,
+                                     int caller_actual_parameters,
+                                     int callee_params,
+                                     int callee_locals,
+                                     frame* caller,
+                                     frame* interpreter_frame,
+                                     bool is_top_frame);
 
   // Runtime support
   static bool       is_not_reached(                       methodHandle method, int bci);
--- a/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -203,11 +203,14 @@
   if (value == NULL) {
     st->print_cr(" NULL");
   } else if (java_lang_String::is_instance(value)) {
-    EXCEPTION_MARK;
-    Handle h_value (THREAD, value);
-    Symbol* sym = java_lang_String::as_symbol(h_value, CATCH);
-    print_symbol(sym, st);
-    sym->decrement_refcount();
+    char buf[40];
+    int len = java_lang_String::utf8_length(value);
+    java_lang_String::as_utf8_string(value, buf, sizeof(buf));
+    if (len >= (int)sizeof(buf)) {
+      st->print_cr(" %s...[%d]", buf, len);
+    } else {
+      st->print_cr(" %s", buf);
+    }
   } else {
     st->print_cr(" " PTR_FORMAT, (intptr_t) value);
   }
--- a/src/share/vm/interpreter/cppInterpreter.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/cppInterpreter.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,6 +125,7 @@
     method_entry(java_lang_math_sqrt  );
     method_entry(java_lang_math_log   );
     method_entry(java_lang_math_log10 );
+    method_entry(java_lang_ref_reference_get);
     Interpreter::_native_entry_begin = Interpreter::code()->code_end();
     method_entry(native);
     method_entry(native_synchronized);
--- a/src/share/vm/interpreter/interpreter.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/interpreter.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -208,12 +208,6 @@
     return empty;
   }
 
-  // Accessor method?
-  if (m->is_accessor()) {
-    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
-    return accessor;
-  }
-
   // Special intrinsic method?
   // Note: This test must come _after_ the test for native methods,
   //       otherwise we will run into problems with JDK 1.2, see also
@@ -227,6 +221,15 @@
     case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
     case vmIntrinsics::_dlog  : return java_lang_math_log  ;
     case vmIntrinsics::_dlog10: return java_lang_math_log10;
+
+    case vmIntrinsics::_Reference_get:
+                                return java_lang_ref_reference_get;
+  }
+
+  // Accessor method?
+  if (m->is_accessor()) {
+    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
+    return accessor;
   }
 
   // Note: for now: zero locals for all non-empty methods
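
A small sketch of why the interpreter.cpp hunk above moves the generic accessor test after the intrinsic test: an intrinsic that is also shaped like an accessor (java.lang.ref.Reference.get) must win and get its dedicated entry. The enum and flags below are invented for illustration; only the ordering of the checks mirrors the change.

#include <cstdio>

enum ToyKind { kAccessor, kReferenceGet, kZeroLocals };

// Order matters: test the intrinsic before the generic accessor shape.
static ToyKind toy_method_kind(bool is_reference_get_intrinsic, bool looks_like_accessor) {
  if (is_reference_get_intrinsic) return kReferenceGet;
  if (looks_like_accessor)        return kAccessor;
  return kZeroLocals;
}

int main() {
  // Reference.get() is itself accessor-shaped; with the old ordering it would
  // have been classified as a plain accessor and missed its special entry point.
  printf("%d\n", toy_method_kind(true,  true));   // kReferenceGet (1)
  printf("%d\n", toy_method_kind(false, true));   // kAccessor (0)
  return 0;
}
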
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -139,9 +139,15 @@
   ResourceMark rm(thread);
   methodHandle m (thread, method(thread));
   Bytecode_loadconstant ldc(m, bci(thread));
-  oop result = ldc.resolve_constant(THREAD);
-  DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc.cache_index()));
-  assert(result == cpce->f1(), "expected result for assembly code");
+  oop result = ldc.resolve_constant(CHECK);
+#ifdef ASSERT
+  {
+    // The bytecode wrappers aren't GC-safe so construct a new one
+    Bytecode_loadconstant ldc2(m, bci(thread));
+    ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc2.cache_index());
+    assert(result == cpce->f1(), "expected result for assembly code");
+  }
+#endif
 }
 IRT_END
 
@@ -356,25 +362,6 @@
   THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
 IRT_END
 
-// required can be either a MethodType, or a Class (for a single argument)
-// actual (if not null) can be either a MethodHandle, or an arbitrary value (for a single argument)
-IRT_ENTRY(void, InterpreterRuntime::throw_WrongMethodTypeException(JavaThread* thread,
-                                                                   oopDesc* required,
-                                                                   oopDesc* actual)) {
-  ResourceMark rm(thread);
-  char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual);
-
-  if (ProfileTraps) {
-    note_trap(thread, Deoptimization::Reason_constraint, CHECK);
-  }
-
-  // create exception
-  THROW_MSG(vmSymbols::java_lang_invoke_WrongMethodTypeException(), message);
-}
-IRT_END
-
-
-
 // exception_handler_for_exception(...) returns the continuation address,
 // the exception oop (via TLS) and sets the bci/bcp for the continuation.
 // The exception oop is returned to make sure it is preserved over GC (it
--- a/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -98,7 +98,6 @@
   static void    throw_StackOverflowError(JavaThread* thread);
   static void    throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index);
   static void    throw_ClassCastException(JavaThread* thread, oopDesc* obj);
-  static void    throw_WrongMethodTypeException(JavaThread* thread, oopDesc* mtype = NULL, oopDesc* mhandle = NULL);
   static void    create_exception(JavaThread* thread, char* name, char* message);
   static void    create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
   static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
--- a/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -294,6 +294,16 @@
   Symbol*  method_signature  = pool->signature_ref_at(index);
   KlassHandle  current_klass(THREAD, pool->pool_holder());
 
+  if (pool->has_preresolution()
+      || (resolved_klass() == SystemDictionary::MethodHandle_klass() &&
+          methodOopDesc::is_method_handle_invoke_name(method_name))) {
+    methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index);
+    if (result_oop != NULL) {
+      resolved_method = methodHandle(THREAD, result_oop);
+      return;
+    }
+  }
+
   resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
 }
 
@@ -327,6 +337,7 @@
 
   // 1. check if klass is not interface
   if (resolved_klass->is_interface()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name());
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
@@ -413,6 +424,7 @@
 
  // check if klass is interface
   if (!resolved_klass->is_interface()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Found class %s, but interface was expected", Klass::cast(resolved_klass())->external_name());
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
@@ -534,6 +546,7 @@
 
   // check for errors
   if (is_static != fd.is_static()) {
+    ResourceMark rm(THREAD);
     char msg[200];
     jio_snprintf(msg, sizeof(msg), "Expected %s field %s.%s", is_static ? "static" : "non-static", Klass::cast(resolved_klass())->external_name(), fd.name()->as_C_string());
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), msg);
@@ -631,6 +644,7 @@
 
   // check if static
   if (!resolved_method->is_static()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Expected static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
                                                       resolved_method->name(),
@@ -671,6 +685,7 @@
 
   // check if not static
   if (resolved_method->is_static()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf),
                  "Expecting non-static method %s",
@@ -717,6 +732,7 @@
 
   // check if not static
   if (sel_method->is_static()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
                                                                                                              resolved_method->name(),
@@ -757,6 +773,7 @@
 
   // check if not static
   if (resolved_method->is_static()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
                                                                                                              resolved_method->name(),
@@ -873,6 +890,7 @@
 
   // check if receiver klass implements the resolved interface
   if (!recv_klass->is_subtype_of(resolved_klass())) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s",
                  (Klass::cast(recv_klass()))->external_name(),
@@ -1109,7 +1127,24 @@
   // The extra MH receiver will be inserted into the stack on every call.
   methodHandle resolved_method;
   KlassHandle current_klass(THREAD, pool->pool_holder());
-  lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK);
+  lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) {
+      // throw these guys, since they are already wrapped
+      return;
+    }
+    if (!PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
+      // intercept only LinkageErrors which might have failed to wrap
+      return;
+    }
+    // See the "Linking Exceptions" section for the invokedynamic instruction in the JVMS.
+    Handle ex(THREAD, PENDING_EXCEPTION);
+    CLEAR_PENDING_EXCEPTION;
+    oop bsme = Klass::cast(SystemDictionary::BootstrapMethodError_klass())->java_mirror();
+    MethodHandles::raise_exception(Bytecodes::_athrow, ex(), bsme, CHECK);
+    // java code should not return, but if it does throw out anyway
+    THROW(vmSymbols::java_lang_InternalError());
+  }
   if (resolved_method.is_null()) {
     THROW(vmSymbols::java_lang_InternalError());
   }
--- a/src/share/vm/interpreter/rewriter.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/rewriter.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -63,6 +63,15 @@
   _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
 }
 
+// Unrewrite the bytecodes if an error occurs.
+void Rewriter::restore_bytecodes() {
+  int len = _methods->length();
+
+  for (int i = len-1; i >= 0; i--) {
+    methodOop method = (methodOop)_methods->obj_at(i);
+    scan_method(method, true);
+  }
+}
 
 // Creates a constant pool cache given a CPC map
 void Rewriter::make_constant_pool_cache(TRAPS) {
@@ -133,57 +142,94 @@
 
 
 // Rewrite a classfile-order CP index into a native-order CPC index.
-void Rewriter::rewrite_member_reference(address bcp, int offset) {
+void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  int  cp_index    = Bytes::get_Java_u2(p);
-  int  cache_index = cp_entry_to_cp_cache(cp_index);
-  Bytes::put_native_u2(p, cache_index);
+  if (!reverse) {
+    int  cp_index    = Bytes::get_Java_u2(p);
+    int  cache_index = cp_entry_to_cp_cache(cp_index);
+    Bytes::put_native_u2(p, cache_index);
+  } else {
+    int cache_index = Bytes::get_native_u2(p);
+    int pool_index = cp_cache_entry_pool_index(cache_index);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
 
 
-void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
+void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  assert(p[-1] == Bytecodes::_invokedynamic, "");
-  int cp_index = Bytes::get_Java_u2(p);
-  int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
-  int cpc2 = add_secondary_cp_cache_entry(cpc);
+  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
+    int cpc2 = add_secondary_cp_cache_entry(cpc);
 
-  // Replace the trailing four bytes with a CPC index for the dynamic
-  // call site.  Unlike other CPC entries, there is one per bytecode,
-  // not just one per distinct CP entry.  In other words, the
-  // CPC-to-CP relation is many-to-one for invokedynamic entries.
-  // This means we must use a larger index size than u2 to address
-  // all these entries.  That is the main reason invokedynamic
-  // must have a five-byte instruction format.  (Of course, other JVM
-  // implementations can use the bytes for other purposes.)
-  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
-  // Note: We use native_u4 format exclusively for 4-byte indexes.
+    // Replace the trailing four bytes with a CPC index for the dynamic
+    // call site.  Unlike other CPC entries, there is one per bytecode,
+    // not just one per distinct CP entry.  In other words, the
+    // CPC-to-CP relation is many-to-one for invokedynamic entries.
+    // This means we must use a larger index size than u2 to address
+    // all these entries.  That is the main reason invokedynamic
+    // must have a five-byte instruction format.  (Of course, other JVM
+    // implementations can use the bytes for other purposes.)
+    Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
+    // Note: We use native_u4 format exclusively for 4-byte indexes.
+  } else {
+    int cache_index = constantPoolCacheOopDesc::decode_secondary_index(
+                        Bytes::get_native_u4(p));
+    int secondary_index = cp_cache_secondary_entry_main_index(cache_index);
+    int pool_index = cp_cache_entry_pool_index(secondary_index);
+    assert(_pool->tag_at(pool_index).is_invoke_dynamic(), "wrong index");
+    // zero out 4 bytes
+    Bytes::put_Java_u4(p, 0);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
 
 
 // Rewrite some ldc bytecodes to _fast_aldc
-void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
-  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
-  address p = bcp + offset;
-  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
-  constantTag tag = _pool->tag_at(cp_index).value();
-  if (tag.is_method_handle() || tag.is_method_type()) {
-    int cache_index = cp_entry_to_cp_cache(cp_index);
-    if (is_wide) {
-      (*bcp) = Bytecodes::_fast_aldc_w;
-      assert(cache_index == (u2)cache_index, "");
-      Bytes::put_native_u2(p, cache_index);
-    } else {
-      (*bcp) = Bytecodes::_fast_aldc;
-      assert(cache_index == (u1)cache_index, "");
-      (*p) = (u1)cache_index;
+void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
+                                 bool reverse) {
+  if (!reverse) {
+    assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
+    address p = bcp + offset;
+    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
+    constantTag tag = _pool->tag_at(cp_index).value();
+    if (tag.is_method_handle() || tag.is_method_type()) {
+      int cache_index = cp_entry_to_cp_cache(cp_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_fast_aldc_w;
+        assert(cache_index == (u2)cache_index, "index overflow");
+        Bytes::put_native_u2(p, cache_index);
+      } else {
+        (*bcp) = Bytecodes::_fast_aldc;
+        assert(cache_index == (u1)cache_index, "index overflow");
+        (*p) = (u1)cache_index;
+      }
+    }
+  } else {
+    Bytecodes::Code rewritten_bc =
+              (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
+    if ((*bcp) == rewritten_bc) {
+      address p = bcp + offset;
+      int cache_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
+      int pool_index = cp_cache_entry_pool_index(cache_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_ldc_w;
+        assert(pool_index == (u2)pool_index, "index overflow");
+        Bytes::put_Java_u2(p, pool_index);
+      } else {
+        (*bcp) = Bytecodes::_ldc;
+        assert(pool_index == (u1)pool_index, "index overflow");
+        (*p) = (u1)pool_index;
+      }
     }
   }
 }
 
 
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(methodOop method) {
+void Rewriter::scan_method(methodOop method, bool reverse) {
 
   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
@@ -238,6 +284,13 @@
 #endif
           break;
         }
+        case Bytecodes::_fast_linearswitch:
+        case Bytecodes::_fast_binaryswitch: {
+#ifndef CC_INTERP
+          (*bcp) = Bytecodes::_lookupswitch;
+#endif
+          break;
+        }
         case Bytecodes::_getstatic      : // fall through
         case Bytecodes::_putstatic      : // fall through
         case Bytecodes::_getfield       : // fall through
@@ -246,16 +299,18 @@
         case Bytecodes::_invokespecial  : // fall through
         case Bytecodes::_invokestatic   :
         case Bytecodes::_invokeinterface:
-          rewrite_member_reference(bcp, prefix_length+1);
+          rewrite_member_reference(bcp, prefix_length+1, reverse);
           break;
         case Bytecodes::_invokedynamic:
-          rewrite_invokedynamic(bcp, prefix_length+1);
+          rewrite_invokedynamic(bcp, prefix_length+1, reverse);
           break;
         case Bytecodes::_ldc:
-          maybe_rewrite_ldc(bcp, prefix_length+1, false);
+        case Bytecodes::_fast_aldc:
+          maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
           break;
         case Bytecodes::_ldc_w:
-          maybe_rewrite_ldc(bcp, prefix_length+1, true);
+        case Bytecodes::_fast_aldc_w:
+          maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
           break;
         case Bytecodes::_jsr            : // fall through
         case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
@@ -275,12 +330,13 @@
   if (nof_jsrs > 0) {
     method->set_has_jsrs();
     // Second pass will revisit this method.
-    assert(method->has_jsrs(), "");
+    assert(method->has_jsrs(), "didn't we just set this?");
   }
 }
 
 // After constant pool is created, revisit methods containing jsrs.
 methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
+  ResourceMark rm(THREAD);
   ResolveOopMapConflicts romc(method);
   methodHandle original_method = method;
   method = romc.do_potential_rewrite(CHECK_(methodHandle()));
@@ -302,7 +358,6 @@
   return method;
 }
 
-
 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   ResourceMark rm(THREAD);
   Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
@@ -345,34 +400,57 @@
   }
 
   // rewrite methods, in two passes
-  int i, len = _methods->length();
+  int len = _methods->length();
 
-  for (i = len; --i >= 0; ) {
+  for (int i = len-1; i >= 0; i--) {
     methodOop method = (methodOop)_methods->obj_at(i);
     scan_method(method);
   }
 
   // allocate constant pool cache, now that we've seen all the bytecodes
-  make_constant_pool_cache(CHECK);
+  make_constant_pool_cache(THREAD);
+
+  // Restore bytecodes to their unrewritten state if there are exceptions
+  // rewriting bytecodes or allocating the cpCache
+  if (HAS_PENDING_EXCEPTION) {
+    restore_bytecodes();
+    return;
+  }
+}
 
-  for (i = len; --i >= 0; ) {
-    methodHandle m(THREAD, (methodOop)_methods->obj_at(i));
+// Relocate jsr/rets in a method.  This can't be done with the rewriter
+// stage because it can throw other exceptions, leaving the bytecodes
+// pointing at constant pool cache entries.
+// Link and check jvmti dependencies while we're iterating over the methods.
+// JSR292 code calls with a different set of methods, so two entry points.
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop, TRAPS) {
+  objArrayHandle methods(THREAD, this_oop->methods());
+  relocate_and_link(this_oop, methods, THREAD);
+}
+
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop,
+                                 objArrayHandle methods, TRAPS) {
+  int len = methods->length();
+  for (int i = len-1; i >= 0; i--) {
+    methodHandle m(THREAD, (methodOop)methods->obj_at(i));
 
     if (m->has_jsrs()) {
       m = rewrite_jsrs(m, CHECK);
       // Method might have gotten rewritten.
-      _methods->obj_at_put(i, m());
+      methods->obj_at_put(i, m());
     }
 
-    // Set up method entry points for compiler and interpreter.
+    // Set up method entry points for compiler and interpreter.
     m->link_method(m, CHECK);
 
+    // This is for JVMTI and unrelated to the relocator, but it is the last thing we do
 #ifdef ASSERT
     if (StressMethodComparator) {
       static int nmc = 0;
       for (int j = i; j >= 0 && j >= i-4; j--) {
         if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
-        bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
+        bool z = MethodComparator::methods_EMCP(m(),
+                   (methodOop)methods->obj_at(j));
         if (j == i && !z) {
           tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
           assert(z, "method must compare equal to itself");
--- a/src/share/vm/interpreter/rewriter.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/rewriter.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -85,13 +85,15 @@
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(methodOop m);
-  methodHandle rewrite_jsrs(methodHandle m, TRAPS);
+  void scan_method(methodOop m, bool reverse = false);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset);
-  void rewrite_invokedynamic(address bcp, int offset);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
+  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  // Revert bytecodes in case of an exception.
+  void restore_bytecodes();
 
+  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
  public:
   // Driver routine:
   static void rewrite(instanceKlassHandle klass, TRAPS);
@@ -100,6 +102,13 @@
   enum {
     _secondary_entry_tag = nth_bit(30)
   };
+
+  // Second pass, not gated by is_rewritten flag
+  static void relocate_and_link(instanceKlassHandle klass, TRAPS);
+  // JSR292 version to call with its own methods.
+  static void relocate_and_link(instanceKlassHandle klass,
+                                objArrayHandle methods, TRAPS);
+
 };
 
 #endif // SHARE_VM_INTERPRETER_REWRITER_HPP
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -171,7 +171,6 @@
 address    TemplateInterpreter::_throw_ArrayStoreException_entry            = NULL;
 address    TemplateInterpreter::_throw_ArithmeticException_entry            = NULL;
 address    TemplateInterpreter::_throw_ClassCastException_entry             = NULL;
-address    TemplateInterpreter::_throw_WrongMethodType_entry                = NULL;
 address    TemplateInterpreter::_throw_NullPointerException_entry           = NULL;
 address    TemplateInterpreter::_throw_StackOverflowError_entry             = NULL;
 address    TemplateInterpreter::_throw_exception_entry                      = NULL;
@@ -346,7 +345,6 @@
     Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
     Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
     Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
-    Interpreter::_throw_WrongMethodType_entry                = generate_WrongMethodType_handler();
     Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
     Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
   }
@@ -372,6 +370,7 @@
   method_entry(java_lang_math_sqrt )
   method_entry(java_lang_math_log  )
   method_entry(java_lang_math_log10)
+  method_entry(java_lang_ref_reference_get)
 
   // all native method kinds (must be one contiguous block)
   Interpreter::_native_entry_begin = Interpreter::code()->code_end();
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -51,7 +51,6 @@
   }
   address generate_exception_handler_common(const char* name, const char* message, bool pass_oop);
   address generate_ClassCastException_handler();
-  address generate_WrongMethodType_handler();
   address generate_ArrayIndexOutOfBounds_handler(const char* name);
   address generate_continuation_for(TosState state);
   address generate_return_entry_for(TosState state, int step);
--- a/src/share/vm/memory/allocation.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/allocation.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -44,6 +44,14 @@
   return (void *) AllocateHeap(size, "CHeapObj-new");
 }
 
+void* CHeapObj::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
+  char* p = (char*) os::malloc(size);
+#ifdef ASSERT
+  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
+#endif
+  return p;
+}
+
 void CHeapObj::operator delete(void* p){
  FreeHeap(p);
 }
--- a/src/share/vm/memory/allocation.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/allocation.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -34,6 +34,8 @@
 #include "opto/c2_globals.hpp"
 #endif
 
+#include <new>
+
 #define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
 #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
 #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
@@ -99,6 +101,7 @@
 class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
   void* operator new(size_t size);
+  void* operator new (size_t size, const std::nothrow_t&  nothrow_constant);
   void  operator delete(void* p);
   void* new_array(size_t size);
 };
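
The nothrow operator new added to CHeapObj above lets a caller attempt a C-heap allocation and handle failure locally, since the new-expression yields NULL instead of taking an error path. A hedged usage sketch in plain C++ (ToyObj and its malloc-backed allocators stand in for the VM types and are assumptions for illustration):

    #include <new>        // std::nothrow_t, std::nothrow
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct ToyObj {
      int payload;
      // Default allocation path.
      void* operator new(std::size_t size) { return std::malloc(size); }
      // Nothrow variant: may return NULL; the constructor is then not run and
      // the new-expression itself evaluates to NULL.
      void* operator new(std::size_t size, const std::nothrow_t&) {
        return std::malloc(size);
      }
      void operator delete(void* p) { std::free(p); }
    };

    int main() {
      ToyObj* obj = new (std::nothrow) ToyObj();
      if (obj == NULL) {
        std::fprintf(stderr, "allocation failed; caller degrades gracefully\n");
        return 1;
      }
      obj->payload = 42;
      delete obj;
      return 0;
    }
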
--- a/src/share/vm/memory/blockOffsetTable.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/blockOffsetTable.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -541,23 +541,42 @@
     // to go back by.
     size_t n_cards_back = entry_to_cards_back(offset);
     q -= (N_words * n_cards_back);
-    assert(q >= _sp->bottom(), "Went below bottom!");
+    assert(q >= _sp->bottom(),
+           err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
+                   q, _sp->bottom()));
+    assert(q < _sp->end(),
+           err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
+                   q, _sp->end()));
     index -= n_cards_back;
     offset = _array->offset_array(index);
   }
   assert(offset < N_words, "offset too large");
   index--;
   q -= offset;
+  assert(q >= _sp->bottom(),
+         err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
+                 q, _sp->bottom()));
+  assert(q < _sp->end(),
+         err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
+                 q, _sp->end()));
   HeapWord* n = q;
 
   while (n <= addr) {
     debug_only(HeapWord* last = q);   // for debugging
     q = n;
     n += _sp->block_size(n);
-    assert(n > q, err_msg("Looping at: " INTPTR_FORMAT, n));
+    assert(n > q,
+           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT","
+                   " while querying blk_start(" PTR_FORMAT ")"
+                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                   n, last, addr, _sp->bottom(), _sp->end()));
   }
-  assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
-  assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
+  assert(q <= addr,
+         err_msg("wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
+                 q, addr));
+  assert(addr <= n,
+         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
+                 addr, n));
   return q;
 }
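
The strengthened assertions above guard a simple invariant: the card-derived guess q starts at or below the queried address, the loop then walks forward one block at a time, and every step must make progress until the block containing the address is found. A standalone sketch of that forward walk (plain arrays replace the block offset table and space types, which is an assumption for illustration):

    #include <cassert>
    #include <cstddef>

    // block_sizes[i] is the size of the i-th block; blocks tile [bottom, end)
    // contiguously in word units. Returns the start of the block containing addr.
    std::size_t block_start_containing(const std::size_t* block_sizes, std::size_t n_blocks,
                                       std::size_t bottom, std::size_t end, std::size_t addr) {
      assert(bottom <= addr && addr < end);
      std::size_t q = bottom;           // conservative starting guess, q <= addr
      std::size_t n = q;
      std::size_t i = 0;
      while (n <= addr && i < n_blocks) {
        q = n;
        n += block_sizes[i++];          // advance one block
        assert(n > q);                  // must make progress (the "Looping at" check)
      }
      assert(q <= addr && addr < n);    // addr lies inside the block [q, n)
      return q;
    }
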
 
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -455,36 +455,40 @@
   return true;
 }
 
-
-void CardTableModRefBS::non_clean_card_iterate(Space* sp,
-                                               MemRegion mr,
-                                               DirtyCardToOopClosure* dcto_cl,
-                                               MemRegionClosure* cl,
-                                               bool clear) {
+void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
+                                                                 MemRegion mr,
+                                                                 OopsInGenClosure* cl,
+                                                                 CardTableRS* ct) {
   if (!mr.is_empty()) {
     int n_threads = SharedHeap::heap()->n_par_threads();
     if (n_threads > 0) {
 #ifndef SERIALGC
-      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
+      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 #else  // SERIALGC
       fatal("Parallel gc not supported here.");
 #endif // SERIALGC
     } else {
-      non_clean_card_iterate_work(mr, cl, clear);
+      // We do not call the non_clean_card_iterate_serial() version below because
+      // we want to clear the cards (which non_clean_card_iterate_serial() does not
+      // do for us): clear_cl here does the work of finding contiguous dirty ranges
+      // of cards to process and clear.
+
+      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
+                                                       cl->gen_boundary());
+      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
+
+      clear_cl.do_MemRegion(mr);
     }
   }
 }
 
-// NOTE: For this to work correctly, it is important that
-// we look for non-clean cards below (so as to catch those
-// marked precleaned), rather than look explicitly for dirty
-// cards (and miss those marked precleaned). In that sense,
-// the name precleaned is currently somewhat of a misnomer.
-void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
-                                                    MemRegionClosure* cl,
-                                                    bool clear) {
-  // Figure out whether we have to worry about parallelism.
-  bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
+// The iterator itself is not MT-aware, but
+// MT-aware callers and closures can use this to
+// accomplish dirty card iteration in parallel. The
+// iterator itself does not clear the dirty cards, or
+// change their values in any manner.
+void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
+                                                      MemRegionClosure* cl) {
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (mri.word_size() > 0) {
@@ -506,22 +510,6 @@
           MemRegion cur_cards(addr_for(cur_entry),
                               non_clean_cards * card_size_in_words);
           MemRegion dirty_region = cur_cards.intersection(mri);
-          if (clear) {
-            for (size_t i = 0; i < non_clean_cards; i++) {
-              // Clean the dirty cards (but leave the other non-clean
-              // alone.)  If parallel, do the cleaning atomically.
-              jbyte cur_entry_val = cur_entry[i];
-              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
-                if (is_par) {
-                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
-                  assert(res != clean_card,
-                         "Dirty card mysteriously cleaned");
-                } else {
-                  cur_entry[i] = clean_card;
-                }
-              }
-            }
-          }
           cl->do_MemRegion(dirty_region);
         }
         cur_entry = next_entry;
@@ -530,22 +518,6 @@
   }
 }
 
-void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
-                                                 OopClosure* cl,
-                                                 bool clear,
-                                                 bool before_save_marks) {
-  // Note that dcto_cl is resource-allocated, so there is no
-  // corresponding "delete".
-  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
-  MemRegion used_mr;
-  if (before_save_marks) {
-    used_mr = sp->used_region_at_save_marks();
-  } else {
-    used_mr = sp->used_region();
-  }
-  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
-}
-
 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
@@ -593,9 +565,8 @@
   memset(first, dirty_card, last-first);
 }
 
-// NOTES:
-// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
-//     iterates over dirty cards ranges in increasing address order.
+// Unlike several other card table methods, dirty_card_iterate()
+// iterates over dirty cards ranges in increasing address order.
 void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                            MemRegionClosure* cl) {
   for (int i = 0; i < _cur_covered_regions; i++) {
@@ -685,43 +656,37 @@
 }
 
 #ifndef PRODUCT
-class GuaranteeNotModClosure: public MemRegionClosure {
-  CardTableModRefBS* _ct;
-public:
-  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
-  void do_MemRegion(MemRegion mr) {
-    jbyte* entry = _ct->byte_for(mr.start());
-    guarantee(*entry != CardTableModRefBS::clean_card,
-              "Dirty card in region that should be clean");
+void CardTableModRefBS::verify_region(MemRegion mr,
+                                      jbyte val, bool val_equals) {
+  jbyte* start    = byte_for(mr.start());
+  jbyte* end      = byte_for(mr.last());
+  bool   failures = false;
+  for (jbyte* curr = start; curr <= end; ++curr) {
+    jbyte curr_val = *curr;
+    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
+    if (failed) {
+      if (!failures) {
+        tty->cr();
+        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]", start, end);
+        tty->print_cr("==   %sexpecting value: %d",
+                      (val_equals) ? "" : "not ", val);
+        failures = true;
+      }
+      tty->print_cr("==   card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
+                    "val: %d", curr, addr_for(curr),
+                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
+                    (int) curr_val);
+    }
   }
-};
-
-void CardTableModRefBS::verify_clean_region(MemRegion mr) {
-  GuaranteeNotModClosure blk(this);
-  non_clean_card_iterate_work(mr, &blk, false);
+  guarantee(!failures, "there should not have been any failures");
 }
 
-// To verify a MemRegion is entirely dirty this closure is passed to
-// dirty_card_iterate. If the region is dirty do_MemRegion will be
-// invoked only once with a MemRegion equal to the one being
-// verified.
-class GuaranteeDirtyClosure: public MemRegionClosure {
-  CardTableModRefBS* _ct;
-  MemRegion _mr;
-  bool _result;
-public:
-  GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
-    : _ct(ct), _mr(mr), _result(false) {}
-  void do_MemRegion(MemRegion mr) {
-    _result = _mr.equals(mr);
-  }
-  bool result() const { return _result; }
-};
+void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
+  verify_region(mr, dirty_card, false /* val_equals */);
+}
 
 void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
-  GuaranteeDirtyClosure blk(this, mr);
-  dirty_card_iterate(mr, &blk);
-  guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
+  verify_region(mr, dirty_card, true /* val_equals */);
 }
 #endif
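
verify_region() above replaces the old closure-based verification with a direct scan over the card bytes that cover mr, comparing each byte against the expected value and reporting every mismatch before failing. A condensed, self-contained sketch of that scanning pattern over a plain byte array (the array stands in for the card table byte map and is an assumption for illustration):

    #include <cstdio>

    // Check that every card in [start, end] equals val (val_equals == true) or
    // differs from val (val_equals == false); print each offending card.
    bool verify_cards(const unsigned char* start, const unsigned char* end,
                      unsigned char val, bool val_equals) {
      bool failures = false;
      for (const unsigned char* curr = start; curr <= end; ++curr) {
        bool failed = val_equals ? (*curr != val) : (*curr == val);
        if (failed) {
          if (!failures) {
            std::printf("== card verification failed, %sexpecting value %d\n",
                        val_equals ? "" : "not ", (int) val);
            failures = true;
          }
          std::printf("==   card at offset %ld has value %d\n",
                      (long) (curr - start), (int) *curr);
        }
      }
      return !failures;   // the VM version ends with guarantee(!failures, ...)
    }
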
 
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
 class Generation;
 class OopsInGenClosure;
 class DirtyCardToOopClosure;
+class ClearNoncleanCardWrapper;
 
 class CardTableModRefBS: public ModRefBarrierSet {
   // Some classes get to look at some private stuff.
@@ -149,7 +150,9 @@
   // Mapping from address to card marking array entry
   jbyte* byte_for(const void* p) const {
     assert(_whole_heap.contains(p),
-           "out of bounds access to card marking array");
+           err_msg("Attempt to access p = "PTR_FORMAT" out of bounds of "
+                   " card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")",
+                   p, _whole_heap.start(), _whole_heap.end()));
     jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
     assert(result >= _byte_map && result < _byte_map + _byte_map_size,
            "out of bounds accessor for card marking array");
@@ -165,25 +168,27 @@
 
   // Iterate over the portion of the card-table which covers the given
   // region mr in the given space and apply cl to any dirty sub-regions
-  // of mr. cl and dcto_cl must either be the same closure or cl must
-  // wrap dcto_cl. Both are required - neither may be NULL. Also, dcto_cl
-  // may be modified. Note that this function will operate in a parallel
-  // mode if worker threads are available.
-  void non_clean_card_iterate(Space* sp, MemRegion mr,
-                              DirtyCardToOopClosure* dcto_cl,
-                              MemRegionClosure* cl,
-                              bool clear);
+  // of mr. Dirty cards are _not_ cleared by the iterator method itself,
+  // but closures may arrange to do so on their own should they so wish.
+  void non_clean_card_iterate_serial(MemRegion mr, MemRegionClosure* cl);
 
-  // Utility function used to implement the other versions below.
-  void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl,
-                                   bool clear);
+  // A variant of the above that will operate in a parallel mode if
+  // worker threads are available, and clear the dirty cards as it
+  // processes them.
+  // XXX ??? MemRegionClosure above vs OopsInGenClosure below XXX
+  // XXX some new_dcto_cl's take OopClosure's, plus as above there are
+  // some MemRegionClosures. Clean this up everywhere. XXX
+  void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
+                                                OopsInGenClosure* cl, CardTableRS* ct);
 
-  void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
-                                       DirtyCardToOopClosure* dcto_cl,
-                                       MemRegionClosure* cl,
-                                       bool clear,
-                                       int n_threads);
+ private:
+  // Work method used to implement non_clean_card_iterate_possibly_parallel()
+  // above in the parallel case.
+  void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
+                                            OopsInGenClosure* cl, CardTableRS* ct,
+                                            int n_threads);
 
+ protected:
   // Dirty the bytes corresponding to "mr" (not all of which must be
   // covered.)
   void dirty_MemRegion(MemRegion mr);
@@ -194,11 +199,6 @@
 
   // *** Support for parallel card scanning.
 
-  enum SomeConstantsForParallelism {
-    StridesPerThread    = 2,
-    CardsPerStrideChunk = 256
-  };
-
   // This is an array, one element per covered region of the card table.
   // Each entry is itself an array, with one element per chunk in the
   // covered region.  Each entry of these arrays is the lowest non-clean
@@ -231,7 +231,7 @@
   // covers the given address.
   uintptr_t addr_to_chunk_index(const void* addr) {
     uintptr_t card = (uintptr_t) byte_for(addr);
-    return card / CardsPerStrideChunk;
+    return card / ParGCCardsPerStrideChunk;
   }
 
   // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
@@ -239,9 +239,8 @@
   void process_stride(Space* sp,
                       MemRegion used,
                       jint stride, int n_strides,
-                      DirtyCardToOopClosure* dcto_cl,
-                      MemRegionClosure* cl,
-                      bool clear,
+                      OopsInGenClosure* cl,
+                      CardTableRS* ct,
                       jbyte** lowest_non_clean,
                       uintptr_t lowest_non_clean_base_chunk_index,
                       size_t lowest_non_clean_chunk_size);
@@ -402,9 +401,6 @@
   virtual void invalidate(MemRegion mr, bool whole_heap = false);
   void clear(MemRegion mr);
   void dirty(MemRegion mr);
-  void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
-                                bool clear = false,
-                                bool before_save_marks = false);
 
   // *** Card-table-RemSet-specific things.
 
@@ -415,18 +411,15 @@
   // *decreasing* address order.  (This order aids with imprecise card
   // marking, where a dirty card may cause scanning, and summarization
   // marking, of objects that extend onto subsequent cards.)
-  // If "clear" is true, the card is (conceptually) marked unmodified before
-  // applying the closure.
-  void mod_card_iterate(MemRegionClosure* cl, bool clear = false) {
-    non_clean_card_iterate_work(_whole_heap, cl, clear);
+  void mod_card_iterate(MemRegionClosure* cl) {
+    non_clean_card_iterate_serial(_whole_heap, cl);
   }
 
   // Like the "mod_cards_iterate" above, except only invokes the closure
   // for cards within the MemRegion "mr" (which is required to be
   // card-aligned and sized.)
-  void mod_card_iterate(MemRegion mr, MemRegionClosure* cl,
-                        bool clear = false) {
-    non_clean_card_iterate_work(mr, cl, clear);
+  void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) {
+    non_clean_card_iterate_serial(mr, cl);
   }
 
   static uintx ct_max_alignment_constraint();
@@ -460,14 +453,18 @@
     size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
     HeapWord* result = (HeapWord*) (delta << card_shift);
     assert(_whole_heap.contains(result),
-           "out of bounds accessor from card marking array");
+           err_msg("Returning result = "PTR_FORMAT" out of bounds of "
+                   " card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")",
+                   result, _whole_heap.start(), _whole_heap.end()));
     return result;
   }
 
   // Mapping from address to card marking array index.
   size_t index_for(void* p) {
     assert(_whole_heap.contains(p),
-           "out of bounds access to card marking array");
+           err_msg("Attempt to access p = "PTR_FORMAT" out of bounds of "
+                   " card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")",
+                   p, _whole_heap.start(), _whole_heap.end()));
     return byte_for(p) - _byte_map;
   }
 
@@ -478,11 +475,14 @@
   void verify();
   void verify_guard();
 
-  void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
+  // val_equals -> it will check that all cards covered by mr equal val
+  // !val_equals -> it will check that all cards covered by mr do not equal val
+  void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
+  void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
   void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
 
   static size_t par_chunk_heapword_alignment() {
-    return CardsPerStrideChunk * card_size_in_words;
+    return ParGCCardsPerStrideChunk * card_size_in_words;
   }
 
 };
@@ -503,4 +503,5 @@
   void set_CTRS(CardTableRS* rs) { _rs = rs; }
 };
 
+
 #endif // SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP
--- a/src/share/vm/memory/cardTableRS.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/cardTableRS.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -105,107 +105,111 @@
   g->younger_refs_iterate(blk);
 }
 
-class ClearNoncleanCardWrapper: public MemRegionClosure {
-  MemRegionClosure* _dirty_card_closure;
-  CardTableRS* _ct;
-  bool _is_par;
-private:
-  // Clears the given card, return true if the corresponding card should be
-  // processed.
-  bool clear_card(jbyte* entry) {
-    if (_is_par) {
-      while (true) {
-        // In the parallel case, we may have to do this several times.
-        jbyte entry_val = *entry;
-        assert(entry_val != CardTableRS::clean_card_val(),
-               "We shouldn't be looking at clean cards, and this should "
-               "be the only place they get cleaned.");
-        if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
-            || _ct->is_prev_youngergen_card_val(entry_val)) {
-          jbyte res =
-            Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
-          if (res == entry_val) {
-            break;
-          } else {
-            assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
-                   "The CAS above should only fail if another thread did "
-                   "a GC write barrier.");
-          }
-        } else if (entry_val ==
-                   CardTableRS::cur_youngergen_and_prev_nonclean_card) {
-          // Parallelism shouldn't matter in this case.  Only the thread
-          // assigned to scan the card should change this value.
-          *entry = _ct->cur_youngergen_card_val();
-          break;
-        } else {
-          assert(entry_val == _ct->cur_youngergen_card_val(),
-                 "Should be the only possibility.");
-          // In this case, the card was clean before, and become
-          // cur_youngergen only because of processing of a promoted object.
-          // We don't have to look at the card.
-          return false;
-        }
+inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
+  if (_is_par) {
+    return clear_card_parallel(entry);
+  } else {
+    return clear_card_serial(entry);
+  }
+}
+
+inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
+  while (true) {
+    // In the parallel case, we may have to do this several times.
+    jbyte entry_val = *entry;
+    assert(entry_val != CardTableRS::clean_card_val(),
+           "We shouldn't be looking at clean cards, and this should "
+           "be the only place they get cleaned.");
+    if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
+        || _ct->is_prev_youngergen_card_val(entry_val)) {
+      jbyte res =
+        Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
+      if (res == entry_val) {
+        break;
+      } else {
+        assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
+               "The CAS above should only fail if another thread did "
+               "a GC write barrier.");
       }
-      return true;
+    } else if (entry_val ==
+               CardTableRS::cur_youngergen_and_prev_nonclean_card) {
+      // Parallelism shouldn't matter in this case.  Only the thread
+      // assigned to scan the card should change this value.
+      *entry = _ct->cur_youngergen_card_val();
+      break;
     } else {
-      jbyte entry_val = *entry;
-      assert(entry_val != CardTableRS::clean_card_val(),
-             "We shouldn't be looking at clean cards, and this should "
-             "be the only place they get cleaned.");
-      assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
-             "This should be possible in the sequential case.");
-      *entry = CardTableRS::clean_card_val();
-      return true;
+      assert(entry_val == _ct->cur_youngergen_card_val(),
+             "Should be the only possibility.");
+      // In this case, the card was clean before, and became
+      // cur_youngergen only because a promoted object was processed.
+      // We don't have to look at the card.
+      return false;
     }
   }
+  return true;
+}
 
-public:
-  ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure,
-                           CardTableRS* ct) :
+
+inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
+  jbyte entry_val = *entry;
+  assert(entry_val != CardTableRS::clean_card_val(),
+         "We shouldn't be looking at clean cards, and this should "
+         "be the only place they get cleaned.");
+  assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
+         "This should be possible in the sequential case.");
+  *entry = CardTableRS::clean_card_val();
+  return true;
+}
+
+ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
+  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
     _dirty_card_closure(dirty_card_closure), _ct(ct) {
     _is_par = (SharedHeap::heap()->n_par_threads() > 0);
+}
+
+void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
+  assert(mr.word_size() > 0, "Error");
+  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
+  // mr.end() may not necessarily be card aligned.
+  jbyte* cur_entry = _ct->byte_for(mr.last());
+  const jbyte* limit = _ct->byte_for(mr.start());
+  HeapWord* end_of_non_clean = mr.end();
+  HeapWord* start_of_non_clean = end_of_non_clean;
+  while (cur_entry >= limit) {
+    HeapWord* cur_hw = _ct->addr_for(cur_entry);
+    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
+      // Continue the dirty range by opening the
+      // dirty window one card to the left.
+      start_of_non_clean = cur_hw;
+    } else {
+      // We hit a "clean" card; process any non-empty
+      // "dirty" range accumulated so far.
+      if (start_of_non_clean < end_of_non_clean) {
+        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
+        _dirty_card_closure->do_MemRegion(mrd);
+      }
+      // Reset the dirty window, while continuing to look
+      // for the next dirty card that will start a
+      // new dirty window.
+      end_of_non_clean = cur_hw;
+      start_of_non_clean = cur_hw;
+    }
+    // Note that "cur_entry" leads "start_of_non_clean" in
+    // its leftward excursion after this point
+    // in the loop and, when we hit the left end of "mr",
+    // will point off of the left end of the card-table
+    // for "mr".
+    cur_entry--;
   }
-  void do_MemRegion(MemRegion mr) {
-    // We start at the high end of "mr", walking backwards
-    // while accumulating a contiguous dirty range of cards in
-    // [start_of_non_clean, end_of_non_clean) which we then
-    // process en masse.
-    HeapWord* end_of_non_clean = mr.end();
-    HeapWord* start_of_non_clean = end_of_non_clean;
-    jbyte*       entry = _ct->byte_for(mr.last());
-    const jbyte* first_entry = _ct->byte_for(mr.start());
-    while (entry >= first_entry) {
-      HeapWord* cur = _ct->addr_for(entry);
-      if (!clear_card(entry)) {
-        // We hit a clean card; process any non-empty
-        // dirty range accumulated so far.
-        if (start_of_non_clean < end_of_non_clean) {
-          MemRegion mr2(start_of_non_clean, end_of_non_clean);
-          _dirty_card_closure->do_MemRegion(mr2);
-        }
-        // Reset the dirty window while continuing to
-        // look for the next dirty window to process.
-        end_of_non_clean = cur;
-        start_of_non_clean = end_of_non_clean;
-      }
-      // Open the left end of the window one card to the left.
-      start_of_non_clean = cur;
-      // Note that "entry" leads "start_of_non_clean" in
-      // its leftward excursion after this point
-      // in the loop and, when we hit the left end of "mr",
-      // will point off of the left end of the card-table
-      // for "mr".
-      entry--;
-    }
-    // If the first card of "mr" was dirty, we will have
-    // been left with a dirty window, co-initial with "mr",
-    // which we now process.
-    if (start_of_non_clean < end_of_non_clean) {
-      MemRegion mr2(start_of_non_clean, end_of_non_clean);
-      _dirty_card_closure->do_MemRegion(mr2);
-    }
+  // If the first card of "mr" was dirty, we will have
+  // been left with a dirty window, co-initial with "mr",
+  // which we now process.
+  if (start_of_non_clean < end_of_non_clean) {
+    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
+    _dirty_card_closure->do_MemRegion(mrd);
   }
-};
+}
+
 // clean (by dirty->clean before) ==> cur_younger_gen
 // dirty                          ==> cur_youngergen_and_prev_nonclean_card
 // precleaned                     ==> cur_youngergen_and_prev_nonclean_card
@@ -242,12 +246,35 @@
 
 void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                 OopsInGenClosure* cl) {
-  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs->precision(),
-                                                   cl->gen_boundary());
-  ClearNoncleanCardWrapper clear_cl(dcto_cl, this);
-
-  _ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
-                                dcto_cl, &clear_cl, false);
+  const MemRegion urasm = sp->used_region_at_save_marks();
+#ifdef ASSERT
+  // Convert the assertion check to a warning if we are running
+  // CMS+ParNew until the related bug is fixed.
+  MemRegion ur    = sp->used_region();
+  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
+         err_msg("Did you forget to call save_marks()? "
+                 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
+                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
+                 urasm.start(), urasm.end(), ur.start(), ur.end()));
+  // In the case of CMS+ParNew, issue a warning
+  if (!ur.contains(urasm)) {
+    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
+    warning("CMS+ParNew: Did you forget to call save_marks()? "
+            "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
+            "[" PTR_FORMAT ", " PTR_FORMAT ")",
+             urasm.start(), urasm.end(), ur.start(), ur.end());
+    MemRegion ur2 = sp->used_region();
+    MemRegion urasm2 = sp->used_region_at_save_marks();
+    if (!ur.equals(ur2)) {
+      warning("CMS+ParNew: Flickering used_region()!!");
+    }
+    if (!urasm.equals(urasm2)) {
+      warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
+    }
+    ShouldNotReachHere();
+  }
+#endif
+  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
 }
 
 void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
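
ClearNoncleanCardWrapper::do_MemRegion() above walks the covered cards from right to left, clearing non-clean cards as it goes and growing a [start_of_non_clean, end_of_non_clean) window; whenever a clean card interrupts the run, the accumulated window is handed to the dirty-card closure and reset. A self-contained sketch of that accumulation pattern over a plain card array (the card values and the processing callback are illustrative stand-ins, not the VM types):

    #include <cstddef>
    #include <cstdio>

    const unsigned char CLEAN_CARD = 0xFF;

    // Stand-in for DirtyCardToOopClosure::do_MemRegion: handles cards [from, to).
    void process_dirty_range(std::size_t from, std::size_t to) {
      std::printf("processing dirty cards [%zu, %zu)\n", from, to);
    }

    // Walk cards[0 .. n_cards) right to left, clearing non-clean cards and passing
    // each maximal contiguous dirty range to process_dirty_range().
    void clear_and_process(unsigned char* cards, std::size_t n_cards) {
      std::size_t end_of_non_clean   = n_cards;  // exclusive right end of the window
      std::size_t start_of_non_clean = n_cards;
      for (std::size_t i = n_cards; i-- > 0; ) {
        if (cards[i] != CLEAN_CARD) {
          cards[i] = CLEAN_CARD;                 // clear, then extend the window left
          start_of_non_clean = i;
        } else {
          if (start_of_non_clean < end_of_non_clean) {
            process_dirty_range(start_of_non_clean, end_of_non_clean);
          }
          end_of_non_clean   = i;                // restart just left of the clean card
          start_of_non_clean = i;
        }
      }
      // A window co-initial with the region may remain; process it now.
      if (start_of_non_clean < end_of_non_clean) {
        process_dirty_range(start_of_non_clean, end_of_non_clean);
      }
    }
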
--- a/src/share/vm/memory/cardTableRS.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/cardTableRS.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
 
 class Space;
 class OopsInGenClosure;
-class DirtyCardToOopClosure;
 
 // This kind of "GenRemSet" uses a card table both as shared data structure
 // for a mod ref barrier set and for the rem set information.
@@ -166,4 +165,21 @@
 
 };
 
+class ClearNoncleanCardWrapper: public MemRegionClosure {
+  DirtyCardToOopClosure* _dirty_card_closure;
+  CardTableRS* _ct;
+  bool _is_par;
+private:
+  // Clears the given card, returning true if the corresponding card
+  // should be processed.
+  inline bool clear_card(jbyte* entry);
+  // Work methods called by the clear_card()
+  inline bool clear_card_serial(jbyte* entry);
+  inline bool clear_card_parallel(jbyte* entry);
+
+public:
+  ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct);
+  void do_MemRegion(MemRegion mr);
+};
+
 #endif // SHARE_VM_MEMORY_CARDTABLERS_HPP
--- a/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -265,8 +265,6 @@
   MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
 
   always_do_update_barrier = UseConcMarkSweepGC;
-  BlockOffsetArrayUseUnallocatedBlock =
-      BlockOffsetArrayUseUnallocatedBlock || ParallelGCThreads > 0;
 
   // Check validity of heap flags
   assert(OldSize     % min_alignment() == 0, "old space alignment");
--- a/src/share/vm/memory/dump.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/dump.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -623,24 +623,48 @@
   }
 };
 
-// Itable indices are calculated based on methods array order
-// (see klassItable::compute_itable_index()).  Must reinitialize
+// Vtable and Itable indices are calculated based on methods array
+// order (see klassItable::compute_itable_index()).  Must reinitialize
 // after ALL methods of ALL classes have been reordered.
 // We assume that since checkconstraints is false, this method
 // cannot throw an exception.  An exception here would be
 // problematic since this is the VMThread, not a JavaThread.
 
-class ReinitializeItables: public ObjectClosure {
+class ReinitializeTables: public ObjectClosure {
 private:
   Thread* _thread;
 
 public:
-  ReinitializeItables(Thread* thread) : _thread(thread) {}
+  ReinitializeTables(Thread* thread) : _thread(thread) {}
+
+  // Initialize super vtable first; check if already initialized to avoid
+  // quadratic behavior.  The vtable is cleared in remove_unshareable_info.
+  void reinitialize_vtables(klassOop k) {
+    if (k->blueprint()->oop_is_instanceKlass()) {
+      instanceKlass* ik = instanceKlass::cast(k);
+      if (ik->vtable()->is_initialized()) return;
+      if (ik->super() != NULL) {
+        reinitialize_vtables(ik->super());
+      }
+      ik->vtable()->initialize_vtable(false, _thread);
+    }
+  }
 
   void do_object(oop obj) {
     if (obj->blueprint()->oop_is_instanceKlass()) {
       instanceKlass* ik = instanceKlass::cast((klassOop)obj);
+      ResourceMark rm(_thread);
       ik->itable()->initialize_itable(false, _thread);
+      reinitialize_vtables((klassOop)obj);
+#ifdef ASSERT
+      ik->vtable()->verify(tty, true);
+#endif // ASSERT
+    } else if (obj->blueprint()->oop_is_arrayKlass()) {
+      // The vtable for an array klass is that of its super class,
+      // i.e. java.lang.Object.
+      arrayKlass* ak = arrayKlass::cast((klassOop)obj);
+      if (ak->vtable()->is_initialized()) return;
+      ak->vtable()->initialize_vtable(false, _thread);
     }
   }
 };
@@ -1205,9 +1229,9 @@
     gen->ro_space()->object_iterate(&sort);
     gen->rw_space()->object_iterate(&sort);
 
-    ReinitializeItables reinit_itables(THREAD);
-    gen->ro_space()->object_iterate(&reinit_itables);
-    gen->rw_space()->object_iterate(&reinit_itables);
+    ReinitializeTables reinit_tables(THREAD);
+    gen->ro_space()->object_iterate(&reinit_tables);
+    gen->rw_space()->object_iterate(&reinit_tables);
     tty->print_cr("done. ");
     tty->cr();
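
ReinitializeTables::reinitialize_vtables() above initializes a class's super chain before the class itself and bails out as soon as it meets an already initialized vtable, so shared supers are visited only once and the pass stays linear. A small sketch of that super-first, memoized recursion (ToyKlass is an illustrative stand-in, not a HotSpot type):

    #include <cstdio>

    struct ToyKlass {
      const char* name;
      ToyKlass*   super;                // NULL for the root class stand-in
      bool        vtable_initialized;
    };

    // Initialize the super chain bottom-up; the early return makes repeated
    // visits (many subclasses sharing one super) constant work per visit.
    void reinitialize_vtable(ToyKlass* k) {
      if (k == NULL || k->vtable_initialized) return;
      reinitialize_vtable(k->super);    // super first, as in the dump.cpp pass
      k->vtable_initialized = true;
      std::printf("initialized vtable of %s\n", k->name);
    }
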
 
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -537,7 +537,7 @@
         // Timer for individual generations. Last argument is false: no CR
         TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
         TraceCollectorStats tcs(_gens[i]->counters());
-        TraceMemoryManagerStats tmms(_gens[i]->kind());
+        TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
 
         size_t prev_used = _gens[i]->used();
         _gens[i]->stat_record()->invocations++;
@@ -711,15 +711,6 @@
   _gen_process_strong_tasks->set_n_threads(t);
 }
 
-class AssertIsPermClosure: public OopClosure {
-public:
-  void do_oop(oop* p) {
-    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
-  }
-  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-static AssertIsPermClosure assert_is_perm_closure;
-
 void GenCollectedHeap::
 gen_process_strong_roots(int level,
                          bool younger_gens_as_roots,
@@ -962,6 +953,13 @@
   }
 }
 
+bool GenCollectedHeap::is_in_young(oop p) {
+  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
+  assert(result == _gens[0]->is_in_reserved(p),
+         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
+}
+
 // Returns "TRUE" iff "p" points into the allocated area of the heap.
 bool GenCollectedHeap::is_in(const void* p) const {
   #ifndef ASSERT
@@ -984,10 +982,16 @@
   return false;
 }
 
-// Returns "TRUE" iff "p" points into the allocated area of the heap.
-bool GenCollectedHeap::is_in_youngest(void* p) {
-  return _gens[0]->is_in(p);
+#ifdef ASSERT
+// Don't implement this by using is_in_young().  This method is used
+// in some cases to check that is_in_young() is correct.
+bool GenCollectedHeap::is_in_partial_collection(const void* p) {
+  assert(is_in_reserved(p) || p == NULL,
+    "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is young (low addr), old, perm (high addr)
+  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
 }
+#endif
 
 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
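
is_in_young() above relies purely on address ordering: the young generation is reserved at the lowest addresses, so one pointer comparison against the start of the oldest generation's reserved range answers the question, and the assert cross-checks it against the slower per-generation containment test. A hedged sketch of the idea (the layout struct is an assumption for illustration):

    #include <cassert>

    struct ToyHeapLayout {
      char* young_start;   // young gen reserved:  [young_start, young_end)
      char* young_end;     // older gens reserved: [young_end, heap_end)
      char* heap_end;
    };

    // Address-order test: within the heap, anything below the older generations'
    // reserved start belongs to the young generation.
    bool is_in_young(const ToyHeapLayout& h, const void* p) {
      assert((const char*) p >= h.young_start && (const char*) p < h.heap_end);
      return (const char*) p < h.young_end;
    }
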
--- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -216,8 +216,18 @@
     }
   }
 
-  // Returns "TRUE" iff "p" points into the youngest generation.
-  bool is_in_youngest(void* p);
+  // Returns true if the reference is to an object in the reserved space
+  // for the young generation.
+  // Assumes the young gen address range is below that of the old gen.
+  bool is_in_young(oop p);
+
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr) {
+    return is_in_young((oop)addr);
+  }
 
   // Iteration functions.
   void oop_iterate(OopClosure* cl);
@@ -283,7 +293,7 @@
     //       "Check can_elide_initializing_store_barrier() for this collector");
     // but unfortunately the flag UseSerialGC need not necessarily always
     // be set when DefNew+Tenured are being used.
-    return is_in_youngest((void*)new_obj);
+    return is_in_young(new_obj);
   }
 
   // Can a compiler elide a store barrier when it writes
@@ -427,13 +437,13 @@
   // explicitly mark reachable objects in younger generations, to avoid
   // excess storage retention.)  If "collecting_perm_gen" is false, then
   // roots that may only contain references to permGen objects are not
-  // scanned. The "so" argument determines which of the roots
+  // scanned; instead, the older_gens closure is applied to all outgoing
+  // references in the perm gen.  The "so" argument determines which of the roots
   // the closure is applied to:
   // "SO_None" does none;
   // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   // "SO_SystemClasses" to all the "system" classes and loaders;
-  // "SO_Symbols_and_Strings" applies the closure to all entries in
-  // SymbolsTable and StringTable.
+  // "SO_Strings" applies the closure to all entries in the StringTable.
   void gen_process_strong_roots(int level,
                                 bool younger_gens_as_roots,
                                 // The remaining arguments are in an order
--- a/src/share/vm/memory/genOopClosures.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/genOopClosures.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -175,7 +175,7 @@
  protected:
   template <class T> inline void do_oop_work(T* p) {
     oop obj = oopDesc::load_decode_heap_oop(p);
-    guarantee(obj->is_oop_or_null(), "invalid oop");
+    guarantee(obj->is_oop_or_null(), err_msg("invalid oop: " INTPTR_FORMAT, (oopDesc*) obj));
   }
  public:
   virtual void do_oop(oop* p);
--- a/src/share/vm/memory/modRefBarrierSet.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/modRefBarrierSet.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -88,15 +88,6 @@
     assert(false, "can't call");
   }
 
-  // Invoke "cl->do_oop" on (the address of) every possibly-modifed
-  // reference field in objects in "sp".  If "clear" is "true", the oops
-  // are no longer considered possibly modified after application of the
-  // closure.  If' "before_save_marks" is true, oops in objects allocated
-  // after the last call to "save_marks" on "sp" will not be considered.
-  virtual void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
-                                        bool clear = false,
-                                        bool before_save_marks = false) = 0;
-
   // Causes all refs in "mr" to be assumed to be modified.  If "whole_heap"
   // is true, the caller asserts that the entire heap is being invalidated,
   // which may admit an optimized implementation for some barriers.
@@ -109,12 +100,6 @@
   // Pass along the argument to the superclass.
   ModRefBarrierSet(int max_covered_regions) :
     BarrierSet(max_covered_regions) {}
-
-#ifndef PRODUCT
-  // Verifies that the given region contains no modified references.
-  virtual void verify_clean_region(MemRegion mr) = 0;
-#endif
-
 };
 
 #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
--- a/src/share/vm/memory/sharedHeap.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/sharedHeap.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -46,7 +46,6 @@
   SH_PS_Management_oops_do,
   SH_PS_SystemDictionary_oops_do,
   SH_PS_jvmti_oops_do,
-  SH_PS_SymbolTable_oops_do,
   SH_PS_StringTable_oops_do,
   SH_PS_CodeCache_oops_do,
   // Leave this one last.
@@ -103,6 +102,17 @@
 };
 static AssertIsPermClosure assert_is_perm_closure;
 
+#ifdef ASSERT
+class AssertNonScavengableClosure: public OopClosure {
+public:
+  virtual void do_oop(oop* p) {
+    assert(!Universe::heap()->is_in_partial_collection(*p),
+      "Referent should not be scavengable.");  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+static AssertNonScavengableClosure assert_is_non_scavengable_closure;
+#endif
+
 void SharedHeap::change_strong_roots_parity() {
   // Also set the new collection parity.
   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
@@ -161,13 +171,9 @@
   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
     if (so & SO_AllClasses) {
       SystemDictionary::oops_do(roots);
-    } else
-      if (so & SO_SystemClasses) {
-        SystemDictionary::always_strong_oops_do(roots);
-      }
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_SymbolTable_oops_do)) {
+    } else if (so & SO_SystemClasses) {
+      SystemDictionary::always_strong_oops_do(roots);
+    }
   }
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
@@ -201,9 +207,10 @@
         CodeCache::scavenge_root_nmethods_do(code_roots);
       }
     }
-    // Verify if the code cache contents are in the perm gen
-    NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
-    NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
+    // Verify that the code cache contents are not subject to
+    // movement by a scavenging collection.
+    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
+    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
   }
 
   if (!collecting_perm_gen) {
--- a/src/share/vm/memory/sharedHeap.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/sharedHeap.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -192,9 +192,8 @@
     SO_None                = 0x0,
     SO_AllClasses          = 0x1,
     SO_SystemClasses       = 0x2,
-    SO_Symbols             = 0x4,
-    SO_Strings             = 0x8,
-    SO_CodeCache           = 0x10
+    SO_Strings             = 0x4,
+    SO_CodeCache           = 0x8
   };
 
   FlexibleWorkGang* workers() const { return _workers; }
@@ -208,14 +207,13 @@
 
   // Invoke the "do_oop" method the closure "roots" on all root locations.
   // If "collecting_perm_gen" is false, then roots that may only contain
-  // references to permGen objects are not scanned.  If true, the
-  // "perm_gen" closure is applied to all older-to-younger refs in the
+  // references to permGen objects are not scanned; instead, in that case,
+  // the "perm_blk" closure is applied to all outgoing refs in the
   // permanent generation.  The "so" argument determines which of roots
   // the closure is applied to:
   // "SO_None" does none;
   // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   // "SO_SystemClasses" to all the "system" classes and loaders;
-  // "SO_Symbols" applies the closure to all entries in SymbolsTable;
   // "SO_Strings" applies the closure to all entries in StringTable;
   // "SO_CodeCache" applies the closure to all elements of the CodeCache.
   void process_strong_roots(bool activate_scope,
--- a/src/share/vm/memory/space.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/memory/space.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -97,6 +97,14 @@
   }
 }
 
+// We get called with "mr" representing the dirty region
+// that we want to process. Because of imprecise marking,
+// we may need to extend the incoming "mr" to the right,
+// and scan more. However, because we may already have
+// scanned some of that extended region, we may need to
+// trim its right-end back some so we do not scan what
+// we (or another worker thread) may already have scanned
+// or may be planning to scan.
 void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
 
   // Some collectors need to do special things whenever their dirty
@@ -148,7 +156,7 @@
   // e.g. the dirty card region is entirely in a now free object
   // -- something that could happen with a concurrent sweeper.
   bottom = MIN2(bottom, top);
-  mr     = MemRegion(bottom, top);
+  MemRegion extended_mr = MemRegion(bottom, top);
   assert(bottom <= top &&
          (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
           _min_done == NULL ||
@@ -156,8 +164,8 @@
          "overlap!");
 
   // Walk the region if it is not empty; otherwise there is nothing to do.
-  if (!mr.is_empty()) {
-    walk_mem_region(mr, bottom_obj, top);
+  if (!extended_mr.is_empty()) {
+    walk_mem_region(extended_mr, bottom_obj, top);
   }
 
   // An idempotent closure might be applied in any order, so we don't
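
The change above keeps the widened scan range in a separate extended_mr instead of reassigning the incoming mr, so the caller's view of the dirty region is no longer clobbered by the imprecise-marking extension. A tiny sketch of that pattern (ToyRegion and the widening rule are illustrative assumptions):

    struct ToyRegion {
      char* start;
      char* end;
      bool  is_empty() const { return start >= end; }
    };

    void walk_mem_region(ToyRegion r) {
      // visit objects overlapping r (omitted)
      (void) r;
    }

    // Widen the incoming dirty region for imprecise marking, but keep the widened
    // range in a separate local so the original region stays intact.
    void process_dirty(ToyRegion mr, char* bottom_obj, char* top) {
      char* bottom = (bottom_obj < mr.start) ? bottom_obj : mr.start;
      if (bottom > top) bottom = top;              // mirrors bottom = MIN2(bottom, top)
      ToyRegion extended_mr = { bottom, top };
      if (!extended_mr.is_empty()) {
        walk_mem_region(extended_mr);              // mr itself is untouched
      }
    }
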
--- a/src/share/vm/oops/constantPoolKlass.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/constantPoolKlass.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -285,10 +285,9 @@
 void constantPoolKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   assert(obj->is_constantPool(), "should be constant pool");
   constantPoolOop cp = (constantPoolOop) obj;
-  if (cp->tags() != NULL &&
-      (!JavaObjectsInPerm || (EnableInvokeDynamic && cp->has_pseudo_string()))) {
+  if (cp->tags() != NULL) {
     for (int i = 1; i < cp->length(); ++i) {
-      if (cp->tag_at(i).is_string()) {
+      if (cp->is_pointer_entry(i)) {
         oop* base = cp->obj_at_addr_raw(i);
         if (PSScavenge::should_scavenge(base)) {
           pm->claim_or_forward_depth(base);
@@ -311,10 +310,14 @@
     st->print(" - flags: 0x%x", cp->flags());
     if (cp->has_pseudo_string()) st->print(" has_pseudo_string");
     if (cp->has_invokedynamic()) st->print(" has_invokedynamic");
+    if (cp->has_preresolution()) st->print(" has_preresolution");
     st->cr();
   }
+  if (cp->pool_holder() != NULL) {
+    bool extra = (instanceKlass::cast(cp->pool_holder())->constants() != cp);
+    st->print_cr(" - holder: " INTPTR_FORMAT "%s", cp->pool_holder(), (extra? " (extra)" : ""));
+  }
   st->print_cr(" - cache: " INTPTR_FORMAT, cp->cache());
-
   for (int index = 1; index < cp->length(); index++) {      // Index 0 is unused
     st->print(" - %3d : ", index);
     cp->tag_at(index).print_on(st);
@@ -342,6 +345,11 @@
         anObj->print_value_on(st);
         st->print(" {0x%lx}", (address)anObj);
         break;
+      case JVM_CONSTANT_Object :
+        anObj = cp->object_at(index);
+        anObj->print_value_on(st);
+        st->print(" {0x%lx}", (address)anObj);
+        break;
       case JVM_CONSTANT_Integer :
         st->print("%d", cp->int_at(index));
         break;
@@ -410,10 +418,15 @@
   st->print("constant pool [%d]", cp->length());
   if (cp->has_pseudo_string()) st->print("/pseudo_string");
   if (cp->has_invokedynamic()) st->print("/invokedynamic");
+  if (cp->has_preresolution()) st->print("/preresolution");
   if (cp->operands() != NULL)  st->print("/operands[%d]", cp->operands()->length());
   cp->print_address_on(st);
   st->print(" for ");
   cp->pool_holder()->print_value_on(st);
+  if (cp->pool_holder() != NULL) {
+    bool extra = (instanceKlass::cast(cp->pool_holder())->constants() != cp);
+    if (extra)  st->print(" (extra)");
+  }
   if (cp->cache() != NULL) {
     st->print(" cache=" PTR_FORMAT, cp->cache());
   }
@@ -432,23 +445,21 @@
   guarantee(cp->is_perm(), "should be in permspace");
   if (!cp->partially_loaded()) {
     for (int i = 0; i< cp->length();  i++) {
+      constantTag tag = cp->tag_at(i);
       CPSlot entry = cp->slot_at(i);
-      if (cp->tag_at(i).is_klass()) {
+      if (tag.is_klass()) {
         if (entry.is_oop()) {
           guarantee(entry.get_oop()->is_perm(),     "should be in permspace");
           guarantee(entry.get_oop()->is_klass(),    "should be klass");
         }
-      }
-      if (cp->tag_at(i).is_unresolved_klass()) {
+      } else if (tag.is_unresolved_klass()) {
         if (entry.is_oop()) {
           guarantee(entry.get_oop()->is_perm(),     "should be in permspace");
           guarantee(entry.get_oop()->is_klass(),    "should be klass");
         }
-      }
-      if (cp->tag_at(i).is_symbol()) {
+      } else if (tag.is_symbol()) {
         guarantee(entry.get_symbol()->refcount() != 0, "should have nonzero reference count");
-      }
-      if (cp->tag_at(i).is_unresolved_string()) {
+      } else if (tag.is_unresolved_string()) {
         if (entry.is_oop()) {
           guarantee(entry.get_oop()->is_perm(),     "should be in permspace");
           guarantee(entry.get_oop()->is_instance(), "should be instance");
@@ -456,8 +467,7 @@
         else {
           guarantee(entry.get_symbol()->refcount() != 0, "should have nonzero reference count");
         }
-      }
-      if (cp->tag_at(i).is_string()) {
+      } else if (tag.is_string()) {
         if (!cp->has_pseudo_string()) {
           if (entry.is_oop()) {
             guarantee(!JavaObjectsInPerm || entry.get_oop()->is_perm(),
@@ -467,8 +477,11 @@
         } else {
           // can be non-perm, can be non-instance (array)
         }
+      } else if (tag.is_object()) {
+        assert(entry.get_oop()->is_oop(), "should be some valid oop");
+      } else {
+        assert(!cp->is_pointer_entry(i), "unhandled oop type in constantPoolKlass::verify_on");
       }
-      // FIXME: verify JSR 292 tags JVM_CONSTANT_MethodHandle, etc.
     }
     guarantee(cp->tags()->is_perm(),         "should be in permspace");
     guarantee(cp->tags()->is_typeArray(),    "should be type array");
--- a/src/share/vm/oops/constantPoolOop.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/constantPoolOop.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -266,6 +266,29 @@
 }
 
 
+methodOop constantPoolOopDesc::method_at_if_loaded(constantPoolHandle cpool,
+                                                   int which, Bytecodes::Code invoke_code) {
+  assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here");
+  if (cpool->cache() == NULL)  return NULL;   // nothing to load yet
+  int cache_index = which - CPCACHE_INDEX_TAG;
+  if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
+    if (PrintMiscellaneous && (Verbose||WizardMode)) {
+      tty->print_cr("bad operand %d for %d in:", which, invoke_code); cpool->print();
+    }
+    return NULL;
+  }
+  ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
+  if (invoke_code != Bytecodes::_illegal)
+    return e->get_method_if_resolved(invoke_code, cpool);
+  Bytecodes::Code bc;
+  if ((bc = e->bytecode_1()) != (Bytecodes::Code)0)
+    return e->get_method_if_resolved(bc, cpool);
+  if ((bc = e->bytecode_2()) != (Bytecodes::Code)0)
+    return e->get_method_if_resolved(bc, cpool);
+  return NULL;
+}
+
+
 Symbol* constantPoolOopDesc::impl_name_ref_at(int which, bool uncached) {
   int name_index = name_ref_index_at(impl_name_and_type_ref_index_at(which, uncached));
   return symbol_at(name_index);
--- a/src/share/vm/oops/constantPoolOop.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/constantPoolOop.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -103,7 +103,8 @@
 
   enum FlagBit {
     FB_has_invokedynamic = 1,
-    FB_has_pseudo_string = 2
+    FB_has_pseudo_string = 2,
+    FB_has_preresolution = 3
   };
 
   int flags() const                         { return _flags; }
@@ -179,8 +180,10 @@
 
   bool has_pseudo_string() const            { return flag_at(FB_has_pseudo_string); }
   bool has_invokedynamic() const            { return flag_at(FB_has_invokedynamic); }
+  bool has_preresolution() const            { return flag_at(FB_has_preresolution); }
   void set_pseudo_string()                  {    set_flag_at(FB_has_pseudo_string); }
   void set_invokedynamic()                  {    set_flag_at(FB_has_invokedynamic); }
+  void set_preresolution()                  {    set_flag_at(FB_has_preresolution); }
 
   // Klass holding pool
   klassOop pool_holder() const              { return _pool_holder; }
@@ -663,6 +666,8 @@
   friend class SystemDictionary;
 
   // Used by compiler to prevent classloading.
+  static methodOop method_at_if_loaded        (constantPoolHandle this_oop, int which,
+                                               Bytecodes::Code bc = Bytecodes::_illegal);
   static klassOop klass_at_if_loaded          (constantPoolHandle this_oop, int which);
   static klassOop klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
   // Same as above - but does LinkResolving.
--- a/src/share/vm/oops/cpCacheOop.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/cpCacheOop.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -104,7 +104,7 @@
   void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
   bool success = (result == NULL);
   if (success) {
-    update_barrier_set(f1_addr, f1);
+    update_barrier_set((void*) f1_addr, f1);
   }
 }
 
@@ -275,24 +275,70 @@
   return (int) bsm_cache_index;
 }
 
-void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
-                                              methodHandle signature_invoker) {
+void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) {
   assert(is_secondary_entry(), "");
+  // NOTE: it is important that all other values are set before f1 is
+  // set, since some users short-circuit on f1 being non-null;
+  // otherwise racing threads may observe uninitialized values for the
+  // other fields (e.g. flags).
   int param_size = signature_invoker->size_of_parameters();
   assert(param_size >= 1, "method argument size must include MH.this");
-  param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
-  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
-    // racing threads might be trying to install their own favorites
-    set_f1(call_site());
-  }
+  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
   bool is_final = true;
   assert(signature_invoker->is_final_method(), "is_final");
-  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
+  int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size;
+  assert(_flags == 0 || _flags == flags, "flags should be the same");
+  set_flags(flags);
   // do not do set_bytecode on a secondary CP cache entry
   //set_bytecode_1(Bytecodes::_invokedynamic);
+  set_f1_if_null_atomic(call_site());  // This must be the last one to set (see NOTE above)!
 }
 
 
+methodOop ConstantPoolCacheEntry::get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool) {
+  assert(invoke_code > (Bytecodes::Code)0, "bad query");
+  if (is_secondary_entry()) {
+    return cpool->cache()->entry_at(main_entry_index())->get_method_if_resolved(invoke_code, cpool);
+  }
+  // Decode the action of set_method and set_interface_call
+  if (bytecode_1() == invoke_code) {
+    oop f1 = _f1;
+    if (f1 != NULL) {
+      switch (invoke_code) {
+      case Bytecodes::_invokeinterface:
+        assert(f1->is_klass(), "");
+        return klassItable::method_for_itable_index(klassOop(f1), (int) f2());
+      case Bytecodes::_invokestatic:
+      case Bytecodes::_invokespecial:
+        assert(f1->is_method(), "");
+        return methodOop(f1);
+      }
+    }
+  }
+  if (bytecode_2() == invoke_code) {
+    switch (invoke_code) {
+    case Bytecodes::_invokevirtual:
+      if (is_vfinal()) {
+        // invokevirtual
+        methodOop m = methodOop((intptr_t) f2());
+        assert(m->is_method(), "");
+        return m;
+      } else {
+        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
+        if (cpool->tag_at(holder_index).is_klass()) {
+          klassOop klass = cpool->resolved_klass_at(holder_index);
+          if (!Klass::cast(klass)->oop_is_instance())
+            klass = SystemDictionary::Object_klass();
+          return instanceKlass::cast(klass)->method_at_vtable((int) f2());
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+
+
 class LocalOopClosure: public OopClosure {
  private:
   void (*_f)(oop*);
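
The NOTE in set_dynamic_call above describes a publish-last protocol: every other
field of the cache entry is written before f1, because readers treat a non-null f1
as "fully resolved" and short-circuit on it. The following is a minimal stand-alone
sketch of that protocol using std::atomic; it is not VM code, and the field names
(flags, f1) are borrowed only for illustration. As in the VM (see the assert on
_flags), all racing publishers are expected to compute the same flags value.

#include <atomic>

struct Entry {
  int flags = 0;                      // plain field, written first
  std::atomic<void*> f1{nullptr};     // publication word, written last

  // Writer: initialize everything, then publish f1 exactly once.  The release
  // CAS orders the flags store before the f1 store; only the first caller wins.
  void publish(void* value, int new_flags) {
    flags = new_flags;                // all publishers must agree on this value
    void* expected = nullptr;
    f1.compare_exchange_strong(expected, value, std::memory_order_release);
  }

  // Reader: short-circuits on f1 being non-null, exactly as the VM comment warns.
  bool is_resolved(int* out_flags) const {
    void* v = f1.load(std::memory_order_acquire);
    if (v == nullptr) return false;
    *out_flags = flags;               // safe only because flags was written before f1
    return true;
  }
};
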
--- a/src/share/vm/oops/cpCacheOop.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/cpCacheOop.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -194,6 +194,8 @@
     methodHandle signature_invoker               // determines signature information
   );
 
+  methodOop get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool);
+
   // For JVM_CONSTANT_InvokeDynamic cache entries:
   void initialize_bootstrap_method_index_in_cache(int bsm_cache_index);
   int  bootstrap_method_index_in_cache();
--- a/src/share/vm/oops/generateOopMap.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/generateOopMap.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -963,10 +963,21 @@
   // initialize the CellTypeState-related information.
   init_state();
 
-  // We allocate space for all state-vectors for all basicblocks in one huge chuck.
-  // Then in the next part of the code, we set a pointer in each _basic_block that
-  // points to each piece.
-  CellTypeState *basicBlockState = NEW_RESOURCE_ARRAY(CellTypeState, bbNo * _state_len);
+  // We allocate space for all state-vectors for all basicblocks in one huge
+  // chunk.  Then in the next part of the code, we set a pointer in each
+  // _basic_block that points to each piece.
+
+  // The product of bbNo and _state_len can get large if there are lots of
+  // basic blocks and stack/locals/monitors.  Need to check to make sure
+  // we don't overflow the capacity of a pointer.
+  if ((unsigned)bbNo > UINTPTR_MAX / sizeof(CellTypeState) / _state_len) {
+    report_error("The amount of memory required to analyze this method "
+                 "exceeds addressable range");
+    return;
+  }
+
+  CellTypeState *basicBlockState =
+      NEW_RESOURCE_ARRAY(CellTypeState, bbNo * _state_len);
   memset(basicBlockState, 0, bbNo * _state_len * sizeof(CellTypeState));
 
   // Make a pass over the basicblocks and assign their state vectors.
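
The guard added in generateOopMap.cpp above rejects methods whose state-vector
allocation (bbNo * _state_len cells) would overflow the addressable range. Reduced
to a generic helper in plain C++ (illustrative only; the VM allocates from a
resource arena, not the C++ heap), the same "check the product before multiplying"
idea looks like this:

#include <cstddef>
#include <limits>
#include <new>

// Allocate count * elems_per_entry objects of type T, or return nullptr if the
// total byte size would overflow size_t (the caller reports an error and bails).
template <typename T>
T* checked_array_alloc(std::size_t count, std::size_t elems_per_entry) {
  if (elems_per_entry != 0 &&
      count > std::numeric_limits<std::size_t>::max() / sizeof(T) / elems_per_entry) {
    return nullptr;
  }
  return new (std::nothrow) T[count * elems_per_entry];
}
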
--- a/src/share/vm/oops/instanceKlass.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -335,6 +335,9 @@
         this_oop->rewrite_class(CHECK_false);
       }
 
+      // relocate jsrs and link methods after they are all rewritten
+      this_oop->relocate_and_link_methods(CHECK_false);
+
       // Initialize the vtable and interface table after
       // methods have been rewritten since rewrite may
       // fabricate new methodOops.
@@ -365,17 +368,8 @@
 
 
 // Rewrite the byte codes of all of the methods of a class.
-// Three cases:
-//    During the link of a newly loaded class.
-//    During the preloading of classes to be written to the shared spaces.
-//      - Rewrite the methods and update the method entry points.
-//
-//    During the link of a class in the shared spaces.
-//      - The methods were already rewritten, update the metho entry points.
-//
 // The rewriter must be called exactly once. Rewriting must happen after
 // verification but before the first method of the class is executed.
-
 void instanceKlass::rewrite_class(TRAPS) {
   assert(is_loaded(), "must be loaded");
   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
@@ -383,10 +377,19 @@
     assert(this_oop()->is_shared(), "rewriting an unshared class?");
     return;
   }
-  Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
+  Rewriter::rewrite(this_oop, CHECK);
   this_oop->set_rewritten();
 }
 
+// Now relocate and link the method entry points after the class has been
+// rewritten.  This step is not guarded by the is_rewritten flag, so in case
+// of an exception it can be executed more than once.
+void instanceKlass::relocate_and_link_methods(TRAPS) {
+  assert(is_loaded(), "must be loaded");
+  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
+  Rewriter::relocate_and_link(this_oop, CHECK);
+}
+
 
 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
   // Make sure klass is linked (verified) before initialization
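
The change above splits class setup into a one-shot rewrite (guarded by the
is_rewritten flag) and a relocate-and-link phase that may legitimately run more than
once, for example after an exception aborted an earlier attempt. A small sketch of
why that split is safe, using plain C++ stand-ins for the klass and its methods
(hypothetical names, not the VM types): the link step only touches methods whose
entry point is still unset, which is the same guard link_method gains later in this
changeset.

#include <vector>

struct Method { const void* entry = nullptr; };

struct Klass {
  bool rewritten = false;
  std::vector<Method> methods;

  void rewrite_once() {
    if (rewritten) return;            // bytecode rewriting must happen exactly once
    // ... rewrite bytecodes ...
    rewritten = true;
  }

  void relocate_and_link() {          // may be re-entered after a failure
    for (Method& m : methods) {
      if (m.entry != nullptr) continue;    // already linked on a previous attempt
      m.entry = &m;                        // stand-in for computing the entry point
    }
  }
};
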
--- a/src/share/vm/oops/instanceKlass.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/instanceKlass.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -392,6 +392,7 @@
   bool link_class_or_fail(TRAPS); // returns false on failure
   void unlink_class();
   void rewrite_class(TRAPS);
+  void relocate_and_link_methods(TRAPS);
   methodOop class_initializer();
 
   // set the class to initialized if no static initializer is present
@@ -401,6 +402,8 @@
   ReferenceType reference_type() const     { return _reference_type; }
   void set_reference_type(ReferenceType t) { _reference_type = t; }
 
+  static int reference_type_offset_in_bytes() { return offset_of(instanceKlass, _reference_type); }
+
   // find local field, returns true if found
   bool find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
   // find field in direct superinterfaces, returns the interface in which the field is defined
--- a/src/share/vm/oops/instanceKlassKlass.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/instanceKlassKlass.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -690,7 +690,8 @@
     guarantee(method_ordering->is_perm(),              "should be in permspace");
     guarantee(method_ordering->is_typeArray(),         "should be type array");
     int length = method_ordering->length();
-    if (JvmtiExport::can_maintain_original_method_order()) {
+    if (JvmtiExport::can_maintain_original_method_order() ||
+        (UseSharedSpaces && length != 0)) {
       guarantee(length == methods->length(),           "invalid method ordering length");
       jlong sum = 0;
       for (j = 0; j < length; j++) {
--- a/src/share/vm/oops/instanceRefKlass.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -397,7 +397,7 @@
 
   if (referent != NULL) {
     guarantee(referent->is_oop(), "referent field heap failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the referent
       // field is not part of the oop mask and therefore skipped by the
       // regular verify code.
@@ -415,7 +415,7 @@
   if (next != NULL) {
     guarantee(next->is_oop(), "next field verify failed");
     guarantee(next->is_instanceRef(), "next field verify failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the next field is
       // not part of the oop mask and therefore skipped by the regular
       // verify code.
--- a/src/share/vm/oops/klass.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/klass.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -454,6 +454,14 @@
       ik->unlink_class();
     }
   }
+  // Clear the Java vtable if the oop has one.
+  // The vtable isn't shareable because it ends up in the wrong order with
+  // respect to the methods once the method names are moved and re-sorted.
+  klassVtable* vt = vtable();
+  if (vt != NULL) {
+    assert(oop_is_instance() || oop_is_array(), "nothing else has vtable");
+    vt->clear_vtable();
+  }
   set_subklass(NULL);
   set_next_sibling(NULL);
 }
--- a/src/share/vm/oops/klassVtable.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/klassVtable.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -645,6 +645,15 @@
   }
 }
 
+// CDS/RedefineClasses support - clear vtables so they can be reinitialized
+void klassVtable::clear_vtable() {
+  for (int i = 0; i < _length; i++) table()[i].clear();
+}
+
+bool klassVtable::is_initialized() {
+  return _length == 0 || table()[0].method() != NULL;
+}
+
 
 // Garbage collection
 void klassVtable::oop_follow_contents() {
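
For reference, the two helpers added above have very little hidden machinery; the
sketch below restates them over a plain vector of method pointers instead of
vtableEntry, mainly to pin down the is_initialized convention: an empty vtable
counts as initialized, otherwise the first slot decides.

#include <algorithm>
#include <vector>

struct Vtable {
  std::vector<const void*> table;     // one slot per virtual method

  void clear_vtable() {               // wipe every slot so it can be reinitialized
    std::fill(table.begin(), table.end(), nullptr);
  }

  bool is_initialized() const {
    return table.empty() || table.front() != nullptr;
  }
};
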
--- a/src/share/vm/oops/klassVtable.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/klassVtable.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -75,7 +75,15 @@
 
   void initialize_vtable(bool checkconstraints, TRAPS);   // initialize vtable of a new klass
 
-  // conputes vtable length (in words) and the number of miranda methods
+  // CDS/RedefineClasses support - clear vtables so they can be reinitialized
+  // at dump time.  Clearing gives us an easy way to tell if the vtable has
+  // already been reinitialized at dump time (see dump.cpp).  Vtables can
+  // be initialized at run time by RedefineClasses, so dumping them in the
+  // right order is necessary.
+  void clear_vtable();
+  bool is_initialized();
+
+  // computes vtable length (in words) and the number of miranda methods
   static void compute_vtable_size_and_num_mirandas(int &vtable_length, int &num_miranda_methods,
                                                    klassOop super, objArrayOop methods,
                                                    AccessFlags class_flags, Handle classloader,
--- a/src/share/vm/oops/methodDataOop.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/methodDataOop.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1194,7 +1194,7 @@
   // Whole-method sticky bits and flags
 public:
   enum {
-    _trap_hist_limit    = 16,   // decoupled from Deoptimization::Reason_LIMIT
+    _trap_hist_limit    = 17,   // decoupled from Deoptimization::Reason_LIMIT
     _trap_hist_mask     = max_jubyte,
     _extra_data_count   = 4     // extra DataLayout headers, for trap history
   }; // Public flag values
--- a/src/share/vm/oops/methodOop.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/methodOop.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -699,7 +699,10 @@
 // Called when the method_holder is getting linked. Setup entrypoints so the method
 // is ready to be called from interpreter, compiler, and vtables.
 void methodOopDesc::link_method(methodHandle h_method, TRAPS) {
-  assert(_i2i_entry == NULL, "should only be called once");
+  // If the code cache is full, we may reenter this function for the
+  // leftover methods that weren't linked.
+  if (_i2i_entry != NULL) return;
+
   assert(_adapter == NULL, "init'd to NULL" );
   assert( _code == NULL, "nothing compiled yet" );
 
@@ -931,14 +934,40 @@
   name->increment_refcount();
   signature->increment_refcount();
 
+  // record non-BCP method types in the constant pool
+  GrowableArray<KlassHandle>* extra_klasses = NULL;
+  for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) {
+    oop ptype = (i == -1
+                 ? java_lang_invoke_MethodType::rtype(method_type())
+                 : java_lang_invoke_MethodType::ptype(method_type(), i));
+    klassOop klass = check_non_bcp_klass(java_lang_Class::as_klassOop(ptype));
+    if (klass != NULL) {
+      if (extra_klasses == NULL)
+        extra_klasses = new GrowableArray<KlassHandle>(len+1);
+      bool dup = false;
+      for (int j = 0; j < extra_klasses->length(); j++) {
+        if (extra_klasses->at(j) == klass) { dup = true; break; }
+      }
+      if (!dup)
+        extra_klasses->append(KlassHandle(THREAD, klass));
+    }
+  }
+
+  int extra_klass_count = (extra_klasses == NULL ? 0 : extra_klasses->length());
+  int cp_length = _imcp_limit + extra_klass_count;
   constantPoolHandle cp;
   {
-    constantPoolOop cp_oop = oopFactory::new_constantPool(_imcp_limit, IsSafeConc, CHECK_(empty));
+    constantPoolOop cp_oop = oopFactory::new_constantPool(cp_length, IsSafeConc, CHECK_(empty));
     cp = constantPoolHandle(THREAD, cp_oop);
   }
   cp->symbol_at_put(_imcp_invoke_name,       name);
   cp->symbol_at_put(_imcp_invoke_signature,  signature);
   cp->string_at_put(_imcp_method_type_value, Universe::the_null_string());
+  for (int j = 0; j < extra_klass_count; j++) {
+    KlassHandle klass = extra_klasses->at(j);
+    cp->klass_at_put(_imcp_limit + j, klass());
+  }
+  cp->set_preresolution();
   cp->set_pool_holder(holder());
 
   // set up the fancy stuff:
@@ -987,6 +1016,14 @@
   return m;
 }
 
+klassOop methodOopDesc::check_non_bcp_klass(klassOop klass) {
+  if (klass != NULL && Klass::cast(klass)->class_loader() != NULL) {
+    if (Klass::cast(klass)->oop_is_objArray())
+      klass = objArrayKlass::cast(klass)->bottom_klass();
+    return klass;
+  }
+  return NULL;
+}
 
 
 methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
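
The hunk above records, de-duplicated, every type in the MethodType that comes from
a non-boot class loader, using check_non_bcp_klass to unwrap object arrays down to
their bottom element type before deciding. A stand-alone sketch of that collection
logic over made-up Type records (the real code walks a java.lang.invoke.MethodType
and appends klassOops to a GrowableArray):

#include <vector>

struct Type {
  const void* loader;       // nullptr stands for the boot class loader
  const Type* element;      // non-null for array types: the element type
};

// Return the type to record, or nullptr if it comes from the boot loader.
const Type* non_bcp_type(const Type* t) {
  if (t == nullptr || t->loader == nullptr) return nullptr;
  while (t->element != nullptr) t = t->element;   // arrays: use the bottom type
  return t;
}

void collect_extra_types(const std::vector<const Type*>& signature,
                         std::vector<const Type*>& extra) {
  for (const Type* s : signature) {
    const Type* t = non_bcp_type(s);
    if (t == nullptr) continue;
    bool dup = false;
    for (const Type* e : extra) {
      if (e == t) { dup = true; break; }
    }
    if (!dup) extra.push_back(t);                 // append each type at most once
  }
}
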
--- a/src/share/vm/oops/methodOop.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/oops/methodOop.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -606,6 +606,7 @@
                                          Symbol* signature, //anything at all
                                          Handle method_type,
                                          TRAPS);
+  static klassOop check_non_bcp_klass(klassOop klass);
   // these operate only on invoke methods:
   oop method_handle_type() const;
   static jint* method_type_offsets_chain();  // series of pointer-offsets, terminated by -1
--- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -35,14 +35,16 @@
 
 //=============================================================================
 //------------------------------InlineTree-------------------------------------
-InlineTree::InlineTree( Compile* c,
-                        const InlineTree *caller_tree, ciMethod* callee,
-                        JVMState* caller_jvms, int caller_bci,
-                        float site_invoke_ratio, int site_depth_adjust)
-: C(c), _caller_jvms(caller_jvms),
-  _caller_tree((InlineTree*)caller_tree),
-  _method(callee), _site_invoke_ratio(site_invoke_ratio),
-  _site_depth_adjust(site_depth_adjust),
+InlineTree::InlineTree(Compile* c,
+                       const InlineTree *caller_tree, ciMethod* callee,
+                       JVMState* caller_jvms, int caller_bci,
+                       float site_invoke_ratio, int max_inline_level) :
+  C(c),
+  _caller_jvms(caller_jvms),
+  _caller_tree((InlineTree*) caller_tree),
+  _method(callee),
+  _site_invoke_ratio(site_invoke_ratio),
+  _max_inline_level(max_inline_level),
   _count_inline_bcs(method()->code_size())
 {
   NOT_PRODUCT(_count_inlines = 0;)
@@ -66,10 +68,13 @@
 }
 
 InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms,
-                       float site_invoke_ratio, int site_depth_adjust)
-: C(c), _caller_jvms(caller_jvms), _caller_tree(NULL),
-  _method(callee_method), _site_invoke_ratio(site_invoke_ratio),
-  _site_depth_adjust(site_depth_adjust),
+                       float site_invoke_ratio, int max_inline_level) :
+  C(c),
+  _caller_jvms(caller_jvms),
+  _caller_tree(NULL),
+  _method(callee_method),
+  _site_invoke_ratio(site_invoke_ratio),
+  _max_inline_level(max_inline_level),
   _count_inline_bcs(method()->code_size())
 {
   NOT_PRODUCT(_count_inlines = 0;)
@@ -89,12 +94,12 @@
 }
 
 // positive filter: should send be inlined?  returns NULL, if yes, or rejection msg
-const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
+const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
   // Allows targeted inlining
   if(callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
     if (PrintInlining && Verbose) {
-      CompileTask::print_inline_indent(inline_depth());
+      CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method is hot: ");
     }
     return NULL;
@@ -102,15 +107,14 @@
 
   // positive filter: should send be inlined?  returns NULL (--> yes)
   // or rejection msg
-  int max_size = C->max_inline_size();
-  int size     = callee_method->code_size();
+  int size = callee_method->code_size();
 
   // Check for too many throws (and not too huge)
   if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
      size < InlineThrowMaxSize ) {
     wci_result->set_profit(wci_result->profit() * 100);
     if (PrintInlining && Verbose) {
-      CompileTask::print_inline_indent(inline_depth());
+      CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
     }
     return NULL;
@@ -120,32 +124,50 @@
     return NULL;  // size and frequency are represented in a new way
   }
 
+  int default_max_inline_size = C->max_inline_size();
+  int inline_small_code_size  = InlineSmallCode / 4;
+  int max_inline_size         = default_max_inline_size;
+
   int call_site_count  = method()->scale_count(profile.count());
   int invoke_count     = method()->interpreter_invocation_count();
-  assert( invoke_count != 0, "Require invokation count greater than zero");
-  int freq = call_site_count/invoke_count;
+
+  // Bytecoded method handle adapters do not have interpreter
+  // profiling data, only made-up MDO data.  Get the counter from
+  // there.
+  if (caller_method->is_method_handle_adapter()) {
+    assert(method()->method_data_or_null(), "must have an MDO");
+    ciMethodData* mdo = method()->method_data();
+    ciProfileData* mha_profile = mdo->bci_to_data(caller_bci);
+    assert(mha_profile, "must exist");
+    CounterData* cd = mha_profile->as_CounterData();
+    invoke_count = cd->count();
+    call_site_count = invoke_count;  // use the same value
+  }
+
+  assert(invoke_count != 0, "require invocation count greater than zero");
+  int freq = call_site_count / invoke_count;
 
   // bump the max size if the call is frequent
   if ((freq >= InlineFrequencyRatio) ||
       (call_site_count >= InlineFrequencyCount) ||
       is_init_with_ea(callee_method, caller_method, C)) {
 
-    max_size = C->freq_inline_size();
-    if (size <= max_size && TraceFrequencyInlining) {
-      CompileTask::print_inline_indent(inline_depth());
+    max_inline_size = C->freq_inline_size();
+    if (size <= max_inline_size && TraceFrequencyInlining) {
+      CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined frequent method (freq=%d count=%d):", freq, call_site_count);
-      CompileTask::print_inline_indent(inline_depth());
+      CompileTask::print_inline_indent(inline_level());
       callee_method->print();
       tty->cr();
     }
   } else {
     // Not hot.  Check for medium-sized pre-existing nmethod at cold sites.
     if (callee_method->has_compiled_code() &&
-        callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode/4)
+        callee_method->instructions_size(CompLevel_full_optimization) > inline_small_code_size)
       return "already compiled into a medium method";
   }
-  if (size > max_size) {
-    if (max_size > C->max_inline_size())
+  if (size > max_inline_size) {
+    if (max_inline_size > default_max_inline_size)
       return "hot method too big";
     return "too big";
   }
@@ -154,7 +176,7 @@
 
 
 // negative filter: should send NOT be inlined?  returns NULL, ok to inline, or rejection msg
-const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const {
+const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const {
   // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
   if (!UseOldInlining) {
     const char* fail = NULL;
@@ -269,14 +291,13 @@
   }
 
   const char *msg = NULL;
-  if ((msg = shouldInline(callee_method, caller_method, caller_bci,
-                          profile, wci_result)) != NULL) {
+  msg = should_inline(callee_method, caller_method, caller_bci, profile, wci_result);
+  if (msg != NULL)
     return msg;
-  }
-  if ((msg = shouldNotInline(callee_method, caller_method,
-                             wci_result)) != NULL) {
+
+  msg = should_not_inline(callee_method, caller_method, wci_result);
+  if (msg != NULL)
     return msg;
-  }
 
   if (InlineAccessors && callee_method->is_accessor()) {
     // accessor methods are not subject to any of the following limits.
@@ -306,17 +327,18 @@
   if (!C->do_inlining() && InlineAccessors) {
     return "not an accessor";
   }
-  if( inline_depth() > MaxInlineLevel ) {
+  if (inline_level() > _max_inline_level) {
     return "inlining too deep";
   }
 
-  // We need to detect recursive inlining of method handle targets: if
-  // the current method is a method handle adapter and one of the
-  // callers is the same method as the callee, we bail out if
-  // MaxRecursiveInlineLevel is hit.
-  if (method()->is_method_handle_adapter()) {
+  // detect direct and indirect recursive inlining
+  {
+    // count the current method and the callee
+    int inline_level = (method() == callee_method) ? 1 : 0;
+    if (inline_level > MaxRecursiveInlineLevel)
+      return "recursively inlining too deep";
+    // count callers of current method and callee
     JVMState* jvms = caller_jvms();
-    int inline_level = 0;
     while (jvms != NULL && jvms->has_method()) {
       if (jvms->method() == callee_method) {
         inline_level++;
@@ -327,10 +349,6 @@
     }
   }
 
-  if (method() == callee_method && inline_depth() > MaxRecursiveInlineLevel) {
-    return "recursively inlining too deep";
-  }
-
   int size = callee_method->code_size();
 
   if (UseOldInlining && ClipInlining
@@ -376,11 +394,10 @@
   return true;
 }
 
-#ifndef PRODUCT
 //------------------------------print_inlining---------------------------------
 // Really, the failure_msg can be a success message also.
 void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
-  CompileTask::print_inlining(callee_method, inline_depth(), caller_bci, failure_msg ? failure_msg : "inline");
+  CompileTask::print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
   if (callee_method == NULL)  tty->print(" callee not monotonic or profiled");
   if (Verbose && callee_method) {
     const InlineTree *top = this;
@@ -388,7 +405,6 @@
     tty->print("  bcs: %d+%d  invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
   }
 }
-#endif
 
 //------------------------------ok_to_inline-----------------------------------
 WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci) {
@@ -489,26 +505,25 @@
   if (old_ilt != NULL) {
     return old_ilt;
   }
-  int new_depth_adjust = 0;
+  int max_inline_level_adjust = 0;
   if (caller_jvms->method() != NULL) {
     if (caller_jvms->method()->is_method_handle_adapter())
-      new_depth_adjust -= 1;  // don't count actions in MH or indy adapter frames
+      max_inline_level_adjust += 1;  // don't count actions in MH or indy adapter frames
     else if (callee_method->is_method_handle_invoke()) {
-      new_depth_adjust -= 1;  // don't count method handle calls from java.lang.invoke implem
+      max_inline_level_adjust += 1;  // don't count method handle calls from java.lang.invoke implem
     }
-    if (new_depth_adjust != 0 && PrintInlining) {
-      stringStream nm1; caller_jvms->method()->print_name(&nm1);
-      stringStream nm2; callee_method->print_name(&nm2);
-      tty->print_cr("discounting inlining depth from %s to %s", nm1.base(), nm2.base());
+    if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+      CompileTask::print_inline_indent(inline_level());
+      tty->print_cr(" \\-> discounting inline depth");
     }
-    if (new_depth_adjust != 0 && C->log()) {
+    if (max_inline_level_adjust != 0 && C->log()) {
       int id1 = C->log()->identify(caller_jvms->method());
       int id2 = C->log()->identify(callee_method);
-      C->log()->elem("inline_depth_discount caller='%d' callee='%d'", id1, id2);
+      C->log()->elem("inline_level_discount caller='%d' callee='%d'", id1, id2);
     }
   }
-  InlineTree *ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _site_depth_adjust + new_depth_adjust);
-  _subtrees.append( ilt );
+  InlineTree* ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _max_inline_level + max_inline_level_adjust);
+  _subtrees.append(ilt);
 
   NOT_PRODUCT( _count_inlines += 1; )
 
@@ -533,7 +548,7 @@
   Compile* C = Compile::current();
 
   // Root of inline tree
-  InlineTree *ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, 0);
+  InlineTree* ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, MaxInlineLevel);
 
   return ilt;
 }
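
The reworked recursion test above counts how many frames already correspond to the
callee (the current frame plus every matching caller frame) and refuses to inline
once MaxRecursiveInlineLevel is exceeded, for direct and indirect recursion alike.
The same walk, reduced to a stand-alone function over a hypothetical Frame chain
standing in for JVMState; only the counting logic is shown, and the exact point at
which the limit is tested is an assumption based on the visible part of the hunk.

struct Frame {
  const void* method;       // identity of the method executing in this frame
  const Frame* caller;      // next outer frame, or nullptr at the root
};

// Returns true when inlining 'callee' would exceed the recursion limit.
bool recursion_too_deep(const void* current_method, const void* callee,
                        const Frame* caller_chain, int max_recursive_level) {
  int level = (current_method == callee) ? 1 : 0;   // count the current frame
  if (level > max_recursive_level) return true;
  for (const Frame* f = caller_chain; f != nullptr; f = f->caller) {
    if (f->method == callee && ++level > max_recursive_level) {
      return true;                                  // "recursively inlining too deep"
    }
  }
  return false;
}
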
--- a/src/share/vm/opto/c2_globals.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/c2_globals.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -183,6 +183,21 @@
   develop(bool, TraceLoopOpts, false,                                       \
           "Trace executed loop optimizations")                              \
                                                                             \
+  diagnostic(bool, LoopLimitCheck, true,                                    \
+          "Generate a loop limits check for overflow")                      \
+                                                                            \
+  develop(bool, TraceLoopLimitCheck, false,                                 \
+          "Trace generation of loop limits checks")                         \
+                                                                            \
+  diagnostic(bool, RangeLimitCheck, true,                                   \
+          "Additional overflow checks during range check elimination")      \
+                                                                            \
+  develop(bool, TraceRangeLimitCheck, false,                                \
+          "Trace additional overflow checks in RCE")                        \
+                                                                            \
+  diagnostic(bool, UnrollLimitCheck, true,                                  \
+          "Additional overflow checks during loop unroll")                  \
+                                                                            \
   product(bool, OptimizeFill, false,                                        \
           "convert fill/copy loops into intrinsic")                         \
                                                                             \
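
LoopLimitCheck, RangeLimitCheck and UnrollLimitCheck above all guard loop
transformations against integer wrap-around in the transformed exit tests. The
snippet below is not C2 code; it only demonstrates the arithmetic hazard these
checks exist for (unsigned arithmetic is used so the demonstration itself stays
well-defined):

#include <climits>
#include <cstdio>

int main() {
  int limit  = INT_MAX - 1;
  int stride = 4;
  // An exit test rewritten to the form "i < limit + stride" is only correct if
  // the addition cannot overflow; near INT_MAX it wraps to a negative value on
  // two's-complement targets, so a runtime limit check must guard the transform.
  long long exact   = static_cast<long long>(limit) + stride;
  int       wrapped = static_cast<int>(static_cast<unsigned>(limit) +
                                       static_cast<unsigned>(stride));
  std::printf("exact=%lld wrapped=%d\n", exact, wrapped);
  return 0;
}
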
--- a/src/share/vm/opto/callGenerator.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/callGenerator.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -698,6 +698,46 @@
 }
 
 
+CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms,
+                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
+  if (method_handle->Opcode() == Op_ConP) {
+    const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr();
+    ciObject* const_oop = oop_ptr->const_oop();
+    ciMethodHandle* method_handle = const_oop->as_method_handle();
+
+    // Set the callee to have access to the class and signature in
+    // the MethodHandleCompiler.
+    method_handle->set_callee(callee);
+    method_handle->set_caller(caller);
+    method_handle->set_call_profile(profile);
+
+    // Get an adapter for the MethodHandle.
+    ciMethod* target_method = method_handle->get_method_handle_adapter();
+    if (target_method != NULL) {
+      CallGenerator* hit_cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, 1);
+      if (hit_cg != NULL && hit_cg->is_inline())
+        return hit_cg;
+    }
+  } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 &&
+             method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) {
+    // selectAlternative idiom merging two constant MethodHandles.
+    // Generate a guard so that each can be inlined.  We might want to
+    // handle more inputs at a later point, but this covers the most
+    // common case.
+    const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
+    ciObject* const_oop = oop_ptr->const_oop();
+    ciMethodHandle* mh = const_oop->as_method_handle();
+
+    CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile);
+    CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile);
+    if (cg1 != NULL && cg2 != NULL) {
+      return new PredictedDynamicCallGenerator(mh, cg2, cg1, PROB_FAIR);
+    }
+  }
+  return NULL;
+}
+
+
 JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
@@ -707,33 +747,45 @@
     log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
   }
 
-  // Get the constant pool cache from the caller class.
-  ciMethod* caller_method = jvms->method();
-  ciBytecodeStream str(caller_method);
-  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
-  ciCPCache* cpcache = str.get_cpcache();
-
-  // Get the offset of the CallSite from the constant pool cache
-  // pointer.
-  int index = str.get_method_index();
-  size_t call_site_offset = cpcache->get_f1_offset(index);
-
-  // Load the CallSite object from the constant pool cache.
-  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
-  Node* cpcache_adr   = kit.makecon(cpcache_ptr);
-  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
-  Node* call_site     = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
-
-  // Load the target MethodHandle from the CallSite object.
-  Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
-  Node* target_mh  = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
-
-  // Check if the MethodHandle is still the same.
   const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
   Node* predicted_mh = kit.makecon(predicted_mh_ptr);
 
-  Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
-  Node* bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
+  Node* bol = NULL;
+  int bc = jvms->method()->java_code_at_bci(jvms->bci());
+  if (bc == Bytecodes::_invokespecial) {
+    // This is the selectAlternative idiom for guardWithTest
+    Node* receiver = kit.argument(0);
+
+    // Check if the MethodHandle is the expected one
+    Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(receiver, predicted_mh));
+    bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
+  } else {
+    assert(bc == Bytecodes::_invokedynamic, "must be");
+    // Get the constant pool cache from the caller class.
+    ciMethod* caller_method = jvms->method();
+    ciBytecodeStream str(caller_method);
+    str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
+    ciCPCache* cpcache = str.get_cpcache();
+
+    // Get the offset of the CallSite from the constant pool cache
+    // pointer.
+    int index = str.get_method_index();
+    size_t call_site_offset = cpcache->get_f1_offset(index);
+
+    // Load the CallSite object from the constant pool cache.
+    const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
+    Node* cpcache_adr   = kit.makecon(cpcache_ptr);
+    Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
+    Node* call_site     = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
+
+    // Load the target MethodHandle from the CallSite object.
+    Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
+    Node* target_mh  = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
+
+    // Check if the MethodHandle is still the same.
+    Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
+    bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
+  }
   IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
   kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
   Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));
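
Both for_method_handle_inline and the reworked PredictedDynamicCallGenerator above
reduce to the same control-flow shape: compare the incoming MethodHandle against a
predicted constant and branch to the inlined target on a match, otherwise fall back
to the generic call. A deliberately tiny stand-alone sketch of that shape (hit and
miss are placeholders, not VM entry points):

#include <cstdio>

static int hit (int x) { return x + 1; }   // stands for the inlined target
static int miss(int x) { return x - 1; }   // stands for the generic (slow) call

int predicted_call(const void* receiver, const void* predicted, int arg) {
  // The guard emitted by the call generator: pointer-compare against the
  // predicted constant, then dispatch to the fast or slow path.
  return (receiver == predicted) ? hit(arg) : miss(arg);
}

int main() {
  int dummy = 0;
  const void* predicted = &dummy;
  std::printf("%d %d\n",
              predicted_call(predicted, predicted, 41),   // fast path: 42
              predicted_call(nullptr,   predicted, 41));  // slow path: 40
  return 0;
}
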
--- a/src/share/vm/opto/callGenerator.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/callGenerator.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -111,6 +111,8 @@
   static CallGenerator* for_dynamic_call(ciMethod* m);   // invokedynamic
   static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
 
+  static CallGenerator* for_method_handle_inline(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
+
   // How to generate a replacement for a direct call with an inline version
   static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
 
--- a/src/share/vm/opto/cfgnode.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/cfgnode.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1373,7 +1373,7 @@
 
   // Clone loop predicates
   if (predicate_proj != NULL) {
-    newn = igvn->clone_loop_predicates(predicate_proj, newn);
+    newn = igvn->clone_loop_predicates(predicate_proj, newn, !n->is_CountedLoop());
   }
 
   // Now I can point to the new node.
@@ -1556,7 +1556,9 @@
 
   Node *top = phase->C->top();
   bool new_phi = (outcnt() == 0); // transforming new Phi
-  assert(!can_reshape || !new_phi, "for igvn new phi should be hooked");
+  // No change for igvn if new phi is not hooked
+  if (new_phi && can_reshape)
+    return NULL;
 
   // The are 2 situations when only one valid phi's input is left
   // (in addition to Region input).
--- a/src/share/vm/opto/classes.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/classes.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -156,6 +156,7 @@
 macro(LogD)
 macro(Log10D)
 macro(Loop)
+macro(LoopLimit)
 macro(Mach)
 macro(MachProj)
 macro(MaxI)
--- a/src/share/vm/opto/compile.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/compile.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -629,7 +629,7 @@
     initial_gvn()->transform_no_reclaim(top());
 
     // Set up tf(), start(), and find a CallGenerator.
-    CallGenerator* cg;
+    CallGenerator* cg = NULL;
     if (is_osr_compilation()) {
       const TypeTuple *domain = StartOSRNode::osr_domain();
       const TypeTuple *range = TypeTuple::make_range(method()->signature());
@@ -644,9 +644,24 @@
       StartNode* s = new (this, 2) StartNode(root(), tf()->domain());
       initial_gvn()->set_type_bottom(s);
       init_start(s);
-      float past_uses = method()->interpreter_invocation_count();
-      float expected_uses = past_uses;
-      cg = CallGenerator::for_inline(method(), expected_uses);
+      if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
+        // With java.lang.ref.Reference.get() we must go through the
+        // intrinsic when G1 is enabled - even when get() is the root
+        // method of the compile - so that, if necessary, the value in
+        // the referent field of the reference object gets recorded by
+        // the pre-barrier code.
+        // Specifically, if G1 is enabled, the value in the referent
+        // field is recorded by the G1 SATB pre barrier. This will
+        // result in the referent being marked live and the reference
+        // object removed from the list of discovered references during
+        // reference processing.
+        cg = find_intrinsic(method(), false);
+      }
+      if (cg == NULL) {
+        float past_uses = method()->interpreter_invocation_count();
+        float expected_uses = past_uses;
+        cg = CallGenerator::for_inline(method(), expected_uses);
+      }
     }
     if (failing())  return;
     if (cg == NULL) {
@@ -2041,6 +2056,52 @@
   // Note that OffsetBot and OffsetTop are very negative.
 }
 
+// Eliminate trivially redundant StoreCMs and accumulate their
+// precedence edges.
+static void eliminate_redundant_card_marks(Node* n) {
+  assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
+  if (n->in(MemNode::Address)->outcnt() > 1) {
+    // There are multiple users of the same address so it might be
+    // possible to eliminate some of the StoreCMs
+    Node* mem = n->in(MemNode::Memory);
+    Node* adr = n->in(MemNode::Address);
+    Node* val = n->in(MemNode::ValueIn);
+    Node* prev = n;
+    bool done = false;
+    // Walk the chain of StoreCMs eliminating ones that match.  As
+    // long as it's a chain of single users, the optimization is
+    // safe.  Eliminating partially redundant StoreCMs would require
+    // cloning copies down the other paths.
+    while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
+      if (adr == mem->in(MemNode::Address) &&
+          val == mem->in(MemNode::ValueIn)) {
+        // redundant StoreCM
+        if (mem->req() > MemNode::OopStore) {
+          // Hasn't been processed by this code yet.
+          n->add_prec(mem->in(MemNode::OopStore));
+        } else {
+          // Already converted to precedence edge
+          for (uint i = mem->req(); i < mem->len(); i++) {
+            // Accumulate any precedence edges
+            if (mem->in(i) != NULL) {
+              n->add_prec(mem->in(i));
+            }
+          }
+          // Everything above this point has been processed.
+          done = true;
+        }
+        // Eliminate the previous StoreCM
+        prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
+        assert(mem->outcnt() == 0, "should be dead");
+        mem->disconnect_inputs(NULL);
+      } else {
+        prev = mem;
+      }
+      mem = prev->in(MemNode::Memory);
+    }
+  }
+}
+
 //------------------------------final_graph_reshaping_impl----------------------
 // Implement items 1-5 from final_graph_reshaping below.
 static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
@@ -2167,9 +2228,19 @@
     frc.inc_float_count();
     goto handle_mem;
 
+  case Op_StoreCM:
+    {
+      // Convert OopStore dependence into precedence edge
+      Node* prec = n->in(MemNode::OopStore);
+      n->del_req(MemNode::OopStore);
+      n->add_prec(prec);
+      eliminate_redundant_card_marks(n);
+    }
+
+    // fall through
+
   case Op_StoreB:
   case Op_StoreC:
-  case Op_StoreCM:
   case Op_StorePConditional:
   case Op_StoreI:
   case Op_StoreL:
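
eliminate_redundant_card_marks above walks a single-user chain of StoreCM nodes and
splices out those that dirty the same card with the same value, folding their extra
dependencies onto the surviving store. The sketch below restates that splice over a
plain linked structure; it deliberately omits the "already converted to precedence
edge" bookkeeping and the early 'done' exit of the real code.

#include <vector>

struct StoreCM {
  const void* addr;                   // card address being dirtied
  int value;                          // value stored (the dirty-card value)
  StoreCM* memory_in;                 // previous store on the memory chain
  std::vector<StoreCM*> prec;         // accumulated precedence-only dependencies
  int users = 1;                      // splicing is only safe on single-user chains
};

void eliminate_redundant_card_marks(StoreCM* n) {
  StoreCM* prev = n;
  StoreCM* mem  = n->memory_in;
  while (mem != nullptr && mem->users == 1) {
    if (mem->addr == n->addr && mem->value == n->value) {
      // Redundant card mark: keep its dependencies on the survivor, splice it out.
      for (StoreCM* d : mem->prec) n->prec.push_back(d);
      prev->memory_in = mem->memory_in;
    } else {
      prev = mem;
    }
    mem = prev->memory_in;
  }
}
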
--- a/src/share/vm/opto/doCall.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/doCall.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -62,7 +62,10 @@
 CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
                                        JVMState* jvms, bool allow_inline,
                                        float prof_factor) {
-  CallGenerator* cg;
+  CallGenerator*  cg;
+  ciMethod*       caller   = jvms->method();
+  int             bci      = jvms->bci();
+  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
   guarantee(call_method != NULL, "failed method resolution");
 
   // Dtrace currently doesn't work unless all calls are vanilla
@@ -73,7 +76,7 @@
   // Note: When we get profiling during stage-1 compiles, we want to pull
   // from more specific profile data which pertains to this inlining.
   // Right now, ignore the information in jvms->caller(), and do method[bci].
-  ciCallProfile profile = jvms->method()->call_profile_at_bci(jvms->bci());
+  ciCallProfile profile = caller->call_profile_at_bci(bci);
 
   // See how many times this site has been invoked.
   int site_count = profile.count();
@@ -116,26 +119,13 @@
   // MethodHandle.invoke* are native methods which obviously don't
   // have bytecodes and so normal inlining fails.
   if (call_method->is_method_handle_invoke()) {
-    if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) {
+    if (bytecode != Bytecodes::_invokedynamic) {
       GraphKit kit(jvms);
       Node* n = kit.argument(0);
 
-      if (n->Opcode() == Op_ConP) {
-        const TypeOopPtr* oop_ptr = n->bottom_type()->is_oopptr();
-        ciObject* const_oop = oop_ptr->const_oop();
-        ciMethodHandle* method_handle = const_oop->as_method_handle();
-
-        // Set the actually called method to have access to the class
-        // and signature in the MethodHandleCompiler.
-        method_handle->set_callee(call_method);
-
-        // Get an adapter for the MethodHandle.
-        ciMethod* target_method = method_handle->get_method_handle_adapter();
-        CallGenerator* hit_cg = NULL;
-        if (target_method != NULL)
-          hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
-        if (hit_cg != NULL && hit_cg->is_inline())
-          return hit_cg;
+      CallGenerator* cg = CallGenerator::for_method_handle_inline(n, jvms, caller, call_method, profile);
+      if (cg != NULL) {
+        return cg;
       }
 
       return CallGenerator::for_direct_call(call_method);
@@ -148,18 +138,20 @@
       ciCallSite*     call_site     = str.get_call_site();
       ciMethodHandle* method_handle = call_site->get_target();
 
-      // Set the actually called method to have access to the class
-      // and signature in the MethodHandleCompiler.
+      // Set the callee to have access to the class and signature in
+      // the MethodHandleCompiler.
       method_handle->set_callee(call_method);
+      method_handle->set_caller(caller);
+      method_handle->set_call_profile(profile);
 
       // Get an adapter for the MethodHandle.
       ciMethod* target_method = method_handle->get_invokedynamic_adapter();
-      CallGenerator* hit_cg = NULL;
-      if (target_method != NULL)
-        hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
-      if (hit_cg != NULL && hit_cg->is_inline()) {
-        CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
-        return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
+      if (target_method != NULL) {
+        CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
+        if (hit_cg != NULL && hit_cg->is_inline()) {
+          CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
+          return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
+        }
       }
 
       // If something failed, generate a normal dynamic call.
@@ -191,7 +183,7 @@
         // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
         float site_invoke_ratio = prof_factor;
         // Note:  ilt is for the root of this parse, not the present call site.
-        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, 0);
+        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
       }
       WarmCallInfo scratch_ci;
       if (!UseOldInlining)
--- a/src/share/vm/opto/escape.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/escape.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1437,7 +1437,10 @@
 
   // Update the memory inputs of MemNodes with the value we computed
   // in Phase 2 and move stores memory users to corresponding memory slices.
-#ifdef ASSERT
+
+  // Disable memory split verification code until the fix for 6984348.
+  // Currently it produces false negative results since it does not cover all cases.
+#if 0 // ifdef ASSERT
   visited.Reset();
   Node_Stack old_mems(arena, _compile->unique() >> 2);
 #endif
@@ -1447,7 +1450,7 @@
       Node *n = ptnode_adr(i)->_node;
       assert(n != NULL, "sanity");
       if (n->is_Mem()) {
-#ifdef ASSERT
+#if 0 // ifdef ASSERT
         Node* old_mem = n->in(MemNode::Memory);
         if (!visited.test_set(old_mem->_idx)) {
           old_mems.push(old_mem, old_mem->outcnt());
@@ -1469,13 +1472,13 @@
       }
     }
   }
-#ifdef ASSERT
+#if 0 // ifdef ASSERT
   // Verify that memory was split correctly
   while (old_mems.is_nonempty()) {
     Node* old_mem = old_mems.node();
     uint  old_cnt = old_mems.index();
     old_mems.pop();
-    assert(old_cnt = old_mem->outcnt(), "old mem could be lost");
+    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
   }
 #endif
 }
@@ -1744,6 +1747,25 @@
   _collecting = false;
   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
 
+  if (EliminateLocks) {
+    // Mark locks before changing ideal graph.
+    int cnt = C->macro_count();
+    for( int i=0; i < cnt; i++ ) {
+      Node *n = C->macro_node(i);
+      if (n->is_AbstractLock()) { // Lock and Unlock nodes
+        AbstractLockNode* alock = n->as_AbstractLock();
+        if (!alock->is_eliminated()) {
+          PointsToNode::EscapeState es = escape_state(alock->obj_node());
+          assert(es != PointsToNode::UnknownEscape, "should know");
+          if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
+            // Mark it eliminated
+            alock->set_eliminated();
+          }
+        }
+      }
+    }
+  }
+
 #ifndef PRODUCT
   if (PrintEscapeAnalysis) {
     dump(); // Dump ConnectionGraph
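
The EliminateLocks pass added above marks a Lock/Unlock node as eliminated only when
escape analysis has proven that the locked object does not escape globally (and its
escape state is actually known). Restated over hypothetical records standing in for
PointsToNode and AbstractLockNode, the rule is just:

#include <vector>

enum EscapeState { UnknownEscape, NoEscape, ArgEscape, GlobalEscape };

struct LockNode {
  EscapeState obj_escape_state;   // escape state of the locked object
  bool eliminated = false;
};

void mark_eliminated_locks(std::vector<LockNode>& locks) {
  for (LockNode& l : locks) {
    if (l.eliminated) continue;
    if (l.obj_escape_state != UnknownEscape &&
        l.obj_escape_state != GlobalEscape) {
      l.eliminated = true;        // object is thread-local enough to drop the lock
    }
  }
}
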
--- a/src/share/vm/opto/graphKit.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/graphKit.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1033,14 +1033,10 @@
       iter.reset_to_bci(bci());
       iter.next();
       ciMethod* method = iter.get_method(ignore);
-      inputs = method->arg_size_no_receiver();
-      // Add a receiver argument, maybe:
-      if (code != Bytecodes::_invokestatic &&
-          code != Bytecodes::_invokedynamic)
-        inputs += 1;
       // (Do not use ciMethod::arg_size(), because
       // it might be an unloaded method, which doesn't
       // know whether it is static or not.)
+      inputs = method->invoke_arg_size(code);
       int size = method->return_type()->size();
       depth = size - inputs;
     }
@@ -1457,19 +1453,22 @@
 }
 
 
-void GraphKit::pre_barrier(Node* ctl,
+void GraphKit::pre_barrier(bool do_load,
+                           Node* ctl,
                            Node* obj,
                            Node* adr,
                            uint  adr_idx,
                            Node* val,
                            const TypeOopPtr* val_type,
+                           Node* pre_val,
                            BasicType bt) {
+
   BarrierSet* bs = Universe::heap()->barrier_set();
   set_control(ctl);
   switch (bs->kind()) {
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
-      g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt);
+      g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
       break;
 
     case BarrierSet::CardTableModRef:
@@ -1532,7 +1531,11 @@
   uint adr_idx = C->get_alias_index(adr_type);
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
 
-  pre_barrier(control(), obj, adr, adr_idx, val, val_type, bt);
+  pre_barrier(true /* do_load */,
+              control(), obj, adr, adr_idx, val, val_type,
+              NULL /* pre_val */,
+              bt);
+
   Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
   return store;
@@ -2950,8 +2953,7 @@
 
 //---------------------------set_output_for_allocation-------------------------
 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
-                                          const TypeOopPtr* oop_type,
-                                          bool raw_mem_only) {
+                                          const TypeOopPtr* oop_type) {
   int rawidx = Compile::AliasIdxRaw;
   alloc->set_req( TypeFunc::FramePtr, frameptr() );
   add_safepoint_edges(alloc);
@@ -2975,7 +2977,7 @@
                                                  rawoop)->as_Initialize();
   assert(alloc->initialization() == init,  "2-way macro link must work");
   assert(init ->allocation()     == alloc, "2-way macro link must work");
-  if (ReduceFieldZeroing && !raw_mem_only) {
+  {
     // Extract memory strands which may participate in the new object's
     // initialization, and source them from the new InitializeNode.
     // This will allow us to observe initializations when they occur,
@@ -3036,11 +3038,9 @@
 // the type to a constant.
 // The optional arguments are for specialized use by intrinsics:
 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
-//  - If 'raw_mem_only', do not cast the result to an oop.
 //  - If 'return_size_val', report the total object size to the caller.
 Node* GraphKit::new_instance(Node* klass_node,
                              Node* extra_slow_test,
-                             bool raw_mem_only, // affect only raw memory
                              Node* *return_size_val) {
   // Compute size in doublewords
   // The size is always an integral number of doublewords, represented
@@ -3111,7 +3111,7 @@
                      size, klass_node,
                      initial_slow_test);
 
-  return set_output_for_allocation(alloc, oop_type, raw_mem_only);
+  return set_output_for_allocation(alloc, oop_type);
 }
 
 //-------------------------------new_array-------------------------------------
@@ -3121,7 +3121,6 @@
 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                           Node* length,         // number of array elements
                           int   nargs,          // number of arguments to push back for uncommon trap
-                          bool raw_mem_only,    // affect only raw memory
                           Node* *return_size_val) {
   jint  layout_con = Klass::_lh_neutral_value;
   Node* layout_val = get_layout_helper(klass_node, layout_con);
@@ -3266,7 +3265,7 @@
     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
   }
 
-  Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
+  Node* javaoop = set_output_for_allocation(alloc, ary_type);
 
   // Cast length on remaining path to be as narrow as possible
   if (map()->find_edge(length) >= 0) {
@@ -3379,6 +3378,10 @@
   if (UseLoopPredicate) {
     add_predicate_impl(Deoptimization::Reason_predicate, nargs);
   }
+  // loop's limit check predicate should be near the loop.
+  if (LoopLimitCheck) {
+    add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
+  }
 }
 
 //----------------------------- store barriers ----------------------------
@@ -3455,9 +3458,22 @@
 
   // Get the alias_index for raw card-mark memory
   int adr_type = Compile::AliasIdxRaw;
+  Node*   zero = __ ConI(0); // Dirty card value
+  BasicType bt = T_BYTE;
+
+  if (UseCondCardMark) {
+    // The classic GC reference write barrier is typically implemented
+    // as a store into the global card mark table.  Unfortunately
+    // unconditional stores can result in false sharing and excessive
+    // coherence traffic as well as false transactional aborts.
+    // UseCondCardMark enables MP "polite" conditional card mark
+    // stores.  In theory we could relax the load from ctrl() to
+    // no_ctrl, but that doesn't buy much latitude.
+    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
+    __ if_then(card_val, BoolTest::ne, zero);
+  }
+
   // Smash zero into card
-  Node*   zero = __ ConI(0);
-  BasicType bt = T_BYTE;
   if( !UseConcMarkSweepGC ) {
     __ store(__ ctrl(), card_adr, zero, bt, adr_type);
   } else {
@@ -3465,17 +3481,40 @@
     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
   }
 
+  if (UseCondCardMark) {
+    __ end_if();
+  }
+
   // Final sync IdealKit and GraphKit.
   final_sync(ideal);
 }
 
 // G1 pre/post barriers
-void GraphKit::g1_write_barrier_pre(Node* obj,
+void GraphKit::g1_write_barrier_pre(bool do_load,
+                                    Node* obj,
                                     Node* adr,
                                     uint alias_idx,
                                     Node* val,
                                     const TypeOopPtr* val_type,
+                                    Node* pre_val,
                                     BasicType bt) {
+
+  // Some sanity checks
+  // Note: val is unused in this routine.
+
+  if (do_load) {
+    // We need to generate the load of the previous value
+    assert(obj != NULL, "must have a base");
+    assert(adr != NULL, "where are loading from?");
+    assert(pre_val == NULL, "loaded already?");
+    assert(val_type != NULL, "need a type");
+  } else {
+    // In this case both val_type and alias_idx are unused.
+    assert(pre_val != NULL, "must be loaded already");
+    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
+  }
+  assert(bt == T_OBJECT, "or we shouldn't be here");
+
   IdealKit ideal(this, true);
 
   Node* tls = __ thread(); // ThreadLocalStorage
@@ -3497,32 +3536,28 @@
                                           PtrQueue::byte_offset_of_index());
   const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
                                           PtrQueue::byte_offset_of_buf());
+
   // Now the actual pointers into the thread
-
-  // set_control( ctl);
-
   Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
   Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
   Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
 
   // Now some of the values
-
   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
 
   // if (!marking)
   __ if_then(marking, BoolTest::ne, zero); {
     Node* index   = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
 
-    const Type* t1 = adr->bottom_type();
-    const Type* t2 = val->bottom_type();
-
-    Node* orig = __ load(no_ctrl, adr, val_type, bt, alias_idx);
-    // if (orig != NULL)
-    __ if_then(orig, BoolTest::ne, null()); {
-      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
-
+    if (do_load) {
       // load original value
       // alias_idx correct??
+      pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
+    }
+
+    // if (pre_val != NULL)
+    __ if_then(pre_val, BoolTest::ne, null()); {
+      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
       // is the queue for this thread full?
       __ if_then(index, BoolTest::ne, zero, likely); {
@@ -3536,10 +3571,9 @@
         next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
 #endif
 
-        // Now get the buffer location we will log the original value into and store it
+        // Now get the buffer location we will log the previous value into and store it
         Node *log_addr = __ AddP(no_base, buffer, next_indexX);
-        __ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw);
-
+        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
         // update the index
         __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
 
@@ -3547,9 +3581,9 @@
 
         // logging buffer is full, call the runtime
         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
-        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls);
+        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
       } __ end_if();  // (!index)
-    } __ end_if();  // (orig != NULL)
+    } __ end_if();  // (pre_val != NULL)
   } __ end_if();  // (!marking)
 
   // Final sync IdealKit and GraphKit.
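
Note on the UseCondCardMark hunk above: at runtime the conditional card mark it emits amounts to the small filter sketched below. This is an illustrative C++ rendering only, not part of the changeset; card_table_base, card_shift and dirty_card are stand-ins for the VM's card table parameters (the IR above uses zero as the dirty value).

    #include <cstdint>

    // Stand-ins for the card table parameters the barrier uses.
    static unsigned char*      card_table_base;   // byte-map base (placeholder)
    static const int           card_shift = 9;    // 512-byte cards (typical value)
    static const unsigned char dirty_card = 0;    // the "zero" stored by the hunk above

    // Runtime shape of the post barrier for one reference store.
    static void card_mark(const void* field_addr, bool use_cond_card_mark) {
      unsigned char* card = card_table_base + ((uintptr_t)field_addr >> card_shift);
      // UseCondCardMark: load the card first and skip the store when it is already
      // dirty, avoiding the false sharing and coherence traffic described above.
      if (use_cond_card_mark && *card == dirty_card)
        return;
      // Otherwise dirty the card unconditionally (the IR keeps using StoreCM under
      // CMS/G1 so the card store stays ordered after the covered oop store).
      *card = dirty_card;
    }
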
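Likewise, for the g1_write_barrier_pre() changes above, a minimal C++ sketch of the runtime logic the IdealKit graph encodes. The queue layout and index bookkeeping are simplified assumptions (the next_index computation falls outside the hunks shown), and g1_wb_pre_stub stands in for the SharedRuntime::g1_wb_pre leaf call.

    #include <cstddef>

    // Simplified view of the thread-local SATB mark queue the barrier touches.
    struct SatbQueueSketch {
      bool   active;   // "marking": only log while concurrent marking is running
      size_t index;    // byte index into buf, counting down; 0 means the buffer is full
      void** buf;      // current log buffer
    };

    static void g1_wb_pre_stub(void* pre_val, void* thread) { (void)pre_val; (void)thread; }

    // Log the previous value of an oop field: loaded by the barrier itself when
    // do_load is true, or supplied by the caller as pre_val when do_load is false.
    static void g1_pre_barrier(SatbQueueSketch* q, void* pre_val, void* thread) {
      if (!q->active)         return;       // if (!marking) -> nothing to do
      if (pre_val == nullptr) return;       // if (pre_val != NULL) guard
      if (q->index != 0) {                  // room left in the local log buffer?
        q->index -= sizeof(void*);
        q->buf[q->index / sizeof(void*)] = pre_val;  // store at buffer + next_index
      } else {
        g1_wb_pre_stub(pre_val, thread);    // buffer full: hand off to the runtime
      }
    }
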
--- a/src/share/vm/opto/graphKit.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/graphKit.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -544,8 +544,10 @@
                              BasicType bt);
 
   // For the few case where the barriers need special help
-  void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
-                   Node* val, const TypeOopPtr* val_type, BasicType bt);
+  void pre_barrier(bool do_load, Node* ctl,
+                   Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
+                   Node* pre_val,
+                   BasicType bt);
 
   void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                     Node* val, BasicType bt, bool use_precise);
@@ -671,11 +673,13 @@
                           Node* adr,  uint adr_idx, Node* val, bool use_precise);
 
   // G1 pre/post barriers
-  void g1_write_barrier_pre(Node* obj,
+  void g1_write_barrier_pre(bool do_load,
+                            Node* obj,
                             Node* adr,
                             uint alias_idx,
                             Node* val,
                             const TypeOopPtr* val_type,
+                            Node* pre_val,
                             BasicType bt);
 
   void g1_write_barrier_post(Node* store,
@@ -769,15 +773,13 @@
 
   // implementation of object creation
   Node* set_output_for_allocation(AllocateNode* alloc,
-                                  const TypeOopPtr* oop_type,
-                                  bool raw_mem_only);
+                                  const TypeOopPtr* oop_type);
   Node* get_layout_helper(Node* klass_node, jint& constant_value);
   Node* new_instance(Node* klass_node,
                      Node* slow_test = NULL,
-                     bool raw_mem_only = false,
                      Node* *return_size_val = NULL);
   Node* new_array(Node* klass_node, Node* count_val, int nargs,
-                  bool raw_mem_only = false, Node* *return_size_val = NULL);
+                  Node* *return_size_val = NULL);
 
   // Handy for making control flow
   IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
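
For quick reference, the widened pre_barrier() declaration above is used in two ways later in this changeset (see the library_call.cpp hunks below); these are the existing call shapes, reproduced here rather than new code:

    // A reference store (e.g. compareAndSwapObject): the barrier loads the old value itself.
    pre_barrier(true /* do_load */,
                control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
                NULL /* pre_val */,
                T_OBJECT);

    // An intrinsic read (Reference.get(), Unsafe access to the referent field): the value
    // just loaded is handed in as pre_val, so the load-related arguments are unused.
    pre_barrier(false /* do_load */,
                control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */,
                NULL /* val */, NULL /* val_type */,
                result /* pre_val */,
                T_OBJECT);
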
--- a/src/share/vm/opto/idealGraphPrinter.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/idealGraphPrinter.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -615,6 +615,7 @@
       }
     }
 
+#ifdef ASSERT
     if (node->debug_orig() != NULL) {
       stringStream dorigStream;
       Node* dorig = node->debug_orig();
@@ -629,6 +630,7 @@
       }
       print_prop("debug_orig", dorigStream.as_string());
     }
+#endif
 
     if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) {
       buffer[0] = 0;
--- a/src/share/vm/opto/ifnode.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/ifnode.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -236,6 +236,7 @@
   }
   Node* predicate_c = NULL;
   Node* predicate_x = NULL;
+  bool counted_loop = r->is_CountedLoop();
 
   Node *region_c = new (igvn->C, req_c + 1) RegionNode(req_c + 1);
   Node *phi_c    = con1;
@@ -294,16 +295,16 @@
   if (predicate_c != NULL) {
     assert(predicate_x == NULL, "only one predicate entry expected");
     // Clone loop predicates to each path
-    iff_c_t = igvn->clone_loop_predicates(predicate_c, iff_c_t);
-    iff_c_f = igvn->clone_loop_predicates(predicate_c, iff_c_f);
+    iff_c_t = igvn->clone_loop_predicates(predicate_c, iff_c_t, !counted_loop);
+    iff_c_f = igvn->clone_loop_predicates(predicate_c, iff_c_f, !counted_loop);
   }
   Node *iff_x_t = phase->transform(new (igvn->C, 1) IfTrueNode (iff_x));
   Node *iff_x_f = phase->transform(new (igvn->C, 1) IfFalseNode(iff_x));
   if (predicate_x != NULL) {
     assert(predicate_c == NULL, "only one predicate entry expected");
     // Clone loop predicates to each path
-    iff_x_t = igvn->clone_loop_predicates(predicate_x, iff_x_t);
-    iff_x_f = igvn->clone_loop_predicates(predicate_x, iff_x_f);
+    iff_x_t = igvn->clone_loop_predicates(predicate_x, iff_x_t, !counted_loop);
+    iff_x_f = igvn->clone_loop_predicates(predicate_x, iff_x_f, !counted_loop);
   }
 
   // Merge the TRUE paths
@@ -545,6 +546,7 @@
   Node *new_bol = gvn->transform( new (gvn->C, 2) BoolNode( new_cmp, bol->as_Bool()->_test._test ) );
   igvn->hash_delete( iff );
   iff->set_req_X( 1, new_bol, igvn );
+  igvn->_worklist.push( iff );
 }
 
 //------------------------------up_one_dom-------------------------------------
--- a/src/share/vm/opto/lcm.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/lcm.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -688,20 +688,22 @@
       }
       ready_cnt[n->_idx] = local; // Count em up
 
-      // A few node types require changing a required edge to a precedence edge
-      // before allocation.
+#ifdef ASSERT
       if( UseConcMarkSweepGC || UseG1GC ) {
         if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
-          // Note: Required edges with an index greater than oper_input_base
-          // are not supported by the allocator.
-          // Note2: Can only depend on unmatched edge being last,
-          // can not depend on its absolute position.
-          Node *oop_store = n->in(n->req() - 1);
-          n->del_req(n->req() - 1);
-          n->add_prec(oop_store);
-          assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+          // Check the precedence edges
+          for (uint prec = n->req(); prec < n->len(); prec++) {
+            Node* oop_store = n->in(prec);
+            if (oop_store != NULL) {
+              assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+            }
+          }
         }
       }
+#endif
+
+      // A few node types require changing a required edge to a precedence edge
+      // before allocation.
       if( n->is_Mach() && n->req() > TypeFunc::Parms &&
           (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
            n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
--- a/src/share/vm/opto/library_call.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/library_call.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -166,6 +166,10 @@
   // This returns Type::AnyPtr, RawPtr, or OopPtr.
   int classify_unsafe_addr(Node* &base, Node* &offset);
   Node* make_unsafe_address(Node* base, Node* offset);
+  // Helper for inline_unsafe_access.
+  // Generates the guards that check whether the result of
+  // Unsafe.getObject should be recorded in an SATB log buffer.
+  void insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val);
   bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
   bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
   bool inline_unsafe_allocate();
@@ -240,6 +244,8 @@
   bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
   bool inline_bitCount(vmIntrinsics::ID id);
   bool inline_reverseBytes(vmIntrinsics::ID id);
+
+  bool inline_reference_get();
 };
 
 
@@ -336,6 +342,14 @@
     if (!UsePopCountInstruction)  return NULL;
     break;
 
+  case vmIntrinsics::_Reference_get:
+    // It is only when G1 is enabled that we absolutely
+    // need to use the intrinsic version of Reference.get()
+    // so that the value in the referent field, if necessary,
+    // can be registered by the pre-barrier code.
+    if (!UseG1GC) return NULL;
+    break;
+
  default:
     assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
     assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
@@ -387,6 +401,7 @@
     tty->print_cr("Intrinsic %s", str);
   }
 #endif
+
   if (kit.try_to_inline()) {
     if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
       CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
@@ -402,11 +417,19 @@
   }
 
   if (PrintIntrinsics) {
-    tty->print("Did not inline intrinsic %s%s at bci:%d in",
+    if (jvms->has_method()) {
+      // Not a root compile.
+      tty->print("Did not inline intrinsic %s%s at bci:%d in",
+                 vmIntrinsics::name_at(intrinsic_id()),
+                 (is_virtual() ? " (virtual)" : ""), kit.bci());
+      kit.caller()->print_short_name(tty);
+      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
+    } else {
+      // Root compile
+      tty->print("Did not generate intrinsic %s%s at bci:%d in",
                vmIntrinsics::name_at(intrinsic_id()),
                (is_virtual() ? " (virtual)" : ""), kit.bci());
-    kit.caller()->print_short_name(tty);
-    tty->print_cr(" (%d bytes)", kit.caller()->code_size());
+    }
   }
   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
   return NULL;
@@ -418,6 +441,14 @@
   const bool is_native_ptr  = true;
   const bool is_static      = true;
 
+  if (!jvms()->has_method()) {
+    // Root JVMState has a null method.
+    assert(map()->memory()->Opcode() == Op_Parm, "");
+    // Insert the memory aliasing node
+    set_all_memory(reset_memory());
+  }
+  assert(merged_memory(), "");
+
   switch (intrinsic_id()) {
   case vmIntrinsics::_hashCode:
     return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
@@ -658,6 +689,9 @@
   case vmIntrinsics::_getCallerClass:
     return inline_native_Reflection_getCallerClass();
 
+  case vmIntrinsics::_Reference_get:
+    return inline_reference_get();
+
   default:
     // If you get here, it may be that someone has added a new intrinsic
     // to the list in vmSymbols.hpp without implementing it here.
@@ -833,12 +867,10 @@
   Node* str1_offset  = make_load(no_ctrl, str1_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
   Node* str1_start   = array_element_address(str1_value, str1_offset, T_CHAR);
 
-  // Pin loads from String::equals() argument since it could be NULL.
-  Node* str2_ctrl = (opcode == Op_StrEquals) ? control() : no_ctrl;
   Node* str2_valuea  = basic_plus_adr(str2, str2, value_offset);
-  Node* str2_value   = make_load(str2_ctrl, str2_valuea, value_type, T_OBJECT, string_type->add_offset(value_offset));
+  Node* str2_value   = make_load(no_ctrl, str2_valuea, value_type, T_OBJECT, string_type->add_offset(value_offset));
   Node* str2_offseta = basic_plus_adr(str2, str2, offset_offset);
-  Node* str2_offset  = make_load(str2_ctrl, str2_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
+  Node* str2_offset  = make_load(no_ctrl, str2_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
   Node* str2_start   = array_element_address(str2_value, str2_offset, T_CHAR);
 
   Node* result = NULL;
@@ -978,14 +1010,15 @@
   if (!stopped()) {
     // Properly cast the argument to String
     argument = _gvn.transform(new (C, 2) CheckCastPPNode(control(), argument, string_type));
+    // This path is taken only when argument's type is String:NotNull.
+    argument = cast_not_null(argument, false);
 
     // Get counts for string and argument
     Node* receiver_cnta = basic_plus_adr(receiver, receiver, count_offset);
     receiver_cnt  = make_load(no_ctrl, receiver_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
 
-    // Pin load from argument string since it could be NULL.
     Node* argument_cnta = basic_plus_adr(argument, argument, count_offset);
-    argument_cnt  = make_load(control(), argument_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
+    argument_cnt  = make_load(no_ctrl, argument_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
 
     // Check for receiver count != argument count
     Node* cmp = _gvn.transform( new(C, 3) CmpINode(receiver_cnt, argument_cnt) );
@@ -2076,6 +2109,106 @@
 
 const static BasicType T_ADDRESS_HOLDER = T_LONG;
 
+// Helper that guards and inserts a G1 pre-barrier.
+void LibraryCallKit::insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val) {
+  assert(UseG1GC, "should not call this otherwise");
+
+  // We could be accessing the referent field of a reference object. If so, when G1
+  // is enabled, we need to log the value in the referent field in an SATB buffer.
+  // This routine performs some compile time filters and generates suitable
+  // runtime filters that guard the pre-barrier code.
+
+  // Some compile time checks.
+
+  // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
+  const TypeX* otype = offset->find_intptr_t_type();
+  if (otype != NULL && otype->is_con() &&
+      otype->get_con() != java_lang_ref_Reference::referent_offset) {
+    // Constant offset, but not referent_offset, so just return
+    return;
+  }
+
+  // We only need to generate the runtime guards for instances.
+  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
+  if (btype != NULL) {
+    if (btype->isa_aryptr()) {
+      // Array type so nothing to do
+      return;
+    }
+
+    const TypeInstPtr* itype = btype->isa_instptr();
+    if (itype != NULL) {
+      // Can the klass of base_oop be statically determined
+      // to be _not_ a sub-class of Reference?
+      ciKlass* klass = itype->klass();
+      if (!klass->is_subtype_of(env()->Reference_klass()) &&
+          !env()->Reference_klass()->is_subtype_of(klass)) {
+        return;
+      }
+    }
+  }
+
+  // The compile time filters did not reject base_oop/offset so
+  // we need to generate the following runtime filters
+  //
+  // if (offset == java_lang_ref_Reference::_reference_offset) {
+  //   if (base != null) {
+  //     if (klass(base)->reference_type() != REF_NONE)) {
+  //       pre_barrier(_, pre_val, ...);
+  //     }
+  //   }
+  // }
+
+  float likely  = PROB_LIKELY(0.999);
+  float unlikely  = PROB_UNLIKELY(0.999);
+
+  IdealKit ideal(this);
+#define __ ideal.
+
+  const int reference_type_offset = instanceKlass::reference_type_offset_in_bytes() +
+                                        sizeof(oopDesc);
+
+  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
+
+  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
+    __ if_then(base_oop, BoolTest::ne, null(), likely); {
+
+      // Update graphKit memory and control from IdealKit.
+      sync_kit(ideal);
+
+      Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
+      Node* is_instof = gen_instanceof(base_oop, ref_klass_con);
+
+      // Update IdealKit memory and control from graphKit.
+      __ sync_kit(this);
+
+      Node* one = __ ConI(1);
+
+      __ if_then(is_instof, BoolTest::eq, one, unlikely); {
+
+        // Update graphKit from IdealKit.
+        sync_kit(ideal);
+
+        // Use the pre-barrier to record the value in the referent field
+        pre_barrier(false /* do_load */,
+                    __ ctrl(),
+                    NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
+                    pre_val /* pre_val */,
+                    T_OBJECT);
+
+        // Update IdealKit from graphKit.
+        __ sync_kit(this);
+
+      } __ end_if(); // _ref_type != ref_none
+    } __ end_if(); // base  != NULL
+  } __ end_if(); // offset == referent_offset
+
+  // Final sync IdealKit and GraphKit.
+  final_sync(ideal);
+#undef __
+}
+
+
 // Interpret Unsafe.fieldOffset cookies correctly:
 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
 
@@ -2152,9 +2285,11 @@
   // Build address expression.  See the code in inline_unsafe_prefetch.
   Node *adr;
   Node *heap_base_oop = top();
+  Node* offset = top();
+
   if (!is_native_ptr) {
     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
-    Node* offset = pop_pair();
+    offset = pop_pair();
     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
     Node* base   = pop();
     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
@@ -2195,6 +2330,13 @@
   // or Compile::must_alias will throw a diagnostic assert.)
   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
 
+  // If we are reading the value of the referent field of a Reference
+  // object (either by using Unsafe directly or through reflection)
+  // then, if G1 is enabled, we need to record the referent in an
+  // SATB log buffer using the pre-barrier mechanism.
+  bool need_read_barrier = UseG1GC && !is_native_ptr && !is_store &&
+                           offset != top() && heap_base_oop != top();
+
   if (!is_store && type == T_OBJECT) {
     // Attempt to infer a sharper value type from the offset and base type.
     ciKlass* sharpened_klass = NULL;
@@ -2278,8 +2420,13 @@
     case T_SHORT:
     case T_INT:
     case T_FLOAT:
+      push(p);
+      break;
     case T_OBJECT:
-      push( p );
+      if (need_read_barrier) {
+        insert_g1_pre_barrier(heap_base_oop, offset, p);
+      }
+      push(p);
       break;
     case T_ADDRESS:
       // Cast to an int type.
@@ -2534,7 +2681,10 @@
   case T_OBJECT:
      // reference stores need a store barrier.
     // (They don't if CAS fails, but it isn't worth checking.)
-    pre_barrier(control(), base, adr, alias_idx, newval, value_type->make_oopptr(), T_OBJECT);
+    pre_barrier(true /* do_load */,
+                control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+                NULL /* pre_val */,
+                T_OBJECT);
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@@ -3376,8 +3526,7 @@
       Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
 
-      const bool raw_mem_only = true;
-      newcopy = new_array(klass_node, length, 0, raw_mem_only);
+      newcopy = new_array(klass_node, length, 0);
 
       // Generate a direct call to the right arraycopy function(s).
       // We know the copy is disjoint but we might not know if the
@@ -4174,8 +4323,6 @@
 
     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
     int raw_adr_idx = Compile::AliasIdxRaw;
-    const bool raw_mem_only = true;
-
 
     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
     if (array_ctl != NULL) {
@@ -4184,8 +4331,7 @@
       set_control(array_ctl);
       Node* obj_length = load_array_length(obj);
       Node* obj_size  = NULL;
-      Node* alloc_obj = new_array(obj_klass, obj_length, 0,
-                                  raw_mem_only, &obj_size);
+      Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);
 
       if (!use_ReduceInitialCardMarks()) {
         // If it is an oop array, it requires very special treatment,
@@ -4257,7 +4403,7 @@
       // It's an instance, and it passed the slow-path tests.
       PreserveJVMState pjvms(this);
       Node* obj_size  = NULL;
-      Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
+      Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size);
 
       copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
 
@@ -5079,15 +5225,16 @@
 
   // Look at the alignment of the starting offsets.
   int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
-  const intptr_t BIG_NEG = -128;
-  assert(BIG_NEG + 2*abase < 0, "neg enough");
-
-  intptr_t src_off  = abase + ((intptr_t) find_int_con(src_offset, -1)  << scale);
-  intptr_t dest_off = abase + ((intptr_t) find_int_con(dest_offset, -1) << scale);
-  if (src_off < 0 || dest_off < 0)
+
+  intptr_t src_off_con  = (intptr_t) find_int_con(src_offset, -1);
+  intptr_t dest_off_con = (intptr_t) find_int_con(dest_offset, -1);
+  if (src_off_con < 0 || dest_off_con < 0)
     // At present, we can only understand constants.
     return false;
 
+  intptr_t src_off  = abase + (src_off_con  << scale);
+  intptr_t dest_off = abase + (dest_off_con << scale);
+
   if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
     // Non-aligned; too bad.
     // One more chance:  Pick off an initial 32-bit word.
@@ -5235,3 +5382,44 @@
                     copyfunc_addr, copyfunc_name, adr_type,
                     src_start, dest_start, copy_length XTOP);
 }
+
+//----------------------------inline_reference_get----------------------------
+
+bool LibraryCallKit::inline_reference_get() {
+  const int nargs = 1; // self
+
+  guarantee(java_lang_ref_Reference::referent_offset > 0,
+            "should have already been set");
+
+  int referent_offset = java_lang_ref_Reference::referent_offset;
+
+  // Restore the stack and pop off the argument
+  _sp += nargs;
+  Node *reference_obj = pop();
+
+  // Null check on self without removing any arguments.
+  _sp += nargs;
+  reference_obj = do_null_check(reference_obj, T_OBJECT);
+  _sp -= nargs;
+
+  if (stopped()) return true;
+
+  Node *adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
+
+  ciInstanceKlass* klass = env()->Object_klass();
+  const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
+
+  Node* no_ctrl = NULL;
+  Node *result = make_load(no_ctrl, adr, object_type, T_OBJECT);
+
+  // Use the pre-barrier to record the value in the referent field
+  pre_barrier(false /* do_load */,
+              control(),
+              NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
+              result /* pre_val */,
+              T_OBJECT);
+
+  push(result);
+  return true;
+}
+
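
The guards built by insert_g1_pre_barrier() and inline_reference_get() above reduce, at runtime, to the checks sketched below. This is an illustrative C++ rendering; is_reference_instance() and satb_enqueue() are placeholder stand-ins for the generated instanceof test and the do_load=false pre-barrier, and referent_offset is passed in rather than hard-coded.

    // Placeholders for the VM facilities the sketch refers to.
    static bool is_reference_instance(void* base) { (void)base; return true; }  // stand-in
    static void satb_enqueue(void* pre_val)       { (void)pre_val; }            // stand-in

    // Decide whether a value read via Unsafe/reflection must be logged for SATB marking.
    static void maybe_log_referent(void* base, long offset, long referent_offset,
                                   void* loaded_value) {
      if (offset != referent_offset) return;     // cheap filter: not the referent field
      if (base == nullptr) return;               // no receiver
      if (!is_reference_instance(base)) return;  // klass(base)->reference_type() == REF_NONE
      if (loaded_value != nullptr)
        satb_enqueue(loaded_value);              // record the referent in an SATB buffer
    }
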
--- a/src/share/vm/opto/loopPredicate.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/loopPredicate.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -341,7 +341,7 @@
   // Cut predicate from old place.
   Node* old = predicate_proj;
   igvn->_worklist.push(old);
-  for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
+  for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin;) {
     Node* use = old->last_out(i);  // for each use...
     igvn->hash_delete(use);
     igvn->_worklist.push(use);
@@ -384,24 +384,25 @@
 
 //--------------------------clone_loop_predicates-----------------------
 // Interface from IGVN
-Node* PhaseIterGVN::clone_loop_predicates(Node* old_entry, Node* new_entry) {
-  return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, false, NULL, this);
+Node* PhaseIterGVN::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
+  return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, false, clone_limit_check, NULL, this);
 }
-Node* PhaseIterGVN::move_loop_predicates(Node* old_entry, Node* new_entry) {
-  return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, true, NULL, this);
+Node* PhaseIterGVN::move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
+  return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, true, clone_limit_check, NULL, this);
 }
 
 // Interface from PhaseIdealLoop
-Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry) {
-  return clone_loop_predicates(old_entry, new_entry, false, this, &this->_igvn);
+Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
+  return clone_loop_predicates(old_entry, new_entry, false, clone_limit_check, this, &this->_igvn);
 }
-Node* PhaseIdealLoop::move_loop_predicates(Node* old_entry, Node* new_entry) {
-  return clone_loop_predicates(old_entry, new_entry, true, this, &this->_igvn);
+Node* PhaseIdealLoop::move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
+  return clone_loop_predicates(old_entry, new_entry, true, clone_limit_check, this, &this->_igvn);
 }
 
 // Clone loop predicates to cloned loops (peeled, unswitched, split_if).
 Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry,
                                                 bool move_predicates,
+                                                bool clone_limit_check,
                                                 PhaseIdealLoop* loop_phase,
                                                 PhaseIterGVN* igvn) {
 #ifdef ASSERT
@@ -413,10 +414,16 @@
 #endif
   // Search original predicates
   Node* entry = old_entry;
+  ProjNode* limit_check_proj = NULL;
+  if (LoopLimitCheck) {
+    limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
+    if (limit_check_proj != NULL) {
+      entry = entry->in(0)->in(0);
+    }
+  }
   if (UseLoopPredicate) {
     ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
     if (predicate_proj != NULL) { // right pattern that can be used by loop predication
-      assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
       if (move_predicates) {
         new_entry =  move_predicate(predicate_proj, new_entry,
                                     Deoptimization::Reason_predicate,
@@ -435,11 +442,37 @@
       }
     }
   }
+  if (limit_check_proj != NULL && clone_limit_check) {
+    // Clone loop limit check last to insert it before loop.
+    // Don't clone a limit check which was already finalized
+    // for this counted loop (only one limit check is needed).
+    if (move_predicates) {
+      new_entry =  move_predicate(limit_check_proj, new_entry,
+                                  Deoptimization::Reason_loop_limit_check,
+                                  loop_phase, igvn);
+      assert(new_entry == limit_check_proj, "old limit check fall through projection");
+    } else {
+      new_entry = clone_predicate(limit_check_proj, new_entry,
+                                  Deoptimization::Reason_loop_limit_check,
+                                  loop_phase, igvn);
+      assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone limit check");
+    }
+    if (TraceLoopLimitCheck) {
+      tty->print_cr("Loop Limit Check %s: ", move_predicates ? "moved" : "cloned");
+      debug_only( new_entry->in(0)->dump(); )
+    }
+  }
   return new_entry;
 }
 
 //--------------------------eliminate_loop_predicates-----------------------
 void PhaseIdealLoop::eliminate_loop_predicates(Node* entry) {
+  if (LoopLimitCheck) {
+    Node* predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
+    if (predicate != NULL) {
+      entry = entry->in(0)->in(0);
+    }
+  }
   if (UseLoopPredicate) {
     ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
     if (predicate_proj != NULL) { // right pattern that can be used by loop predication
@@ -456,10 +489,15 @@
 // Skip related predicates.
 Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) {
   Node* predicate = NULL;
+  if (LoopLimitCheck) {
+    predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
+    if (predicate != NULL) {
+      entry = entry->in(0)->in(0);
+    }
+  }
   if (UseLoopPredicate) {
     predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
     if (predicate != NULL) { // right pattern that can be used by loop predication
-      assert(entry->is_Proj() && entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
       IfNode* iff = entry->in(0)->as_If();
       ProjNode* uncommon_proj = iff->proj_out(1 - entry->as_Proj()->_con);
       Node* rgn = uncommon_proj->unique_ctrl_out();
@@ -491,10 +529,15 @@
 // Find a predicate
 Node* PhaseIdealLoop::find_predicate(Node* entry) {
   Node* predicate = NULL;
+  if (LoopLimitCheck) {
+    predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
+    if (predicate != NULL) { // right pattern that can be used by loop predication
+      return entry;
+    }
+  }
   if (UseLoopPredicate) {
     predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
     if (predicate != NULL) { // right pattern that can be used by loop predication
-      assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
       return entry;
     }
   }
@@ -658,7 +701,7 @@
   Node* range = cmp->in(2);
   if (range->Opcode() != Op_LoadRange) {
     const TypeInt* tint = phase->_igvn.type(range)->isa_int();
-    if (!OptimizeFill || tint == NULL || tint->empty() || tint->_lo < 0) {
+    if (tint == NULL || tint->empty() || tint->_lo < 0) {
       // Allow predication on positive values that aren't LoadRanges.
       // This allows optimization of loops where the length of the
       // array is a known value and doesn't need to be loaded back
@@ -696,36 +739,49 @@
 //   max(scale*i + offset) = scale*(limit-stride) + offset
 // (2) stride*scale < 0
 //   max(scale*i + offset) = scale*init + offset
-BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl,
+BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl,
                                        int scale, Node* offset,
                                        Node* init, Node* limit, Node* stride,
                                        Node* range, bool upper) {
-  DEBUG_ONLY(ttyLocker ttyl);
-  if (TraceLoopPredicate) tty->print("rc_predicate ");
+  stringStream* predString = NULL;
+  if (TraceLoopPredicate) {
+    predString = new stringStream();
+    predString->print("rc_predicate ");
+  }
 
   Node* max_idx_expr  = init;
   int stride_con = stride->get_int();
   if ((stride_con > 0) == (scale > 0) == upper) {
-    max_idx_expr = new (C, 3) SubINode(limit, stride);
-    register_new_node(max_idx_expr, ctrl);
-    if (TraceLoopPredicate) tty->print("(limit - stride) ");
+    if (LoopLimitCheck) {
+      // With LoopLimitCheck limit is not exact.
+      // Calculate exact limit here.
+      // Note, counted loop's test is '<' or '>'.
+      limit = exact_limit(loop);
+      max_idx_expr = new (C, 3) SubINode(limit, stride);
+      register_new_node(max_idx_expr, ctrl);
+      if (TraceLoopPredicate) predString->print("(limit - stride) ");
+    } else {
+      max_idx_expr = new (C, 3) SubINode(limit, stride);
+      register_new_node(max_idx_expr, ctrl);
+      if (TraceLoopPredicate) predString->print("(limit - stride) ");
+    }
   } else {
-    if (TraceLoopPredicate) tty->print("init ");
+    if (TraceLoopPredicate) predString->print("init ");
   }
 
   if (scale != 1) {
     ConNode* con_scale = _igvn.intcon(scale);
     max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale);
     register_new_node(max_idx_expr, ctrl);
-    if (TraceLoopPredicate) tty->print("* %d ", scale);
+    if (TraceLoopPredicate) predString->print("* %d ", scale);
   }
 
   if (offset && (!offset->is_Con() || offset->get_int() != 0)){
     max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset);
     register_new_node(max_idx_expr, ctrl);
     if (TraceLoopPredicate)
-      if (offset->is_Con()) tty->print("+ %d ", offset->get_int());
-      else tty->print("+ offset ");
+      if (offset->is_Con()) predString->print("+ %d ", offset->get_int());
+      else predString->print("+ offset ");
   }
 
   CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range);
@@ -733,7 +789,10 @@
   BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt);
   register_new_node(bol, ctrl);
 
-  if (TraceLoopPredicate) tty->print_cr("<u range");
+  if (TraceLoopPredicate) {
+    predString->print_cr("<u range");
+    tty->print(predString->as_string());
+  }
   return bol;
 }
 
@@ -746,29 +805,36 @@
     // Could be a simple region when irreducible loops are present.
     return false;
   }
+  LoopNode* head = loop->_head->as_Loop();
 
-  if (loop->_head->unique_ctrl_out()->Opcode() == Op_NeverBranch) {
+  if (head->unique_ctrl_out()->Opcode() == Op_NeverBranch) {
     // do nothing for infinite loops
     return false;
   }
 
   CountedLoopNode *cl = NULL;
-  if (loop->_head->is_CountedLoop()) {
-    cl = loop->_head->as_CountedLoop();
+  if (head->is_CountedLoop()) {
+    cl = head->as_CountedLoop();
     // do nothing for iteration-splitted loops
     if (!cl->is_normal_loop()) return false;
   }
 
-  LoopNode *lpn  = loop->_head->as_Loop();
-  Node* entry = lpn->in(LoopNode::EntryControl);
+  Node* entry = head->in(LoopNode::EntryControl);
+  ProjNode *predicate_proj = NULL;
+  // Loop limit check predicate should be near the loop.
+  if (LoopLimitCheck) {
+    predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
+    if (predicate_proj != NULL)
+      entry = predicate_proj->in(0)->in(0);
+  }
 
-  ProjNode *predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
+  predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
   if (!predicate_proj) {
 #ifndef PRODUCT
     if (TraceLoopPredicate) {
       tty->print("missing predicate:");
       loop->dump_head();
-      lpn->dump(1);
+      head->dump(1);
     }
 #endif
     return false;
@@ -782,7 +848,6 @@
   // Create list of if-projs such that a newer proj dominates all older
   // projs in the list, and they all dominate loop->tail()
   Node_List if_proj_list(area);
-  LoopNode *head  = loop->_head->as_Loop();
   Node *current_proj = loop->tail(); //start from tail
   while (current_proj != head) {
     if (loop == get_loop(current_proj) && // still in the loop ?
@@ -856,8 +921,8 @@
       const Node*    cmp    = bol->in(1)->as_Cmp();
       Node*          idx    = cmp->in(1);
       assert(!invar.is_invariant(idx), "index is variant");
-      assert(cmp->in(2)->Opcode() == Op_LoadRange || OptimizeFill, "must be");
       Node* rng = cmp->in(2);
+      assert(rng->Opcode() == Op_LoadRange || _igvn.type(rng)->is_int()->_lo >= 0, "must be");
       assert(invar.is_invariant(rng), "range must be invariant");
       int scale    = 1;
       Node* offset = zero;
@@ -886,14 +951,14 @@
       }
 
       // Test the lower bound
-      Node*  lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false);
+      Node*  lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false);
       IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
       _igvn.hash_delete(lower_bound_iff);
       lower_bound_iff->set_req(1, lower_bound_bol);
       if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
 
       // Test the upper bound
-      Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, true);
+      Node* upper_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, true);
       IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
       _igvn.hash_delete(upper_bound_iff);
       upper_bound_iff->set_req(1, upper_bound_bol);
@@ -957,4 +1022,3 @@
 
   return hoisted;
 }
-
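
A worked example of the range-check predicate that rc_predicate() now builds (the upper-bound case, upper == true), written as plain C++ for a counted loop for (int i = init; i < limit; i += stride) that accesses a[scale*i + offset]. All names are placeholders; with LoopLimitCheck on, limit stands for the exact limit returned by exact_limit(loop).

    // Upper-bound check hoisted in front of the loop: it tests the extreme index the
    // loop can produce (the companion upper == false call covers the other extreme).
    // The extreme comes from the last iteration (i = limit - stride) when stride and
    // scale have the same sign, and from the first iteration (i = init) otherwise.
    static bool upper_bound_predicate(int init, int limit, int stride,
                                      int scale, int offset, unsigned array_length) {
      int max_idx = ((stride > 0) == (scale > 0))
                        ? scale * (limit - stride) + offset
                        : scale * init + offset;
      // One unsigned compare ("max_idx <u range" in the IR) covers both bounds: a
      // negative index wraps to a huge unsigned value, so the test implies
      // 0 <= max_idx && max_idx < array_length.
      return (unsigned)max_idx < array_length;
    }
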
--- a/src/share/vm/opto/loopTransform.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/loopTransform.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -510,7 +510,7 @@
   //         the pre-loop with only 1 user (the new peeled iteration), but the
   //         peeled-loop backedge has 2 users.
   Node* new_exit_value = old_new[head->in(LoopNode::LoopBackControl)->_idx];
-  new_exit_value = move_loop_predicates(entry, new_exit_value);
+  new_exit_value = move_loop_predicates(entry, new_exit_value, !counted_loop);
   _igvn.hash_delete(head);
   head->set_req(LoopNode::EntryControl, new_exit_value);
   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
@@ -593,6 +593,12 @@
     return false;
   }
 
+  // Fully unroll a loop with few iterations, regardless of the
+  // following conditions, since later loop optimizations will
+  // split such a loop anyway (pre-main-post).
+  if (trip_count <= 3)
+    return true;
+
   // Take into account that after unroll conjoined heads and tails will fold,
   // otherwise policy_unroll() may allow more unrolling than max unrolling.
   uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
@@ -605,15 +611,6 @@
     return false;
   }
 
-  // Currently we don't have policy to optimize one iteration loops.
-  // Maximally unrolling transformation is used for that:
-  // it is peeled and the original loop become non reachable (dead).
-  // Also fully unroll a loop with few iterations regardless next
-  // conditions since following loop optimizations will split
-  // such loop anyway (pre-main-post).
-  if (trip_count <= 3)
-    return true;
-
   // Do not unroll a loop with String intrinsics code.
   // String intrinsics are large and have loops.
   for (uint k = 0; k < _body.size(); k++) {
@@ -632,6 +629,8 @@
 }
 
 
+#define MAX_UNROLL 16 // maximum number of unrolls for main loop
+
 //------------------------------policy_unroll----------------------------------
 // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
 // the loop is a CountedLoop and the body is small enough.
@@ -643,13 +642,15 @@
   if (!cl->is_valid_counted_loop())
     return false; // Malformed counted loop
 
-  // protect against over-unrolling
-  if (cl->trip_count() <= 1) return false;
-
-  // Check for stride being a small enough constant
-  if (abs(cl->stride_con()) > (1<<3)) return false;
+  // Protect against over-unrolling.
+  // After split at least one iteration will be executed in pre-loop.
+  if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
 
   int future_unroll_ct = cl->unrolled_count() * 2;
+  if (future_unroll_ct > MAX_UNROLL) return false;
+
+  // Check for initial stride being a small enough constant
+  if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
 
   // Don't unroll if the next round of unrolling would push us
   // over the expected trip count of the loop.  One is subtracted
@@ -675,6 +676,7 @@
 
   Node *init_n = cl->init_trip();
   Node *limit_n = cl->limit();
+  int stride_con = cl->stride_con();
   // Non-constant bounds.
   // Protect against over-unrolling when init or/and limit are not constant
   // (so that trip_count's init value is maxint) but iv range is known.
@@ -684,7 +686,7 @@
     if (phi != NULL) {
       assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
       const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
-      int next_stride = cl->stride_con() * 2; // stride after this unroll
+      int next_stride = stride_con * 2; // stride after this unroll
       if (next_stride > 0) {
         if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
             iv_type->_lo + next_stride >  iv_type->_hi) {
@@ -699,15 +701,19 @@
     }
   }
 
+  // After unroll limit will be adjusted: new_limit = limit-stride.
+  // Bailout if adjustment overflow.
+  const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
+  if (stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi) ||
+      stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo))
+    return false;  // overflow
+
   // Adjust body_size to determine if we unroll or not
   uint body_size = _body.size();
-  // Key test to unroll CaffeineMark's Logic test
-  int xors_in_loop = 0;
   // Also count ModL, DivL and MulL which expand mightly
   for (uint k = 0; k < _body.size(); k++) {
     Node* n = _body.at(k);
     switch (n->Opcode()) {
-      case Op_XorI: xors_in_loop++; break; // CaffeineMark's Logic test
       case Op_ModL: body_size += 30; break;
       case Op_DivL: body_size += 30; break;
       case Op_MulL: body_size += 10; break;
@@ -724,8 +730,7 @@
 
   // Check for being too big
   if (body_size > (uint)LoopUnrollLimit) {
-    if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
-    // Normal case: loop too big
+    // Normal case: loop too big
     return false;
   }
 
@@ -747,28 +752,31 @@
 // Return TRUE or FALSE if the loop should be range-check-eliminated.
 // Actually we do iteration-splitting, a more powerful form of RCE.
 bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
-  if( !RangeCheckElimination ) return false;
+  if (!RangeCheckElimination) return false;
 
   CountedLoopNode *cl = _head->as_CountedLoop();
   // If we unrolled with no intention of doing RCE and we later
   // changed our minds, we got no pre-loop.  Either we need to
   // make a new pre-loop, or we gotta disallow RCE.
-  if( cl->is_main_no_pre_loop() ) return false; // Disallowed for now.
+  if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
   Node *trip_counter = cl->phi();
 
   // Check loop body for tests of trip-counter plus loop-invariant vs
   // loop-invariant.
-  for( uint i = 0; i < _body.size(); i++ ) {
+  for (uint i = 0; i < _body.size(); i++) {
     Node *iff = _body[i];
-    if( iff->Opcode() == Op_If ) { // Test?
+    if (iff->Opcode() == Op_If) { // Test?
 
       // Comparing trip+off vs limit
       Node *bol = iff->in(1);
-      if( bol->req() != 2 ) continue; // dead constant test
+      if (bol->req() != 2) continue; // dead constant test
       if (!bol->is_Bool()) {
         assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
         continue;
       }
+      if (bol->as_Bool()->_test._test == BoolTest::ne)
+        continue; // not RC
+
       Node *cmp = bol->in(1);
 
       Node *rc_exp = cmp->in(1);
@@ -1062,6 +1070,9 @@
   // direction:
   // positive stride use <
   // negative stride use >
+  //
+  // The not-equal test is kept for the post loop to handle the case
+  // when init > limit with stride > 0 (and the reverse).
 
   if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {
 
@@ -1090,6 +1101,9 @@
   main_head->set_main_loop();
   if( peel_only ) main_head->set_main_no_pre_loop();
 
+  // Subtract a trip count for the pre-loop.
+  main_head->set_trip_count(main_head->trip_count() - 1);
+
   // It's difficult to be precise about the trip-counts
   // for the pre/post loops.  They are usually very short,
   // so guess that 4 trips is a reasonable value.
@@ -1123,9 +1137,9 @@
     loop->dump_head();
   } else if (TraceLoopOpts) {
     if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
-      tty->print("Unroll  %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
+      tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
     } else {
-      tty->print("Unroll  %d     ", loop_head->unrolled_count()*2);
+      tty->print("Unroll %d     ", loop_head->unrolled_count()*2);
     }
     loop->dump_head();
   }
@@ -1141,7 +1155,8 @@
   Node *stride = loop_head->stride();
 
   Node *opaq = NULL;
-  if( adjust_min_trip ) {       // If not maximally unrolling, need adjustment
+  if (adjust_min_trip) {       // If not maximally unrolling, need adjustment
+    // Search for zero-trip guard.
     assert( loop_head->is_main_loop(), "" );
     assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
     Node *iff = ctrl->in(0);
@@ -1151,63 +1166,224 @@
     Node *cmp = bol->in(1);
     assert( cmp->Opcode() == Op_CmpI, "" );
     opaq = cmp->in(2);
-    // Occasionally it's possible for a pre-loop Opaque1 node to be
+    // Occasionally it's possible for a zero-trip guard Opaque1 node to be
     // optimized away and then another round of loop opts attempted.
     // We can not optimize this particular loop in that case.
-    if( opaq->Opcode() != Op_Opaque1 )
-      return;                   // Cannot find pre-loop!  Bail out!
+    if (opaq->Opcode() != Op_Opaque1)
+      return; // Cannot find zero-trip guard!  Bail out!
+    // Zero-trip test uses an 'opaque' node which is not shared.
+    assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
   }
 
   C->set_major_progress();
 
-  // Adjust max trip count. The trip count is intentionally rounded
-  // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
-  // the main, unrolled, part of the loop will never execute as it is protected
-  // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
-  // and later determined that part of the unrolled loop was dead.
-  loop_head->set_trip_count(loop_head->trip_count() / 2);
+  Node* new_limit = NULL;
+  if (UnrollLimitCheck) {
+    int stride_con = stride->get_int();
+    int stride_p = (stride_con > 0) ? stride_con : -stride_con;
+    uint old_trip_count = loop_head->trip_count();
+    // Verify that unroll policy result is still valid.
+    assert(old_trip_count > 1 &&
+           (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");
 
-  // Double the count of original iterations in the unrolled loop body.
-  loop_head->double_unrolled_count();
+    // Adjust loop limit to keep valid iterations number after unroll.
+    // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
+    // which may overflow.
+    if (!adjust_min_trip) {
+      assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
+             "odd trip count for maximally unroll");
+      // Don't need to adjust limit for maximally unroll since trip count is even.
+    } else if (loop_head->has_exact_trip_count() && init->is_Con()) {
+      // Loop's limit is constant. Loop's init could be constant when pre-loop
+      // become peeled iteration.
+      long init_con = init->get_int();
+      // We can keep old loop limit if iterations count stays the same:
+      //   old_trip_count == new_trip_count * 2
+      // Note: since old_trip_count >= 2 then new_trip_count >= 1
+      // so we also don't need to adjust zero trip test.
+      long limit_con  = limit->get_int();
+      // (stride_con*2) not overflow since stride_con <= 8.
+      int new_stride_con = stride_con * 2;
+      int stride_m    = new_stride_con - (stride_con > 0 ? 1 : -1);
+      long trip_count = (limit_con - init_con + stride_m)/new_stride_con;
+      // New trip count should satisfy next conditions.
+      assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
+      uint new_trip_count = (uint)trip_count;
+      adjust_min_trip = (old_trip_count != new_trip_count*2);
+    }
+
+    if (adjust_min_trip) {
+      // Step 2: Adjust the trip limit if it is called for.
+      // The adjustment amount is -stride. Need to make sure that if the
+      // adjustment underflows or overflows, the main loop is skipped.
+      Node* cmp = loop_end->cmp_node();
+      assert(cmp->in(2) == limit, "sanity");
+      assert(opaq != NULL && opaq->in(1) == limit, "sanity");
+
+      // Verify that policy_unroll result is still valid.
+      const TypeInt* limit_type = _igvn.type(limit)->is_int();
+      assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
+             stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");
 
-  // -----------
-  // Step 2: Cut back the trip counter for an unroll amount of 2.
-  // Loop will normally trip (limit - init)/stride_con.  Since it's a
-  // CountedLoop this is exact (stride divides limit-init exactly).
-  // We are going to double the loop body, so we want to knock off any
-  // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
-  Node *span = new (C, 3) SubINode( limit, init );
-  register_new_node( span, ctrl );
-  Node *trip = new (C, 3) DivINode( 0, span, stride );
-  register_new_node( trip, ctrl );
-  Node *mtwo = _igvn.intcon(-2);
-  set_ctrl(mtwo, C->root());
-  Node *rond = new (C, 3) AndINode( trip, mtwo );
-  register_new_node( rond, ctrl );
-  Node *spn2 = new (C, 3) MulINode( rond, stride );
-  register_new_node( spn2, ctrl );
-  Node *lim2 = new (C, 3) AddINode( spn2, init );
-  register_new_node( lim2, ctrl );
+      if (limit->is_Con()) {
+        // The check in policy_unroll and the assert above guarantee
+        // no underflow if limit is constant.
+        new_limit = _igvn.intcon(limit->get_int() - stride_con);
+        set_ctrl(new_limit, C->root());
+      } else {
+        // Limit is not constant.
+        if (loop_head->unrolled_count() == 1) { // only for first unroll
+          // Separate limit by Opaque node in case it is an incremented
+          // variable from previous loop to avoid using pre-incremented
+          // value which could increase register pressure.
+          // Otherwise reorg_offsets() optimization will create a separate
+          // Opaque node for each use of trip-counter and as result
+          // zero trip guard limit will be different from loop limit.
+          assert(has_ctrl(opaq), "should have it");
+          Node* opaq_ctrl = get_ctrl(opaq);
+          limit = new (C, 2) Opaque2Node( C, limit );
+          register_new_node( limit, opaq_ctrl );
+        }
+        if (stride_con > 0 && ((limit_type->_lo - stride_con) < limit_type->_lo) ||
+            stride_con < 0 && ((limit_type->_hi - stride_con) > limit_type->_hi)) {
+          // No underflow.
+          new_limit = new (C, 3) SubINode(limit, stride);
+        } else {
+          // (limit - stride) may underflow.
+          // Clamp the adjustment value with MININT or MAXINT:
+          //
+          //   new_limit = limit-stride
+          //   if (stride > 0)
+          //     new_limit = (limit < new_limit) ? MININT : new_limit;
+          //   else
+          //     new_limit = (limit > new_limit) ? MAXINT : new_limit;
+          //
+          BoolTest::mask bt = loop_end->test_trip();
+          assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
+          Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
+          set_ctrl(adj_max, C->root());
+          Node* old_limit = NULL;
+          Node* adj_limit = NULL;
+          Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
+          if (loop_head->unrolled_count() > 1 &&
+              limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
+              limit->in(CMoveNode::IfTrue) == adj_max &&
+              bol->as_Bool()->_test._test == bt &&
+              bol->in(1)->Opcode() == Op_CmpI &&
+              bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
+            // Loop was unrolled before.
+            // Optimize the limit to avoid nested CMove:
+            // use original limit as old limit.
+            old_limit = bol->in(1)->in(1);
+            // Adjust previous adjusted limit.
+            adj_limit = limit->in(CMoveNode::IfFalse);
+            adj_limit = new (C, 3) SubINode(adj_limit, stride);
+          } else {
+            old_limit = limit;
+            adj_limit = new (C, 3) SubINode(limit, stride);
+          }
+          assert(old_limit != NULL && adj_limit != NULL, "");
+          register_new_node( adj_limit, ctrl ); // adjust amount
+          Node* adj_cmp = new (C, 3) CmpINode(old_limit, adj_limit);
+          register_new_node( adj_cmp, ctrl );
+          Node* adj_bool = new (C, 2) BoolNode(adj_cmp, bt);
+          register_new_node( adj_bool, ctrl );
+          new_limit = new (C, 4) CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
+        }
+        register_new_node(new_limit, ctrl);
+      }
+      assert(new_limit != NULL, "");
+      // Replace in loop test.
+      assert(loop_end->in(1)->in(1) == cmp, "sanity");
+      if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
+        // Don't need to create new test since only one user.
+        _igvn.hash_delete(cmp);
+        cmp->set_req(2, new_limit);
+      } else {
+        // Create new test since it is shared.
+        Node* ctrl2 = loop_end->in(0);
+        Node* cmp2  = cmp->clone();
+        cmp2->set_req(2, new_limit);
+        register_new_node(cmp2, ctrl2);
+        Node* bol2 = loop_end->in(1)->clone();
+        bol2->set_req(1, cmp2);
+        register_new_node(bol2, ctrl2);
+        _igvn.hash_delete(loop_end);
+        loop_end->set_req(1, bol2);
+      }
+      // Step 3: Find the min-trip test guaranteed before a 'main' loop.
+      // Make it a 1-trip test (means at least 2 trips).
 
-  // Hammer in the new limit
-  Node *ctrl2 = loop_end->in(0);
-  Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), lim2 );
-  register_new_node( cmp2, ctrl2 );
-  Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() );
-  register_new_node( bol2, ctrl2 );
-  _igvn.hash_delete(loop_end);
-  loop_end->set_req(CountedLoopEndNode::TestValue, bol2);
+      // Guard test uses an 'opaque' node which is not shared.  Hence I
+      // can edit its inputs directly.  Hammer in the new limit for the
+      // minimum-trip guard.
+      assert(opaq->outcnt() == 1, "");
+      _igvn.hash_delete(opaq);
+      opaq->set_req(1, new_limit);
+    }
+
+    // Adjust max trip count. The trip count is intentionally rounded
+    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
+    // the main, unrolled, part of the loop will never execute as it is protected
+    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
+    // and later determined that part of the unrolled loop was dead.
+    loop_head->set_trip_count(old_trip_count / 2);
+
+    // Double the count of original iterations in the unrolled loop body.
+    loop_head->double_unrolled_count();
+
+  } else { // LoopLimitCheck
+
+    // Adjust max trip count. The trip count is intentionally rounded
+    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
+    // the main, unrolled, part of the loop will never execute as it is protected
+    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
+    // and later determined that part of the unrolled loop was dead.
+    loop_head->set_trip_count(loop_head->trip_count() / 2);
+
+    // Double the count of original iterations in the unrolled loop body.
+    loop_head->double_unrolled_count();
 
-  // Step 3: Find the min-trip test guaranteed before a 'main' loop.
-  // Make it a 1-trip test (means at least 2 trips).
-  if( adjust_min_trip ) {
-    // Guard test uses an 'opaque' node which is not shared.  Hence I
-    // can edit it's inputs directly.  Hammer in the new limit for the
-    // minimum-trip guard.
-    assert( opaq->outcnt() == 1, "" );
-    _igvn.hash_delete(opaq);
-    opaq->set_req(1, lim2);
-  }
+    // -----------
+    // Step 2: Cut back the trip counter for an unroll amount of 2.
+    // Loop will normally trip (limit - init)/stride_con.  Since it's a
+    // CountedLoop this is exact (stride divides limit-init exactly).
+    // We are going to double the loop body, so we want to knock off any
+    // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
+    Node *span = new (C, 3) SubINode( limit, init );
+    register_new_node( span, ctrl );
+    Node *trip = new (C, 3) DivINode( 0, span, stride );
+    register_new_node( trip, ctrl );
+    Node *mtwo = _igvn.intcon(-2);
+    set_ctrl(mtwo, C->root());
+    Node *rond = new (C, 3) AndINode( trip, mtwo );
+    register_new_node( rond, ctrl );
+    Node *spn2 = new (C, 3) MulINode( rond, stride );
+    register_new_node( spn2, ctrl );
+    new_limit = new (C, 3) AddINode( spn2, init );
+    register_new_node( new_limit, ctrl );
+
+    // Hammer in the new limit
+    Node *ctrl2 = loop_end->in(0);
+    Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), new_limit );
+    register_new_node( cmp2, ctrl2 );
+    Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() );
+    register_new_node( bol2, ctrl2 );
+    _igvn.hash_delete(loop_end);
+    loop_end->set_req(CountedLoopEndNode::TestValue, bol2);
+
+    // Step 3: Find the min-trip test guaranteed before a 'main' loop.
+    // Make it a 1-trip test (means at least 2 trips).
+    if( adjust_min_trip ) {
+      assert( new_limit != NULL, "" );
+      // Guard test uses an 'opaque' node which is not shared.  Hence I
+      // can edit its inputs directly.  Hammer in the new limit for the
+      // minimum-trip guard.
+      assert( opaq->outcnt() == 1, "" );
+      _igvn.hash_delete(opaq);
+      opaq->set_req(1, new_limit);
+    }
+  } // LoopLimitCheck
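
For readers following the arithmetic: the Step 2 code above back-computes the new limit as ((limit - init) / stride & -2) * stride + init. A minimal standalone sketch of that computation (plain C++, not part of this changeset; the function name is invented for illustration):

#include <cassert>

// Sketch of the unroll-by-2 limit adjustment built from Sub/Div/And/Mul/Add nodes above.
static int unroll2_limit(int init, int limit, int stride) {
  int span = limit - init;   // SubINode
  int trip = span / stride;  // DivINode: exact, since stride divides limit-init
  int rond = trip & -2;      // AndINode with -2: knock off a possible odd iteration
  int spn2 = rond * stride;  // MulINode
  return spn2 + init;        // AddINode: the limit hammered into the exit test
}

int main() {
  assert(unroll2_limit(0, 15, 3) == 12);  // 5 trips become 4; limit 15 -> 12
  assert(unroll2_limit(2, 20, 3) == 20);  // an even trip count (6) is unchanged
  return 0;
}
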
 
   // ---------
   // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
@@ -1263,6 +1439,7 @@
 
 void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
   CountedLoopNode *cl = loop->_head->as_CountedLoop();
+  assert(cl->has_exact_trip_count(), "trip count is not exact");
   assert(cl->trip_count() > 0, "");
 #ifndef PRODUCT
   if (TraceLoopOpts) {
@@ -1279,6 +1456,7 @@
   // Now its tripping an even number of times remaining.  Double loop body.
   // Do not adjust pre-guards; they are not needed and do not exist.
   if (cl->trip_count() > 0) {
+    assert((cl->trip_count() & 1) == 0, "missed peeling");
     do_unroll(loop, old_new, false);
   }
 }
@@ -1291,23 +1469,31 @@
   return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
 }
 
+//------------------------------adjust_limit-----------------------------------
+// Helper function for add_constraint().
+Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
+  // Compute "I :: (limit-offset)/scale"
+  Node *con = new (C, 3) SubINode(rc_limit, offset);
+  register_new_node(con, pre_ctrl);
+  Node *X = new (C, 3) DivINode(0, con, scale);
+  register_new_node(X, pre_ctrl);
+
+  // Adjust loop limit
+  loop_limit = (stride_con > 0)
+               ? (Node*)(new (C, 3) MinINode(loop_limit, X))
+               : (Node*)(new (C, 3) MaxINode(loop_limit, X));
+  register_new_node(loop_limit, pre_ctrl);
+  return loop_limit;
+}
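
A scalar model of what adjust_limit() assembles out of Sub/Div/Min/Max nodes may be easier to read than the node-building code; this is an illustrative sketch only (plain C++, not part of this changeset; names are invented). Note that passing -stride_con, as the callers below do for the pre-loop, flips the MIN into a MAX and vice versa, which is how the pre-loop gets the negated condition.

#include <algorithm>
#include <cassert>

// X = (rc_limit - offset) / scale, then clamp the loop limit with MIN for a
// positive stride and MAX for a negative one.
static int adjust_limit_model(int stride_con, int scale, int offset,
                              int rc_limit, int loop_limit) {
  int X = (rc_limit - offset) / scale;                 // SubINode + DivINode
  return (stride_con > 0) ? std::min(loop_limit, X)    // MinINode
                          : std::max(loop_limit, X);   // MaxINode
}

int main() {
  // Range check a[2*i+1] < 100 in an up-counting loop: the main-loop limit is
  // clamped to (100 - 1) / 2 = 49; any leftover iterations run in the post-loop
  // under the full range check.
  assert(adjust_limit_model(+1, 2, 1, 100, 1000) == 49);
  return 0;
}
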
+
 //------------------------------add_constraint---------------------------------
-// Constrain the main loop iterations so the condition:
-//    scale_con * I + offset  <  limit
+// Constrain the main loop iterations so the conditions:
+//    low_limit <= scale_con * I + offset  <  upper_limit
 // always holds true.  That is, either increase the number of iterations in
 // the pre-loop or the post-loop until the condition holds true in the main
 // loop.  Stride, scale, offset and limit are all loop invariant.  Further,
 // stride and scale are constants (offset and limit often are).
-void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
-
-  // Compute "I :: (limit-offset)/scale_con"
-  Node *con = new (C, 3) SubINode( limit, offset );
-  register_new_node( con, pre_ctrl );
-  Node *scale = _igvn.intcon(scale_con);
-  set_ctrl(scale, C->root());
-  Node *X = new (C, 3) DivINode( 0, con, scale );
-  register_new_node( X, pre_ctrl );
-
+void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
   // For positive stride, the pre-loop limit always uses a MAX function
   // and the main loop a MIN function.  For negative stride these are
   // reversed.
@@ -1316,48 +1502,120 @@
   // pre-loop must check for underflow and the post-loop for overflow.
   // Negative stride*scale reverses this; pre-loop checks for overflow and
   // post-loop for underflow.
-  if( stride_con*scale_con > 0 ) {
-    // Compute I < (limit-offset)/scale_con
-    // Adjust main-loop last iteration to be MIN/MAX(main_loop,X)
-    *main_limit = (stride_con > 0)
-      ? (Node*)(new (C, 3) MinINode( *main_limit, X ))
-      : (Node*)(new (C, 3) MaxINode( *main_limit, X ));
-    register_new_node( *main_limit, pre_ctrl );
+
+  Node *scale = _igvn.intcon(scale_con);
+  set_ctrl(scale, C->root());
+
+  if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow
+    // The overflow limit: scale*I+offset < upper_limit
+    // For main-loop compute
+    //   ( if (scale > 0) /* and stride > 0 */
+    //       I < (upper_limit-offset)/scale
+    //     else /* scale < 0 and stride < 0 */
+    //       I > (upper_limit-offset)/scale
+    //   )
+    //
+    // (upper_limit-offset) may overflow or underflow.
+    // But it is fine since the main loop will either have
+    // fewer iterations or will be skipped in that case.
+    *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
 
-  } else {
-    // Compute (limit-offset)/scale_con + SGN(-scale_con) <= I
-    // Add the negation of the main-loop constraint to the pre-loop.
-    // See footnote [++] below for a derivation of the limit expression.
-    Node *incr = _igvn.intcon(scale_con > 0 ? -1 : 1);
-    set_ctrl(incr, C->root());
-    Node *adj = new (C, 3) AddINode( X, incr );
-    register_new_node( adj, pre_ctrl );
-    *pre_limit = (scale_con > 0)
-      ? (Node*)new (C, 3) MinINode( *pre_limit, adj )
-      : (Node*)new (C, 3) MaxINode( *pre_limit, adj );
-    register_new_node( *pre_limit, pre_ctrl );
+    // The underflow limit: low_limit <= scale*I+offset.
+    // For pre-loop compute
+    //   NOT(scale*I+offset >= low_limit)
+    //   scale*I+offset < low_limit
+    //   ( if (scale > 0) /* and stride > 0 */
+    //       I < (low_limit-offset)/scale
+    //     else /* scale < 0 and stride < 0 */
+    //       I > (low_limit-offset)/scale
+    //   )
+
+    if (low_limit->get_int() == -max_jint) {
+      if (!RangeLimitCheck) return;
+      // We need this guard when scale*pre_limit+offset >= limit
+      // due to underflow. So we need to execute the pre-loop until
+      // scale*I+offset >= min_int. But (min_int-offset) will
+      // underflow when offset > 0 and X will be > original_limit
+      // when stride > 0. To avoid it we replace positive offset with 0.
+      //
+      // Also (min_int+1 == -max_int) is used instead of min_int here
+      // to avoid a problem with scale == -1 (min_int/(-1) == min_int).
+      Node* shift = _igvn.intcon(31);
+      set_ctrl(shift, C->root());
+      Node* sign = new (C, 3) RShiftINode(offset, shift);
+      register_new_node(sign, pre_ctrl);
+      offset = new (C, 3) AndINode(offset, sign);
+      register_new_node(offset, pre_ctrl);
+    } else {
+      assert(low_limit->get_int() == 0, "wrong low limit for range check");
+      // The only problem we have here is when offset == min_int
+      // since (0-min_int) == min_int. It may be fine for stride > 0
+      // but for stride < 0 X will be < original_limit. To avoid it
+      // max(pre_limit, original_limit) is used in do_range_check().
+    }
+    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
+    *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
 
-//   [++] Here's the algebra that justifies the pre-loop limit expression:
-//
-//   NOT( scale_con * I + offset  <  limit )
-//      ==
-//   scale_con * I + offset  >=  limit
-//      ==
-//   SGN(scale_con) * I  >=  (limit-offset)/|scale_con|
-//      ==
-//   (limit-offset)/|scale_con|   <=  I * SGN(scale_con)
-//      ==
-//   (limit-offset)/|scale_con|-1  <  I * SGN(scale_con)
-//      ==
-//   ( if (scale_con > 0) /*common case*/
-//       (limit-offset)/scale_con - 1  <  I
-//     else
-//       (limit-offset)/scale_con + 1  >  I
-//    )
-//   ( if (scale_con > 0) /*common case*/
-//       (limit-offset)/scale_con + SGN(-scale_con)  <  I
-//     else
-//       (limit-offset)/scale_con + SGN(-scale_con)  >  I
+  } else { // stride_con*scale_con < 0
+    // For negative stride*scale pre-loop checks for overflow and
+    // post-loop for underflow.
+    //
+    // The overflow limit: scale*I+offset < upper_limit
+    // For pre-loop compute
+    //   NOT(scale*I+offset < upper_limit)
+    //   scale*I+offset >= upper_limit
+    //   scale*I+offset+1 > upper_limit
+    //   ( if (scale < 0) /* and stride > 0 */
+    //       I < (upper_limit-(offset+1))/scale
+    //     else /* scale > 0 and stride < 0 */
+    //       I > (upper_limit-(offset+1))/scale
+    //   )
+    //
+    // (upper_limit-offset-1) may underflow or overflow.
+    // To avoid it, min(pre_limit, original_limit) is used
+    // in do_range_check() for stride > 0 and max() for stride < 0.
+    Node *one  = _igvn.intcon(1);
+    set_ctrl(one, C->root());
+
+    Node *plus_one = new (C, 3) AddINode(offset, one);
+    register_new_node( plus_one, pre_ctrl );
+    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
+    *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
+
+    if (low_limit->get_int() == -max_jint) {
+      if (!RangeLimitCheck) return;
+      // We need this guard when scale*main_limit+offset >= limit
+      // due to underflow. So we need to execute the main loop while
+      // scale*I+offset+1 > min_int. But (min_int-offset-1) will
+      // underflow when (offset+1) > 0 and X will be < main_limit
+      // when scale < 0 (and stride > 0). To avoid it we replace
+      // positive (offset+1) with 0.
+      //
+      // Also (min_int+1 == -max_int) is used instead of min_int here
+      // to avoid a problem with scale == -1 (min_int/(-1) == min_int).
+      Node* shift = _igvn.intcon(31);
+      set_ctrl(shift, C->root());
+      Node* sign = new (C, 3) RShiftINode(plus_one, shift);
+      register_new_node(sign, pre_ctrl);
+      plus_one = new (C, 3) AndINode(plus_one, sign);
+      register_new_node(plus_one, pre_ctrl);
+    } else {
+      assert(low_limit->get_int() == 0, "wrong low limit for range check");
+      // The only problem we have here is when offset == max_int
+      // since (max_int+1) == min_int and (0-min_int) == min_int.
+      // But it is fine since the main loop will either have
+      // fewer iterations or will be skipped in that case.
+    }
+    // The underflow limit: low_limit <= scale*I+offset.
+    // For main-loop compute
+    //   scale*I+offset+1 > low_limit
+    //   ( if (scale < 0) /* and stride > 0 */
+    //       I < (low_limit-(offset+1))/scale
+    //     else /* scale > 0 and stride < 0 */
+    //       I > (low_limit-(offset+1))/scale
+    //   )
+
+    *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl);
   }
 }
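
The branch-free clamp built twice above from RShiftINode/AndINode (sign = offset >> 31; offset &= sign) replaces a positive offset with 0 and leaves a negative offset untouched. A small standalone check of that identity (plain C++, not part of this changeset; assumes the arithmetic right shift that RShiftINode provides):

#include <cassert>
#include <cstdint>

static int32_t clamp_positive_to_zero(int32_t offset) {
  int32_t sign = offset >> 31;  // 0 for offset >= 0, all ones (-1) for offset < 0
  return offset & sign;         // 0 if offset was positive, offset otherwise
}

int main() {
  assert(clamp_positive_to_zero(42) == 0);   // positive offsets become 0
  assert(clamp_positive_to_zero(0)  == 0);
  assert(clamp_positive_to_zero(-7) == -7);  // negative offsets pass through
  return 0;
}
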
 
@@ -1488,7 +1746,7 @@
   Node *cmpzm = bolzm->in(1);
   assert(cmpzm->is_Cmp(), "");
   Node *opqzm = cmpzm->in(2);
-  // Can not optimize a loop if pre-loop Opaque1 node is optimized
+  // Can not optimize a loop if zero-trip Opaque1 node is optimized
   // away and then another round of loop opts attempted.
   if (opqzm->Opcode() != Op_Opaque1)
     return;
@@ -1523,8 +1781,11 @@
   int stride_con = cl->stride_con();
   Node *zero = _igvn.intcon(0);
   Node *one  = _igvn.intcon(1);
+  // Use symmetrical int range [-max_jint,max_jint]
+  Node *mini = _igvn.intcon(-max_jint);
   set_ctrl(zero, C->root());
   set_ctrl(one,  C->root());
+  set_ctrl(mini, C->root());
 
   // Range checks that do not dominate the loop backedge (ie.
   // conditionally executed) can lengthen the pre loop limit beyond
@@ -1599,7 +1860,12 @@
       if( offset_c == ctrl ) {
         continue; // Don't rce this check but continue looking for other candidates.
       }
-
+#ifdef ASSERT
+      if (TraceRangeLimitCheck) {
+        tty->print_cr("RC bool node%s", flip ? " flipped:" : ":");
+        bol->dump(2);
+      }
+#endif
       // At this point we have the expression as:
       //   scale_con * trip_counter + offset :: limit
       // where scale_con, offset and limit are loop invariant.  Trip_counter
@@ -1610,17 +1876,11 @@
       // Adjust pre and main loop limits to guard the correct iteration set
       if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
         if( b_test._test == BoolTest::lt ) { // Range checks always use lt
-          // The overflow limit: scale*I+offset < limit
-          add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
-          // The underflow limit: 0 <= scale*I+offset.
-          // Some math yields: -scale*I-(offset+1) < 0
-          Node *plus_one = new (C, 3) AddINode( offset, one );
-          register_new_node( plus_one, pre_ctrl );
-          Node *neg_offset = new (C, 3) SubINode( zero, plus_one );
-          register_new_node( neg_offset, pre_ctrl );
-          add_constraint( stride_con, -scale_con, neg_offset, zero, pre_ctrl, &pre_limit, &main_limit );
+          // The underflow and overflow limits: 0 <= scale*I+offset < limit
+          add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
           if (!conditional_rc) {
-            conditional_rc = !loop->dominates_backedge(iff);
+            // (0-offset)/scale could be outside of loop iterations range.
+            conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
           }
         } else {
 #ifndef PRODUCT
@@ -1631,21 +1891,33 @@
         }
       } else {                  // Otherwise work on normal compares
         switch( b_test._test ) {
-        case BoolTest::ge:      // Convert X >= Y to -X <= -Y
+        case BoolTest::gt:
+          // Fall into GE case
+        case BoolTest::ge:
+          // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit
           scale_con = -scale_con;
           offset = new (C, 3) SubINode( zero, offset );
           register_new_node( offset, pre_ctrl );
           limit  = new (C, 3) SubINode( zero, limit  );
           register_new_node( limit, pre_ctrl );
           // Fall into LE case
-        case BoolTest::le:      // Convert X <= Y to X < Y+1
-          limit = new (C, 3) AddINode( limit, one );
-          register_new_node( limit, pre_ctrl );
+        case BoolTest::le:
+          if (b_test._test != BoolTest::gt) {
+            // Convert X <= Y to X < Y+1
+            limit = new (C, 3) AddINode( limit, one );
+            register_new_node( limit, pre_ctrl );
+          }
           // Fall into LT case
         case BoolTest::lt:
-          add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
+          // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
+          // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here
+          // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT.
+          add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
           if (!conditional_rc) {
-            conditional_rc = !loop->dominates_backedge(iff);
+            // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range.
+            // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could
+            // still be outside of loop range.
+            conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
           }
           break;
         default:
@@ -1696,7 +1968,8 @@
 
   // Note:: we are making the main loop limit no longer precise;
   // need to round up based on stride.
-  if( stride_con != 1 && stride_con != -1 ) { // Cutout for common case
+  cl->set_nonexact_trip_count();
+  if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case
     // "Standard" round-up logic:  ([main_limit-init+(y-1)]/y)*y+init
     // Hopefully, compiler will optimize for powers of 2.
     Node *ctrl = get_ctrl(main_limit);
@@ -1876,7 +2149,19 @@
   // iteration.  Then the CountedLoopEnd will collapse (backedge never
   // taken) and all loop-invariant uses of the exit values will be correct.
   Node *phi = cl->phi();
-  Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() );
+  Node *exact_limit = phase->exact_limit(this);
+  if (exact_limit != cl->limit()) {
+    // We also need to replace the original limit to collapse loop exit.
+    Node* cmp = cl->loopexit()->cmp_node();
+    assert(cl->limit() == cmp->in(2), "sanity");
+    phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
+    phase->_igvn.hash_delete(cmp);
+    cmp->set_req(2, exact_limit);
+    phase->_igvn._worklist.push(cmp);        // put cmp on worklist
+  }
+  // Note: the final value after increment should not overflow since
+  // counted loop has limit check predicate.
+  Node *final = new (phase->C, 3) SubINode( exact_limit, cl->stride() );
   phase->register_new_node(final,cl->in(LoopNode::EntryControl));
   phase->_igvn.replace_node(phi,final);
   phase->C->set_major_progress();
--- a/src/share/vm/opto/loopUnswitch.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/loopUnswitch.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -130,6 +130,11 @@
   Node* uniqc = proj_true->unique_ctrl_out();
   Node* entry = head->in(LoopNode::EntryControl);
   Node* predicate = find_predicate(entry);
+  if (predicate != NULL && LoopLimitCheck && UseLoopPredicate) {
+    // We may have two predicates, find first.
+    entry = find_predicate(entry->in(0)->in(0));
+    if (entry != NULL) predicate = entry;
+  }
   if (predicate != NULL) predicate = predicate->in(0);
   assert(proj_true->is_IfTrue() &&
          (predicate == NULL && uniqc == head ||
@@ -217,6 +222,7 @@
 ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
                                                       Node_List &old_new) {
   LoopNode* head  = loop->_head->as_Loop();
+  bool counted_loop = head->is_CountedLoop();
   Node*     entry = head->in(LoopNode::EntryControl);
   _igvn.hash_delete(entry);
   _igvn._worklist.push(entry);
@@ -242,14 +248,14 @@
   assert(old_new[head->_idx]->is_Loop(), "" );
 
   // Fast (true) control
-  Node* iffast_pred = clone_loop_predicates(entry, iffast);
+  Node* iffast_pred = clone_loop_predicates(entry, iffast, !counted_loop);
   _igvn.hash_delete(head);
   head->set_req(LoopNode::EntryControl, iffast_pred);
   set_idom(head, iffast_pred, dom_depth(head));
   _igvn._worklist.push(head);
 
   // Slow (false) control
-  Node* ifslow_pred = move_loop_predicates(entry, ifslow);
+  Node* ifslow_pred = move_loop_predicates(entry, ifslow, !counted_loop);
   LoopNode* slow_head = old_new[head->_idx]->as_Loop();
   _igvn.hash_delete(slow_head);
   slow_head->set_req(LoopNode::EntryControl, ifslow_pred);
--- a/src/share/vm/opto/loopnode.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/loopnode.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -206,7 +206,7 @@
   // Get backedge compare
   Node *cmp = test->in(1);
   int cmp_op = cmp->Opcode();
-  if( cmp_op != Op_CmpI )
+  if (cmp_op != Op_CmpI)
     return false;                // Avoid pointer & float compares
 
   // Find the trip-counter increment & limit.  Limit must be loop invariant.
@@ -259,7 +259,8 @@
   }
   // Stride must be constant
   int stride_con = stride->get_int();
-  assert(stride_con != 0, "missed some peephole opt");
+  if (stride_con == 0)
+    return false; // missed some peephole opt
 
   if (!xphi->is_Phi())
     return false; // Too much math on the trip counter
@@ -319,7 +320,7 @@
       // Count down loop rolls through MAXINT
       (bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0 ||
       // Count up loop rolls through MININT
-      (bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0 ) {
+      (bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0) {
     return false; // Bail out
   }
 
@@ -341,12 +342,142 @@
   //
   assert(x->Opcode() == Op_Loop, "regular loops only");
   C->print_method("Before CountedLoop", 3);
+
+  Node *hook = new (C, 6) Node(6);
+
+  if (LoopLimitCheck) {
+
+  // ===================================================
+  // Generate loop limit check to avoid integer overflow
+  // in cases like next (cyclic loops):
+  //
+  // for (i=0; i <= max_jint; i++) {}
+  // for (i=0; i <  max_jint; i+=2) {}
+  //
+  //
+  // Limit check predicate depends on the loop test:
+  //
+  // for(;i != limit; i++)       --> limit <= (max_jint)
+  // for(;i <  limit; i+=stride) --> limit <= (max_jint - stride + 1)
+  // for(;i <= limit; i+=stride) --> limit <= (max_jint - stride    )
+  //
+
+  // Check if limit is excluded to do more precise int overflow check.
+  bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge);
+  int stride_m  = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1));
+
+  // If compare points directly to the phi we need to adjust
+  // the compare so that it points to the incr. The limit has
+  // to be adjusted to keep the trip count the same and the
+  // adjusted limit should be checked for int overflow.
+  if (phi_incr != NULL) {
+    stride_m  += stride_con;
+  }
+
+  if (limit->is_Con()) {
+    int limit_con = limit->get_int();
+    if ((stride_con > 0 && limit_con > (max_jint - stride_m)) ||
+        (stride_con < 0 && limit_con < (min_jint - stride_m))) {
+      // Bailout: it could be integer overflow.
+      return false;
+    }
+  } else if ((stride_con > 0 && limit_t->_hi <= (max_jint - stride_m)) ||
+             (stride_con < 0 && limit_t->_lo >= (min_jint - stride_m))) {
+      // Limit's type may satisfy the condition, for example,
+      // when it is an array length.
+  } else {
+    // Generate loop's limit check.
+    // Loop limit check predicate should be near the loop.
+    ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check);
+    if (!limit_check_proj) {
+      // The limit check predicate is not generated if this method trapped here before.
+#ifdef ASSERT
+      if (TraceLoopLimitCheck) {
+        tty->print("missing loop limit check:");
+        loop->dump_head();
+        x->dump(1);
+      }
+#endif
+      return false;
+    }
+
+    IfNode* check_iff = limit_check_proj->in(0)->as_If();
+    Node* cmp_limit;
+    Node* bol;
+
+    if (stride_con > 0) {
+      cmp_limit = new (C, 3) CmpINode(limit, _igvn.intcon(max_jint - stride_m));
+      bol = new (C, 2) BoolNode(cmp_limit, BoolTest::le);
+    } else {
+      cmp_limit = new (C, 3) CmpINode(limit, _igvn.intcon(min_jint - stride_m));
+      bol = new (C, 2) BoolNode(cmp_limit, BoolTest::ge);
+    }
+    cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit);
+    bol = _igvn.register_new_node_with_optimizer(bol);
+    set_subtree_ctrl(bol);
+
+    // Replace condition in original predicate but preserve Opaque node
+    // so that previous predicates could be found.
+    assert(check_iff->in(1)->Opcode() == Op_Conv2B &&
+           check_iff->in(1)->in(1)->Opcode() == Op_Opaque1, "");
+    Node* opq = check_iff->in(1)->in(1);
+    _igvn.hash_delete(opq);
+    opq->set_req(1, bol);
+    // Update ctrl.
+    set_ctrl(opq, check_iff->in(0));
+    set_ctrl(check_iff->in(1), check_iff->in(0));
+
 #ifndef PRODUCT
-  if (TraceLoopOpts) {
-    tty->print("Counted      ");
-    loop->dump_head();
+    // report that the loop limit check has actually been generated
+    // for this loop
+    if (TraceLoopLimitCheck) {
+      tty->print_cr("Counted Loop Limit Check generated:");
+      debug_only( bol->dump(2); )
+    }
+#endif
+  }
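
The thresholds encoded by this predicate (limit <= max_jint - stride + 1 for an exclusive test, limit <= max_jint - stride for an inclusive one) can be checked with a scalar model; the sketch below ignores the phi_incr adjustment and is not part of this changeset (the function name is invented):

#include <cassert>
#include <climits>

// incl_limit is true for <=/>= loops, false for </>/!= loops.
static bool limit_is_safe(int stride_con, int limit, bool incl_limit) {
  int stride_m = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1));
  return (stride_con > 0) ? (limit <= INT_MAX - stride_m)
                          : (limit >= INT_MIN - stride_m);
}

int main() {
  // for (i = 0; i < limit; i += 2): safe iff limit <= max_jint - 1.
  assert( limit_is_safe(2, INT_MAX - 1, false));
  assert(!limit_is_safe(2, INT_MAX,     false));
  // for (i = 0; i <= limit; i += 2): safe iff limit <= max_jint - 2.
  assert( limit_is_safe(2, INT_MAX - 2, true));
  assert(!limit_is_safe(2, INT_MAX - 1, true));
  return 0;
}
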
+
+  if (phi_incr != NULL) {
+    // If compare points directly to the phi we need to adjust
+    // the compare so that it points to the incr. The limit has
+    // to be adjusted to keep the trip count the same and we
+    // should avoid int overflow.
+    //
+    //   i = init; do {} while(i++ < limit);
+    // is converted to
+    //   i = init; do {} while(++i < limit+1);
+    //
+    limit = gvn->transform(new (C, 3) AddINode(limit, stride));
   }
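
The do/while rewrite described in the comment above preserves the trip count; a quick standalone check (plain C++, not part of this changeset; helper names are invented):

#include <cassert>

static int trips_post_increment(int init, int limit) {
  int i = init, n = 0;
  do { n++; } while (i++ < limit);        // compare against the phi
  return n;
}

static int trips_pre_increment(int init, int limit) {
  int i = init, n = 0;
  do { n++; } while (++i < limit + 1);    // compare against the incr, limit+1
  return n;
}

int main() {
  for (int init = 0; init < 5; init++)
    for (int limit = init; limit < init + 5; limit++)
      assert(trips_post_increment(init, limit) == trips_pre_increment(init, limit));
  return 0;
}
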
-#endif
+
+  // Now we need to canonicalize loop condition.
+  if (bt == BoolTest::ne) {
+    assert(stride_con == 1 || stride_con == -1, "simple increment only");
+    // 'ne' can be replaced with 'lt' only when init < limit.
+    if (stride_con > 0 && init_t->_hi < limit_t->_lo)
+      bt = BoolTest::lt;
+    // 'ne' can be replaced with 'gt' only when init > limit.
+    if (stride_con < 0 && init_t->_lo > limit_t->_hi)
+      bt = BoolTest::gt;
+  }
+
+  if (incl_limit) {
+    // The limit check guarantees that 'limit <= (max_jint - stride)' so
+    // we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
+    //
+    Node* one = (stride_con > 0) ? gvn->intcon( 1) : gvn->intcon(-1);
+    limit = gvn->transform(new (C, 3) AddINode(limit, one));
+    if (bt == BoolTest::le)
+      bt = BoolTest::lt;
+    else if (bt == BoolTest::ge)
+      bt = BoolTest::gt;
+    else
+      ShouldNotReachHere();
+  }
+  set_subtree_ctrl( limit );
+
+  } else { // LoopLimitCheck
+
   // If compare points to incr, we are ok.  Otherwise the compare
   // can directly point to the phi; in this case adjust the compare so that
   // it points to the incr by adjusting the limit.
@@ -359,7 +490,6 @@
   Node *one_m = gvn->intcon(-1);
 
   Node *trip_count = NULL;
-  Node *hook = new (C, 6) Node(6);
   switch( bt ) {
   case BoolTest::eq:
     ShouldNotReachHere();
@@ -441,6 +571,8 @@
   limit = gvn->transform(new (C, 3) AddINode(span,init_trip));
   set_subtree_ctrl( limit );
 
+  } // LoopLimitCheck
+
   // Check for SafePoint on backedge and remove
   Node *sfpt = x->in(LoopNode::LoopBackControl);
   if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
@@ -531,7 +663,7 @@
 
   // Check for immediately preceding SafePoint and remove
   Node *sfpt2 = le->in(0);
-  if( sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2))
+  if (sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2))
     lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
 
   // Free up intermediate goo
@@ -541,12 +673,56 @@
   assert(l->is_valid_counted_loop(), "counted loop shape is messed up");
   assert(l == loop->_head && l->phi() == phi && l->loopexit() == lex, "" );
 #endif
+#ifndef PRODUCT
+  if (TraceLoopOpts) {
+    tty->print("Counted      ");
+    loop->dump_head();
+  }
+#endif
 
   C->print_method("After CountedLoop", 3);
 
   return true;
 }
 
+//----------------------exact_limit-------------------------------------------
+Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
+  assert(loop->_head->is_CountedLoop(), "");
+  CountedLoopNode *cl = loop->_head->as_CountedLoop();
+
+  if (!LoopLimitCheck || ABS(cl->stride_con()) == 1 ||
+      cl->limit()->Opcode() == Op_LoopLimit) {
+    // Old code has an exact limit (it could be incorrect in case of int overflow).
+    // The loop limit is exact with stride == 1, and the loop may already have an exact limit.
+    return cl->limit();
+  }
+  Node *limit = NULL;
+#ifdef ASSERT
+  BoolTest::mask bt = cl->loopexit()->test_trip();
+  assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
+#endif
+  if (cl->has_exact_trip_count()) {
+    // Simple case: loop has constant boundaries.
+    // Use longs to avoid integer overflow.
+    int stride_con = cl->stride_con();
+    long  init_con = cl->init_trip()->get_int();
+    long limit_con = cl->limit()->get_int();
+    julong trip_cnt = cl->trip_count();
+    long final_con = init_con + trip_cnt*stride_con;
+    final_con -= stride_con;
+    int final_int = (int)final_con;
+    // The final value should be in integer range since the loop
+    // is counted and the limit was checked for overflow.
+    assert(final_con == (long)final_int, "final value should be integer");
+    limit = _igvn.intcon(final_int);
+  } else {
+    // Create new LoopLimit node to get exact limit (final iv value).
+    limit = new (C, 4) LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride());
+    register_new_node(limit, cl->in(LoopNode::EntryControl));
+  }
+  assert(limit != NULL, "sanity");
+  return limit;
+}
 
 //------------------------------Ideal------------------------------------------
 // Return a node which is more "ideal" than the current node.
@@ -572,14 +748,12 @@
 #ifndef PRODUCT
 void CountedLoopNode::dump_spec(outputStream *st) const {
   LoopNode::dump_spec(st);
-  if( stride_is_con() ) {
+  if (stride_is_con()) {
     st->print("stride: %d ",stride_con());
-  } else {
-    st->print("stride: not constant ");
   }
-  if( is_pre_loop () ) st->print("pre of N%d" , _main_idx );
-  if( is_main_loop() ) st->print("main of N%d", _idx );
-  if( is_post_loop() ) st->print("post of N%d", _main_idx );
+  if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
+  if (is_main_loop()) st->print("main of N%d", _idx);
+  if (is_post_loop()) st->print("post of N%d", _main_idx);
 }
 #endif
 
@@ -588,7 +762,130 @@
   return stride()->bottom_type()->is_int()->get_con();
 }
 
-
+//=============================================================================
+//------------------------------Value-----------------------------------------
+const Type *LoopLimitNode::Value( PhaseTransform *phase ) const {
+  const Type* init_t   = phase->type(in(Init));
+  const Type* limit_t  = phase->type(in(Limit));
+  const Type* stride_t = phase->type(in(Stride));
+  // Either input is TOP ==> the result is TOP
+  if (init_t   == Type::TOP) return Type::TOP;
+  if (limit_t  == Type::TOP) return Type::TOP;
+  if (stride_t == Type::TOP) return Type::TOP;
+
+  int stride_con = stride_t->is_int()->get_con();
+  if (stride_con == 1)
+    return NULL;  // Identity
+
+  if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
+    // Use longs to avoid integer overflow.
+    long init_con   =  init_t->is_int()->get_con();
+    long limit_con  = limit_t->is_int()->get_con();
+    int  stride_m   = stride_con - (stride_con > 0 ? 1 : -1);
+    long trip_count = (limit_con - init_con + stride_m)/stride_con;
+    long final_con  = init_con + stride_con*trip_count;
+    int final_int = (int)final_con;
+    // The final value should be in integer range since the loop
+    // is counted and the limit was checked for overflow.
+    assert(final_con == (long)final_int, "final value should be integer");
+    return TypeInt::make(final_int);
+  }
+
+  return bottom_type(); // TypeInt::INT
+}
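
A scalar model of the constant folding above, using 64-bit arithmetic exactly as the node does so the intermediate (limit - init) cannot wrap (plain C++, not part of this changeset; the function name is invented):

#include <cassert>
#include <cstdint>

static int64_t loop_limit_value(int32_t init, int32_t limit, int32_t stride) {
  int32_t stride_m   = stride - (stride > 0 ? 1 : -1);
  int64_t trip_count = ((int64_t)limit - init + stride_m) / stride;
  return (int64_t)init + (int64_t)stride * trip_count;
}

int main() {
  // for (i = 0; i < 10; i += 3) visits 0, 3, 6, 9; the exit value of i is 12.
  assert(loop_limit_value(0, 10, 3) == 12);
  // limit - init = 2147484645 does not fit in 32 bits, so longs are needed.
  assert(loop_limit_value(-1000, 2147483645, 3) == 2147483645LL);
  return 0;
}
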
+
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.
+Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if (phase->type(in(Init))   == Type::TOP ||
+      phase->type(in(Limit))  == Type::TOP ||
+      phase->type(in(Stride)) == Type::TOP)
+    return NULL;  // Dead
+
+  int stride_con = phase->type(in(Stride))->is_int()->get_con();
+  if (stride_con == 1)
+    return NULL;  // Identity
+
+  if (in(Init)->is_Con() && in(Limit)->is_Con())
+    return NULL;  // Value
+
+  // Delay the following optimizations until all loop optimizations
+  // are done to keep the Ideal graph simple.
+  if (!can_reshape || phase->C->major_progress())
+    return NULL;
+
+  const TypeInt* init_t  = phase->type(in(Init) )->is_int();
+  const TypeInt* limit_t = phase->type(in(Limit))->is_int();
+  int stride_p;
+  long lim, ini;
+  julong max;
+  if (stride_con > 0) {
+    stride_p = stride_con;
+    lim = limit_t->_hi;
+    ini = init_t->_lo;
+    max = (julong)max_jint;
+  } else {
+    stride_p = -stride_con;
+    lim = init_t->_hi;
+    ini = limit_t->_lo;
+    max = (julong)min_jint;
+  }
+  julong range = lim - ini + stride_p;
+  if (range <= max) {
+    // Convert to an integer expression if it does not overflow.
+    Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1));
+    Node *range = phase->transform(new (phase->C, 3) SubINode(in(Limit), in(Init)));
+    Node *bias  = phase->transform(new (phase->C, 3) AddINode(range, stride_m));
+    Node *trip  = phase->transform(new (phase->C, 3) DivINode(0, bias, in(Stride)));
+    Node *span  = phase->transform(new (phase->C, 3) MulINode(trip, in(Stride)));
+    return new (phase->C, 3) AddINode(span, in(Init)); // exact limit
+  }
+
+  if (is_power_of_2(stride_p) ||                // divisor is 2^n
+      !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node?
+    // Convert to long expression to avoid integer overflow
+    // and let igvn optimizer convert this division.
+    //
+    Node*   init   = phase->transform( new (phase->C, 2) ConvI2LNode(in(Init)));
+    Node*  limit   = phase->transform( new (phase->C, 2) ConvI2LNode(in(Limit)));
+    Node* stride   = phase->longcon(stride_con);
+    Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1));
+
+    Node *range = phase->transform(new (phase->C, 3) SubLNode(limit, init));
+    Node *bias  = phase->transform(new (phase->C, 3) AddLNode(range, stride_m));
+    Node *span;
+    if (stride_con > 0 && is_power_of_2(stride_p)) {
+      // bias >= 0 if stride > 0, so if stride is 2^n we can use &(-stride)
+      // and avoid generating rounding for the division. The zero trip guard
+      // should guarantee that init < limit, but sometimes the guard is missing
+      // and we can get a situation where init > limit. Note: for the empty loop
+      // optimization the zero trip guard is generated explicitly, which leaves
+      // only the RCE predicate where the exact limit is used, and the predicate
+      // will simply fail, forcing recompilation.
+      Node* neg_stride   = phase->longcon(-stride_con);
+      span = phase->transform(new (phase->C, 3) AndLNode(bias, neg_stride));
+    } else {
+      Node *trip  = phase->transform(new (phase->C, 3) DivLNode(0, bias, stride));
+      span = phase->transform(new (phase->C, 3) MulLNode(trip, stride));
+    }
+    // Convert back to int
+    Node *span_int = phase->transform(new (phase->C, 2) ConvL2INode(span));
+    return new (phase->C, 3) AddINode(span_int, in(Init)); // exact limit
+  }
+
+  return NULL;    // No progress
+}
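
The power-of-two shortcut above relies on bias & -stride equaling (bias / stride) * stride for bias >= 0 and stride == 2^n. A small standalone check of that identity (plain C++, not part of this changeset; helper names are invented):

#include <cassert>
#include <cstdint>

static int64_t span_by_mask(int64_t bias, int64_t stride) {
  return bias & -stride;             // AndLNode with longcon(-stride_con)
}

static int64_t span_by_division(int64_t bias, int64_t stride) {
  return (bias / stride) * stride;   // DivLNode + MulLNode fallback
}

int main() {
  for (int64_t bias = 0; bias < 64; bias++)
    for (int64_t stride = 2; stride <= 16; stride <<= 1)
      assert(span_by_mask(bias, stride) == span_by_division(bias, stride));
  return 0;
}
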
+
+//------------------------------Identity---------------------------------------
+// If stride == 1 return limit node.
+Node *LoopLimitNode::Identity( PhaseTransform *phase ) {
+  int stride_con = phase->type(in(Stride))->is_int()->get_con();
+  if (stride_con == 1 || stride_con == -1)
+    return in(Limit);
+  return this;
+}
+
+//=============================================================================
 //----------------------match_incr_with_optional_truncation--------------------
 // Match increment with optional truncation:
 // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16
@@ -870,7 +1167,7 @@
   outer = igvn.register_new_node_with_optimizer(outer, _head);
   phase->set_created_loop_node();
 
-  Node* pred = phase->clone_loop_predicates(ctl, outer);
+  Node* pred = phase->clone_loop_predicates(ctl, outer, true);
   // Outermost loop falls into '_head' loop
   _head->set_req(LoopNode::EntryControl, pred);
   _head->del_req(outer_idx);
@@ -1440,9 +1737,16 @@
     tty->print("  ");
   tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx);
   if (_irreducible) tty->print(" IRREDUCIBLE");
+  Node* entry = _head->in(LoopNode::EntryControl);
+  if (LoopLimitCheck) {
+    Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
+    if (predicate != NULL ) {
+      tty->print(" limit_check");
+      entry = entry->in(0)->in(0);
+    }
+  }
   if (UseLoopPredicate) {
-    Node* entry = PhaseIdealLoop::find_predicate_insertion_point(_head->in(LoopNode::EntryControl),
-                                                                 Deoptimization::Reason_predicate);
+    entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
     if (entry != NULL) {
       tty->print(" predicated");
     }
@@ -1528,10 +1832,15 @@
       !loop->tail()->is_top()) {
     LoopNode* lpn = loop->_head->as_Loop();
     Node* entry = lpn->in(LoopNode::EntryControl);
-    Node* predicate_proj = find_predicate(entry);
+    Node* predicate_proj = find_predicate(entry); // loop_limit_check first
     if (predicate_proj != NULL ) { // right pattern that can be used by loop predication
       assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be");
       useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
+      entry = entry->in(0)->in(0);
+    }
+    predicate_proj = find_predicate(entry); // Predicate
+    if (predicate_proj != NULL ) {
+      useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
     }
   }
 
@@ -1542,6 +1851,8 @@
 
 //------------------------eliminate_useless_predicates-----------------------------
 // Eliminate all inserted predicates if they could not be used by loop predication.
+// Note: it also eliminates the loop limit check predicate since it also uses an
+// Opaque1 node (see Parse::add_predicate()).
 void PhaseIdealLoop::eliminate_useless_predicates() {
   if (C->predicate_count() == 0)
     return; // no predicate left
@@ -1731,7 +2042,7 @@
   // Some parser-inserted loop predicates could never be used by loop
   // predication or they were moved away from loop during some optimizations.
   // For example, peeling. Eliminate them before next loop optimizations.
-  if (UseLoopPredicate) {
+  if (UseLoopPredicate || LoopLimitCheck) {
     eliminate_useless_predicates();
   }
 
--- a/src/share/vm/opto/loopnode.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/loopnode.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -289,6 +289,28 @@
 inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; }
 inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; }
 
+//------------------------------LoopLimitNode-----------------------------
+// Counted Loop limit node which represents the exact final iterator value:
+// trip_count  = (limit - init_trip + stride - 1)/stride
+// final_value = trip_count * stride + init_trip.
+// Use HW instructions to calculate it when it can overflow in integer arithmetic.
+// Note: final_value should fit into an integer since the counted loop has a
+// limit check: limit <= max_int-stride.
+class LoopLimitNode : public Node {
+  enum { Init=1, Limit=2, Stride=3 };
+ public:
+  LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) {
+    // Put it on the Macro nodes list to optimize during macro nodes expansion.
+    init_flags(Flag_is_macro);
+    C->add_macro_node(this);
+  }
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeInt::INT; }
+  virtual uint ideal_reg() const { return Op_RegI; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual Node *Identity( PhaseTransform *phase );
+};
 
 // -----------------------------IdealLoopTree----------------------------------
 class IdealLoopTree : public ResourceObj {
@@ -775,6 +797,8 @@
 
   bool is_counted_loop( Node *x, IdealLoopTree *loop );
 
+  Node* exact_limit( IdealLoopTree *loop );
+
   // Return a post-walked LoopNode
   IdealLoopTree *get_loop( Node *n ) const {
     // Dead nodes have no loop, so return the top level loop instead
@@ -837,7 +861,6 @@
   bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
 
   // Return true if proj is for "proj->[region->..]call_uct"
-  // Return true if proj is for "proj->[region->..]call_uct"
   static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason);
   // Return true for    "if(test)-> proj -> ...
   //                          |
@@ -860,10 +883,11 @@
                                    PhaseIterGVN* igvn);
   static Node* clone_loop_predicates(Node* old_entry, Node* new_entry,
                                          bool move_predicates,
+                                         bool clone_limit_check,
                                          PhaseIdealLoop* loop_phase,
                                          PhaseIterGVN* igvn);
-  Node* clone_loop_predicates(Node* old_entry, Node* new_entry);
-  Node*  move_loop_predicates(Node* old_entry, Node* new_entry);
+  Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
+  Node*  move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
 
   void eliminate_loop_predicates(Node* entry);
   static Node* skip_loop_predicates(Node* entry);
@@ -873,7 +897,7 @@
   // Find a predicate
   static Node* find_predicate(Node* entry);
   // Construct a range check for a predicate if
-  BoolNode* rc_predicate(Node* ctrl,
+  BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl,
                          int scale, Node* offset,
                          Node* init, Node* limit, Node* stride,
                          Node* range, bool upper);
@@ -903,11 +927,13 @@
 
   // Range Check Elimination uses this function!
   // Constrain the main loop iterations so the affine function:
-  //    scale_con * I + offset  <  limit
+  //    low_limit <= scale_con * I + offset  <  upper_limit
   // always holds true.  That is, either increase the number of iterations in
   // the pre-loop or the post-loop until the condition holds true in the main
   // loop.  Scale_con, offset and limit are all loop invariant.
-  void add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
+  void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
+  // Helper function for add_constraint().
+  Node* adjust_limit( int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl );
 
   // Partially peel loop up through last_peel node.
   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
--- a/src/share/vm/opto/loopopts.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/loopopts.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2262,6 +2262,9 @@
 //                  stmt1
 //                    |
 //                    v
+//               loop predicate
+//                    |
+//                    v
 //                  stmt2 clone
 //                    |
 //                    v
@@ -2272,9 +2275,6 @@
 //         :  false   true
 //         :  |       |
 //         :  |       v
-//         :  | loop predicate
-//         :  |       |
-//         :  |       v
 //         :  |    newloop<-----+
 //         :  |        |        |
 //         :  |     stmt3 clone |
@@ -2330,7 +2330,6 @@
     }
   }
 
-  Node* entry = head->in(LoopNode::EntryControl);
   int dd = dom_depth(head);
 
   // Step 1: find cut point
@@ -2627,8 +2626,6 @@
 
   // Backedge of the surviving new_head (the clone) is original last_peel
   _igvn.hash_delete(new_head_clone);
-  Node* new_entry = move_loop_predicates(entry, new_head_clone->in(LoopNode::EntryControl));
-  new_head_clone->set_req(LoopNode::EntryControl, new_entry);
   new_head_clone->set_req(LoopNode::LoopBackControl, last_peel);
   _igvn._worklist.push(new_head_clone);
 
--- a/src/share/vm/opto/macro.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/macro.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -221,9 +221,16 @@
     Node *shift = p2x->unique_out();
     Node *addp = shift->unique_out();
     for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
-      Node *st = addp->last_out(j);
-      assert(st->is_Store(), "store required");
-      _igvn.replace_node(st, st->in(MemNode::Memory));
+      Node *mem = addp->last_out(j);
+      if (UseCondCardMark && mem->is_Load()) {
+        assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
+        // The load is checking if the card has been written, so
+        // replace it with zero to fold the test.
+        _igvn.replace_node(mem, intcon(0));
+        continue;
+      }
+      assert(mem->is_Store(), "store required");
+      _igvn.replace_node(mem, mem->in(MemNode::Memory));
     }
   } else {
     // G1 pre/post barriers
@@ -1686,25 +1693,31 @@
                          OptoRuntime::new_array_Java());
 }
 
-
-// we have determined that this lock/unlock can be eliminated, we simply
-// eliminate the node without expanding it.
-//
-// Note:  The membar's associated with the lock/unlock are currently not
-//        eliminated.  This should be investigated as a future enhancement.
-//
-bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
-
+//-----------------------mark_eliminated_locking_nodes-----------------------
+// During EA obj may point to several objects, but after a few ideal graph
+// transformations (CCP) it may point to only one non-escaping object
+// (though still through a phi), and the corresponding locks and unlocks will
+// be marked for elimination. Later obj could be replaced with a new node (a
+// new phi) which does not have escape information. And later, after some graph
+// reshaping, other locks and unlocks (which were not marked for elimination
+// before) are connected to this new obj (phi), but they still will not be
+// marked for elimination since the new obj has no escape information.
+// Mark all associated (same box and obj) lock and unlock nodes for
+// elimination if some of them are marked already.
+void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
   if (!alock->is_eliminated()) {
-    return false;
+    return;
   }
-  if (alock->is_Lock() && !alock->is_coarsened()) {
+  if (!alock->is_coarsened()) { // Eliminated by EA
       // Create new "eliminated" BoxLock node and use it
       // in monitor debug info for the same object.
       BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
       Node* obj = alock->obj_node();
       if (!oldbox->is_eliminated()) {
         BoxLockNode* newbox = oldbox->clone()->as_BoxLock();
+        // Note: BoxLock node is marked eliminated only here
+        // and it is used to indicate that all associated lock
+        // and unlock nodes are marked for elimination.
         newbox->set_eliminated();
         transform_later(newbox);
         // Replace old box node with new box for all users
@@ -1713,22 +1726,14 @@
 
           bool next_edge = true;
           Node* u = oldbox->raw_out(i);
-          if (u == alock) {
-            i++;
-            continue; // It will be removed below
-          }
-          if (u->is_Lock() &&
-              u->as_Lock()->obj_node() == obj &&
-              // oldbox could be referenced in debug info also
-              u->as_Lock()->box_node() == oldbox) {
-            assert(u->as_Lock()->is_eliminated(), "sanity");
+          if (u->is_AbstractLock() &&
+              u->as_AbstractLock()->obj_node() == obj &&
+              u->as_AbstractLock()->box_node() == oldbox) {
+            // Mark all associated locks and unlocks.
+            u->as_AbstractLock()->set_eliminated();
             _igvn.hash_delete(u);
             u->set_req(TypeFunc::Parms + 1, newbox);
             next_edge = false;
-#ifdef ASSERT
-          } else if (u->is_Unlock() && u->as_Unlock()->obj_node() == obj) {
-            assert(u->as_Unlock()->is_eliminated(), "sanity");
-#endif
           }
           // Replace old box in monitor debug info.
           if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
@@ -1754,8 +1759,27 @@
           if (next_edge) i++;
         } // for (uint i = 0; i < oldbox->outcnt();)
       } // if (!oldbox->is_eliminated())
-  } // if (alock->is_Lock() && !lock->is_coarsened())
+  } // if (!alock->is_coarsened())
+}
+
+// we have determined that this lock/unlock can be eliminated, we simply
+// eliminate the node without expanding it.
+//
+// Note:  The membar's associated with the lock/unlock are currently not
+//        eliminated.  This should be investigated as a future enhancement.
+//
+bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
 
+  if (!alock->is_eliminated()) {
+    return false;
+  }
+#ifdef ASSERT
+  if (alock->is_Lock() && !alock->is_coarsened()) {
+    // Check that new "eliminated" BoxLock node is created.
+    BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
+    assert(oldbox->is_eliminated(), "should be done already");
+  }
+#endif
   CompileLog* log = C->log();
   if (log != NULL) {
     log->head("eliminate_lock lock='%d'",
@@ -2138,6 +2162,15 @@
   if (C->macro_count() == 0)
     return false;
   // First, attempt to eliminate locks
+  int cnt = C->macro_count();
+  for (int i=0; i < cnt; i++) {
+    Node *n = C->macro_node(i);
+    if (n->is_AbstractLock()) { // Lock and Unlock nodes
+      // Before elimination mark all associated (same box and obj)
+      // lock and unlock nodes.
+      mark_eliminated_locking_nodes(n->as_AbstractLock());
+    }
+  }
   bool progress = true;
   while (progress) {
     progress = false;
@@ -2147,6 +2180,11 @@
       debug_only(int old_macro_count = C->macro_count(););
       if (n->is_AbstractLock()) {
         success = eliminate_locking_node(n->as_AbstractLock());
+      } else if (n->Opcode() == Op_LoopLimit) {
+        // Remove it from macro list and put on IGVN worklist to optimize.
+        C->remove_macro_node(n);
+        _igvn._worklist.push(n);
+        success = true;
       } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
         _igvn.replace_node(n, n->in(1));
         success = true;
--- a/src/share/vm/opto/macro.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/macro.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -92,6 +92,7 @@
   void process_users_of_allocation(AllocateNode *alloc);
 
   void eliminate_card_mark(Node *cm);
+  void mark_eliminated_locking_nodes(AbstractLockNode *alock);
   bool eliminate_locking_node(AbstractLockNode *alock);
   void expand_lock_node(LockNode *lock);
   void expand_unlock_node(UnlockNode *unlock);
--- a/src/share/vm/opto/matcher.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/matcher.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -2086,6 +2086,13 @@
         n->del_req(3);
         break;
       }
+      case Op_LoopLimit: {
+        Node *pair1 = new (C, 3) BinaryNode(n->in(1),n->in(2));
+        n->set_req(1,pair1);
+        n->set_req(2,n->in(3));
+        n->del_req(3);
+        break;
+      }
       case Op_StrEquals: {
         Node *pair1 = new (C, 3) BinaryNode(n->in(2),n->in(3));
         n->set_req(2,pair1);
--- a/src/share/vm/opto/memnode.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/memnode.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1259,15 +1259,18 @@
     return NULL; // Wait stable graph
   }
   uint cnt = mem->req();
-  for( uint i = 1; i < cnt; i++ ) {
+  for (uint i = 1; i < cnt; i++) {
+    Node* rc = region->in(i);
+    if (rc == NULL || phase->type(rc) == Type::TOP)
+      return NULL; // Wait stable graph
     Node *in = mem->in(i);
-    if( in == NULL ) {
+    if (in == NULL) {
       return NULL; // Wait stable graph
     }
   }
   // Check for loop invariant.
   if (cnt == 3) {
-    for( uint i = 1; i < cnt; i++ ) {
+    for (uint i = 1; i < cnt; i++) {
       Node *in = mem->in(i);
       Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
       if (m == mem) {
@@ -1281,38 +1284,37 @@
 
   // Do nothing here if Identity will find a value
   // (to avoid infinite chain of value phis generation).
-  if ( !phase->eqv(this, this->Identity(phase)) )
+  if (!phase->eqv(this, this->Identity(phase)))
     return NULL;
 
   // Skip the split if the region dominates some control edge of the address.
-  if (cnt == 3 && !MemNode::all_controls_dominate(address, region))
+  if (!MemNode::all_controls_dominate(address, region))
     return NULL;
 
   const Type* this_type = this->bottom_type();
   int this_index  = phase->C->get_alias_index(addr_t);
   int this_offset = addr_t->offset();
   int this_iid    = addr_t->is_oopptr()->instance_id();
-  int wins = 0;
   PhaseIterGVN *igvn = phase->is_IterGVN();
   Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
-  for( uint i = 1; i < region->req(); i++ ) {
+  for (uint i = 1; i < region->req(); i++) {
     Node *x;
     Node* the_clone = NULL;
-    if( region->in(i) == phase->C->top() ) {
+    if (region->in(i) == phase->C->top()) {
       x = phase->C->top();      // Dead path?  Use a dead data op
     } else {
       x = this->clone();        // Else clone up the data op
       the_clone = x;            // Remember for possible deletion.
       // Alter data node to use pre-phi inputs
-      if( this->in(0) == region ) {
-        x->set_req( 0, region->in(i) );
+      if (this->in(0) == region) {
+        x->set_req(0, region->in(i));
       } else {
-        x->set_req( 0, NULL );
+        x->set_req(0, NULL);
       }
-      for( uint j = 1; j < this->req(); j++ ) {
+      for (uint j = 1; j < this->req(); j++) {
         Node *in = this->in(j);
-        if( in->is_Phi() && in->in(0) == region )
-          x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
+        if (in->is_Phi() && in->in(0) == region)
+          x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
       }
     }
     // Check for a 'win' on some paths
@@ -1321,12 +1323,11 @@
     bool singleton = t->singleton();
 
     // See comments in PhaseIdealLoop::split_thru_phi().
-    if( singleton && t == Type::TOP ) {
+    if (singleton && t == Type::TOP) {
       singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
     }
 
-    if( singleton ) {
-      wins++;
+    if (singleton) {
       x = igvn->makecon(t);
     } else {
       // We now call Identity to try to simplify the cloned node.
@@ -1340,13 +1341,11 @@
       // igvn->type(x) is set to x->Value() already.
       x->raise_bottom_type(t);
       Node *y = x->Identity(igvn);
-      if( y != x ) {
-        wins++;
+      if (y != x) {
         x = y;
       } else {
         y = igvn->hash_find(x);
-        if( y ) {
-          wins++;
+        if (y) {
           x = y;
         } else {
           // Else x is a new node we are keeping
@@ -1360,13 +1359,9 @@
       igvn->remove_dead_node(the_clone);
     phi->set_req(i, x);
   }
-  if( wins > 0 ) {
-    // Record Phi
-    igvn->register_new_node_with_optimizer(phi);
-    return phi;
-  }
-  igvn->remove_dead_node(phi);
-  return NULL;
+  // Record Phi
+  igvn->register_new_node_with_optimizer(phi);
+  return phi;
 }
 
 //------------------------------Ideal------------------------------------------
@@ -1677,14 +1672,15 @@
   // If we are loading from a freshly-allocated object, produce a zero,
   // if the load is provably beyond the header of the object.
   // (Also allow a variable load from a fresh array to produce zero.)
-  if (ReduceFieldZeroing) {
+  const TypeOopPtr *tinst = tp->isa_oopptr();
+  bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
+  if (ReduceFieldZeroing || is_instance) {
     Node* value = can_see_stored_value(mem,phase);
     if (value != NULL && value->is_Con())
       return value->bottom_type();
   }
 
-  const TypeOopPtr *tinst = tp->isa_oopptr();
-  if (tinst != NULL && tinst->is_known_instance_field()) {
+  if (is_instance) {
     // If we have an instance type and our memory input is the
     // programs's initial memory state, there is no matching store,
     // so just return a zero of the appropriate type
@@ -2159,9 +2155,12 @@
   Node* mem     = in(MemNode::Memory);
   Node* address = in(MemNode::Address);
 
-  // Back-to-back stores to same address?  Fold em up.
-  // Generally unsafe if I have intervening uses...
-  if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address)) {
+  // Back-to-back stores to same address?  Fold em up.  Generally
+  // unsafe if I have intervening uses...  Also disallowed for StoreCM
+  // since they must follow each StoreP operation.  Redundant StoreCMs
+  // are eliminated just before matching in final_graph_reshape.
+  if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address) &&
+      mem->Opcode() != Op_StoreCM) {
     // Looking at a dead closed cycle of memory?
     assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
 
--- a/src/share/vm/opto/output.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/output.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -911,7 +911,7 @@
         }
       } else {
         const TypePtr *tp = obj_node->bottom_type()->make_ptr();
-        scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->constant_encoding());
+        scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
       }
 
       OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
@@ -1354,15 +1354,20 @@
         // Check that oop-store precedes the card-mark
         else if( mach->ideal_Opcode() == Op_StoreCM ) {
           uint storeCM_idx = j;
-          Node *oop_store = mach->in(mach->_cnt);  // First precedence edge
-          assert( oop_store != NULL, "storeCM expects a precedence edge");
-          uint i4;
-          for( i4 = 0; i4 < last_inst; ++i4 ) {
-            if( b->_nodes[i4] == oop_store ) break;
+          int count = 0;
+          for (uint prec = mach->req(); prec < mach->len(); prec++) {
+            Node *oop_store = mach->in(prec);  // Precedence edge
+            if (oop_store == NULL) continue;
+            count++;
+            uint i4;
+            for( i4 = 0; i4 < last_inst; ++i4 ) {
+              if( b->_nodes[i4] == oop_store ) break;
+            }
+            // Note: This test can provide a false failure if other precedence
+            // edges have been added to the storeCMNode.
+            assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
           }
-          // Note: This test can provide a false failure if other precedence
-          // edges have been added to the storeCMNode.
-          assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
+          assert(count > 0, "storeCM expects at least one precedence edge");
         }
 #endif
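
The output.cpp change reworks the debug check for StoreCM nodes: rather than inspecting only the first precedence edge, it walks every precedence slot (inputs in [req(), len()) by the usual Node layout, some of which may be NULL), verifies that each referenced oop store is scheduled before the card mark, and asserts that at least one precedence edge is present. A condensed sketch of that loop, assuming the hunk's local names (not standalone code):

    int count = 0;
    for (uint prec = mach->req(); prec < mach->len(); prec++) {
      Node* oop_store = mach->in(prec);        // precedence edge, may be NULL
      if (oop_store == NULL) continue;
      count++;
      uint i4;
      for (i4 = 0; i4 < last_inst; ++i4) {     // locate the oop store in this block
        if (b->_nodes[i4] == oop_store) break;
      }
      assert(i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
    }
    assert(count > 0, "storeCM expects at least one precedence edge");
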
 
--- a/src/share/vm/opto/parse.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/parse.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -50,7 +50,7 @@
   // Always between 0.0 and 1.0.  Represents the percentage of the method's
   // total execution time used at this call site.
   const float _site_invoke_ratio;
-  const int   _site_depth_adjust;
+  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
   float compute_callee_frequency( int caller_bci ) const;
 
   GrowableArray<InlineTree*> _subtrees;
@@ -63,18 +63,18 @@
              JVMState* caller_jvms,
              int caller_bci,
              float site_invoke_ratio,
-             int site_depth_adjust);
+             int max_inline_level);
   InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                            JVMState* caller_jvms,
                                            int caller_bci);
   const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
-  const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
-  const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
-  void        print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const PRODUCT_RETURN;
+  const char* should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
+  const char* should_not_inline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
+  void        print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const;
 
   InlineTree *caller_tree()       const { return _caller_tree;  }
   InlineTree* callee_at(int bci, ciMethod* m) const;
-  int         inline_depth()      const { return stack_depth() + _site_depth_adjust; }
+  int         inline_level()      const { return stack_depth(); }
   int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
 
 public:
@@ -82,7 +82,7 @@
   static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);
 
   // For temporary (stack-allocated, stateless) ilts:
-  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust);
+  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);
 
   // InlineTree enum
   enum InlineStyle {
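
In parse.hpp, _site_depth_adjust becomes _max_inline_level and inline_depth() is renamed to inline_level(), which now simply returns stack_depth(). The comparison against the per-subtree limit lives in the inlining heuristics, which are not part of this section; the condition below is therefore an assumed sketch of how the two values would relate, not code taken from the hunk:

    // Hypothetical use inside the inlining heuristics (assumption, not shown
    // in the hunk above): refuse to inline once the call chain is deeper
    // than the limit recorded for this sub-tree.
    if (inline_level() > _max_inline_level) {
      return "inlining too deep";
    }
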
--- a/src/share/vm/opto/parse1.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/parse1.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -638,7 +638,7 @@
         ensure_phis_everywhere();
 
         if (block->is_SEL_head() &&
-            UseLoopPredicate) {
+            (UseLoopPredicate || LoopLimitCheck)) {
           // Add predicate to single entry (not irreducible) loop head.
           assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
           // Need correct bci for predicate.
--- a/src/share/vm/opto/phaseX.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/phaseX.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -472,8 +472,8 @@
   }
 
   // Clone loop predicates. Defined in loopTransform.cpp.
-  Node* clone_loop_predicates(Node* old_entry, Node* new_entry);
-  Node*  move_loop_predicates(Node* old_entry, Node* new_entry);
+  Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
+  Node*  move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
   // Create a new if below new_entry for the predicate to be cloned
   ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                         Deoptimization::DeoptReason reason);
--- a/src/share/vm/opto/runtime.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/runtime.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -120,6 +120,7 @@
 address OptoRuntime::_zap_dead_native_locals_Java                 = NULL;
 # endif
 
+ExceptionBlob* OptoRuntime::_exception_blob;
 
 // This should be called in an assertion at the start of OptoRuntime routines
 // which are entered from compiled code (all of them)
--- a/src/share/vm/opto/stringopts.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/stringopts.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -768,6 +768,7 @@
         tty->cr();
       }
 #endif
+      fail = true;
       break;
     } else if (ptr->is_Proj() && ptr->in(0)->is_Initialize()) {
       ptr = ptr->in(0)->in(0);
@@ -1172,16 +1173,16 @@
 
 Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start) {
   Node* string = str;
-  Node* offset = kit.make_load(NULL,
+  Node* offset = kit.make_load(kit.control(),
                                kit.basic_plus_adr(string, string, java_lang_String::offset_offset_in_bytes()),
                                TypeInt::INT, T_INT, offset_field_idx);
-  Node* count = kit.make_load(NULL,
+  Node* count = kit.make_load(kit.control(),
                               kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()),
                               TypeInt::INT, T_INT, count_field_idx);
   const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                    TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
-  Node* value = kit.make_load(NULL,
+  Node* value = kit.make_load(kit.control(),
                               kit.basic_plus_adr(string, string, java_lang_String::value_offset_in_bytes()),
                               value_type, T_OBJECT, value_field_idx);
 
@@ -1342,7 +1343,7 @@
         }
         //         Node* offset = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, offset_offset),
         //                                      TypeInt::INT, T_INT, offset_field_idx);
-        Node* count = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
+        Node* count = kit.make_load(kit.control(), kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
                                     TypeInt::INT, T_INT, count_field_idx);
         length = __ AddI(length, count);
         string_sizes->init_req(argi, NULL);
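
The stringopts.cpp hunk replaces the NULL control input of these String field loads with kit.control(). Presumably this pins the offset/count/value loads under the current control so they cannot float above the test that guards them; that rationale is an inference, and only the changed call shape below is taken from the hunk:

    // Loads of String.offset/count/value are now issued with an explicit
    // control input instead of NULL (floating):
    Node* count = kit.make_load(kit.control(),
                                kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()),
                                TypeInt::INT, T_INT, count_field_idx);
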
--- a/src/share/vm/opto/subnode.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/subnode.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1101,6 +1101,7 @@
   if( cmp2_type == TypeInt::ZERO &&
       cmp1_op == Op_XorI &&
       j_xor->in(1) != j_xor &&          // An xor of itself is dead
+      phase->type( j_xor->in(1) ) == TypeInt::BOOL &&
       phase->type( j_xor->in(2) ) == TypeInt::ONE &&
       (_test._test == BoolTest::eq ||
        _test._test == BoolTest::ne) ) {
@@ -1223,21 +1224,6 @@
 }
 
 //=============================================================================
-//------------------------------NegNode----------------------------------------
-Node *NegFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  if( in(1)->Opcode() == Op_SubF )
-    return new (phase->C, 3) SubFNode( in(1)->in(2), in(1)->in(1) );
-  return NULL;
-}
-
-Node *NegDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  if( in(1)->Opcode() == Op_SubD )
-    return new (phase->C, 3) SubDNode( in(1)->in(2), in(1)->in(1) );
-  return NULL;
-}
-
-
-//=============================================================================
 //------------------------------Value------------------------------------------
 // Compute sqrt
 const Type *SqrtDNode::Value( PhaseTransform *phase ) const {
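
The subnode.cpp hunk tightens the BoolNode pattern that rewrites a compare of (x ^ 1) against zero: the rewrite now applies only when x is known to be a boolean value (TypeInt::BOOL, i.e. 0 or 1). A small standalone illustration of why the extra type check is needed:

    #include <cassert>

    int main() {
      int x = 2;                          // not a 0/1 value
      bool original  = ((x ^ 1) == 0);    // false
      bool rewritten = (x != 0);          // true: the unguarded rewrite would change the result
      assert(original != rewritten);      // the equivalence only holds when x is 0 or 1
      return 0;
    }
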
--- a/src/share/vm/opto/subnode.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/opto/subnode.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -377,7 +377,6 @@
 public:
   NegFNode( Node *in1 ) : NegNode(in1) {}
   virtual int Opcode() const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   const Type *bottom_type() const { return Type::FLOAT; }
   virtual uint ideal_reg() const { return Op_RegF; }
 };
@@ -391,7 +390,6 @@
 public:
   NegDNode( Node *in1 ) : NegNode(in1) {}
   virtual int Opcode() const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
 };
--- a/src/share/vm/prims/jni.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/jni.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -30,6 +30,9 @@
 #include "classfile/vmSymbols.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "graal/graalCompiler.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif // SERIALGC
 #include "memory/allocation.inline.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/oopFactory.hpp"
@@ -1725,6 +1728,26 @@
     o = JvmtiExport::jni_GetField_probe(thread, obj, o, k, fieldID, false);
   }
   jobject ret = JNIHandles::make_local(env, o->obj_field(offset));
+#ifndef SERIALGC
+  // If G1 is enabled and we are accessing the value of the referent
+  // field in a reference object then we need to register a non-null
+  // referent with the SATB barrier.
+  if (UseG1GC) {
+    bool needs_barrier = false;
+
+    if (ret != NULL &&
+        offset == java_lang_ref_Reference::referent_offset &&
+        instanceKlass::cast(k)->reference_type() != REF_NONE) {
+      assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
+      needs_barrier = true;
+    }
+
+    if (needs_barrier) {
+      oop referent = JNIHandles::resolve(ret);
+      G1SATBCardTableModRefBS::enqueue(referent);
+    }
+  }
+#endif // SERIALGC
   DTRACE_PROBE1(hotspot_jni, GetObjectField__return, ret);
   return ret;
 JNI_END
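
The jni.cpp hunk adds a G1 pre-barrier to JNI GetObjectField: when the field being read is Reference.referent of a java.lang.ref.Reference subclass and G1 is in use, the returned referent is enqueued on the SATB queue so that concurrent marking keeps it alive even if the Reference is cleared afterwards. A condensed form of the new check, using the same names as the hunk (relies on the surrounding HotSpot code):

    if (UseG1GC && ret != NULL &&
        offset == java_lang_ref_Reference::referent_offset &&
        instanceKlass::cast(k)->reference_type() != REF_NONE) {
      // SATB pre-barrier: record the referent we just exposed to the caller.
      G1SATBCardTableModRefBS::enqueue(JNIHandles::resolve(ret));
    }
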
--- a/src/share/vm/prims/jvmti.xml	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/jvmti.xml	Wed Jul 27 17:32:44 2011 -0700
@@ -280,10 +280,8 @@
    <!ELEMENT externallink (#PCDATA|jvmti|code|i|b|tm)*>
    <!ATTLIST externallink id CDATA #REQUIRED>
 
-   <!ELEMENT vmspeclink EMPTY>
-   <!ATTLIST vmspeclink id CDATA #IMPLIED>
-   <!ATTLIST vmspeclink name CDATA #IMPLIED>
-   <!ATTLIST vmspeclink preposition CDATA #IMPLIED>
+   <!ELEMENT vmspec EMPTY>
+   <!ATTLIST vmspec chapter CDATA #IMPLIED>
 
    <!ELEMENT internallink (#PCDATA|jvmti|code|i|b)*>
    <!ATTLIST internallink id CDATA #REQUIRED>
@@ -2285,9 +2283,8 @@
         Stack frames are referenced by depth.
         The frame at depth zero is the current frame.
         <p/>
-        Stack frames are as described in the 
-        <vmspeclink id="Overview.doc.html#17257"
-                    name="Frames section"/>.  
+        Stack frames are as described in
+        <vmspec chapter="3.6"/>,
         That is, they correspond to method 
         invocations (including native methods) but do not correspond to platform native or 
         VM internal frames.
@@ -2627,7 +2624,7 @@
         <param id="use_java_stack">
 	  <jboolean/>
 	  <description>
-	    Return the stack showing the <vmspeclink/>
+	    Return the stack showing <vmspec/>
 	    model of the stack; 
 	    otherwise, show the internal representation of the stack with
 	    inlined and optimized methods missing.  If the virtual machine
@@ -2707,7 +2704,7 @@
 	When the thread is resumed, the execution 
 	state of the thread is reset to the state
 	immediately before the called method was invoked.
-	That is (using the <vmspeclink/> terminology):
+	That is (using <vmspec/> terminology):
 	  <ul>
 	    <li>the current frame is discarded as the previous frame becomes the current one</li>
 	    <li>the operand stack is restored--the argument values are added back
@@ -2868,9 +2865,8 @@
       to return at any point during its execution.
       The method which will return early is referred to as the <i>called method</i>.
       The called method is the current method
-      (as defined by the 
-      <vmspeclink id="Overview.doc.html#17257"
-                  name="Frames section"/>) 
+      (as defined by
+      <vmspec chapter="3.6"/>) 
       for the specified thread at
       the time the function is called.
       <p/>
@@ -3576,10 +3572,8 @@
 	<field id="index">
 	  <jint/>
 	  <description>	    
-	    The index into the constant pool of the class. See the
-            <vmspeclink id="ClassFile.doc.html#20080"
-                        name="Constant Pool section"/>
-	    description.
+	    The index into the constant pool of the class. See the description in 
+      <vmspec chapter="4.4"/>.
 	  </description>
 	</field>
       </typedef>
@@ -5006,9 +5000,8 @@
 	    For references of this kind the <code>referrer_index</code>
             parameter to the <internallink id="jvmtiObjectReferenceCallback">
             jvmtiObjectReferenceCallback</internallink> is the index into
-            constant pool table of the class, starting at 1. See the
-            <vmspeclink id="ClassFile.doc.html#20080"
-                        name="Constant Pool section"/>
+            constant pool table of the class, starting at 1. See
+            <vmspec chapter="4.4"/>.
 	  </constant>
 	</constants>
 
@@ -6441,9 +6434,7 @@
 	been recorded as an initiating loader. Each 
 	class in the returned array was created by this class loader, 
 	either by defining it directly or by delegation to another class loader.
-        See the 
-        <vmspeclink id="ConstantPool.doc.html#72007"
-                    name="Creation and Loading section"/>.
+	See <vmspec chapter="5.3"/>.
 	<p/>
 	For JDK version 1.1 implementations that don't
 	recognize the distinction between initiating and defining class loaders,
@@ -6626,9 +6617,7 @@
 	For the class indicated by <code>klass</code>, return the access
 	flags
 	via <code>modifiers_ptr</code>.
-	Access flags are defined in the 
-        <vmspeclink id="ClassFile.doc.html"
-                    name="Class File Format chapter"/>.
+	Access flags are defined in <vmspec chapter="4"/>.
 	<p/>
 	If the class is an array class, then its public, private, and protected 
 	modifiers are the same as those of its component type. For arrays of 
@@ -6794,9 +6783,8 @@
       <description>
         For the class indicated by <code>klass</code>, 
         return the minor and major version numbers,
-        as defined in the
-        <vmspeclink id="ClassFile.doc.html"
-                        name="Class File Format chapter"/>.
+        as defined in
+        <vmspec chapter="4"/>. 
       </description>
       <origin>new</origin>
       <capabilities>
@@ -6839,10 +6827,8 @@
       <description>
 	For the class indicated by <code>klass</code>, 
         return the raw bytes of the constant pool in the format of the
-        <code>constant_pool</code> item of the 
-        <vmspeclink id="ClassFile.doc.html"
-                    name="Class File Format"
-                    preposition="in"/>.
+        <code>constant_pool</code> item of 
+        <vmspec chapter="4"/>.
         The format of the constant pool may differ between versions
         of the Class File Format, so, the 
         <functionlink id="GetClassVersionNumbers">minor and major 
@@ -7286,9 +7272,7 @@
 	<field id="class_bytes">
 	  <inbuf incount="class_byte_count"><uchar/></inbuf>
 	  <description>
-            Bytes defining class (in the 
-            <vmspeclink id="ClassFile.doc.html"
-                        name="Class File Format"/>)
+            Bytes defining class (in <vmspec chapter="4"/>)
 	  </description>
 	</field>
       </typedef>
@@ -7611,10 +7595,8 @@
 	<paramlink id="signature_ptr"/>.
 	<p/>
         Field signatures are defined in the JNI Specification and 
-        are referred to as 
-        <vmspeclink id="ClassFile.doc.html#14152"
-                    name="field descriptors"
-                    preposition="in"/>.
+        are referred to as <code>field descriptors</code> in
+        <vmspec chapter="4.3.2"/>.
       </description>
       <origin>jvmdiClone</origin>
       <capabilities>
@@ -7709,9 +7691,7 @@
       <description>
 	For the field indicated by <code>klass</code> and <code>field</code>
 	return the access flags via <code>modifiers_ptr</code>.
-	Access flags are defined in the 
-        <vmspeclink id="ClassFile.doc.html"
-                    name="Class File Format chapter"/>.
+	Access flags are defined in <vmspec chapter="4"/>.
       </description>
       <origin>jvmdi</origin>
       <capabilities>
@@ -7810,10 +7790,9 @@
 	return the method name via <code>name_ptr</code> and method signature via
 	<code>signature_ptr</code>.
         <p/>
-        Method signatures are defined in the JNI Specification and are referred to as
-        <vmspeclink id="ClassFile.doc.html#7035"
-                    name="method descriptors"
-                    preposition="in"/>.
+        Method signatures are defined in the JNI Specification and are 
+        referred to as <code>method descriptors</code> in 
+        <vmspec chapter="4.3.3"/>.
 	Note this is different
 	than method signatures as defined in the <i>Java Language Specification</i>.
       </description>
@@ -7902,9 +7881,7 @@
       <description>
 	For the method indicated by <code>method</code>,
 	return the access flags via <code>modifiers_ptr</code>.
-	Access flags are defined in the 
-        <vmspeclink id="ClassFile.doc.html"
-                    name="Class File Format chapter"/>.
+	Access flags are defined in <vmspec chapter="4"/>.
       </description>
       <origin>jvmdi</origin>
       <capabilities>
@@ -7941,9 +7918,7 @@
 	  including the local variables used to pass parameters to the
 	  method on its invocation. 
 	  <p/>
-	  See <code>max_locals</code> in the    
-          <vmspeclink id="ClassFile.doc.html#1546"
-                      name="Code Attribute section"/>.
+	  See <code>max_locals</code> in <vmspec chapter="4.7.3"/>.
       </description>
       <origin>jvmdi</origin>
       <capabilities>
@@ -8150,8 +8125,7 @@
 	    The local variable's type signature, encoded as a
 	    <internallink id="mUTF">modified UTF-8</internallink> string.
 	    The signature format is the same as that defined in
-            <vmspeclink id="ClassFile.doc.html#14152"
-                        name="Field Descriptors section"/>
+	    <vmspec chapter="4.3.2"/>.
 	  </description>
 	</field>
 	<field id="generic_signature">
@@ -10460,10 +10434,7 @@
       <synopsis>Add To Bootstrap Class Loader Search</synopsis>
       <description>
           This function can be used to cause instrumentation classes to be defined by the 
-          bootstrap class loader. See
-          <vmspeclink id="ConstantPool.doc.html#79383"
-                      name="Loading Using the Bootstrap Class Loader"
-                      preposition="in"/>.
+          bootstrap class loader. See <vmspec chapter="5.3.1"/>.
           After the bootstrap
 	  class loader unsuccessfully searches for a class, the specified platform-dependent 
 	  search path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in 
@@ -10480,7 +10451,7 @@
           contain any classes or resources other than those to be defined by the bootstrap
           class loader for the purposes of instrumentation.
           <p/>
-          The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic
+          <vmspec/> specifies that a subsequent attempt to resolve a symbolic
           reference that the Java virtual machine has previously unsuccessfully attempted
           to resolve always fails with the same error that was thrown as a result of the
           initial resolution attempt. Consequently, if the JAR file contains an entry
@@ -10512,10 +10483,7 @@
       <synopsis>Add To System Class Loader Search</synopsis>
       <description>
 	  This function can be used to cause instrumentation classes to be
-	  defined by the system class loader. See
-          <vmspeclink id="ConstantPool.doc.html#79441"
-                      name="Loading Using a User-defined Class Loader"
-                      preposition="in"/>. 
+	  defined by the system class loader. See <vmspec chapter="5.3.2"/>.
 	  After the class loader unsuccessfully searches for a class, the specified platform-dependent search 
 	  path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in the 
 	  <paramlink id="segment"/>. This function may be called multiple times to add multiple segments, the 
@@ -10536,7 +10504,7 @@
 	  which takes a single parameter of type <code>java.lang.String</code>. The method is not required 
 	  to have <code>public</code> access. 
 	  <p/>
-          The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic
+          <vmspec/> specifies that a subsequent attempt to resolve a symbolic
           reference that the Java virtual machine has previously unsuccessfully attempted
           to resolve always fails with the same error that was thrown as a result of the
           initial resolution attempt. Consequently, if the JAR file contains an entry
@@ -11438,7 +11406,7 @@
       at the finest granularity allowed by the VM. A single step event is
       generated whenever a thread reaches a new location. 
       Typically, single step events represent the completion of one VM 
-      instruction as defined in the <vmspeclink/>. However, some implementations 
+      instruction as defined in <vmspec/>. However, some implementations 
       may define locations differently. In any case the 
       <code>method</code> and <code>location</code>
       parameters  uniquely identify the current location and allow
@@ -13841,7 +13809,7 @@
       and can_get_source_debug_extension.
       PopFrame cannot have a native calling method.
       Removed incorrect statement in GetClassloaderClasses 
-      (see http://java.sun.com/docs/books/vmspec/2nd-edition/html/ConstantPool.doc.html#79383).
+      (see <vmspec chapter="4.4"/>).
   </change>
   <change date="24 July 2003" version="v79">
       XML and text fixes.
--- a/src/share/vm/prims/jvmti.xsl	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/jvmti.xsl	Wed Jul 27 17:32:44 2011 -0700
@@ -1039,34 +1039,14 @@
   </a>
 </xsl:template>
 
-<xsl:template match="vmspeclink">
-  <xsl:if test="count(@id)=1">
-    <a>
-      <xsl:attribute name="href">
-        <xsl:text>http://java.sun.com/docs/books/vmspec/2nd-edition/html/</xsl:text>
-        <xsl:value-of select="@id"/>
-      </xsl:attribute>
-      <xsl:value-of select="@name"/>
-    </a>
-    <xsl:text> </xsl:text>
-    <xsl:choose>
-      <xsl:when test="count(@preposition)=1">
-        <xsl:value-of select="@preposition"/>
-      </xsl:when>
-      <xsl:otherwise>
-        <xsl:text>of</xsl:text>
-      </xsl:otherwise>
-    </xsl:choose>
-    <xsl:text> the </xsl:text>
-  </xsl:if>
-  <a>
-    <xsl:attribute name="href">
-      <xsl:text>http://java.sun.com/docs/books/vmspec/</xsl:text>
-    </xsl:attribute>
-    <i>
-      <xsl:text>Java Virtual Machine Specification</xsl:text>
-    </i>
-  </a>
+<xsl:template match="vmspec">
+  <cite>
+    <xsl:text>The Java&#8482; Virtual Machine Specification</xsl:text>
+    <xsl:if test="count(@chapter)=1">
+      <xsl:text>, Chapter </xsl:text> 
+      <xsl:value-of select="@chapter"/>
+    </xsl:if>
+  </cite>
 </xsl:template>
 
 <xsl:template match="internallink">
--- a/src/share/vm/prims/jvmtiEnv.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -525,7 +525,7 @@
     ObjectLocker ol(loader, THREAD);
 
     // need the path as java.lang.String
-    Handle path = java_lang_String::create_from_str(segment, THREAD);
+    Handle path = java_lang_String::create_from_platform_dependent_str(segment, THREAD);
     if (HAS_PENDING_EXCEPTION) {
       CLEAR_PENDING_EXCEPTION;
       return JVMTI_ERROR_INTERNAL;
--- a/src/share/vm/prims/jvmtiExport.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/jvmtiExport.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1804,6 +1804,8 @@
 }
 
 void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) {
+  assert(name != NULL && name[0] != '\0', "sanity check");
+
   JavaThread* thread = JavaThread::current();
   // In theory everyone coming thru here is in_vm but we need to be certain
   // because a callee will do a vm->native transition
--- a/src/share/vm/prims/jvmtiImpl.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/jvmtiImpl.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -38,6 +38,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 #include "runtime/serviceThread.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/vframe.hpp"
@@ -939,10 +940,15 @@
   nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
   return event;
 }
+
 JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
       const char* name, const void* code_begin, const void* code_end) {
   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
-  event._event_data.dynamic_code_generated.name = name;
+  // Need to make a copy of the name since we don't know how long
+  // the event poster will keep it around after we enqueue the
+  // deferred event and return. strdup() failure is handled in
+  // the post() routine below.
+  event._event_data.dynamic_code_generated.name = os::strdup(name);
   event._event_data.dynamic_code_generated.code_begin = code_begin;
   event._event_data.dynamic_code_generated.code_end = code_end;
   return event;
@@ -968,12 +974,19 @@
       nmethodLocker::unlock_nmethod(nm);
       break;
     }
-    case TYPE_DYNAMIC_CODE_GENERATED:
+    case TYPE_DYNAMIC_CODE_GENERATED: {
       JvmtiExport::post_dynamic_code_generated_internal(
-        _event_data.dynamic_code_generated.name,
+        // if strdup failed give the event a default name
+        (_event_data.dynamic_code_generated.name == NULL)
+          ? "unknown_code" : _event_data.dynamic_code_generated.name,
         _event_data.dynamic_code_generated.code_begin,
         _event_data.dynamic_code_generated.code_end);
+      if (_event_data.dynamic_code_generated.name != NULL) {
+        // release our copy
+        os::free((void *)_event_data.dynamic_code_generated.name);
+      }
       break;
+    }
     default:
       ShouldNotReachHere();
   }
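
The jvmtiImpl.cpp hunk makes the deferred dynamic_code_generated event own a copy of its name: the string is duplicated with os::strdup() when the event is created, a NULL copy (duplication failure) is reported as "unknown_code" when the event is posted, and the copy is freed afterwards. A small standalone sketch of the same ownership pattern, using standard C++ allocation instead of the os:: wrappers:

    #include <cstdlib>
    #include <cstring>

    // Minimal strdup substitute so the sketch stays within standard C++.
    static const char* dup_cstr(const char* s) {
      std::size_t n = std::strlen(s) + 1;
      char* p = static_cast<char*>(std::malloc(n));
      if (p != NULL) std::memcpy(p, s, n);
      return p;
    }

    struct DeferredEvent {            // simplified stand-in for the JVMTI deferred event
      const char* name;               // owned copy; NULL if the duplication failed
    };

    DeferredEvent make_event(const char* name) {
      DeferredEvent e;
      e.name = dup_cstr(name);        // copy now: the caller's buffer may not outlive the event
      return e;
    }

    void post_event(DeferredEvent& e) {
      const char* n = (e.name == NULL) ? "unknown_code" : e.name;   // fallback on copy failure
      (void)n;                                                      // stands in for the actual posting call
      if (e.name != NULL) std::free(const_cast<char*>(e.name));     // release our copy
    }
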
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -992,6 +992,9 @@
     }
 
     Rewriter::rewrite(scratch_class, THREAD);
+    if (!HAS_PENDING_EXCEPTION) {
+      Rewriter::relocate_and_link(scratch_class, THREAD);
+    }
     if (HAS_PENDING_EXCEPTION) {
       Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
       CLEAR_PENDING_EXCEPTION;
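
The jvmtiRedefineClasses.cpp hunk splits rewriting of the scratch class into two steps: Rewriter::rewrite() followed, only if no exception is pending, by Rewriter::relocate_and_link(); the existing HAS_PENDING_EXCEPTION block then handles a failure from either step. A condensed restatement of the control flow, using the same names as the hunk (not standalone):

    Rewriter::rewrite(scratch_class, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      Rewriter::relocate_and_link(scratch_class, THREAD);
    }
    if (HAS_PENDING_EXCEPTION) {
      // shared error path for both steps: map the exception to a JVMTI error code
      // ... existing error handling ...
    }
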
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -3158,6 +3158,9 @@
         if (fr->is_entry_frame()) {
           last_entry_frame = fr;
         }
+        if (fr->is_ricochet_frame()) {
+          fr->oops_ricochet_do(blk, vf->register_map());
+        }
       }
 
       vf = vf->sender();
--- a/src/share/vm/prims/methodHandleWalk.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -31,6 +31,11 @@
  * JSR 292 reference implementation: method handle structure analysis
  */
 
+#ifdef PRODUCT
+#define print_method_handle(mh) {}
+#else //PRODUCT
+extern "C" void print_method_handle(oop mh);
+#endif //PRODUCT
 
 // -----------------------------------------------------------------------------
 // MethodHandleChain
@@ -82,10 +87,8 @@
 
 void MethodHandleChain::set_last_method(oop target, TRAPS) {
   _is_last = true;
-  klassOop receiver_limit_oop = NULL;
-  int flags = 0;
-  methodOop m = MethodHandles::decode_method(target, receiver_limit_oop, flags);
-  _last_method = methodHandle(THREAD, m);
+  KlassHandle receiver_limit; int flags = 0;
+  _last_method = MethodHandles::decode_method(target, receiver_limit, flags);
   if ((flags & MethodHandles::_dmf_has_receiver) == 0)
     _last_invoke = Bytecodes::_invokestatic;
   else if ((flags & MethodHandles::_dmf_does_dispatch) == 0)
@@ -138,6 +141,12 @@
 
 void MethodHandleChain::lose(const char* msg, TRAPS) {
   _lose_message = msg;
+#ifdef ASSERT
+  if (Verbose) {
+    tty->print_cr(INTPTR_FORMAT " lose: %s", _method_handle(), msg);
+    print();
+  }
+#endif
   if (!THREAD->is_Java_thread() || ((JavaThread*)THREAD)->thread_state() != _thread_in_vm) {
     // throw a preallocated exception
     THROW_OOP(Universe::virtual_machine_error_instance());
@@ -146,6 +155,155 @@
 }
 
 
+#ifdef ASSERT
+static const char* adapter_ops[] = {
+  "retype_only"  ,
+  "retype_raw"   ,
+  "check_cast"   ,
+  "prim_to_prim" ,
+  "ref_to_prim"  ,
+  "prim_to_ref"  ,
+  "swap_args"    ,
+  "rot_args"     ,
+  "dup_args"     ,
+  "drop_args"    ,
+  "collect_args" ,
+  "spread_args"  ,
+  "fold_args"
+};
+
+static const char* adapter_op_to_string(int op) {
+  if (op >= 0 && op < (int)ARRAY_SIZE(adapter_ops))
+    return adapter_ops[op];
+  return "unknown_op";
+}
+
+void MethodHandleChain::print(oopDesc* m) {
+  HandleMark hm;
+  ResourceMark rm;
+  Handle mh(m);
+  print(mh);
+}
+
+void MethodHandleChain::print(Handle mh) {
+  EXCEPTION_MARK;
+  MethodHandleChain mhc(mh, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    oop ex = THREAD->pending_exception();
+    CLEAR_PENDING_EXCEPTION;
+    ex->print();
+    return;
+  }
+  mhc.print();
+}
+
+
+void MethodHandleChain::print() {
+  EXCEPTION_MARK;
+  print_impl(THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    oop ex = THREAD->pending_exception();
+    CLEAR_PENDING_EXCEPTION;
+    ex->print();
+  }
+}
+
+void MethodHandleChain::print_impl(TRAPS) {
+  ResourceMark rm;
+
+  MethodHandleChain chain(_root, CHECK);
+  for (;;) {
+    tty->print(INTPTR_FORMAT ": ", chain.method_handle()());
+    if (chain.is_bound()) {
+      tty->print("bound: arg_type %s arg_slot %d",
+                 type2name(chain.bound_arg_type()),
+                 chain.bound_arg_slot());
+      oop o = chain.bound_arg_oop();
+      if (o != NULL) {
+        if (o->is_instance()) {
+          tty->print(" instance %s", o->klass()->klass_part()->internal_name());
+        } else {
+          o->print();
+        }
+      }
+    } else if (chain.is_adapter()) {
+      tty->print("adapter: arg_slot %d conversion op %s",
+                 chain.adapter_arg_slot(),
+                 adapter_op_to_string(chain.adapter_conversion_op()));
+      switch (chain.adapter_conversion_op()) {
+        case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY:
+        case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW:
+        case java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST:
+        case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM:
+        case java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM:
+          break;
+
+        case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF: {
+          tty->print(" src_type = %s", type2name(chain.adapter_conversion_src_type()));
+          break;
+        }
+
+        case java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS:
+        case java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS: {
+          int dest_arg_slot = chain.adapter_conversion_vminfo();
+          tty->print(" dest_arg_slot %d type %s", dest_arg_slot, type2name(chain.adapter_conversion_src_type()));
+          break;
+        }
+
+        case java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS:
+        case java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS: {
+          int dup_slots = chain.adapter_conversion_stack_pushes();
+          tty->print(" pushes %d", dup_slots);
+          break;
+        }
+
+        case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS:
+        case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: {
+          int coll_slots = chain.MethodHandle_vmslots();
+          tty->print(" coll_slots %d", coll_slots);
+          break;
+        }
+
+        case java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS: {
+          // Check the required length.
+          int spread_slots = 1 + chain.adapter_conversion_stack_pushes();
+          tty->print(" spread_slots %d", spread_slots);
+          break;
+        }
+
+        default:
+          tty->print_cr("bad adapter conversion");
+          break;
+      }
+    } else {
+      // DMH
+      tty->print("direct: ");
+      chain.last_method_oop()->print_short_name(tty);
+    }
+
+    tty->print(" (");
+    objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(chain.method_type_oop());
+    for (int i = ptypes->length() - 1; i >= 0; i--) {
+      BasicType t = java_lang_Class::as_BasicType(ptypes->obj_at(i));
+      if (t == T_ARRAY) t = T_OBJECT;
+      tty->print("%c", type2char(t));
+      if (t == T_LONG || t == T_DOUBLE) tty->print("_");
+    }
+    tty->print(")");
+    BasicType rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(chain.method_type_oop()));
+    if (rtype == T_ARRAY) rtype = T_OBJECT;
+    tty->print("%c", type2char(rtype));
+    tty->cr();
+    if (!chain.is_last()) {
+      chain.next(CHECK);
+    } else {
+      break;
+    }
+  }
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // MethodHandleWalker
 
@@ -202,14 +360,22 @@
     if (chain().is_adapter()) {
       int conv_op = chain().adapter_conversion_op();
       int arg_slot = chain().adapter_arg_slot();
-      SlotState* arg_state = slot_state(arg_slot);
-      if (arg_state == NULL
-          && conv_op > java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW) {
-        lose("bad argument index", CHECK_(empty));
+
+      // Check that the arg_slot is valid.  In most cases it must be
+      // within range of the current arguments but there are some
+      // exceptions.  Those are sanity checked in their implementation
+      // below.
+      if ((arg_slot < 0 || arg_slot >= _outgoing.length()) &&
+          conv_op > java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW &&
+          conv_op != java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS &&
+          conv_op != java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS) {
+        lose(err_msg("bad argument index %d", arg_slot), CHECK_(empty));
       }
 
+      bool retain_original_args = false;  // used by fold/collect logic
+
       // perform the adapter action
-      switch (chain().adapter_conversion_op()) {
+      switch (conv_op) {
       case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY:
         // No changes to arguments; pass the bits through.
         break;
@@ -218,51 +384,35 @@
         // To keep the verifier happy, emit bitwise ("raw") conversions as needed.
         // See MethodHandles::same_basic_type_for_arguments for allowed conversions.
         Handle incoming_mtype(THREAD, chain().method_type_oop());
-        oop outgoing_mh_oop = chain().vmtarget_oop();
-        if (!java_lang_invoke_MethodHandle::is_instance(outgoing_mh_oop))
-          lose("outgoing target not a MethodHandle", CHECK_(empty));
-        Handle outgoing_mtype(THREAD, java_lang_invoke_MethodHandle::type(outgoing_mh_oop));
-        outgoing_mh_oop = NULL;  // GC safety
+        Handle outgoing_mtype;
+        {
+          oop outgoing_mh_oop = chain().vmtarget_oop();
+          if (!java_lang_invoke_MethodHandle::is_instance(outgoing_mh_oop))
+            lose("outgoing target not a MethodHandle", CHECK_(empty));
+          outgoing_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(outgoing_mh_oop));
+        }
 
         int nptypes = java_lang_invoke_MethodType::ptype_count(outgoing_mtype());
         if (nptypes != java_lang_invoke_MethodType::ptype_count(incoming_mtype()))
           lose("incoming and outgoing parameter count do not agree", CHECK_(empty));
 
+        // Argument types.
         for (int i = 0, slot = _outgoing.length() - 1; slot >= 0; slot--) {
-          SlotState* arg_state = slot_state(slot);
-          if (arg_state->_type == T_VOID)  continue;
-          ArgToken arg = _outgoing.at(slot)._arg;
-
-          klassOop  in_klass  = NULL;
-          klassOop  out_klass = NULL;
-          BasicType inpbt  = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(incoming_mtype(), i), &in_klass);
-          BasicType outpbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(outgoing_mtype(), i), &out_klass);
-          assert(inpbt == arg.basic_type(), "sanity");
+          if (arg_type(slot) == T_VOID)  continue;
 
-          if (inpbt != outpbt) {
-            vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(inpbt, outpbt);
-            if (iid == vmIntrinsics::_none) {
-              lose("no raw conversion method", CHECK_(empty));
-            }
-            ArgToken arglist[2];
-            arglist[0] = arg;         // outgoing 'this'
-            arglist[1] = ArgToken();  // sentinel
-            arg = make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty));
-            change_argument(inpbt, slot, outpbt, arg);
-          }
-
+          klassOop  src_klass = NULL;
+          klassOop  dst_klass = NULL;
+          BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(incoming_mtype(), i), &src_klass);
+          BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(outgoing_mtype(), i), &dst_klass);
+          retype_raw_argument_type(src, dst, slot, CHECK_(empty));
           i++;  // We need to skip void slots at the top of the loop.
         }
 
-        BasicType inrbt  = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(incoming_mtype()));
-        BasicType outrbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(outgoing_mtype()));
-        if (inrbt != outrbt) {
-          if (inrbt == T_INT && outrbt == T_VOID) {
-            // See comments in MethodHandles::same_basic_type_for_arguments.
-          } else {
-            assert(false, "IMPLEMENT ME");
-            lose("no raw conversion method", CHECK_(empty));
-          }
+        // Return type.
+        {
+          BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(incoming_mtype()));
+          BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(outgoing_mtype()));
+          retype_raw_return_type(src, dst, CHECK_(empty));
         }
         break;
       }
@@ -272,10 +422,11 @@
         klassOop dest_klass = NULL;
         BasicType dest = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), &dest_klass);
         assert(dest == T_OBJECT, "");
-        assert(dest == arg_state->_type, "");
-        ArgToken arg = arg_state->_arg;
-        ArgToken new_arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty));
-        assert(arg.index() == new_arg.index(), "should be the same index");
+        ArgToken arg = _outgoing.at(arg_slot);
+        assert(dest == arg.basic_type(), "");
+        arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty));
+        // replace the object by the result of the cast, to make the compiler happy:
+        change_argument(T_OBJECT, arg_slot, T_OBJECT, arg);
         debug_only(dest_klass = (klassOop)badOop);
         break;
       }
@@ -284,8 +435,8 @@
         // i2l, etc., on the Nth outgoing argument in place
         BasicType src = chain().adapter_conversion_src_type(),
                   dest = chain().adapter_conversion_dest_type();
+        ArgToken arg = _outgoing.at(arg_slot);
         Bytecodes::Code bc = conversion_code(src, dest);
-        ArgToken arg = arg_state->_arg;
         if (bc == Bytecodes::_nop) {
           break;
         } else if (bc != Bytecodes::_illegal) {
@@ -299,7 +450,7 @@
           }
         }
         if (bc == Bytecodes::_illegal) {
-          lose("bad primitive conversion", CHECK_(empty));
+          lose(err_msg("bad primitive conversion for %s -> %s", type2name(src), type2name(dest)), CHECK_(empty));
         }
         change_argument(src, arg_slot, dest, arg);
         break;
@@ -308,7 +459,7 @@
       case java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM: {
         // checkcast to wrapper type & call intValue, etc.
         BasicType dest = chain().adapter_conversion_dest_type();
-        ArgToken arg = arg_state->_arg;
+        ArgToken arg = _outgoing.at(arg_slot);
         arg = make_conversion(T_OBJECT, SystemDictionary::box_klass(dest),
                               Bytecodes::_checkcast, arg, CHECK_(empty));
         vmIntrinsics::ID unboxer = vmIntrinsics::for_unboxing(dest);
@@ -318,7 +469,7 @@
         ArgToken arglist[2];
         arglist[0] = arg;         // outgoing 'this'
         arglist[1] = ArgToken();  // sentinel
-        arg = make_invoke(NULL, unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty));
+        arg = make_invoke(methodHandle(), unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty));
         change_argument(T_OBJECT, arg_slot, dest, arg);
         break;
       }
@@ -326,55 +477,63 @@
       case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF: {
         // call wrapper type.valueOf
         BasicType src = chain().adapter_conversion_src_type();
-        ArgToken arg = arg_state->_arg;
         vmIntrinsics::ID boxer = vmIntrinsics::for_boxing(src);
         if (boxer == vmIntrinsics::_none) {
           lose("no boxing method", CHECK_(empty));
         }
+        ArgToken arg = _outgoing.at(arg_slot);
         ArgToken arglist[2];
         arglist[0] = arg;         // outgoing value
         arglist[1] = ArgToken();  // sentinel
-        arg = make_invoke(NULL, boxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty));
+        arg = make_invoke(methodHandle(), boxer, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty));
         change_argument(src, arg_slot, T_OBJECT, arg);
         break;
       }
 
       case java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS: {
         int dest_arg_slot = chain().adapter_conversion_vminfo();
-        if (!slot_has_argument(dest_arg_slot)) {
+        if (!has_argument(dest_arg_slot)) {
           lose("bad swap index", CHECK_(empty));
         }
         // a simple swap between two arguments
-        SlotState* dest_arg_state = slot_state(dest_arg_slot);
-        SlotState temp = (*dest_arg_state);
-        (*dest_arg_state) = (*arg_state);
-        (*arg_state) = temp;
+        if (arg_slot > dest_arg_slot) {
+          int tmp = arg_slot;
+          arg_slot = dest_arg_slot;
+          dest_arg_slot = tmp;
+        }
+        ArgToken a1 = _outgoing.at(arg_slot);
+        ArgToken a2 = _outgoing.at(dest_arg_slot);
+        change_argument(a2.basic_type(), dest_arg_slot, a1);
+        change_argument(a1.basic_type(), arg_slot, a2);
         break;
       }
 
       case java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS: {
-        int dest_arg_slot = chain().adapter_conversion_vminfo();
-        if (!slot_has_argument(dest_arg_slot) || arg_slot == dest_arg_slot) {
+        int limit_raw  = chain().adapter_conversion_vminfo();
+        bool rot_down  = (arg_slot < limit_raw);
+        int limit_bias = (rot_down ? MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS : 0);
+        int limit_slot = limit_raw - limit_bias;
+        if ((uint)limit_slot > (uint)_outgoing.length()) {
           lose("bad rotate index", CHECK_(empty));
         }
-        SlotState* dest_arg_state = slot_state(dest_arg_slot);
         // Rotate the source argument (plus following N slots) into the
         // position occupied by the dest argument (plus following N slots).
-        int rotate_count = type2size[dest_arg_state->_type];
+        int rotate_count = type2size[chain().adapter_conversion_src_type()];
         // (no other rotate counts are currently supported)
-        if (arg_slot < dest_arg_slot) {
+        if (rot_down) {
           for (int i = 0; i < rotate_count; i++) {
-            SlotState temp = _outgoing.at(arg_slot);
+            ArgToken temp = _outgoing.at(arg_slot);
             _outgoing.remove_at(arg_slot);
-            _outgoing.insert_before(dest_arg_slot + rotate_count - 1, temp);
+            _outgoing.insert_before(limit_slot - 1, temp);
           }
-        } else { // arg_slot > dest_arg_slot
+        } else { // arg_slot > limit_slot => rotate_up
           for (int i = 0; i < rotate_count; i++) {
-            SlotState temp = _outgoing.at(arg_slot + rotate_count - 1);
+            ArgToken temp = _outgoing.at(arg_slot + rotate_count - 1);
             _outgoing.remove_at(arg_slot + rotate_count - 1);
-            _outgoing.insert_before(dest_arg_slot, temp);
+            _outgoing.insert_before(limit_slot, temp);
           }
         }
+        assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
         break;
       }
 
@@ -384,11 +543,11 @@
           lose("bad dup count", CHECK_(empty));
         }
         for (int i = 0; i < dup_slots; i++) {
-          SlotState* dup = slot_state(arg_slot + 2*i);
-          if (dup == NULL)              break;  // safety net
-          if (dup->_type != T_VOID)     _outgoing_argc += 1;
-          _outgoing.insert_before(i, (*dup));
+          ArgToken dup = _outgoing.at(arg_slot + 2*i);
+          if (dup.basic_type() != T_VOID)     _outgoing_argc += 1;
+          _outgoing.insert_before(i, dup);
         }
+        assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
         break;
       }
 
@@ -398,16 +557,66 @@
           lose("bad drop count", CHECK_(empty));
         }
         for (int i = 0; i < drop_slots; i++) {
-          SlotState* drop = slot_state(arg_slot);
-          if (drop == NULL)             break;  // safety net
-          if (drop->_type != T_VOID)    _outgoing_argc -= 1;
+          ArgToken drop = _outgoing.at(arg_slot);
+          if (drop.basic_type() != T_VOID)    _outgoing_argc -= 1;
           _outgoing.remove_at(arg_slot);
         }
+        assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
         break;
       }
 
-      case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { //NYI, may GC
-        lose("unimplemented", CHECK_(empty));
+      case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS:
+        retain_original_args = true;   // and fall through:
+      case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: {
+        // call argument MH recursively
+        //{static int x; if (!x++) print_method_handle(chain().method_handle_oop()); --x;}
+        Handle recursive_mh(THREAD, chain().adapter_arg_oop());
+        if (!java_lang_invoke_MethodHandle::is_instance(recursive_mh())) {
+          lose("recursive target not a MethodHandle", CHECK_(empty));
+        }
+        Handle recursive_mtype(THREAD, java_lang_invoke_MethodHandle::type(recursive_mh()));
+        int argc = java_lang_invoke_MethodType::ptype_count(recursive_mtype());
+        int coll_slots = java_lang_invoke_MethodHandle::vmslots(recursive_mh());
+        BasicType rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(recursive_mtype()));
+        ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, 1 + argc + 1);  // 1+: mh, +1: sentinel
+        arglist[0] = make_oop_constant(recursive_mh(), CHECK_(empty));
+        if (arg_slot < 0 || coll_slots < 0 || arg_slot + coll_slots > _outgoing.length()) {
+          lose("bad fold/collect arg slot", CHECK_(empty));
+        }
+        for (int i = 0, slot = arg_slot + coll_slots - 1; slot >= arg_slot; slot--) {
+          ArgToken arg_state = _outgoing.at(slot);
+          BasicType  arg_type  = arg_state.basic_type();
+          if (arg_type == T_VOID)  continue;
+          ArgToken arg = _outgoing.at(slot);
+          if (i >= argc) { lose("bad fold/collect arg", CHECK_(empty)); }
+          arglist[1+i] = arg;
+          if (!retain_original_args)
+            change_argument(arg_type, slot, T_VOID, ArgToken(tt_void));
+          i++;
+        }
+        arglist[1+argc] = ArgToken();  // sentinel
+        oop invoker = java_lang_invoke_MethodTypeForm::vmlayout(
+                          java_lang_invoke_MethodType::form(recursive_mtype()) );
+        if (invoker == NULL || !invoker->is_method()) {
+          lose("bad vmlayout slot", CHECK_(empty));
+        }
+        // FIXME: consider inlining the invokee at the bytecode level
+        ArgToken ret = make_invoke(methodHandle(THREAD, methodOop(invoker)), vmIntrinsics::_invokeGeneric,
+                                   Bytecodes::_invokevirtual, false, 1+argc, &arglist[0], CHECK_(empty));
+        // The iid = _invokeGeneric really means to adjust reference types as needed.
+        DEBUG_ONLY(invoker = NULL);
+        if (rtype == T_OBJECT) {
+          klassOop rklass = java_lang_Class::as_klassOop( java_lang_invoke_MethodType::rtype(recursive_mtype()) );
+          if (rklass != SystemDictionary::Object_klass() &&
+              !Klass::cast(rklass)->is_interface()) {
+            // preserve type safety
+            ret = make_conversion(T_OBJECT, rklass, Bytecodes::_checkcast, ret, CHECK_(empty));
+          }
+        }
+        if (rtype != T_VOID) {
+          int ret_slot = arg_slot + (retain_original_args ? coll_slots : 0);
+          change_argument(T_VOID, ret_slot, rtype, ret);
+        }
         break;
       }
 
@@ -427,8 +636,9 @@
         debug_only(element_klass_oop = (klassOop)badOop);
 
         // Fetch the argument, which we will cast to the required array type.
-        assert(arg_state->_type == T_OBJECT, "");
-        ArgToken array_arg = arg_state->_arg;
+        ArgToken arg = _outgoing.at(arg_slot);
+        assert(arg.basic_type() == T_OBJECT, "");
+        ArgToken array_arg = arg;
         array_arg = make_conversion(T_OBJECT, array_klass(), Bytecodes::_checkcast, array_arg, CHECK_(empty));
         change_argument(T_OBJECT, arg_slot, T_VOID, ArgToken(tt_void));
 
@@ -450,13 +660,22 @@
         arglist[0] = array_arg;   // value to check
         arglist[1] = length_arg;  // length to check
         arglist[2] = ArgToken();  // sentinel
-        make_invoke(NULL, vmIntrinsics::_checkSpreadArgument,
-                    Bytecodes::_invokestatic, false, 3, &arglist[0], CHECK_(empty));
+        make_invoke(methodHandle(), vmIntrinsics::_checkSpreadArgument,
+                    Bytecodes::_invokestatic, false, 2, &arglist[0], CHECK_(empty));
 
         // Spread out the array elements.
-        Bytecodes::Code aload_op = Bytecodes::_aaload;
-        if (element_type != T_OBJECT) {
-          lose("primitive array NYI", CHECK_(empty));
+        Bytecodes::Code aload_op = Bytecodes::_nop;
+        switch (element_type) {
+        case T_INT:       aload_op = Bytecodes::_iaload; break;
+        case T_LONG:      aload_op = Bytecodes::_laload; break;
+        case T_FLOAT:     aload_op = Bytecodes::_faload; break;
+        case T_DOUBLE:    aload_op = Bytecodes::_daload; break;
+        case T_OBJECT:    aload_op = Bytecodes::_aaload; break;
+        case T_BOOLEAN:   // fall through:
+        case T_BYTE:      aload_op = Bytecodes::_baload; break;
+        case T_CHAR:      aload_op = Bytecodes::_caload; break;
+        case T_SHORT:     aload_op = Bytecodes::_saload; break;
+        default:          lose("primitive array NYI", CHECK_(empty));
         }
         int ap = arg_slot;
         for (int i = 0; i < spread_length; i++) {
@@ -464,16 +683,11 @@
           ArgToken offset_arg = make_prim_constant(T_INT, &offset_jvalue, CHECK_(empty));
           ArgToken element_arg = make_fetch(element_type, element_klass(), aload_op, array_arg, offset_arg, CHECK_(empty));
           change_argument(T_VOID, ap, element_type, element_arg);
-          ap += type2size[element_type];
+          //ap += type2size[element_type];  // don't do this; insert next arg to *right* of previous
         }
         break;
       }
 
-      case java_lang_invoke_AdapterMethodHandle::OP_FLYBY: //NYI, runs Java code
-      case java_lang_invoke_AdapterMethodHandle::OP_RICOCHET: //NYI, runs Java code
-        lose("unimplemented", CHECK_(empty));
-        break;
-
       default:
         lose("bad adapter conversion", CHECK_(empty));
         break;
@@ -491,13 +705,13 @@
       } else {
         jvalue arg_value;
         BasicType bt = java_lang_boxing_object::get_value(arg_oop, &arg_value);
-        if (bt == arg_type) {
+        if (bt == arg_type || (bt == T_INT && is_subword_type(arg_type))) {
           arg = make_prim_constant(arg_type, &arg_value, CHECK_(empty));
         } else {
-          lose("bad bound value", CHECK_(empty));
+          lose(err_msg("bad bound value: arg_type %s boxing %s", type2name(arg_type), type2name(bt)), CHECK_(empty));
         }
       }
-      debug_only(arg_oop = badOop);
+      DEBUG_ONLY(arg_oop = badOop);
       change_argument(T_VOID, arg_slot, arg_type, arg);
     }
 
@@ -514,13 +728,13 @@
   ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, _outgoing.length() + 1);
   int ap = 0;
   for (int i = _outgoing.length() - 1; i >= 0; i--) {
-    SlotState* arg_state = slot_state(i);
-    if (arg_state->_type == T_VOID)  continue;
-    arglist[ap++] = _outgoing.at(i)._arg;
+    ArgToken arg_state = _outgoing.at(i);
+    if (arg_state.basic_type() == T_VOID)  continue;
+    arglist[ap++] = _outgoing.at(i);
   }
   assert(ap == _outgoing_argc, "");
   arglist[ap] = ArgToken();  // add a sentinel, for the sake of asserts
-  return make_invoke(chain().last_method_oop(),
+  return make_invoke(chain().last_method(),
                      vmIntrinsics::_none,
                      chain().last_invoke_code(), true,
                      ap, arglist, THREAD);
@@ -536,60 +750,81 @@
   _outgoing_argc = nptypes;
   int argp = nptypes - 1;
   if (argp >= 0) {
-    _outgoing.at_grow(argp, make_state(T_VOID, ArgToken(tt_void))); // presize
+    _outgoing.at_grow(argp, ArgToken(tt_void)); // presize
   }
   for (int i = 0; i < nptypes; i++) {
     klassOop  arg_type_klass = NULL;
-    BasicType arg_type = java_lang_Class::as_BasicType(
-                java_lang_invoke_MethodType::ptype(mtype(), i), &arg_type_klass);
+    BasicType arg_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(mtype(), i), &arg_type_klass);
     int index = new_local_index(arg_type);
     ArgToken arg = make_parameter(arg_type, arg_type_klass, index, CHECK);
-    debug_only(arg_type_klass = (klassOop) NULL);
-    _outgoing.at_put(argp, make_state(arg_type, arg));
+    DEBUG_ONLY(arg_type_klass = (klassOop) NULL);
+    _outgoing.at_put(argp, arg);
     if (type2size[arg_type] == 2) {
       // add the extra slot, so we can model the JVM stack
-      _outgoing.insert_before(argp+1, make_state(T_VOID, ArgToken(tt_void)));
+      _outgoing.insert_before(argp+1, ArgToken(tt_void));
     }
     --argp;
   }
   // call make_parameter at the end of the list for the return type
   klassOop  ret_type_klass = NULL;
-  BasicType ret_type = java_lang_Class::as_BasicType(
-              java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass);
+  BasicType ret_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass);
   ArgToken  ret = make_parameter(ret_type, ret_type_klass, -1, CHECK);
   // ignore ret; client can catch it if needed
+
+  assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
+
+  verify_args_and_signature(CHECK);
 }
 
 
+#ifdef ASSERT
+void MethodHandleWalker::verify_args_and_signature(TRAPS) {
+  int index = _outgoing.length() - 1;
+  objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(chain().method_type_oop());
+  for (int i = 0, limit = ptypes->length(); i < limit; i++) {
+    BasicType t = java_lang_Class::as_BasicType(ptypes->obj_at(i));
+    if (t == T_ARRAY) t = T_OBJECT;
+    if (t == T_LONG || t == T_DOUBLE) {
+      assert(T_VOID == _outgoing.at(index).basic_type(), "types must match");
+      index--;
+    }
+    assert(t == _outgoing.at(index).basic_type(), "types must match");
+    index--;
+  }
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // MethodHandleWalker::change_argument
 //
 // This is messy because some kinds of arguments are paired with
 // companion slots containing an empty value.
-void MethodHandleWalker::change_argument(BasicType old_type, int slot, BasicType new_type,
-                                         const ArgToken& new_arg) {
+void MethodHandleWalker::change_argument(BasicType old_type, int slot, const ArgToken& new_arg) {
+  BasicType new_type = new_arg.basic_type();
   int old_size = type2size[old_type];
   int new_size = type2size[new_type];
   if (old_size == new_size) {
     // simple case first
-    _outgoing.at_put(slot, make_state(new_type, new_arg));
+    _outgoing.at_put(slot, new_arg);
   } else if (old_size > new_size) {
     for (int i = old_size - 1; i >= new_size; i--) {
-      assert((i != 0) == (_outgoing.at(slot + i)._type == T_VOID), "");
+      assert((i != 0) == (_outgoing.at(slot + i).basic_type() == T_VOID), "");
       _outgoing.remove_at(slot + i);
     }
     if (new_size > 0)
-      _outgoing.at_put(slot, make_state(new_type, new_arg));
+      _outgoing.at_put(slot, new_arg);
     else
       _outgoing_argc -= 1;      // deleted a real argument
   } else {
     for (int i = old_size; i < new_size; i++) {
-      _outgoing.insert_before(slot + i, make_state(T_VOID, ArgToken(tt_void)));
+      _outgoing.insert_before(slot + i, ArgToken(tt_void));
     }
-    _outgoing.at_put(slot, make_state(new_type, new_arg));
+    _outgoing.at_put(slot, new_arg);
     if (old_size == 0)
       _outgoing_argc += 1;      // inserted a real argument
   }
+  assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
 }
 
 
@@ -597,8 +832,15 @@
 int MethodHandleWalker::argument_count_slow() {
   int args_seen = 0;
   for (int i = _outgoing.length() - 1; i >= 0; i--) {
-    if (_outgoing.at(i)._type != T_VOID) {
+    if (_outgoing.at(i).basic_type() != T_VOID) {
       ++args_seen;
+      if (_outgoing.at(i).basic_type() == T_LONG ||
+          _outgoing.at(i).basic_type() == T_DOUBLE) {
+        assert(_outgoing.at(i + 1).basic_type() == T_VOID, "two-word value should be followed by an empty slot");
+      }
+    } else {
+      assert(_outgoing.at(i - 1).basic_type() == T_LONG ||
+             _outgoing.at(i - 1).basic_type() == T_DOUBLE, "empty slot should only follow a two-word value");
     }
   }
   return args_seen;
@@ -607,14 +849,62 @@
 
 
 // -----------------------------------------------------------------------------
+// MethodHandleWalker::retype_raw_conversion
+//
+// Do the raw retype conversions for OP_RETYPE_RAW.
+void MethodHandleWalker::retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS) {
+  if (src != dst) {
+    if (MethodHandles::same_basic_type_for_returns(src, dst, /*raw*/ true)) {
+      if (MethodHandles::is_float_fixed_reinterpretation_cast(src, dst)) {
+        vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(src, dst);
+        if (iid == vmIntrinsics::_none) {
+          lose("no raw conversion method", CHECK);
+        }
+        ArgToken arglist[2];
+        if (!for_return) {
+          // argument type conversion
+          ArgToken arg = _outgoing.at(slot);
+          assert(arg.token_type() >= tt_symbolic || src == arg.basic_type(), "sanity");
+          arglist[0] = arg;         // outgoing 'this'
+          arglist[1] = ArgToken();  // sentinel
+          arg = make_invoke(methodHandle(), iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK);
+          change_argument(src, slot, dst, arg);
+        } else {
+          // return type conversion
+          if (_return_conv == vmIntrinsics::_none) {
+            _return_conv = iid;
+          } else if (_return_conv == vmIntrinsics::for_raw_conversion(dst, src)) {
+            _return_conv = vmIntrinsics::_none;
+          } else if (_return_conv != zero_return_conv()) {
+            lose(err_msg("requested raw return conversion not allowed: %s -> %s (before %s)", type2name(src), type2name(dst), vmIntrinsics::name_at(_return_conv)), CHECK);
+          }
+        }
+      } else {
+        // Nothing to do.
+      }
+    } else if (for_return && (!is_subword_type(src) || !is_subword_type(dst))) {
+      // This can occur in exception-throwing MHs, which have a fictitious return value encoded as Void or Empty.
+      _return_conv = zero_return_conv();
+    } else if (src == T_OBJECT && is_java_primitive(dst)) {
+      // ref-to-prim: discard ref, push zero
+      lose("requested ref-to-prim conversion not expected", CHECK);
+    } else {
+      lose(err_msg("requested raw conversion not allowed: %s -> %s", type2name(src), type2name(dst)), CHECK);
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
 // MethodHandleCompiler
 
-MethodHandleCompiler::MethodHandleCompiler(Handle root, methodHandle callee, bool is_invokedynamic, TRAPS)
+MethodHandleCompiler::MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool is_invokedynamic, TRAPS)
   : MethodHandleWalker(root, is_invokedynamic, THREAD),
-    _callee(callee),
+    _invoke_count(invoke_count),
     _thread(THREAD),
     _bytecode(THREAD, 50),
     _constants(THREAD, 10),
+    _non_bcp_klasses(THREAD, 5),
     _cur_stack(0),
     _max_stack(0),
     _rtype(T_ILLEGAL)
@@ -624,8 +914,17 @@
   (void) _constants.append(NULL);
 
   // Set name and signature index.
-  _name_index      = cpool_symbol_put(_callee->name());
-  _signature_index = cpool_symbol_put(_callee->signature());
+  _name_index      = cpool_symbol_put(name);
+  _signature_index = cpool_symbol_put(signature);
+
+  // To make the resulting methods more recognizable by
+  // stack walkers and compiler heuristics,
+  // we put them in holder class MethodHandle.
+  // See klass_is_method_handle_adapter_holder
+  // and methodOopDesc::is_method_handle_adapter.
+  _target_klass = SystemDictionaryHandles::MethodHandle_klass();
+
+  check_non_bcp_klasses(java_lang_invoke_MethodHandle::type(root()), CHECK);
 
   // Get return type klass.
   Handle first_mtype(THREAD, chain().method_type_oop());
@@ -633,7 +932,8 @@
   _rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(first_mtype()), &_rklass);
   if (_rtype == T_ARRAY)  _rtype = T_OBJECT;
 
-  int params = _callee->size_of_parameters();  // Incoming arguments plus receiver.
+  ArgumentSizeComputer args(signature);
+  int params = args.size() + 1;  // Incoming arguments plus receiver.
   _num_params = for_invokedynamic() ? params - 1 : params;  // XXX Check if callee is static?
 }
 
@@ -647,11 +947,12 @@
   assert(_thread == THREAD, "must be same thread");
   methodHandle nullHandle;
   (void) walk(CHECK_(nullHandle));
+  record_non_bcp_klasses();
   return get_method_oop(CHECK_(nullHandle));
 }
 
 
-void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
+void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index, int args_size) {
   Bytecodes::check(op);  // Are we legal?
 
   switch (op) {
@@ -711,6 +1012,7 @@
   case Bytecodes::_astore_1:
   case Bytecodes::_astore_2:
   case Bytecodes::_astore_3:
+  case Bytecodes::_iand:
   case Bytecodes::_i2l:
   case Bytecodes::_i2f:
   case Bytecodes::_i2d:
@@ -726,6 +1028,14 @@
   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
   case Bytecodes::_d2f:
+  case Bytecodes::_iaload:
+  case Bytecodes::_laload:
+  case Bytecodes::_faload:
+  case Bytecodes::_daload:
+  case Bytecodes::_aaload:
+  case Bytecodes::_baload:
+  case Bytecodes::_caload:
+  case Bytecodes::_saload:
   case Bytecodes::_ireturn:
   case Bytecodes::_lreturn:
   case Bytecodes::_freturn:
@@ -739,9 +1049,14 @@
   // bi
   case Bytecodes::_ldc:
     assert(Bytecodes::format_bits(op, false) == (Bytecodes::_fmt_b|Bytecodes::_fmt_has_k), "wrong bytecode format");
-    assert((char) index == index, "index does not fit in 8-bit");
-    _bytecode.push(op);
-    _bytecode.push(index);
+    if (index == (index & 0xff)) {
+      _bytecode.push(op);
+      _bytecode.push(index);
+    } else {
+      _bytecode.push(Bytecodes::_ldc_w);
+      _bytecode.push(index >> 8);
+      _bytecode.push(index);
+    }
     break;
 
   case Bytecodes::_iload:
@@ -755,9 +1070,16 @@
   case Bytecodes::_dstore:
   case Bytecodes::_astore:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format");
-    assert((char) index == index, "index does not fit in 8-bit");
-    _bytecode.push(op);
-    _bytecode.push(index);
+    if (index == (index & 0xff)) {
+      _bytecode.push(op);
+      _bytecode.push(index);
+    } else {
+      // doesn't fit in a u1; use the wide form
+      _bytecode.push(Bytecodes::_wide);
+      _bytecode.push(op);
+      _bytecode.push(index >> 8);
+      _bytecode.push(index);
+    }
     break;
 
   // bkk
@@ -765,7 +1087,7 @@
   case Bytecodes::_ldc2_w:
   case Bytecodes::_checkcast:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format");
-    assert((short) index == index, "index does not fit in 16-bit");
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
     _bytecode.push(op);
     _bytecode.push(index >> 8);
     _bytecode.push(index);
@@ -776,12 +1098,23 @@
   case Bytecodes::_invokespecial:
   case Bytecodes::_invokevirtual:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
-    assert((short) index == index, "index does not fit in 16-bit");
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
     _bytecode.push(op);
     _bytecode.push(index >> 8);
     _bytecode.push(index);
     break;
 
+  case Bytecodes::_invokeinterface:
+    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
+    assert(args_size > 0, "valid args_size");
+    _bytecode.push(op);
+    _bytecode.push(index >> 8);
+    _bytecode.push(index);
+    _bytecode.push(args_size);
+    _bytecode.push(0);
+    break;
+
   default:
     ShouldNotReachHere();
   }
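
For reference, a minimal standalone sketch (not part of this changeset) of the index-width selection the emit_bc hunk above performs: a constant-pool index that fits in one unsigned byte uses the short ldc form, larger indexes fall back to ldc_w, and local-variable loads/stores similarly switch to a wide-prefixed form. The function name emit_ldc and the raw opcode values below are illustrative only.

    #include <cstdint>
    #include <vector>

    // Illustrative only: choose ldc vs ldc_w depending on whether the
    // constant-pool index fits in a single unsigned byte, mirroring the
    // branch in the hunk above.
    static void emit_ldc(std::vector<uint8_t>& bc, int index) {
      if (index == (index & 0xff)) {
        bc.push_back(0x12);                    // ldc
        bc.push_back((uint8_t)index);
      } else {
        bc.push_back(0x13);                    // ldc_w
        bc.push_back((uint8_t)(index >> 8));   // high byte first (big-endian)
        bc.push_back((uint8_t)index);
      }
    }

    int main() {
      std::vector<uint8_t> bc;
      emit_ldc(bc, 5);    // 2 bytes: ldc 5
      emit_ldc(bc, 300);  // 3 bytes: ldc_w 0x01 0x2c
      return bc.size() == 5 ? 0 : 1;
    }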
@@ -847,6 +1180,7 @@
 
 void MethodHandleCompiler::emit_load_constant(ArgToken arg) {
   BasicType bt = arg.basic_type();
+  if (is_subword_type(bt)) bt = T_INT;
   switch (bt) {
   case T_INT: {
     jint value = arg.get_jint();
@@ -882,10 +1216,18 @@
   }
   case T_OBJECT: {
     Handle value = arg.object();
-    if (value.is_null())
+    if (value.is_null()) {
       emit_bc(Bytecodes::_aconst_null);
-    else
-      emit_bc(Bytecodes::_ldc, cpool_object_put(value));
+      break;
+    }
+    if (java_lang_Class::is_instance(value())) {
+      klassOop k = java_lang_Class::as_klassOop(value());
+      if (k != NULL) {
+        emit_bc(Bytecodes::_ldc, cpool_klass_put(k));
+        break;
+      }
+    }
+    emit_bc(Bytecodes::_ldc, cpool_object_put(value));
     break;
   }
   default:
@@ -900,7 +1242,8 @@
                                       const ArgToken& src, TRAPS) {
 
   BasicType srctype = src.basic_type();
-  int index = src.index();
+  TokenType tt = src.token_type();
+  int index = -1;
 
   switch (op) {
   case Bytecodes::_i2l:
@@ -921,23 +1264,46 @@
   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
   case Bytecodes::_d2f:
-    emit_load(srctype, index);
+    if (tt == tt_constant) {
+      emit_load_constant(src);
+    } else {
+      emit_load(srctype, src.index());
+    }
     stack_pop(srctype);  // pop the src type
     emit_bc(op);
     stack_push(type);    // push the dest value
-    if (srctype != type)
+    if (tt != tt_constant)
+      index = src.index();
+    if (srctype != type || index == -1)
       index = new_local_index(type);
     emit_store(type, index);
     break;
 
   case Bytecodes::_checkcast:
-    emit_load(srctype, index);
+    if (tt == tt_constant) {
+      emit_load_constant(src);
+    } else {
+      emit_load(srctype, src.index());
+      index = src.index();
+    }
     emit_bc(op, cpool_klass_put(tk));
+    check_non_bcp_klass(tk, CHECK_(src));
+    // Allocate a new local for the type so that we don't hide the
+    // previous type from the verifier.
+    index = new_local_index(type);
     emit_store(srctype, index);
     break;
 
+  case Bytecodes::_nop:
+    // nothing to do
+    return src;
+
   default:
-    ShouldNotReachHere();
+    if (op == Bytecodes::_illegal)
+      lose(err_msg("no such primitive conversion: %s -> %s", type2name(src.basic_type()), type2name(type)), THREAD);
+    else
+      lose(err_msg("bad primitive conversion op: %s", Bytecodes::name(op)), THREAD);
+    return make_prim_constant(type, &zero_jvalue, THREAD);
   }
 
   return make_parameter(type, tk, index, THREAD);
@@ -948,34 +1314,69 @@
 // MethodHandleCompiler
 //
 
-static jvalue zero_jvalue;
+// Values used by the compiler.
+jvalue MethodHandleCompiler::zero_jvalue = { 0 };
+jvalue MethodHandleCompiler::one_jvalue  = { 1 };
 
 // Emit bytecodes for the given invoke instruction.
 MethodHandleWalker::ArgToken
-MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid,
+MethodHandleCompiler::make_invoke(methodHandle m, vmIntrinsics::ID iid,
                                   Bytecodes::Code op, bool tailcall,
                                   int argc, MethodHandleWalker::ArgToken* argv,
                                   TRAPS) {
-  if (m == NULL) {
+  ArgToken zero;
+  if (m.is_null()) {
     // Get the intrinsic methodOop.
-    m = vmIntrinsics::method_for(iid);
-    if (m == NULL) {
-      ArgToken zero;
+    m = methodHandle(THREAD, vmIntrinsics::method_for(iid));
+    if (m.is_null()) {
       lose(vmIntrinsics::name_at(iid), CHECK_(zero));
     }
   }
 
-  klassOop  klass   = m->method_holder();
-  Symbol* name      = m->name();
-  Symbol* signature = m->signature();
+  klassOop klass     = m->method_holder();
+  Symbol*  name      = m->name();
+  Symbol*  signature = m->signature();
 
-  if (tailcall) {
-    // Actually, in order to make these methods more recognizable,
-    // let's put them in holder class MethodHandle.  That way stack
-    // walkers and compiler heuristics can recognize them.
-    _target_klass = SystemDictionary::MethodHandle_klass();
+  if (iid == vmIntrinsics::_invokeGeneric &&
+      argc >= 1 && argv[0].token_type() == tt_constant) {
+    assert(m->intrinsic_id() == vmIntrinsics::_invokeExact, "");
+    Handle receiver = argv[0].object();
+    Handle rtype(THREAD, java_lang_invoke_MethodHandle::type(receiver()));
+    Handle mtype(THREAD, m->method_handle_type());
+    if (rtype() != mtype()) {
+      assert(java_lang_invoke_MethodType::form(rtype()) ==
+             java_lang_invoke_MethodType::form(mtype()),
+             "must be the same shape");
+      // customize m to the exact required rtype
+      bool has_non_bcp_klass = check_non_bcp_klasses(rtype(), CHECK_(zero));
+      TempNewSymbol sig2 = java_lang_invoke_MethodType::as_signature(rtype(), true, CHECK_(zero));
+      methodHandle m2;
+      if (!has_non_bcp_klass) {
+        methodOop m2_oop = SystemDictionary::find_method_handle_invoke(m->name(), sig2,
+                                                                       KlassHandle(), CHECK_(zero));
+        m2 = methodHandle(THREAD, m2_oop);
+      }
+      if (m2.is_null()) {
+        // just build it fresh
+        m2 = methodOopDesc::make_invoke_method(klass, m->name(), sig2, rtype, CHECK_(zero));
+        if (m2.is_null())
+          lose(err_msg("no customized invoker %s", sig2->as_utf8()), CHECK_(zero));
+      }
+      m = m2;
+      signature = m->signature();
+    }
   }
 
+  check_non_bcp_klass(klass, CHECK_(zero));
+  if (m->is_method_handle_invoke()) {
+    check_non_bcp_klasses(m->method_handle_type(), CHECK_(zero));
+  }
+
+  // Count the number of arguments, not the size
+  ArgumentCount asc(signature);
+  assert(argc == asc.size() + ((op == Bytecodes::_invokestatic || op == Bytecodes::_invokedynamic) ? 0 : 1),
+         "argc mismatch");
+
   // Inline the method.
   InvocationCounter* ic = m->invocation_counter();
   ic->set_carry_flag();
@@ -1008,7 +1409,7 @@
   int signature_index     = cpool_symbol_put(signature);
   int name_and_type_index = cpool_name_and_type_put(name_index, signature_index);
   int klass_index         = cpool_klass_put(klass);
-  int methodref_index     = cpool_methodref_put(klass_index, name_and_type_index);
+  int methodref_index     = cpool_methodref_put(op, klass_index, name_and_type_index, m);
 
   // Generate invoke.
   switch (op) {
@@ -1017,9 +1418,13 @@
   case Bytecodes::_invokevirtual:
     emit_bc(op, methodref_index);
     break;
-  case Bytecodes::_invokeinterface:
-    Unimplemented();
+
+  case Bytecodes::_invokeinterface: {
+    ArgumentSizeComputer asc(signature);
+    emit_bc(op, methodref_index, asc.size() + 1);
     break;
+  }
+
   default:
     ShouldNotReachHere();
   }
@@ -1028,12 +1433,26 @@
   // Otherwise, make a recursive call to some helper routine.
   BasicType rbt = m->result_type();
   if (rbt == T_ARRAY)  rbt = T_OBJECT;
+  stack_push(rbt);  // The return value is already pushed onto the stack.
   ArgToken ret;
   if (tailcall) {
+    if (return_conv() == zero_return_conv()) {
+      rbt = T_VOID;  // discard value
+    } else if (return_conv() != vmIntrinsics::_none) {
+      // return value conversion
+      int index = new_local_index(rbt);
+      emit_store(rbt, index);
+      ArgToken arglist[2];
+      arglist[0] = ArgToken(tt_temporary, rbt, index);
+      arglist[1] = ArgToken();  // sentinel
+      ret = make_invoke(methodHandle(), return_conv(), Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(zero));
+      set_return_conv(vmIntrinsics::_none);
+      rbt = ret.basic_type();
+      emit_load(rbt, ret.index());
+    }
     if (rbt != _rtype) {
       if (rbt == T_VOID) {
         // push a zero of the right sort
-        ArgToken zero;
         if (_rtype == T_OBJECT) {
           zero = make_oop_constant(NULL, CHECK_(zero));
         } else {
@@ -1043,9 +1462,27 @@
       } else if (_rtype == T_VOID) {
         // We'll emit a _return with something on the stack.
         // It's OK to ignore what's on the stack.
+      } else if (rbt == T_INT && is_subword_type(_rtype)) {
+        // Convert value to match return type.
+        switch (_rtype) {
+        case T_BOOLEAN: {
+          // boolean is treated as a one-bit unsigned integer.
+          // Cf. API documentation: java/lang/invoke/MethodHandles.html#explicitCastArguments
+          ArgToken one = make_prim_constant(T_INT, &one_jvalue, CHECK_(zero));
+          emit_load_constant(one);
+          emit_bc(Bytecodes::_iand);
+          break;
+        }
+        case T_BYTE:    emit_bc(Bytecodes::_i2b); break;
+        case T_CHAR:    emit_bc(Bytecodes::_i2c); break;
+        case T_SHORT:   emit_bc(Bytecodes::_i2s); break;
+        default: ShouldNotReachHere();
+        }
+      } else if (is_subword_type(rbt) && (is_subword_type(_rtype) || (_rtype == T_INT))) {
+        // The subword type was returned as an int and will be passed
+        // on as an int.
       } else {
-        tty->print_cr("*** rbt=%d != rtype=%d", rbt, _rtype);
-        assert(false, "IMPLEMENT ME");
+        lose("unknown conversion", CHECK_(zero));
       }
     }
     switch (_rtype) {
@@ -1056,8 +1493,10 @@
     case T_DOUBLE: emit_bc(Bytecodes::_dreturn); break;
     case T_VOID:   emit_bc(Bytecodes::_return);  break;
     case T_OBJECT:
-      if (_rklass.not_null() && _rklass() != SystemDictionary::Object_klass())
+      if (_rklass.not_null() && _rklass() != SystemDictionary::Object_klass() && !Klass::cast(_rklass())->is_interface()) {
         emit_bc(Bytecodes::_checkcast, cpool_klass_put(_rklass()));
+        check_non_bcp_klass(_rklass(), CHECK_(zero));
+      }
       emit_bc(Bytecodes::_areturn);
       break;
     default: ShouldNotReachHere();
@@ -1065,7 +1504,6 @@
     ret = ArgToken();  // Dummy return value.
   }
   else {
-    stack_push(rbt);  // The return value is already pushed onto the stack.
     int index = new_local_index(rbt);
     switch (rbt) {
     case T_BOOLEAN: case T_BYTE: case T_CHAR:  case T_SHORT:
@@ -1090,8 +1528,32 @@
                                  const MethodHandleWalker::ArgToken& base,
                                  const MethodHandleWalker::ArgToken& offset,
                                  TRAPS) {
-  Unimplemented();
-  return ArgToken();
+  switch (base.token_type()) {
+    case tt_parameter:
+    case tt_temporary:
+      emit_load(base.basic_type(), base.index());
+      break;
+    case tt_constant:
+      emit_load_constant(base);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+  switch (offset.token_type()) {
+    case tt_parameter:
+    case tt_temporary:
+      emit_load(offset.basic_type(), offset.index());
+      break;
+    case tt_constant:
+      emit_load_constant(offset);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+  emit_bc(op);
+  int index = new_local_index(type);
+  emit_store(type, index);
+  return ArgToken(tt_temporary, type, index);
 }
 
 
@@ -1114,7 +1576,7 @@
 
 //   for (int i = 1, imax = _constants.length(); i < imax; i++) {
 //     ConstantValue* con = _constants.at(i);
-//     if (con != NULL && con->is_primitive() && con->_type == bt) {
+//     if (con != NULL && con->is_primitive() && con.basic_type() == bt) {
 //       bool match = false;
 //       switch (type2size[bt]) {
 //       case 1:  if (pcon->_value.i == con->i)  match = true;  break;
@@ -1134,6 +1596,52 @@
   return index;
 }
 
+bool MethodHandleCompiler::check_non_bcp_klasses(Handle method_type, TRAPS) {
+  bool res = false;
+  for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) {
+    oop ptype = (i == -1
+                 ? java_lang_invoke_MethodType::rtype(method_type())
+                 : java_lang_invoke_MethodType::ptype(method_type(), i));
+    res |= check_non_bcp_klass(java_lang_Class::as_klassOop(ptype), CHECK_(false));
+  }
+  return res;
+}
+
+bool MethodHandleCompiler::check_non_bcp_klass(klassOop klass, TRAPS) {
+  klass = methodOopDesc::check_non_bcp_klass(klass);
+  if (klass != NULL) {
+    Symbol* name = Klass::cast(klass)->name();
+    for (int i = _non_bcp_klasses.length() - 1; i >= 0; i--) {
+      klassOop k2 = _non_bcp_klasses.at(i)();
+      if (Klass::cast(k2)->name() == name) {
+        if (k2 != klass) {
+          lose(err_msg("unsupported klass name alias %s", name->as_utf8()), THREAD);
+        }
+        return true;
+      }
+    }
+    _non_bcp_klasses.append(KlassHandle(THREAD, klass));
+    return true;
+  }
+  return false;
+}
+
+void MethodHandleCompiler::record_non_bcp_klasses() {
+  // Append extra klasses to constant pool, to guide klass lookup.
+  for (int k = 0; k < _non_bcp_klasses.length(); k++) {
+    klassOop non_bcp_klass = _non_bcp_klasses.at(k)();
+    bool add_to_cp = true;
+    for (int j = 1; j < _constants.length(); j++) {
+      ConstantValue* cv = _constants.at(j);
+      if (cv != NULL && cv->tag() == JVM_CONSTANT_Class
+          && cv->klass_oop() == non_bcp_klass) {
+        add_to_cp = false;
+        break;
+      }
+    }
+    if (add_to_cp)  cpool_klass_put(non_bcp_klass);
+  }
+}
 
 constantPoolHandle MethodHandleCompiler::get_constant_pool(TRAPS) const {
   constantPoolHandle nullHandle;
@@ -1153,6 +1661,8 @@
     case JVM_CONSTANT_Double:      cpool->double_at_put(       i, cv->get_jdouble()                    ); break;
     case JVM_CONSTANT_Class:       cpool->klass_at_put(        i, cv->klass_oop()                      ); break;
     case JVM_CONSTANT_Methodref:   cpool->method_at_put(       i, cv->first_index(), cv->second_index()); break;
+    case JVM_CONSTANT_InterfaceMethodref:
+                                cpool->interface_method_at_put(i, cv->first_index(), cv->second_index()); break;
     case JVM_CONSTANT_NameAndType: cpool->name_and_type_at_put(i, cv->first_index(), cv->second_index()); break;
     case JVM_CONSTANT_Object:      cpool->object_at_put(       i, cv->object_oop()                     ); break;
     default: ShouldNotReachHere();
@@ -1167,6 +1677,8 @@
     }
   }
 
+  cpool->set_preresolution();
+
   // Set the constant pool holder to the target method's class.
   cpool->set_pool_holder(_target_klass());
 
@@ -1175,7 +1687,7 @@
 
 
 methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const {
-  methodHandle nullHandle;
+  methodHandle empty;
   // Create a method that holds the generated bytecode.  invokedynamic
   // has no receiver, normal MH calls do.
   int flags_bits;
@@ -1184,13 +1696,16 @@
   else
     flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC);
 
-  methodOop m_oop = oopFactory::new_method(bytecode_length(),
-                                           accessFlags_from(flags_bits),
-                                           0, 0, 0, oopDesc::IsSafeConc, CHECK_(nullHandle));
-  methodHandle m(THREAD, m_oop);
-  m_oop = NULL;  // oop not GC safe
+  // Create a new method
+  methodHandle m;
+  {
+    methodOop m_oop = oopFactory::new_method(bytecode_length(),
+                                             accessFlags_from(flags_bits),
+                                             0, 0, 0, oopDesc::IsSafeConc, CHECK_(empty));
+    m = methodHandle(THREAD, m_oop);
+  }
 
-  constantPoolHandle cpool = get_constant_pool(CHECK_(nullHandle));
+  constantPoolHandle cpool = get_constant_pool(CHECK_(empty));
   m->set_constants(cpool());
 
   m->set_name_index(_name_index);
@@ -1205,16 +1720,62 @@
   typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array());
   m->set_exception_table(exception_handlers());
 
-  // Set the carry bit of the invocation counter to force inlining of
-  // the adapter.
-  InvocationCounter* ic = m->invocation_counter();
-  ic->set_carry_flag();
-
   // Rewrite the method and set up the constant pool cache.
-  objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(nullHandle));
+  objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(empty));
   objArrayHandle methods(THREAD, m_array);
   methods->obj_at_put(0, m());
-  Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(nullHandle));  // Use fake class.
+  Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty));  // Use fake class.
+  Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty));  // Use fake class.
+
+  // Pre-resolve selected CP cache entries, to avoid problems with class loader scoping.
+  constantPoolCacheHandle cpc(THREAD, cpool->cache());
+  for (int i = 0; i < cpc->length(); i++) {
+    ConstantPoolCacheEntry* e = cpc->entry_at(i);
+    assert(!e->is_secondary_entry(), "no indy instructions in here, yet");
+    int constant_pool_index = e->constant_pool_index();
+    ConstantValue* cv = _constants.at(constant_pool_index);
+    if (!cv->has_linkage())  continue;
+    methodHandle m = cv->linkage();
+    int index;
+    switch (cv->tag()) {
+    case JVM_CONSTANT_Methodref:
+      index = m->vtable_index();
+      if (m->is_static()) {
+        e->set_method(Bytecodes::_invokestatic, m, index);
+      } else {
+        e->set_method(Bytecodes::_invokespecial, m, index);
+        e->set_method(Bytecodes::_invokevirtual, m, index);
+      }
+      break;
+    case JVM_CONSTANT_InterfaceMethodref:
+      index = klassItable::compute_itable_index(m());
+      e->set_interface_call(m, index);
+      break;
+    }
+  }
+
+  // Set the invocation counter's count to the invoke count of the
+  // original call site.
+  InvocationCounter* ic = m->invocation_counter();
+  ic->set(InvocationCounter::wait_for_compile, _invoke_count);
+
+  // Create a new MDO
+  {
+    methodDataOop mdo = oopFactory::new_methodData(m, CHECK_(empty));
+    assert(m->method_data() == NULL, "there should not be an MDO yet");
+    m->set_method_data(mdo);
+
+    // Iterate over all profile data and set the count of the counter
+    // data entries to the original call site counter.
+    for (ProfileData* profile_data = mdo->first_data();
+         mdo->is_valid(profile_data);
+         profile_data = mdo->next_data(profile_data)) {
+      if (profile_data->is_CounterData()) {
+        CounterData* counter_data = profile_data->as_CounterData();
+        counter_data->set_count(_invoke_count);
+      }
+    }
+  }
 
 #ifndef PRODUCT
   if (TraceMethodHandles) {
@@ -1230,7 +1791,6 @@
 
 #ifndef PRODUCT
 
-#if 0
 // MH printer for debugging.
 
 class MethodHandlePrinter : public MethodHandleWalker {
@@ -1238,21 +1798,27 @@
   outputStream* _out;
   bool          _verbose;
   int           _temp_num;
+  int           _param_state;
   stringStream  _strbuf;
   const char* strbuf() {
     const char* s = _strbuf.as_string();
     _strbuf.reset();
     return s;
   }
-  ArgToken token(const char* str) {
-    return (ArgToken) str;
+  ArgToken token(const char* str, BasicType type) {
+    return ArgToken(str, type);
+  }
+  const char* string(ArgToken token) {
+    return token.str();
   }
   void start_params() {
+    _param_state <<= 1;
     _out->print("(");
   }
   void end_params() {
     if (_verbose)  _out->print("\n");
     _out->print(") => {");
+    _param_state >>= 1;
   }
   void put_type_name(BasicType type, klassOop tk, outputStream* s) {
     const char* kname = NULL;
@@ -1262,29 +1828,34 @@
   }
   ArgToken maybe_make_temp(const char* statement_op, BasicType type, const char* temp_name) {
     const char* value = strbuf();
-    if (!_verbose)  return token(value);
+    if (!_verbose)  return token(value, type);
     // make an explicit binding for each separate value
     _strbuf.print("%s%d", temp_name, ++_temp_num);
     const char* temp = strbuf();
     _out->print("\n  %s %s %s = %s;", statement_op, type2name(type), temp, value);
-    return token(temp);
+    return token(temp, type);
   }
 
 public:
   MethodHandlePrinter(Handle root, bool verbose, outputStream* out, TRAPS)
-    : MethodHandleWalker(root, THREAD),
+    : MethodHandleWalker(root, false, THREAD),
       _out(out),
       _verbose(verbose),
+      _param_state(0),
       _temp_num(0)
   {
+    out->print("MethodHandle:");
+    java_lang_invoke_MethodType::print_signature(java_lang_invoke_MethodHandle::type(root()), out);
+    out->print(" : #");
     start_params();
   }
   virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) {
     if (argnum < 0) {
       end_params();
-      return NULL;
+      return token("return", type);
     }
-    if (argnum == 0) {
+    if ((_param_state & 1) == 0) {
+      _param_state |= 1;
       _out->print(_verbose ? "\n  " : "");
     } else {
       _out->print(_verbose ? ",\n  " : ", ");
@@ -1296,7 +1867,7 @@
     const char* arg = strbuf();
     put_type_name(type, tk, _out);
     _out->print(" %s", arg);
-    return token(arg);
+    return token(arg, type);
   }
   virtual ArgToken make_oop_constant(oop con, TRAPS) {
     if (con == NULL)
@@ -1314,8 +1885,15 @@
     java_lang_boxing_object::print(type, con, &_strbuf);
     return maybe_make_temp("constant", type, "k");
   }
-  virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken src, TRAPS) {
-    _strbuf.print("%s(%s", Bytecodes::name(op), (const char*)src);
+  void print_bytecode_name(Bytecodes::Code op) {
+    if (Bytecodes::is_defined(op))
+      _strbuf.print("%s", Bytecodes::name(op));
+    else
+      _strbuf.print("bytecode_%d", (int) op);
+  }
+  virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) {
+    print_bytecode_name(op);
+    _strbuf.print("(%s", string(src));
     if (tk != NULL) {
       _strbuf.print(", ");
       put_type_name(type, tk, &_strbuf);
@@ -1323,8 +1901,8 @@
     _strbuf.print(")");
     return maybe_make_temp("convert", type, "v");
   }
-  virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken base, ArgToken offset, TRAPS) {
-    _strbuf.print("%s(%s, %s", Bytecodes::name(op), (const char*)base, (const char*)offset);
+  virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) {
+    _strbuf.print("%s(%s, %s", Bytecodes::name(op), string(base), string(offset));
     if (tk != NULL) {
       _strbuf.print(", ");
       put_type_name(type, tk, &_strbuf);
@@ -1332,11 +1910,12 @@
     _strbuf.print(")");
     return maybe_make_temp("fetch", type, "x");
   }
-  virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid,
+  virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid,
                                Bytecodes::Code op, bool tailcall,
                                int argc, ArgToken* argv, TRAPS) {
-    Symbol* name, sig;
-    if (m != NULL) {
+    Symbol* name;
+    Symbol* sig;
+    if (m.not_null()) {
       name = m->name();
       sig  = m->signature();
     } else {
@@ -1345,7 +1924,7 @@
     }
     _strbuf.print("%s %s%s(", Bytecodes::name(op), name->as_C_string(), sig->as_C_string());
     for (int i = 0; i < argc; i++) {
-      _strbuf.print("%s%s", (i > 0 ? ", " : ""), (const char*)argv[i]);
+      _strbuf.print("%s%s", (i > 0 ? ", " : ""), string(argv[i]));
     }
     _strbuf.print(")");
     if (!tailcall) {
@@ -1375,7 +1954,7 @@
     out->print("\n");
   }
   static void print(Handle root, bool verbose = Verbose, outputStream* out = tty) {
-    EXCEPTION_MARK;
+    Thread* THREAD = Thread::current();
     ResourceMark rm;
     MethodHandlePrinter printer(root, verbose, out, THREAD);
     if (!HAS_PENDING_EXCEPTION)
@@ -1383,24 +1962,20 @@
     if (HAS_PENDING_EXCEPTION) {
       oop ex = PENDING_EXCEPTION;
       CLEAR_PENDING_EXCEPTION;
-      out->print("\n*** ");
-      if (ex != Universe::virtual_machine_error_instance())
-        ex->print_on(out);
-      else
-        out->print("lose: %s", printer.lose_message());
-      out->print("\n}\n");
+      out->print(" *** ");
+      if (printer.lose_message() != NULL)  out->print("%s ", printer.lose_message());
+      out->print("}");
     }
     out->print("\n");
   }
 };
-#endif // 0
 
 extern "C"
 void print_method_handle(oop mh) {
   if (!mh->is_oop()) {
-    tty->print_cr("*** not a method handle: "INTPTR_FORMAT, (intptr_t)mh);
+    tty->print_cr("*** not a method handle: "PTR_FORMAT, (intptr_t)mh);
   } else if (java_lang_invoke_MethodHandle::is_instance(mh)) {
-    //MethodHandlePrinter::print(mh);
+    MethodHandlePrinter::print(mh);
   } else {
     tty->print("*** not a method handle: ");
     mh->print();
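
For readers of the tailcall return-conversion hunk above (the new T_BOOLEAN/T_BYTE/T_CHAR/T_SHORT cases), here is a standalone sketch, not part of this changeset, of the value narrowing those emitted bytecodes perform: boolean keeps only bit 0 (iand with 1), byte and short sign-extend (i2b/i2s), and char zero-extends (i2c). The helper names below are invented for illustration.

    #include <cstdint>
    #include <cstdio>

    // Illustrative only: arithmetic effect of the subword return conversions.
    static int narrow_boolean(int v) { return v & 1; }        // iand 1
    static int narrow_byte(int v)    { return (int8_t)v; }    // i2b (sign-extend)
    static int narrow_char(int v)    { return (uint16_t)v; }  // i2c (zero-extend)
    static int narrow_short(int v)   { return (int16_t)v; }   // i2s (sign-extend)

    int main() {
      printf("%d %d %d %d\n",
             narrow_boolean(0x102),   // 0
             narrow_byte(0x1ff),      // -1
             narrow_char(-1),         // 65535
             narrow_short(0x18000));  // -32768
      return 0;
    }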
--- a/src/share/vm/prims/methodHandleWalk.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/methodHandleWalk.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -56,6 +56,10 @@
   int BoundMethodHandle_vmargslot()    { return java_lang_invoke_BoundMethodHandle::vmargslot(method_handle_oop()); }
   int AdapterMethodHandle_conversion() { return java_lang_invoke_AdapterMethodHandle::conversion(method_handle_oop()); }
 
+#ifdef ASSERT
+  void print_impl(TRAPS);
+#endif
+
 public:
   MethodHandleChain(Handle root, TRAPS)
     : _root(root)
@@ -94,11 +98,21 @@
   int       bound_arg_slot()    { assert(is_bound(), ""); return _arg_slot; }
   oop       bound_arg_oop()     { assert(is_bound(), ""); return BoundMethodHandle_argument_oop(); }
 
+  methodHandle last_method()    { assert(is_last(), ""); return _last_method; }
   methodOop last_method_oop()   { assert(is_last(), ""); return _last_method(); }
   Bytecodes::Code last_invoke_code() { assert(is_last(), ""); return _last_invoke; }
 
   void lose(const char* msg, TRAPS);
   const char* lose_message()    { return _lose_message; }
+
+#ifdef ASSERT
+  // Print a symbolic description of a method handle chain, including
+  // the signature for each method.  The signatures are printed in
+  // slot order to make it easier to understand.
+  void print();
+  static void print(Handle mh);
+  static void print(oopDesc* mh);
+#endif
 };
 
 
@@ -113,6 +127,7 @@
     tt_parameter,
     tt_temporary,
     tt_constant,
+    tt_symbolic,
     tt_illegal
   };
 
@@ -125,67 +140,70 @@
     Handle    _handle;
 
   public:
-    ArgToken(TokenType tt = tt_illegal) : _tt(tt) {}
-    ArgToken(TokenType tt, BasicType bt, jvalue value) : _tt(tt), _bt(bt), _value(value) {}
+    ArgToken(TokenType tt = tt_illegal) : _tt(tt), _bt(tt == tt_void ? T_VOID : T_ILLEGAL) {
+      assert(tt == tt_illegal || tt == tt_void, "invalid token type");
+    }
 
     ArgToken(TokenType tt, BasicType bt, int index) : _tt(tt), _bt(bt) {
+      assert(_tt == tt_parameter || _tt == tt_temporary, "must have index");
       _value.i = index;
     }
 
-    ArgToken(TokenType tt, BasicType bt, Handle value) : _tt(tt), _bt(bt) {
-      _handle = value;
+    ArgToken(BasicType bt, jvalue value) : _tt(tt_constant), _bt(bt), _value(value) { assert(_bt != T_OBJECT, "wrong constructor"); }
+    ArgToken(Handle handle) : _tt(tt_constant), _bt(T_OBJECT), _handle(handle) {}
+
+
+    ArgToken(const char* str, BasicType type) : _tt(tt_symbolic), _bt(type) {
+      _value.j = (intptr_t)str;
     }
 
     TokenType token_type()  const { return _tt; }
     BasicType basic_type()  const { return _bt; }
-    int       index()       const { return _value.i; }
-    Handle    object()      const { return _handle; }
-
-    jint      get_jint()    const { return _value.i; }
-    jlong     get_jlong()   const { return _value.j; }
-    jfloat    get_jfloat()  const { return _value.f; }
-    jdouble   get_jdouble() const { return _value.d; }
-  };
+    bool      has_index()   const { return _tt == tt_parameter || _tt == tt_temporary; }
+    int       index()       const { assert(has_index(), "must have index"); return _value.i; }
+    Handle    object()      const { assert(_bt == T_OBJECT, "wrong accessor"); assert(_tt == tt_constant, "value type"); return _handle; }
+    const char* str()       const { assert(_tt == tt_symbolic, "string type"); return (const char*)(intptr_t)_value.j; }
 
-  // Abstract interpretation state:
-  struct SlotState {
-    BasicType _type;
-    ArgToken  _arg;
-    SlotState() : _type(), _arg() {}
+    jint      get_jint()    const { assert(_bt == T_INT || is_subword_type(_bt), "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.i; }
+    jlong     get_jlong()   const { assert(_bt == T_LONG, "wrong accessor");   assert(_tt == tt_constant, "value types"); return _value.j; }
+    jfloat    get_jfloat()  const { assert(_bt == T_FLOAT, "wrong accessor");  assert(_tt == tt_constant, "value types"); return _value.f; }
+    jdouble   get_jdouble() const { assert(_bt == T_DOUBLE, "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.d; }
   };
-  static SlotState make_state(BasicType type, ArgToken arg) {
-    SlotState ss;
-    ss._type = type; ss._arg = arg;
-    return ss;
-  }
 
 private:
   MethodHandleChain _chain;
   bool              _for_invokedynamic;
   int               _local_index;
 
-  GrowableArray<SlotState> _outgoing;       // current outgoing parameter slots
+  // This array is kept in an unusual order, indexed by low-level "slot number".
+  // TOS is always _outgoing.at(0), so simple pushes and pops shift the whole _outgoing array.
+  // If there is a receiver in the current argument list, it is at _outgoing.at(_outgoing.length()-1).
+  // If a value at _outgoing.at(n) is T_LONG or T_DOUBLE, the value at _outgoing.at(n+1) is T_VOID.
+  GrowableArray<ArgToken>  _outgoing;       // current outgoing parameter slots
   int                      _outgoing_argc;  // # non-empty outgoing slots
 
+  vmIntrinsics::ID _return_conv;            // Return conversion required by raw retypes.
+
   // Replace a value of type old_type at slot (and maybe slot+1) with the new value.
   // If old_type != T_VOID, remove the old argument at that point.
   // If new_type != T_VOID, insert the new argument at that point.
   // Insert or delete a second empty slot as needed.
-  void change_argument(BasicType old_type, int slot, BasicType new_type, const ArgToken& new_arg);
-
-  SlotState* slot_state(int slot) {
-    if (slot < 0 || slot >= _outgoing.length())
-      return NULL;
-    return _outgoing.adr_at(slot);
+  void change_argument(BasicType old_type, int slot, const ArgToken& new_arg);
+  void change_argument(BasicType old_type, int slot, BasicType type, const ArgToken& new_arg) {
+    assert(type == new_arg.basic_type(), "must agree");
+    change_argument(old_type, slot, new_arg);
   }
-  BasicType slot_type(int slot) {
-    SlotState* ss = slot_state(slot);
-    if (ss == NULL)
-      return T_ILLEGAL;
-    return ss->_type;
+
+  // Raw retype conversions for OP_RETYPE_RAW.
+  void retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS);
+  void retype_raw_argument_type(BasicType src, BasicType dst, int slot, TRAPS) { retype_raw_conversion(src, dst, false, slot, CHECK); }
+  void retype_raw_return_type(  BasicType src, BasicType dst,           TRAPS) { retype_raw_conversion(src, dst, true,  -1,   CHECK); }
+
+  BasicType arg_type(int slot) {
+    return _outgoing.at(slot).basic_type();
   }
-  bool slot_has_argument(int slot) {
-    return slot_type(slot) < T_VOID;
+  bool has_argument(int slot) {
+    return arg_type(slot) < T_VOID;
   }
 
 #ifdef ASSERT
@@ -197,12 +215,15 @@
 
   void walk_incoming_state(TRAPS);
 
+  void verify_args_and_signature(TRAPS) NOT_DEBUG_RETURN;
+
 public:
   MethodHandleWalker(Handle root, bool for_invokedynamic, TRAPS)
     : _chain(root, THREAD),
       _for_invokedynamic(for_invokedynamic),
       _outgoing(THREAD, 10),
-      _outgoing_argc(0)
+      _outgoing_argc(0),
+      _return_conv(vmIntrinsics::_none)
   {
     _local_index = for_invokedynamic ? 0 : 1;
   }
@@ -211,6 +232,10 @@
 
   bool for_invokedynamic() const { return _for_invokedynamic; }
 
+  vmIntrinsics::ID return_conv() const { return _return_conv; }
+  void set_return_conv(vmIntrinsics::ID c) { _return_conv = c; }
+  static vmIntrinsics::ID zero_return_conv() { return vmIntrinsics::_min; }
+
   int new_local_index(BasicType bt) {
     //int index = _for_invokedynamic ? _local_index : _local_index - 1;
     int index = _local_index;
@@ -221,14 +246,14 @@
   int max_locals() const { return _local_index; }
 
   // plug-in abstract interpretation steps:
-  virtual ArgToken make_parameter( BasicType type, klassOop tk, int argnum, TRAPS ) = 0;
-  virtual ArgToken make_prim_constant( BasicType type, jvalue* con, TRAPS ) = 0;
-  virtual ArgToken make_oop_constant( oop con, TRAPS ) = 0;
-  virtual ArgToken make_conversion( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS ) = 0;
-  virtual ArgToken make_fetch( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS ) = 0;
-  virtual ArgToken make_invoke( methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS ) = 0;
+  virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) = 0;
+  virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) = 0;
+  virtual ArgToken make_oop_constant(oop con, TRAPS) = 0;
+  virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) = 0;
+  virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) = 0;
+  virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) = 0;
 
-  // For make_invoke, the methodOop can be NULL if the intrinsic ID
+  // For make_invoke, the methodHandle can be NULL if the intrinsic ID
   // is something other than vmIntrinsics::_none.
 
   // and in case anyone cares to related the previous actions to the chain:
@@ -246,19 +271,24 @@
 // The IR happens to be JVM bytecodes.
 class MethodHandleCompiler : public MethodHandleWalker {
 private:
-  methodHandle _callee;
+  int          _invoke_count;  // number of times the original call site has been executed
   KlassHandle  _rklass;        // Return type for casting.
   BasicType    _rtype;
   KlassHandle  _target_klass;
   Thread*      _thread;
 
+  // Values used by the compiler.
+  static jvalue zero_jvalue;
+  static jvalue one_jvalue;
+
   // Fake constant pool entry.
-  class ConstantValue {
+  class ConstantValue : public ResourceObj {
   private:
     int       _tag;   // Constant pool tag type.
     JavaValue _value;
     Handle    _handle;
     Symbol*   _sym;
+    methodHandle _method;  // pre-linkage
 
   public:
     // Constructor for oop types.
@@ -307,11 +337,21 @@
     jlong     get_jlong()    const { return _value.get_jlong();   }
     jfloat    get_jfloat()   const { return _value.get_jfloat();  }
     jdouble   get_jdouble()  const { return _value.get_jdouble(); }
+
+    void set_linkage(methodHandle method) {
+      assert(_method.is_null(), "");
+      _method = method;
+    }
+    bool     has_linkage()   const { return _method.not_null(); }
+    methodHandle linkage()   const { return _method; }
   };
 
   // Fake constant pool.
   GrowableArray<ConstantValue*> _constants;
 
+  // Non-BCP classes that appear in associated MethodTypes (require special handling).
+  GrowableArray<KlassHandle> _non_bcp_klasses;
+
   // Accumulated compiler state:
   GrowableArray<unsigned char> _bytecode;
 
@@ -347,15 +387,20 @@
     return _constants.append(cv);
   }
 
-  int cpool_oop_reference_put(int tag, int first_index, int second_index) {
+  int cpool_oop_reference_put(int tag, int first_index, int second_index, methodHandle method) {
     if (first_index == 0 && second_index == 0)  return 0;
     assert(first_index != 0 && second_index != 0, "no zero indexes");
     ConstantValue* cv = new ConstantValue(tag, first_index, second_index);
+    if (method.not_null())  cv->set_linkage(method);
     return _constants.append(cv);
   }
 
   int cpool_primitive_put(BasicType type, jvalue* con);
 
+  bool check_non_bcp_klasses(Handle method_type, TRAPS);
+  bool check_non_bcp_klass(klassOop klass, TRAPS);
+  void record_non_bcp_klasses();
+
   int cpool_int_put(jint value) {
     jvalue con; con.i = value;
     return cpool_primitive_put(T_INT, &con);
@@ -382,14 +427,15 @@
   int cpool_klass_put(klassOop klass) {
     return cpool_oop_put(JVM_CONSTANT_Class, klass);
   }
-  int cpool_methodref_put(int class_index, int name_and_type_index) {
-    return cpool_oop_reference_put(JVM_CONSTANT_Methodref, class_index, name_and_type_index);
+  int cpool_methodref_put(Bytecodes::Code op, int class_index, int name_and_type_index, methodHandle method) {
+    int tag = (op == Bytecodes::_invokeinterface ? JVM_CONSTANT_InterfaceMethodref : JVM_CONSTANT_Methodref);
+    return cpool_oop_reference_put(tag, class_index, name_and_type_index, method);
   }
   int cpool_name_and_type_put(int name_index, int signature_index) {
-    return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index);
+    return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index, methodHandle());
   }
 
-  void emit_bc(Bytecodes::Code op, int index = 0);
+  void emit_bc(Bytecodes::Code op, int index = 0, int args_size = -1);
   void emit_load(BasicType bt, int index);
   void emit_store(BasicType bt, int index);
   void emit_load_constant(ArgToken arg);
@@ -399,15 +445,15 @@
   }
   virtual ArgToken make_oop_constant(oop con, TRAPS) {
     Handle h(THREAD, con);
-    return ArgToken(tt_constant, T_OBJECT, h);
+    return ArgToken(h);
   }
   virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) {
-    return ArgToken(tt_constant, type, *con);
+    return ArgToken(type, *con);
   }
 
   virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS);
   virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS);
-  virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS);
+  virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS);
 
   // Get a real constant pool.
   constantPoolHandle get_constant_pool(TRAPS) const;
@@ -416,7 +462,7 @@
   methodHandle get_method_oop(TRAPS) const;
 
 public:
-  MethodHandleCompiler(Handle root, methodHandle call_method, bool for_invokedynamic, TRAPS);
+  MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool for_invokedynamic, TRAPS);
 
   // Compile the given MH chain into bytecode.
   methodHandle compile(TRAPS);
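
The header comment above describes _outgoing as a slot-indexed array with TOS at index 0 and a T_VOID filler slot after every long/double value. A toy model of that layout, not VM code and using invented names, under the stated convention:

    #include <vector>
    #include <cassert>

    // Toy model of the slot convention: slot 0 is TOS, and each two-word
    // value is immediately followed by a filler slot, so indexes line up
    // with JVM stack slots.
    enum Slot { SLOT_ONE_WORD, SLOT_TWO_WORD, SLOT_VOID_FILLER };

    static void push_arg(std::vector<Slot>& outgoing, bool two_word) {
      if (two_word) outgoing.insert(outgoing.begin(), SLOT_VOID_FILLER);
      outgoing.insert(outgoing.begin(), two_word ? SLOT_TWO_WORD : SLOT_ONE_WORD);
    }

    int main() {
      std::vector<Slot> outgoing;
      push_arg(outgoing, false);  // e.g. an int
      push_arg(outgoing, true);   // e.g. a long, pushed later, so nearer TOS
      assert(outgoing[0] == SLOT_TWO_WORD);     // the long itself
      assert(outgoing[1] == SLOT_VOID_FILLER);  // its companion empty slot
      assert(outgoing[2] == SLOT_ONE_WORD);     // the int, deeper on the stack
      return 0;
    }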
--- a/src/share/vm/prims/methodHandles.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/methodHandles.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -24,10 +24,14 @@
 
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
+#include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/oopMapCache.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "prims/methodHandles.hpp"
+#include "prims/methodHandleWalk.hpp"
+#include "runtime/compilationPolicy.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/signature.hpp"
@@ -66,8 +70,8 @@
   "adapter_drop_args",
   "adapter_collect_args",
   "adapter_spread_args",
-  "adapter_flyby",
-  "adapter_ricochet",
+  "adapter_fold_args",
+  "adapter_unused_13",
 
   // optimized adapter types:
   "adapter_swap_args/1",
@@ -83,26 +87,102 @@
   "adapter_prim_to_prim/f2d",
   "adapter_ref_to_prim/unboxi",
   "adapter_ref_to_prim/unboxl",
-  "adapter_spread_args/0",
-  "adapter_spread_args/1",
-  "adapter_spread_args/more",
+
+  // return value handlers for collect/filter/fold adapters:
+  "return/ref",
+  "return/int",
+  "return/long",
+  "return/float",
+  "return/double",
+  "return/void",
+  "return/S0/ref",
+  "return/S1/ref",
+  "return/S2/ref",
+  "return/S3/ref",
+  "return/S4/ref",
+  "return/S5/ref",
+  "return/any",
+
+  // spreading (array length cases 0, 1, ...)
+  "adapter_spread/0",
+  "adapter_spread/1/ref",
+  "adapter_spread/2/ref",
+  "adapter_spread/3/ref",
+  "adapter_spread/4/ref",
+  "adapter_spread/5/ref",
+  "adapter_spread/ref",
+  "adapter_spread/byte",
+  "adapter_spread/char",
+  "adapter_spread/short",
+  "adapter_spread/int",
+  "adapter_spread/long",
+  "adapter_spread/float",
+  "adapter_spread/double",
+
+  // blocking filter/collect conversions:
+  "adapter_collect/ref",
+  "adapter_collect/int",
+  "adapter_collect/long",
+  "adapter_collect/float",
+  "adapter_collect/double",
+  "adapter_collect/void",
+  "adapter_collect/0/ref",
+  "adapter_collect/1/ref",
+  "adapter_collect/2/ref",
+  "adapter_collect/3/ref",
+  "adapter_collect/4/ref",
+  "adapter_collect/5/ref",
+  "adapter_filter/S0/ref",
+  "adapter_filter/S1/ref",
+  "adapter_filter/S2/ref",
+  "adapter_filter/S3/ref",
+  "adapter_filter/S4/ref",
+  "adapter_filter/S5/ref",
+  "adapter_collect/2/S0/ref",
+  "adapter_collect/2/S1/ref",
+  "adapter_collect/2/S2/ref",
+  "adapter_collect/2/S3/ref",
+  "adapter_collect/2/S4/ref",
+  "adapter_collect/2/S5/ref",
+
+  // blocking fold conversions:
+  "adapter_fold/ref",
+  "adapter_fold/int",
+  "adapter_fold/long",
+  "adapter_fold/float",
+  "adapter_fold/double",
+  "adapter_fold/void",
+  "adapter_fold/1/ref",
+  "adapter_fold/2/ref",
+  "adapter_fold/3/ref",
+  "adapter_fold/4/ref",
+  "adapter_fold/5/ref",
 
   NULL
 };
 
 // Adapters.
-MethodHandlesAdapterBlob* MethodHandles::_adapter_code      = NULL;
-int                       MethodHandles::_adapter_code_size = StubRoutines::method_handles_adapters_code_size;
+MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL;
 
 jobject MethodHandles::_raise_exception_method;
 
+address MethodHandles::_adapter_return_handlers[CONV_TYPE_MASK+1];
+
 #ifdef ASSERT
 bool MethodHandles::spot_check_entry_names() {
   assert(!strcmp(entry_name(_invokestatic_mh), "invokestatic"), "");
   assert(!strcmp(entry_name(_bound_ref_mh), "bound_ref"), "");
   assert(!strcmp(entry_name(_adapter_retype_only), "adapter_retype_only"), "");
-  assert(!strcmp(entry_name(_adapter_ricochet), "adapter_ricochet"), "");
+  assert(!strcmp(entry_name(_adapter_fold_args), "adapter_fold_args"), "");
   assert(!strcmp(entry_name(_adapter_opt_unboxi), "adapter_ref_to_prim/unboxi"), "");
+  assert(!strcmp(entry_name(_adapter_opt_spread_char), "adapter_spread/char"), "");
+  assert(!strcmp(entry_name(_adapter_opt_spread_double), "adapter_spread/double"), "");
+  assert(!strcmp(entry_name(_adapter_opt_collect_int), "adapter_collect/int"), "");
+  assert(!strcmp(entry_name(_adapter_opt_collect_0_ref), "adapter_collect/0/ref"), "");
+  assert(!strcmp(entry_name(_adapter_opt_collect_2_S3_ref), "adapter_collect/2/S3/ref"), "");
+  assert(!strcmp(entry_name(_adapter_opt_filter_S5_ref), "adapter_filter/S5/ref"), "");
+  assert(!strcmp(entry_name(_adapter_opt_fold_3_ref), "adapter_fold/3/ref"), "");
+  assert(!strcmp(entry_name(_adapter_opt_fold_void), "adapter_fold/void"), "");
   return true;
 }
 #endif
@@ -112,21 +192,23 @@
 // MethodHandles::generate_adapters
 //
 void MethodHandles::generate_adapters() {
+#ifdef TARGET_ARCH_NYI_6939861
+  if (FLAG_IS_DEFAULT(UseRicochetFrames))  UseRicochetFrames = false;
+#endif
   if (!EnableInvokeDynamic || SystemDictionary::MethodHandle_klass() == NULL)  return;
 
   assert(_adapter_code == NULL, "generate only once");
 
   ResourceMark rm;
   TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
-  _adapter_code = MethodHandlesAdapterBlob::create(_adapter_code_size);
+  _adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size);
   if (_adapter_code == NULL)
-    vm_exit_out_of_memory(_adapter_code_size, "CodeCache: no room for MethodHandles adapters");
+    vm_exit_out_of_memory(adapter_code_size, "CodeCache: no room for MethodHandles adapters");
   CodeBuffer code(_adapter_code);
   MethodHandlesAdapterGenerator g(&code);
   g.generate();
 }
 
-
 //------------------------------------------------------------------------------
 // MethodHandlesAdapterGenerator::generate
 //
@@ -135,12 +217,62 @@
   for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
        ek < MethodHandles::_EK_LIMIT;
        ek = MethodHandles::EntryKind(1 + (int)ek)) {
-    StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
-    MethodHandles::generate_method_handle_stub(_masm, ek);
+    if (MethodHandles::ek_supported(ek)) {
+      StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
+      MethodHandles::generate_method_handle_stub(_masm, ek);
+    }
   }
 }
 
 
+#ifdef TARGET_ARCH_NYI_6939861
+// these defs belong in methodHandles_<arch>.cpp
+frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
+  ShouldNotCallThis();
+  return fr;
+}
+void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* f, const RegisterMap* reg_map) {
+  ShouldNotCallThis();
+}
+#endif //TARGET_ARCH_NYI_6939861
+
+
+//------------------------------------------------------------------------------
+// MethodHandles::ek_supported
+//
+bool MethodHandles::ek_supported(MethodHandles::EntryKind ek) {
+  MethodHandles::EntryKind ek_orig = MethodHandles::ek_original_kind(ek);
+  switch (ek_orig) {
+  case _adapter_unused_13:
+    return false;  // not defined yet
+  case _adapter_prim_to_ref:
+    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF);
+  case _adapter_collect_args:
+    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS);
+  case _adapter_fold_args:
+    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS);
+  case _adapter_opt_return_any:
+    return UseRicochetFrames;
+#ifdef TARGET_ARCH_NYI_6939861
+  // ports before 6939861 supported only three kinds of spread ops
+  case _adapter_spread_args:
+    // restrict spreads to three kinds:
+    switch (ek) {
+    case _adapter_opt_spread_0:
+    case _adapter_opt_spread_1:
+    case _adapter_opt_spread_more:
+      break;
+    default:
+      return false;
+      break;
+    }
+    break;
+#endif //TARGET_ARCH_NYI_6939861
+  }
+  return true;
+}
+
+
 void MethodHandles::set_enabled(bool z) {
   if (_enabled != z) {
     guarantee(z && EnableInvokeDynamic, "can only enable once, and only if -XX:+EnableInvokeDynamic");
@@ -153,9 +285,9 @@
 // and local, like parse a data structure.  For speed, such methods work on plain
 // oops, not handles.  Trapping methods uniformly operate on handles.
 
-methodOop MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype,
-                                         klassOop& receiver_limit_result, int& decode_flags_result) {
-  if (vmtarget == NULL)  return NULL;
+methodHandle MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype,
+                                            KlassHandle& receiver_limit_result, int& decode_flags_result) {
+  if (vmtarget == NULL)  return methodHandle();
   assert(methodOopDesc::nonvirtual_vtable_index < 0, "encoding");
   if (vmindex < 0) {
     // this DMH performs no dispatch; it is directly bound to a methodOop
@@ -198,20 +330,20 @@
 // MemberName and DirectMethodHandle have the same linkage to the JVM internals.
 // (MemberName is the non-operational name used for queries and setup.)
 
-methodOop MethodHandles::decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   oop vmtarget = java_lang_invoke_DirectMethodHandle::vmtarget(mh);
   int vmindex  = java_lang_invoke_DirectMethodHandle::vmindex(mh);
   oop mtype    = java_lang_invoke_DirectMethodHandle::type(mh);
   return decode_vmtarget(vmtarget, vmindex, mtype, receiver_limit_result, decode_flags_result);
 }
 
-methodOop MethodHandles::decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   assert(java_lang_invoke_BoundMethodHandle::is_instance(mh), "");
   assert(mh->klass() != SystemDictionary::AdapterMethodHandle_klass(), "");
   for (oop bmh = mh;;) {
     // Bound MHs can be stacked to bind several arguments.
     oop target = java_lang_invoke_MethodHandle::vmtarget(bmh);
-    if (target == NULL)  return NULL;
+    if (target == NULL)  return methodHandle();
     decode_flags_result |= MethodHandles::_dmf_binds_argument;
     klassOop tk = target->klass();
     if (tk == SystemDictionary::BoundMethodHandle_klass()) {
@@ -236,14 +368,14 @@
   }
 }
 
-methodOop MethodHandles::decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   assert(mh->klass() == SystemDictionary::AdapterMethodHandle_klass(), "");
   for (oop amh = mh;;) {
     // Adapter MHs can be stacked to convert several arguments.
     int conv_op = adapter_conversion_op(java_lang_invoke_AdapterMethodHandle::conversion(amh));
     decode_flags_result |= (_dmf_adapter_lsb << conv_op) & _DMF_ADAPTER_MASK;
     oop target = java_lang_invoke_MethodHandle::vmtarget(amh);
-    if (target == NULL)  return NULL;
+    if (target == NULL)  return methodHandle();
     klassOop tk = target->klass();
     if (tk == SystemDictionary::AdapterMethodHandle_klass()) {
       amh = target;
@@ -255,8 +387,8 @@
   }
 }
 
-methodOop MethodHandles::decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
-  if (mh == NULL)  return NULL;
+methodHandle MethodHandles::decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
+  if (mh == NULL)  return methodHandle();
   klassOop mhk = mh->klass();
   assert(java_lang_invoke_MethodHandle::is_subclass(mhk), "must be a MethodHandle");
   if (mhk == SystemDictionary::DirectMethodHandle_klass()) {
@@ -270,7 +402,7 @@
     return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result);
   } else {
     assert(false, "cannot parse this MH");
-    return NULL;              // random MH?
+    return methodHandle();  // random MH?
   }
 }
 
@@ -299,9 +431,9 @@
 
 // A trusted party is handing us a cookie to determine a method.
 // Let's boil it down to the method oop they really want.
-methodOop MethodHandles::decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   decode_flags_result = 0;
-  receiver_limit_result = NULL;
+  receiver_limit_result = KlassHandle();
   klassOop xk = x->klass();
   if (xk == Universe::methodKlassObj()) {
     return decode_methodOop((methodOop) x, decode_flags_result);
@@ -329,7 +461,7 @@
     assert(!x->is_method(), "already checked");
     assert(!java_lang_invoke_MemberName::is_instance(x), "already checked");
   }
-  return NULL;
+  return methodHandle();
 }
 
 
@@ -389,11 +521,10 @@
     int offset = instanceKlass::cast(k)->offset_from_fields(slot);
     init_MemberName(mname_oop, k, accessFlags_from(mods), offset);
   } else {
-    int decode_flags = 0; klassOop receiver_limit = NULL;
-    methodOop m = MethodHandles::decode_method(target_oop,
-                                               receiver_limit, decode_flags);
+    KlassHandle receiver_limit; int decode_flags = 0;
+    methodHandle m = MethodHandles::decode_method(target_oop, receiver_limit, decode_flags);
     bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0);
-    init_MemberName(mname_oop, m, do_dispatch);
+    init_MemberName(mname_oop, m(), do_dispatch);
   }
 }
 
@@ -423,13 +554,14 @@
 }
 
 
-methodOop MethodHandles::decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result) {
+  methodHandle empty;
   int flags  = java_lang_invoke_MemberName::flags(mname);
-  if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0)  return NULL;  // not invocable
+  if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0)  return empty;  // not invocable
   oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname);
   int vmindex  = java_lang_invoke_MemberName::vmindex(mname);
-  if (vmindex == VM_INDEX_UNINITIALIZED)  return NULL; // not resolved
-  methodOop m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
+  if (vmindex == VM_INDEX_UNINITIALIZED)  return empty;  // not resolved
+  methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
   oop clazz = java_lang_invoke_MemberName::clazz(mname);
   if (clazz != NULL && java_lang_Class::is_instance(clazz)) {
     klassOop klass = java_lang_Class::as_klassOop(clazz);
@@ -439,9 +571,7 @@
 }
 
 // convert the external string or reflective type to an internal signature
-Symbol* MethodHandles::convert_to_signature(oop type_str,
-                                            bool polymorphic,
-                                            TRAPS) {
+Symbol* MethodHandles::convert_to_signature(oop type_str, bool polymorphic, TRAPS) {
   if (java_lang_invoke_MethodType::is_instance(type_str)) {
     return java_lang_invoke_MethodType::as_signature(type_str, polymorphic, CHECK_NULL);
   } else if (java_lang_Class::is_instance(type_str)) {
@@ -474,48 +604,50 @@
 #endif
   if (java_lang_invoke_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED)
     return;  // already resolved
-  oop defc_oop = java_lang_invoke_MemberName::clazz(mname());
-  oop name_str = java_lang_invoke_MemberName::name(mname());
-  oop type_str = java_lang_invoke_MemberName::type(mname());
-  int flags    = java_lang_invoke_MemberName::flags(mname());
+  Handle defc_oop(THREAD, java_lang_invoke_MemberName::clazz(mname()));
+  Handle name_str(THREAD, java_lang_invoke_MemberName::name( mname()));
+  Handle type_str(THREAD, java_lang_invoke_MemberName::type( mname()));
+  int    flags    =       java_lang_invoke_MemberName::flags(mname());
 
-  if (defc_oop == NULL || name_str == NULL || type_str == NULL) {
+  if (defc_oop.is_null() || name_str.is_null() || type_str.is_null()) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve");
   }
-  klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop);
-  defc_oop = NULL;  // safety
-  if (defc_klassOop == NULL)  return;  // a primitive; no resolution possible
-  if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
-    if (!Klass::cast(defc_klassOop)->oop_is_array())  return;
-    defc_klassOop = SystemDictionary::Object_klass();
+
+  instanceKlassHandle defc;
+  {
+    klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop());
+    if (defc_klassOop == NULL)  return;  // a primitive; no resolution possible
+    if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
+      if (!Klass::cast(defc_klassOop)->oop_is_array())  return;
+      defc_klassOop = SystemDictionary::Object_klass();
+    }
+    defc = instanceKlassHandle(THREAD, defc_klassOop);
   }
-  instanceKlassHandle defc(THREAD, defc_klassOop);
-  defc_klassOop = NULL;  // safety
   if (defc.is_null()) {
     THROW_MSG(vmSymbols::java_lang_InternalError(), "primitive class");
   }
-  defc->link_class(CHECK);
+  defc->link_class(CHECK);  // possible safepoint
 
   // convert the external string name to an internal symbol
-  TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str);
+  TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str());
   if (name == NULL)  return;  // no such name
-  name_str = NULL;  // safety
+  if (name == vmSymbols::class_initializer_name())
+    return; // illegal name
 
   Handle polymorphic_method_type;
   bool polymorphic_signature = false;
   if ((flags & ALL_KINDS) == IS_METHOD &&
       (defc() == SystemDictionary::MethodHandle_klass() &&
-       methodOopDesc::is_method_handle_invoke_name(name)))
+       methodOopDesc::is_method_handle_invoke_name(name))) {
     polymorphic_signature = true;
+  }
 
   // convert the external string or reflective type to an internal signature
-  TempNewSymbol type = convert_to_signature(type_str, polymorphic_signature, CHECK);
-  if (java_lang_invoke_MethodType::is_instance(type_str) && polymorphic_signature) {
-    polymorphic_method_type = Handle(THREAD, type_str);  //preserve exactly
+  TempNewSymbol type = convert_to_signature(type_str(), polymorphic_signature, CHECK);
+  if (java_lang_invoke_MethodType::is_instance(type_str()) && polymorphic_signature) {
+    polymorphic_method_type = type_str;  // preserve exactly
   }
-
   if (type == NULL)  return;  // no such signature exists in the VM
-  type_str = NULL; // safety
 
   // Time to do the lookup.
   switch (flags & ALL_KINDS) {
@@ -560,8 +692,8 @@
       java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget);
       java_lang_invoke_MemberName::set_vmindex(mname(),  vmindex);
       java_lang_invoke_MemberName::set_modifiers(mname(), mods);
-      DEBUG_ONLY(int junk; klassOop junk2);
-      assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(),
+      DEBUG_ONLY(KlassHandle junk1; int junk2);
+      assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
              "properly stored for later decoding");
       return;
     }
@@ -589,8 +721,8 @@
       java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget);
       java_lang_invoke_MemberName::set_vmindex(mname(),  vmindex);
       java_lang_invoke_MemberName::set_modifiers(mname(), mods);
-      DEBUG_ONLY(int junk; klassOop junk2);
-      assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(),
+      DEBUG_ONLY(KlassHandle junk1; int junk2);
+      assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
              "properly stored for later decoding");
       return;
     }
@@ -637,7 +769,9 @@
         m = NULL;
         // try again with a different class loader...
       }
-      if (m != NULL) {
+      if (m != NULL &&
+          m->is_method_handle_invoke() &&
+          java_lang_invoke_MethodType::equals(polymorphic_method_type(), m->method_handle_type())) {
         int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS);
         java_lang_invoke_MemberName::set_vmtarget(mname(),  m);
         java_lang_invoke_MemberName::set_vmindex(mname(),   m->vtable_index());
@@ -677,16 +811,14 @@
   case IS_METHOD:
   case IS_CONSTRUCTOR:
     {
-      klassOop receiver_limit = NULL;
-      int      decode_flags   = 0;
-      methodHandle m(THREAD, decode_vmtarget(vmtarget, vmindex, NULL,
-                                             receiver_limit, decode_flags));
+      KlassHandle receiver_limit; int decode_flags = 0;
+      methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit, decode_flags);
       if (m.is_null())  break;
       if (!have_defc) {
         klassOop defc = m->method_holder();
-        if (receiver_limit != NULL && receiver_limit != defc
-            && Klass::cast(receiver_limit)->is_subtype_of(defc))
-          defc = receiver_limit;
+        if (receiver_limit.not_null() && receiver_limit() != defc
+            && Klass::cast(receiver_limit())->is_subtype_of(defc))
+          defc = receiver_limit();
         java_lang_invoke_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror());
       }
       if (!have_name) {
@@ -858,6 +990,48 @@
 // This is for debugging and reflection.
 oop MethodHandles::encode_target(Handle mh, int format, TRAPS) {
   assert(java_lang_invoke_MethodHandle::is_instance(mh()), "must be a MH");
+  if (format == ETF_FORCE_DIRECT_HANDLE ||
+      format == ETF_COMPILE_DIRECT_HANDLE) {
+    // Internal function for stress testing.
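+    // Compiles the handle chain into a synthetic bytecode method, optionally
+    // forces JIT compilation, and returns it wrapped in a new DirectMethodHandle.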
+    Handle mt = java_lang_invoke_MethodHandle::type(mh());
+    int invocation_count = 10000;
+    TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK_NULL);
+    bool omit_receiver_argument = true;
+    MethodHandleCompiler mhc(mh, vmSymbols::invoke_name(), signature, invocation_count, omit_receiver_argument, CHECK_NULL);
+    methodHandle m = mhc.compile(CHECK_NULL);
+    if (StressMethodHandleWalk && Verbose || PrintMiscellaneous) {
+      tty->print_cr("MethodHandleNatives.getTarget(%s)",
+                    format == ETF_FORCE_DIRECT_HANDLE ? "FORCE_DIRECT" : "COMPILE_DIRECT");
+      if (Verbose) {
+        m->print_codes();
+      }
+    }
+    if (StressMethodHandleWalk) {
+      InterpreterOopMap mask;
+      OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask);
+    }
+    if ((format == ETF_COMPILE_DIRECT_HANDLE ||
+         CompilationPolicy::must_be_compiled(m))
+        && !instanceKlass::cast(m->method_holder())->is_not_initialized()
+        && CompilationPolicy::can_be_compiled(m)) {
+      // Force compilation
+      CompileBroker::compile_method(m, InvocationEntryBci,
+                                    CompLevel_initial_compile,
+                                    methodHandle(), 0, "MethodHandleNatives.getTarget",
+                                    CHECK_NULL);
+    }
+    // Now wrap m in a DirectMethodHandle.
+    instanceKlassHandle dmh_klass(THREAD, SystemDictionary::DirectMethodHandle_klass());
+    Handle dmh = dmh_klass->allocate_instance_handle(CHECK_NULL);
+    JavaValue ignore_result(T_VOID);
+    Symbol* init_name = vmSymbols::object_initializer_name();
+    Symbol* init_sig  = vmSymbols::notifyGenericMethodType_signature();
+    JavaCalls::call_special(&ignore_result, dmh,
+                            SystemDictionaryHandles::MethodHandle_klass(), init_name, init_sig,
+                            java_lang_invoke_MethodHandle::type(mh()), CHECK_NULL);
+    MethodHandles::init_DirectMethodHandle(dmh, m, false, CHECK_NULL);
+    return dmh();
+  }
   if (format == ETF_HANDLE_OR_METHOD_NAME) {
     oop target = java_lang_invoke_MethodHandle::vmtarget(mh());
     if (target == NULL) {
@@ -884,10 +1058,9 @@
   // - AMH can have methodOop for static invoke with bound receiver
   // - DMH can have methodOop for static invoke (on variable receiver)
   // - DMH can have klassOop for dispatched (non-static) invoke
-  klassOop receiver_limit = NULL;
-  int decode_flags = 0;
-  methodOop m = decode_MethodHandle(mh(), receiver_limit, decode_flags);
-  if (m == NULL)  return NULL;
+  KlassHandle receiver_limit; int decode_flags = 0;
+  methodHandle m = decode_MethodHandle(mh(), receiver_limit, decode_flags);
+  if (m.is_null())  return NULL;
   switch (format) {
   case ETF_REFLECT_METHOD:
     // same as jni_ToReflectedMethod:
@@ -903,10 +1076,10 @@
       if (SystemDictionary::MemberName_klass() == NULL)  break;
       instanceKlassHandle mname_klass(THREAD, SystemDictionary::MemberName_klass());
       mname_klass->initialize(CHECK_NULL);
-      Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL);
+      Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL);  // possible safepoint
       java_lang_invoke_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED);
       bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0);
-      init_MemberName(mname(), m, do_dispatch);
+      init_MemberName(mname(), m(), do_dispatch);
       expand_MemberName(mname, 0, CHECK_NULL);
       return mname();
     }
@@ -975,6 +1148,14 @@
   return Klass::cast(SystemDictionary::Object_klass())->java_mirror();
 }
 
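+// True only for the bit-preserving pairs float<->int and double<->long;
+// same_basic_type_for_arguments (below) accepts these as raw reinterpretation casts.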
+bool MethodHandles::is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst) {
+  if (src == T_FLOAT)   return dst == T_INT;
+  if (src == T_INT)     return dst == T_FLOAT;
+  if (src == T_DOUBLE)  return dst == T_LONG;
+  if (src == T_LONG)    return dst == T_DOUBLE;
+  return false;
+}
+
 bool MethodHandles::same_basic_type_for_arguments(BasicType src,
                                                   BasicType dst,
                                                   bool raw,
@@ -1001,10 +1182,8 @@
       return true;            // remaining case: byte fits in short
   }
   // allow float/fixed reinterpretation casts
-  if (src == T_FLOAT)   return dst == T_INT;
-  if (src == T_INT)     return dst == T_FLOAT;
-  if (src == T_DOUBLE)  return dst == T_LONG;
-  if (src == T_LONG)    return dst == T_DOUBLE;
+  if (is_float_fixed_reinterpretation_cast(src, dst))
+    return true;
   return false;
 }
 
@@ -1088,6 +1267,12 @@
           klassOop aklass_oop = SystemDictionary::resolve_or_null(name, loader, domain, CHECK);
           if (aklass_oop != NULL)
             aklass = KlassHandle(THREAD, aklass_oop);
+          if (aklass.is_null() &&
+              pklass.not_null() &&
+              loader.is_null() &&
+              pklass->name() == name)
+            // accept name equivalence here, since that's the best we can do
+            aklass = pklass;
         }
       } else {
         // for method handle invokers we don't look at the name in the signature
@@ -1173,6 +1358,7 @@
   // Verify that argslot points at the given argnum.
   int check_slot = argument_slot(java_lang_invoke_MethodHandle::type(mh()), argnum);
   if (argslot != check_slot || argslot < 0) {
+    ResourceMark rm;
     const char* fmt = "for argnum of %d, vmargslot is %d, should be %d";
     size_t msglen = strlen(fmt) + 3*11 + 1;
     char* msg = NEW_RESOURCE_ARRAY(char, msglen);
@@ -1275,7 +1461,7 @@
                                                       int argnum,
                                                       bool raw) {
   const char* err = NULL;
-  bool for_return = (argnum < 0);
+  const bool for_return = (argnum < 0);
 
   // just in case:
   if (src_type == T_ARRAY)  src_type = T_OBJECT;
@@ -1284,17 +1470,17 @@
   // Produce some nice messages if VerifyMethodHandles is turned on:
   if (!same_basic_type_for_arguments(src_type, dst_type, raw, for_return)) {
     if (src_type == T_OBJECT) {
-      if (raw && dst_type == T_INT && is_always_null_type(src_klass))
-        return NULL;    // OK to convert a null pointer to a garbage int
-      err = ((argnum >= 0)
+      if (raw && is_java_primitive(dst_type))
+        return NULL;    // ref-to-prim discards ref and returns zero
+      err = (!for_return
              ? "type mismatch: passing a %s for method argument #%d, which expects primitive %s"
              : "type mismatch: returning a %s, but caller expects primitive %s");
     } else if (dst_type == T_OBJECT) {
-      err = ((argnum >= 0)
+      err = (!for_return
              ? "type mismatch: passing a primitive %s for method argument #%d, which expects %s"
              : "type mismatch: returning a primitive %s, but caller expects %s");
     } else {
-      err = ((argnum >= 0)
+      err = (!for_return
              ? "type mismatch: passing a %s for method argument #%d, which expects %s"
              : "type mismatch: returning a %s, but caller expects %s");
     }
@@ -1303,11 +1489,11 @@
     if (!class_cast_needed(dst_klass, src_klass)) {
       if (raw)
         return NULL;    // reverse cast is OK; the MH target is trusted to enforce it
-      err = ((argnum >= 0)
+      err = (!for_return
              ? "cast required: passing a %s for method argument #%d, which expects %s"
              : "cast required: returning a %s, but caller expects %s");
     } else {
-      err = ((argnum >= 0)
+      err = (!for_return
              ? "reference mismatch: passing a %s for method argument #%d, which expects %s"
              : "reference mismatch: returning a %s, but caller expects %s");
     }
@@ -1328,7 +1514,7 @@
 
   size_t msglen = strlen(err) + strlen(src_name) + strlen(dst_name) + (argnum < 10 ? 1 : 11);
   char* msg = NEW_RESOURCE_ARRAY(char, msglen + 1);
-  if (argnum >= 0) {
+  if (!for_return) {
     assert(strstr(err, "%d") != NULL, "");
     jio_snprintf(msg, msglen, err, src_name, argnum, dst_name);
   } else {
@@ -1459,8 +1645,8 @@
   // that links the interpreter calls to the method.  We need the same
   // bits, and will use the same calling sequence code.
 
-  int vmindex = methodOopDesc::garbage_vtable_index;
-  oop vmtarget = NULL;
+  int    vmindex = methodOopDesc::garbage_vtable_index;
+  Handle vmtarget;
 
   instanceKlass::cast(m->method_holder())->link_class(CHECK);
 
@@ -1478,7 +1664,7 @@
   } else if (!do_dispatch || m->can_be_statically_bound()) {
     // We are simulating an invokestatic or invokespecial instruction.
     // Set up the method pointer, just like ConstantPoolCacheEntry::set_method().
-    vmtarget = m();
+    vmtarget = m;
     // this does not help dispatch, but it will make it possible to parse this MH:
     vmindex  = methodOopDesc::nonvirtual_vtable_index;
     assert(vmindex < 0, "(>=0) == do_dispatch");
@@ -1490,7 +1676,7 @@
       // For a DMH, it is done now, when the handle is created.
       Klass* k = Klass::cast(m->method_holder());
       if (k->should_be_initialized()) {
-        k->initialize(CHECK);
+        k->initialize(CHECK);  // possible safepoint
       }
     }
   } else {
@@ -1504,10 +1690,10 @@
 
   if (me == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
 
-  java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget);
-  java_lang_invoke_DirectMethodHandle::set_vmindex(mh(),  vmindex);
-  DEBUG_ONLY(int flags; klassOop rlimit);
-  assert(MethodHandles::decode_method(mh(), rlimit, flags) == m(),
+  java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget());
+  java_lang_invoke_DirectMethodHandle::set_vmindex( mh(), vmindex);
+  DEBUG_ONLY(KlassHandle rlimit; int flags);
+  assert(MethodHandles::decode_method(mh(), rlimit, flags) == m,
          "properly stored for later decoding");
   DEBUG_ONLY(bool actual_do_dispatch = ((flags & _dmf_does_dispatch) != 0));
   assert(!(actual_do_dispatch && !do_dispatch),
@@ -1523,10 +1709,13 @@
                                                            methodHandle m,
                                                            TRAPS) {
   // Verify type.
-  oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh());
+  KlassHandle bound_recv_type;
+  {
+    oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh());
+    if (receiver != NULL)
+      bound_recv_type = KlassHandle(THREAD, receiver->klass());
+  }
   Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh()));
-  KlassHandle bound_recv_type;
-  if (receiver != NULL)  bound_recv_type = KlassHandle(THREAD, receiver->klass());
   verify_method_type(m, mtype, true, bound_recv_type, CHECK);
 
   int receiver_pos = m->size_of_parameters() - 1;
@@ -1566,6 +1755,8 @@
   if (m->is_abstract()) { THROW(vmSymbols::java_lang_AbstractMethodError()); }
 
   java_lang_invoke_MethodHandle::init_vmslots(mh());
+  int vmargslot = m->size_of_parameters() - 1;
+  assert(java_lang_invoke_BoundMethodHandle::vmargslot(mh()) == vmargslot, "");
 
   if (VerifyMethodHandles) {
     verify_BoundMethodHandle_with_receiver(mh, m, CHECK);
@@ -1573,8 +1764,8 @@
 
   java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m());
 
-  DEBUG_ONLY(int junk; klassOop junk2);
-  assert(MethodHandles::decode_method(mh(), junk2, junk) == m(), "properly stored for later decoding");
+  DEBUG_ONLY(KlassHandle junk1; int junk2);
+  assert(MethodHandles::decode_method(mh(), junk1, junk2) == m, "properly stored for later decoding");
   assert(decode_MethodHandle_stack_pushes(mh()) == 1, "BMH pushes one stack slot");
 
   // Done!
@@ -1583,6 +1774,7 @@
 
 void MethodHandles::verify_BoundMethodHandle(Handle mh, Handle target, int argnum,
                                              bool direct_to_method, TRAPS) {
+  ResourceMark rm;
   Handle ptype_handle(THREAD,
                            java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum));
   KlassHandle ptype_klass;
@@ -1644,14 +1836,9 @@
     DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh()));
     if (direct_to_method) {
       assert(this_pushes == slots_pushed, "BMH pushes one or two stack slots");
-      assert(slots_pushed <= MethodHandlePushLimit, "");
     } else {
       int target_pushes = decode_MethodHandle_stack_pushes(target());
       assert(this_pushes == slots_pushed + target_pushes, "BMH stack motion must be correct");
-      // do not blow the stack; use a Java-based adapter if this limit is exceeded
-      // FIXME
-      // if (slots_pushed + target_pushes > MethodHandlePushLimit)
-      //   err = "too many bound parameters";
     }
   }
 
@@ -1674,16 +1861,20 @@
   }
 
   java_lang_invoke_MethodHandle::init_vmslots(mh());
+  int argslot = java_lang_invoke_BoundMethodHandle::vmargslot(mh());
 
   if (VerifyMethodHandles) {
     int insert_after = argnum - 1;
-    verify_vmargslot(mh, insert_after, java_lang_invoke_BoundMethodHandle::vmargslot(mh()), CHECK);
+    verify_vmargslot(mh, insert_after, argslot, CHECK);
     verify_vmslots(mh, CHECK);
   }
 
   // Get bound type and required slots.
-  oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum);
-  BasicType ptype = java_lang_Class::as_BasicType(ptype_oop);
+  BasicType ptype;
+  {
+    oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum);
+    ptype = java_lang_Class::as_BasicType(ptype_oop);
+  }
   int slots_pushed = type2size[ptype];
 
   // If (a) the target is a direct non-dispatched method handle,
@@ -1693,14 +1884,14 @@
   bool direct_to_method = false;
   if (OptimizeMethodHandles &&
       target->klass() == SystemDictionary::DirectMethodHandle_klass() &&
+      (argnum != 0 || java_lang_invoke_BoundMethodHandle::argument(mh()) != NULL) &&
       (argnum == 0 || java_lang_invoke_DirectMethodHandle::vmindex(target()) < 0)) {
-    int decode_flags = 0; klassOop receiver_limit_oop = NULL;
-    methodHandle m(THREAD, decode_method(target(), receiver_limit_oop, decode_flags));
+    KlassHandle receiver_limit; int decode_flags = 0;
+    methodHandle m = decode_method(target(), receiver_limit, decode_flags);
     if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); }
     DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg.
     assert(java_lang_invoke_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig");
     if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) {
-      KlassHandle receiver_limit(THREAD, receiver_limit_oop);
       init_BoundMethodHandle_with_receiver(mh, m,
                                            receiver_limit, decode_flags,
                                            CHECK);
@@ -1747,6 +1938,7 @@
 }
 
 void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
+  ResourceMark rm;
   jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh());
   int  argslot    = java_lang_invoke_AdapterMethodHandle::vmargslot(mh());
 
@@ -1769,6 +1961,7 @@
   Handle target(THREAD,    java_lang_invoke_AdapterMethodHandle::vmtarget(mh()));
   Handle src_mtype(THREAD, java_lang_invoke_MethodHandle::type(mh()));
   Handle dst_mtype(THREAD, java_lang_invoke_MethodHandle::type(target()));
+  Handle arg_mtype;
 
   const char* err = NULL;
 
@@ -1777,25 +1970,29 @@
     switch (ek) {
     case _adapter_check_cast:     // target type of cast
     case _adapter_ref_to_prim:    // wrapper type from which to unbox
-    case _adapter_prim_to_ref:    // wrapper type to box into
-    case _adapter_collect_args:   // array type to collect into
     case _adapter_spread_args:    // array type to spread from
       if (!java_lang_Class::is_instance(argument())
           || java_lang_Class::is_primitive(argument()))
         { err = "adapter requires argument of type java.lang.Class"; break; }
-      if (ek == _adapter_collect_args ||
-          ek == _adapter_spread_args) {
+      if (ek == _adapter_spread_args) {
         // Make sure it is a suitable collection type.  (Array, for now.)
         Klass* ak = Klass::cast(java_lang_Class::as_klassOop(argument()));
-        if (!ak->oop_is_objArray()) {
-          { err = "adapter requires argument of type java.lang.Class<Object[]>"; break; }
-        }
+        if (!ak->oop_is_array())
+          { err = "spread adapter requires argument representing an array class"; break; }
+        BasicType et = arrayKlass::cast(ak->as_klassOop())->element_type();
+        if (et != dest && stack_move <= 0)
+          { err = "spread adapter requires array class argument of correct type"; break; }
       }
       break;
-    case _adapter_flyby:
-    case _adapter_ricochet:
+    case _adapter_prim_to_ref:    // boxer MH to use
+    case _adapter_collect_args:   // method handle which collects the args
+    case _adapter_fold_args:      // method handle which collects the args
+      if (!UseRicochetFrames) {
+        { err = "box/collect/fold operators are not supported"; break; }
+      }
       if (!java_lang_invoke_MethodHandle::is_instance(argument()))
         { err = "MethodHandle adapter argument required"; break; }
+      arg_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(argument()));
       break;
     default:
       if (argument.not_null())
@@ -1806,6 +2003,7 @@
 
   if (err == NULL) {
     // Check that the src/dest types are supplied if needed.
+    // Also check relevant parameter or return types.
     switch (ek) {
     case _adapter_check_cast:
       if (src != T_OBJECT || dest != T_OBJECT) {
@@ -1828,73 +2026,120 @@
       }
       break;
     case _adapter_prim_to_ref:
-      if (!is_java_primitive(src) || dest != T_OBJECT
-          || argument() != Klass::cast(SystemDictionary::box_klass(src))->java_mirror()) {
+      if (!is_java_primitive(src) || dest != T_OBJECT) {
         err = "adapter requires primitive src conversion subfield"; break;
       }
       break;
     case _adapter_swap_args:
-    case _adapter_rot_args:
       {
-        if (!src || src != dest) {
+        if (!src || !dest) {
           err = "adapter requires src/dest conversion subfields for swap"; break;
         }
-        int swap_size = type2size[src];
-        oop src_mtype  = java_lang_invoke_AdapterMethodHandle::type(mh());
-        oop dest_mtype = java_lang_invoke_AdapterMethodHandle::type(target());
-        int slot_limit = java_lang_invoke_AdapterMethodHandle::vmslots(target());
+        int src_size  = type2size[src];
+        if (src_size != type2size[dest]) {
+          err = "adapter requires equal sizes for src/dest"; break;
+        }
         int src_slot   = argslot;
         int dest_slot  = vminfo;
-        bool rotate_up = (src_slot > dest_slot); // upward rotation
         int src_arg    = argnum;
-        int dest_arg   = argument_slot_to_argnum(dest_mtype, dest_slot);
+        int dest_arg   = argument_slot_to_argnum(src_mtype(), dest_slot);
         verify_vmargslot(mh, dest_arg, dest_slot, CHECK);
-        if (!(dest_slot >= src_slot + swap_size) &&
-            !(src_slot >= dest_slot + swap_size)) {
-          err = "source, destination slots must be distinct";
-        } else if (ek == _adapter_swap_args && !(src_slot > dest_slot)) {
-          err = "source of swap must be deeper in stack";
-        } else if (ek == _adapter_swap_args) {
-          err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, dest_arg),
-                                           java_lang_invoke_MethodType::ptype(dest_mtype, src_arg),
-                                           dest_arg);
-        } else if (ek == _adapter_rot_args) {
-          if (rotate_up) {
-            assert((src_slot > dest_slot) && (src_arg < dest_arg), "");
-            // rotate up: [dest_slot..src_slot-ss] --> [dest_slot+ss..src_slot]
-            // that is:   [src_arg+1..dest_arg] --> [src_arg..dest_arg-1]
-            for (int i = src_arg+1; i <= dest_arg && err == NULL; i++) {
-              err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, i),
-                                               java_lang_invoke_MethodType::ptype(dest_mtype, i-1),
-                                               i);
-            }
-          } else { // rotate down
-            assert((src_slot < dest_slot) && (src_arg > dest_arg), "");
-            // rotate down: [src_slot+ss..dest_slot] --> [src_slot..dest_slot-ss]
-            // that is:     [dest_arg..src_arg-1] --> [dst_arg+1..src_arg]
-            for (int i = dest_arg; i <= src_arg-1 && err == NULL; i++) {
-              err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, i),
-                                               java_lang_invoke_MethodType::ptype(dest_mtype, i+1),
-                                               i);
-            }
+        if (!(dest_slot >= src_slot + src_size) &&
+            !(src_slot >= dest_slot + src_size)) {
+          err = "source, destination slots must be distinct"; break;
+        } else if (!(src_slot > dest_slot)) {
+          err = "source of swap must be deeper in stack"; break;
+        }
+        err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), dest_arg),
+                                         java_lang_invoke_MethodType::ptype(dst_mtype(), src_arg),
+                                         dest_arg);
+        if (err == NULL)
+          err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg),
+                                           java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg),
+                                           src_arg);
+        break;
+      }
+    case _adapter_rot_args:
+      {
+        if (!src || !dest) {
+          err = "adapter requires src/dest conversion subfields for rotate"; break;
+        }
+        int src_slot   = argslot;
+        int limit_raw  = vminfo;
+        bool rot_down  = (src_slot < limit_raw);
+        int limit_bias = (rot_down ? MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS : 0);
+        int limit_slot = limit_raw - limit_bias;
+        int src_arg    = argnum;
+        int limit_arg  = argument_slot_to_argnum(src_mtype(), limit_slot);
+        verify_vmargslot(mh, limit_arg, limit_slot, CHECK);
+        if (src_slot == limit_slot) {
+          err = "source, destination slots must be distinct"; break;
+        }
+        if (!rot_down) {  // rotate slots up == shift arguments left
+          // limit_slot is an inclusive lower limit
+          assert((src_slot > limit_slot) && (src_arg < limit_arg), "");
+          // rotate up: [limit_slot..src_slot-ss] --> [limit_slot+ss..src_slot]
+          // that is:   [src_arg+1..limit_arg] --> [src_arg..limit_arg-1]
+          for (int i = src_arg+1; i <= limit_arg && err == NULL; i++) {
+            err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i),
+                                             java_lang_invoke_MethodType::ptype(dst_mtype(), i-1),
+                                             i);
+          }
+        } else { // rotate slots down == shift arguments right
+          // limit_slot is an exclusive upper limit
+          assert((src_slot < limit_slot - limit_bias) && (src_arg > limit_arg + limit_bias), "");
+          // rotate down: [src_slot+ss..limit_slot) --> [src_slot..limit_slot-ss)
+          // that is:     (limit_arg..src_arg-1] --> (limit_arg+1..src_arg]
+          for (int i = limit_arg+1; i <= src_arg-1 && err == NULL; i++) {
+            err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i),
+                                             java_lang_invoke_MethodType::ptype(dst_mtype(), i+1),
+                                             i);
           }
         }
-        if (err == NULL)
-          err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, src_arg),
-                                           java_lang_invoke_MethodType::ptype(dest_mtype, dest_arg),
+        if (err == NULL) {
+          int dest_arg = (rot_down ? limit_arg+1 : limit_arg);
+          err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg),
+                                           java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg),
                                            src_arg);
+        }
       }
       break;
+    case _adapter_spread_args:
     case _adapter_collect_args:
-    case _adapter_spread_args:
+    case _adapter_fold_args:
       {
-        BasicType coll_type = (ek == _adapter_collect_args) ? dest : src;
-        BasicType elem_type = (ek == _adapter_collect_args) ? src : dest;
-        if (coll_type != T_OBJECT || elem_type != T_OBJECT) {
-          err = "adapter requires src/dest subfields"; break;
-          // later:
-          // - consider making coll be a primitive array
-          // - consider making coll be a heterogeneous collection
+        bool is_spread = (ek == _adapter_spread_args);
+        bool is_fold   = (ek == _adapter_fold_args);
+        BasicType coll_type = is_spread ? src : dest;
+        BasicType elem_type = is_spread ? dest : src;
+        // coll_type is type of args in collected form (or T_VOID if none)
+        // elem_type is common type of args in spread form (or T_VOID if missing or heterogeneous)
+        if (coll_type == 0 || elem_type == 0) {
+          err = "adapter requires src/dest subfields for spread or collect"; break;
+        }
+        if (is_spread && coll_type != T_OBJECT) {
+          err = "spread adapter requires object type for argument bundle"; break;
+        }
+        Handle spread_mtype = (is_spread ? dst_mtype : src_mtype);
+        int spread_slot = argslot;
+        int spread_arg  = argnum;
+        int slots_pushed = stack_move / stack_move_unit();
+        int coll_slot_count = type2size[coll_type];
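+        // e.g. collecting two ints into one int: slots_pushed = -1, coll_slot_count = 1,
+        //      so spread_slot_count = 2; spreading an Object[3] into three references:
+        //      slots_pushed = +2, coll_slot_count = 1, so spread_slot_count = 3.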
+        int spread_slot_count = (is_spread ? slots_pushed : -slots_pushed) + coll_slot_count;
+        if (is_fold)  spread_slot_count = argument_slot_count(arg_mtype());
+        if (!is_spread) {
+          int init_slots = argument_slot_count(src_mtype());
+          int coll_slots = argument_slot_count(arg_mtype());
+          if (spread_slot_count > init_slots ||
+              spread_slot_count != coll_slots) {
+            err = "collect adapter has inconsistent arg counts"; break;
+          }
+          int next_slots = argument_slot_count(dst_mtype());
+          int unchanged_slots_in  = (init_slots - spread_slot_count);
+          int unchanged_slots_out = (next_slots - coll_slot_count - (is_fold ? spread_slot_count : 0));
+          if (unchanged_slots_in != unchanged_slots_out) {
+            err = "collect adapter continuation has inconsistent arg counts"; break;
+          }
         }
       }
       break;
@@ -1929,8 +2174,9 @@
       }
       break;
     case _adapter_collect_args:
-      if (slots_pushed > 1) {
-        err = "adapter requires conversion subfield slots_pushed <= 1";
+    case _adapter_fold_args:
+      if (slots_pushed > 2) {
+        err = "adapter requires conversion subfield slots_pushed <= 2";
       }
       break;
     case _adapter_spread_args:
@@ -1950,32 +2196,36 @@
   }
 
   if (err == NULL) {
-    // Make sure this adapter does not push too deeply.
+    // Make sure this adapter's stack pushing is accurately recorded.
     int slots_pushed = stack_move / stack_move_unit();
     int this_vmslots = java_lang_invoke_MethodHandle::vmslots(mh());
     int target_vmslots = java_lang_invoke_MethodHandle::vmslots(target());
+    int target_pushes = decode_MethodHandle_stack_pushes(target());
     if (slots_pushed != (target_vmslots - this_vmslots)) {
       err = "stack_move inconsistent with previous and current MethodType vmslots";
-    } else if (slots_pushed > 0)  {
-      // verify stack_move against MethodHandlePushLimit
-      int target_pushes = decode_MethodHandle_stack_pushes(target());
-      // do not blow the stack; use a Java-based adapter if this limit is exceeded
-      if (slots_pushed + target_pushes > MethodHandlePushLimit) {
-        err = "adapter pushes too many parameters";
+    } else {
+      int this_pushes = decode_MethodHandle_stack_pushes(mh());
+      if (slots_pushed + target_pushes != this_pushes) {
+        if (this_pushes == 0)
+          err = "adapter push count not initialized";
+        else
+          err = "adapter push count is wrong";
       }
     }
 
     // While we're at it, check that the stack motion decoder works:
-    DEBUG_ONLY(int target_pushes = decode_MethodHandle_stack_pushes(target()));
     DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh()));
     assert(this_pushes == slots_pushed + target_pushes, "AMH stack motion must be correct");
   }
 
   if (err == NULL && vminfo != 0) {
     switch (ek) {
-      case _adapter_swap_args:
-      case _adapter_rot_args:
-        break;                // OK
+    case _adapter_swap_args:
+    case _adapter_rot_args:
+    case _adapter_prim_to_ref:
+    case _adapter_collect_args:
+    case _adapter_fold_args:
+      break;                // OK
     default:
       err = "vminfo subfield is reserved to the JVM";
     }
@@ -2019,14 +2269,15 @@
 }
 
 void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) {
-  oop  argument   = java_lang_invoke_AdapterMethodHandle::argument(mh());
-  int  argslot    = java_lang_invoke_AdapterMethodHandle::vmargslot(mh());
-  jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh());
-  jint conv_op    = adapter_conversion_op(conversion);
+  Handle argument   = java_lang_invoke_AdapterMethodHandle::argument(mh());
+  int    argslot    = java_lang_invoke_AdapterMethodHandle::vmargslot(mh());
+  jint   conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh());
+  jint   conv_op    = adapter_conversion_op(conversion);
 
   // adjust the adapter code to the internal EntryKind enumeration:
   EntryKind ek_orig = adapter_entry_kind(conv_op);
   EntryKind ek_opt  = ek_orig;  // may be optimized
+  EntryKind ek_try;             // temp
 
   // Finalize the vmtarget field (Java initialized it to null).
   if (!java_lang_invoke_MethodHandle::is_instance(target())) {
@@ -2035,17 +2286,23 @@
   }
   java_lang_invoke_AdapterMethodHandle::set_vmtarget(mh(), target());
 
-  if (VerifyMethodHandles) {
-    verify_AdapterMethodHandle(mh, argnum, CHECK);
-  }
-
   int stack_move = adapter_conversion_stack_move(conversion);
   BasicType src  = adapter_conversion_src_type(conversion);
   BasicType dest = adapter_conversion_dest_type(conversion);
   int vminfo     = adapter_conversion_vminfo(conversion); // should be zero
 
+  int slots_pushed = stack_move / stack_move_unit();
+
+  if (VerifyMethodHandles) {
+    verify_AdapterMethodHandle(mh, argnum, CHECK);
+  }
+
   const char* err = NULL;
 
+  if (!conv_op_supported(conv_op)) {
+    err = "adapter not yet implemented in the JVM";
+  }
+
   // Now it's time to finish the case analysis and pick a MethodHandleEntry.
   switch (ek_orig) {
   case _adapter_retype_only:
@@ -2074,20 +2331,20 @@
         } else if (src == T_DOUBLE && dest == T_FLOAT) {
           ek_opt = _adapter_opt_d2f;
         } else {
-          assert(false, "");
+          goto throw_not_impl;        // runs user code, hence could block
         }
         break;
       case 1 *4+ 2:
-        if (src == T_INT && dest == T_LONG) {
+        if ((src == T_INT || is_subword_type(src)) && dest == T_LONG) {
           ek_opt = _adapter_opt_i2l;
         } else if (src == T_FLOAT && dest == T_DOUBLE) {
           ek_opt = _adapter_opt_f2d;
         } else {
-          assert(false, "");
+          goto throw_not_impl;        // runs user code, hence could block
         }
         break;
       default:
-        assert(false, "");
+        goto throw_not_impl;        // runs user code, hence could block
         break;
       }
     }
@@ -2104,20 +2361,59 @@
         ek_opt = _adapter_opt_unboxl;
         break;
       default:
-        assert(false, "");
+        goto throw_not_impl;
         break;
       }
     }
     break;
 
   case _adapter_prim_to_ref:
-    goto throw_not_impl;        // allocates, hence could block
+    {
+      assert(UseRicochetFrames, "else don't come here");
+      // vminfo will be the location to insert the return value
+      vminfo = argslot;
+      ek_opt = _adapter_opt_collect_ref;
+      ensure_vmlayout_field(target, CHECK);
+      // for MethodHandleWalk:
+      if (java_lang_invoke_AdapterMethodHandle::is_instance(argument()))
+        ensure_vmlayout_field(argument, CHECK);
+      if (!OptimizeMethodHandles)  break;
+      switch (type2size[src]) {
+      case 1:
+        ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot);
+        if (ek_try < _adapter_opt_collect_LAST &&
+            ek_adapter_opt_collect_slot(ek_try) == argslot) {
+          assert(ek_adapter_opt_collect_count(ek_try) == 1 &&
+                 ek_adapter_opt_collect_type(ek_try) == T_OBJECT, "");
+          ek_opt = ek_try;
+          break;
+        }
+        // else downgrade to variable slot:
+        ek_opt = _adapter_opt_collect_1_ref;
+        break;
+      case 2:
+        ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot);
+        if (ek_try < _adapter_opt_collect_LAST &&
+            ek_adapter_opt_collect_slot(ek_try) == argslot) {
+          assert(ek_adapter_opt_collect_count(ek_try) == 2 &&
+                 ek_adapter_opt_collect_type(ek_try) == T_OBJECT, "");
+          ek_opt = ek_try;
+          break;
+        }
+        // else downgrade to variable slot:
+        ek_opt = _adapter_opt_collect_2_ref;
+        break;
+      default:
+        goto throw_not_impl;
+        break;
+      }
+    }
+    break;
 
   case _adapter_swap_args:
   case _adapter_rot_args:
     {
       int swap_slots = type2size[src];
-      int slot_limit = java_lang_invoke_AdapterMethodHandle::vmslots(mh());
       int src_slot   = argslot;
       int dest_slot  = vminfo;
       int rotate     = (ek_orig == _adapter_swap_args) ? 0 : (src_slot > dest_slot) ? 1 : -1;
@@ -2131,35 +2427,184 @@
                   rotate > 0 ? _adapter_opt_rot_2_up : _adapter_opt_rot_2_down);
         break;
       default:
-        assert(false, "");
+        goto throw_not_impl;
         break;
       }
     }
     break;
 
-  case _adapter_collect_args:
-    goto throw_not_impl;        // allocates, hence could block
-
   case _adapter_spread_args:
     {
+#ifdef TARGET_ARCH_NYI_6939861
+      // ports before 6939861 supported only three kinds of spread ops
+      if (!UseRicochetFrames) {
+        int array_size   = slots_pushed + 1;
+        assert(array_size >= 0, "");
+        vminfo = array_size;
+        switch (array_size) {
+        case 0:   ek_opt = _adapter_opt_spread_0;       break;
+        case 1:   ek_opt = _adapter_opt_spread_1;       break;
+        default:  ek_opt = _adapter_opt_spread_more;    break;
+        }
+        break;
+      }
+#endif //TARGET_ARCH_NYI_6939861
       // vminfo will be the required length of the array
-      int slots_pushed = stack_move / stack_move_unit();
-      int array_size   = slots_pushed + 1;
-      assert(array_size >= 0, "");
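+      // array length = element slots / element size; e.g. spreading a long[2]
+      // pushes 4 element slots in place of 1 array slot (slots_pushed = 3),
+      // giving array_size = (3 + 1) / 2 = 2.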
+      int array_size = (slots_pushed + 1) / (type2size[dest] == 2 ? 2 : 1);
       vminfo = array_size;
-      switch (array_size) {
-      case 0:   ek_opt = _adapter_opt_spread_0;       break;
-      case 1:   ek_opt = _adapter_opt_spread_1;       break;
-      default:  ek_opt = _adapter_opt_spread_more;    break;
+      // general case
+      switch (dest) {
+      case T_BOOLEAN : // fall through to T_BYTE:
+      case T_BYTE    : ek_opt = _adapter_opt_spread_byte;    break;
+      case T_CHAR    : ek_opt = _adapter_opt_spread_char;    break;
+      case T_SHORT   : ek_opt = _adapter_opt_spread_short;   break;
+      case T_INT     : ek_opt = _adapter_opt_spread_int;     break;
+      case T_LONG    : ek_opt = _adapter_opt_spread_long;    break;
+      case T_FLOAT   : ek_opt = _adapter_opt_spread_float;   break;
+      case T_DOUBLE  : ek_opt = _adapter_opt_spread_double;  break;
+      case T_OBJECT  : ek_opt = _adapter_opt_spread_ref;     break;
+      case T_VOID    : if (array_size != 0)  goto throw_not_impl;
+                       ek_opt = _adapter_opt_spread_ref;     break;
+      default        : goto throw_not_impl;
       }
-      if ((vminfo & CONV_VMINFO_MASK) != vminfo)
-        goto throw_not_impl;    // overflow
+      assert(array_size == 0 ||  // it doesn't matter what the spreader is
+             (ek_adapter_opt_spread_count(ek_opt) == -1 &&
+              (ek_adapter_opt_spread_type(ek_opt) == dest ||
+               (ek_adapter_opt_spread_type(ek_opt) == T_BYTE && dest == T_BOOLEAN))),
+             err_msg("dest=%d ek_opt=%d", dest, ek_opt));
+
+      if (array_size <= 0) {
+        // since the general case does not handle length 0, this case is required:
+        ek_opt = _adapter_opt_spread_0;
+        break;
+      }
+      if (dest == T_OBJECT) {
+        ek_try = EntryKind(_adapter_opt_spread_1_ref - 1 + array_size);
+        if (ek_try < _adapter_opt_spread_LAST &&
+            ek_adapter_opt_spread_count(ek_try) == array_size) {
+          assert(ek_adapter_opt_spread_type(ek_try) == dest, "");
+          ek_opt = ek_try;
+          break;
+        }
+      }
+      break;
     }
     break;
 
-  case _adapter_flyby:
-  case _adapter_ricochet:
-    goto throw_not_impl;        // runs Java code, hence could block
+  case _adapter_collect_args:
+    {
+      assert(UseRicochetFrames, "else don't come here");
+      int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument()));
+      // vminfo will be the location to insert the return value
+      vminfo = argslot;
+      ensure_vmlayout_field(target, CHECK);
+      ensure_vmlayout_field(argument, CHECK);
+
+      // general case:
+      switch (dest) {
+      default       : if (!is_subword_type(dest))  goto throw_not_impl;
+                    // else fall through:
+      case T_INT    : ek_opt = _adapter_opt_collect_int;     break;
+      case T_LONG   : ek_opt = _adapter_opt_collect_long;    break;
+      case T_FLOAT  : ek_opt = _adapter_opt_collect_float;   break;
+      case T_DOUBLE : ek_opt = _adapter_opt_collect_double;  break;
+      case T_OBJECT : ek_opt = _adapter_opt_collect_ref;     break;
+      case T_VOID   : ek_opt = _adapter_opt_collect_void;    break;
+      }
+      assert(ek_adapter_opt_collect_slot(ek_opt) == -1 &&
+             ek_adapter_opt_collect_count(ek_opt) == -1 &&
+             (ek_adapter_opt_collect_type(ek_opt) == dest ||
+              ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)),
+             "");
+
+      if (dest == T_OBJECT && elem_slots == 1 && OptimizeMethodHandles) {
+        // filter operation on a ref
+        ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot);
+        if (ek_try < _adapter_opt_collect_LAST &&
+            ek_adapter_opt_collect_slot(ek_try) == argslot) {
+          assert(ek_adapter_opt_collect_count(ek_try) == elem_slots &&
+                 ek_adapter_opt_collect_type(ek_try) == dest, "");
+          ek_opt = ek_try;
+          break;
+        }
+        ek_opt = _adapter_opt_collect_1_ref;
+        break;
+      }
+
+      if (dest == T_OBJECT && elem_slots == 2 && OptimizeMethodHandles) {
+        // filter of two arguments
+        ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot);
+        if (ek_try < _adapter_opt_collect_LAST &&
+            ek_adapter_opt_collect_slot(ek_try) == argslot) {
+          assert(ek_adapter_opt_collect_count(ek_try) == elem_slots &&
+                 ek_adapter_opt_collect_type(ek_try) == dest, "");
+          ek_opt = ek_try;
+          break;
+        }
+        ek_opt = _adapter_opt_collect_2_ref;
+        break;
+      }
+
+      if (dest == T_OBJECT && OptimizeMethodHandles) {
+        // try to use a fixed length adapter
+        ek_try = EntryKind(_adapter_opt_collect_0_ref + elem_slots);
+        if (ek_try < _adapter_opt_collect_LAST &&
+            ek_adapter_opt_collect_count(ek_try) == elem_slots) {
+          assert(ek_adapter_opt_collect_slot(ek_try) == -1 &&
+                 ek_adapter_opt_collect_type(ek_try) == dest, "");
+          ek_opt = ek_try;
+          break;
+        }
+      }
+
+      break;
+    }
+
+  case _adapter_fold_args:
+    {
+      assert(UseRicochetFrames, "else don't come here");
+      int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument()));
+      // vminfo will be the location to insert the return value
+      vminfo = argslot + elem_slots;
+      ensure_vmlayout_field(target, CHECK);
+      ensure_vmlayout_field(argument, CHECK);
+
+      switch (dest) {
+      default       : if (!is_subword_type(dest))  goto throw_not_impl;
+                    // else fall through:
+      case T_INT    : ek_opt = _adapter_opt_fold_int;     break;
+      case T_LONG   : ek_opt = _adapter_opt_fold_long;    break;
+      case T_FLOAT  : ek_opt = _adapter_opt_fold_float;   break;
+      case T_DOUBLE : ek_opt = _adapter_opt_fold_double;  break;
+      case T_OBJECT : ek_opt = _adapter_opt_fold_ref;     break;
+      case T_VOID   : ek_opt = _adapter_opt_fold_void;    break;
+      }
+      assert(ek_adapter_opt_collect_slot(ek_opt) == -1 &&
+             ek_adapter_opt_collect_count(ek_opt) == -1 &&
+             (ek_adapter_opt_collect_type(ek_opt) == dest ||
+              ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)),
+             "");
+
+      if (dest == T_OBJECT && elem_slots == 0 && OptimizeMethodHandles) {
+        // if there are no args, just pretend it's a collect
+        ek_opt = _adapter_opt_collect_0_ref;
+        break;
+      }
+
+      if (dest == T_OBJECT && OptimizeMethodHandles) {
+        // try to use a fixed length adapter
+        ek_try = EntryKind(_adapter_opt_fold_1_ref - 1 + elem_slots);
+        if (ek_try < _adapter_opt_fold_LAST &&
+            ek_adapter_opt_collect_count(ek_try) == elem_slots) {
+          assert(ek_adapter_opt_collect_slot(ek_try) == -1 &&
+                 ek_adapter_opt_collect_type(ek_try) == dest, "");
+          ek_opt = ek_try;
+          break;
+        }
+      }
+
+      break;
+    }
 
   default:
     // should have failed much earlier; must be a missing case here
@@ -2167,13 +2612,38 @@
     // and fall through:
 
   throw_not_impl:
-    // FIXME: these adapters are NYI
-    err = "adapter not yet implemented in the JVM";
+    if (err == NULL)
+      err = "unknown adapter type";
     break;
   }
 
+  if (err == NULL && (vminfo & CONV_VMINFO_MASK) != vminfo) {
+    // should not happen, since vminfo encodes arg/slot indexes and must fit in CONV_VMINFO_MASK
+    err = "vminfo overflow";
+  }
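+
(The check just above asks whether vminfo still fits in CONV_VMINFO_MASK once the arg/slot index has been computed. A standalone sketch of that style of mask check and conversion-word packing follows; the constants and the pack_conversion helper are illustrative stand-ins, not the real java_lang_invoke_AdapterMethodHandle values.)

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative constants only; the real layout lives in javaClasses.hpp.
    enum {
      CONV_VMINFO_MASK  = 0xFF,   // low 8 bits hold an arg/slot index
      CONV_VMINFO_SHIFT = 0,
      CONV_OP_SHIFT     = 28      // assumed: conv_op in the high nibble
    };

    static int32_t pack_conversion(int conv_op, int vminfo) {
      // Caller must already have verified the "vminfo overflow" condition.
      assert((vminfo & CONV_VMINFO_MASK) == vminfo && "vminfo overflow");
      return (int32_t)((conv_op << CONV_OP_SHIFT) |
                       ((vminfo & CONV_VMINFO_MASK) << CONV_VMINFO_SHIFT));
    }

    int main() {
      int vminfo = 7;  // e.g. argslot + elem_slots
      bool overflow = (vminfo & CONV_VMINFO_MASK) != vminfo;
      std::printf("overflow=%d conv=0x%08x\n",
                  (int)overflow, (unsigned)pack_conversion(5, vminfo));
      return 0;
    }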
+
+  if (err == NULL && !have_entry(ek_opt)) {
+    err = "adapter stub for this kind of method handle is missing";
+  }
+
+  if (err == NULL && ek_opt == ek_orig) {
+    switch (ek_opt) {
+    case _adapter_prim_to_prim:
+    case _adapter_ref_to_prim:
+    case _adapter_prim_to_ref:
+    case _adapter_swap_args:
+    case _adapter_rot_args:
+    case _adapter_collect_args:
+    case _adapter_fold_args:
+    case _adapter_spread_args:
+      // should be handled completely by optimized cases; see above
+      err = "init_AdapterMethodHandle should not issue this";
+      break;
+    }
+  }
+
   if (err != NULL) {
-    throw_InternalError_for_bad_conversion(conversion, err, THREAD);
+    throw_InternalError_for_bad_conversion(conversion, err_msg("%s: conv_op %d ek_opt %d", err, conv_op, ek_opt), THREAD);
     return;
   }
 
@@ -2191,6 +2661,81 @@
   // Java code can publish it in global data structures.
 }
 
+void MethodHandles::ensure_vmlayout_field(Handle target, TRAPS) {
+  Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(target()));
+  Handle mtform(THREAD, java_lang_invoke_MethodType::form(mtype()));
+  if (mtform.is_null()) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) {
+    if (java_lang_invoke_MethodTypeForm::vmlayout(mtform()) == NULL) {
+      // fill it in
+      Handle erased_mtype(THREAD, java_lang_invoke_MethodTypeForm::erasedType(mtform()));
+      TempNewSymbol erased_signature
+        = java_lang_invoke_MethodType::as_signature(erased_mtype(), /*intern:*/true, CHECK);
+      methodOop cookie
+        = SystemDictionary::find_method_handle_invoke(vmSymbols::invokeExact_name(),
+                                                      erased_signature,
+                                                      SystemDictionaryHandles::Object_klass(),
+                                                      THREAD);
+      java_lang_invoke_MethodTypeForm::init_vmlayout(mtform(), cookie);
+    }
+  }
+}
+
+#ifdef ASSERT
+
+extern "C"
+void print_method_handle(oop mh);
+
+static void stress_method_handle_walk_impl(Handle mh, TRAPS) {
+  if (StressMethodHandleWalk) {
+    // Exercise the MethodHandleWalk code in various ways and validate
+    // the resulting method oop.  Some of these produce output so they
+    // are guarded under Verbose.
+    ResourceMark rm;
+    HandleMark hm;
+    if (Verbose) {
+      print_method_handle(mh());
+    }
+    TempNewSymbol name = SymbolTable::new_symbol("invoke", CHECK);
+    Handle mt = java_lang_invoke_MethodHandle::type(mh());
+    TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK);
+    MethodHandleCompiler mhc(mh, name, signature, 10000, false, CHECK);
+    methodHandle m = mhc.compile(CHECK);
+    if (Verbose) {
+      m->print_codes();
+    }
+    InterpreterOopMap mask;
+    OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask);
+    // compile to object code if -Xcomp or WizardMode
+    if ((WizardMode ||
+         CompilationPolicy::must_be_compiled(m))
+        && !instanceKlass::cast(m->method_holder())->is_not_initialized()
+        && CompilationPolicy::can_be_compiled(m)) {
+      // Force compilation
+      CompileBroker::compile_method(m, InvocationEntryBci,
+                                    CompLevel_initial_compile,
+                                    methodHandle(), 0, "StressMethodHandleWalk",
+                                    CHECK);
+    }
+  }
+}
+
+static void stress_method_handle_walk(Handle mh, TRAPS) {
+  stress_method_handle_walk_impl(mh, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    oop ex = PENDING_EXCEPTION;
+    CLEAR_PENDING_EXCEPTION;
+    tty->print("StressMethodHandleWalk: ");
+    java_lang_Throwable::print(ex, tty);
+    tty->cr();
+  }
+}
+#else
+
+static void stress_method_handle_walk(Handle mh, TRAPS) {}
+
+#endif
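+
(The StressMethodHandleWalk hook above follows the usual debug-only pattern: a full implementation under #ifdef ASSERT and an empty stub otherwise, so product builds keep the call sites for free. A minimal standalone illustration of that pattern; the names here are invented for the sketch, not HotSpot's.)

    #include <cstdio>

    #ifdef ASSERT
    // Debug-only validation: run an expensive consistency walk and report,
    // without changing the behavior of the surrounding code.
    static void stress_walk(int handle_id) {
      std::printf("stress-walking handle %d\n", handle_id);
    }
    #else
    // Product builds keep the call sites but pay nothing for them.
    static void stress_walk(int) {}
    #endif

    int main() {
      stress_walk(42);   // call sites look identical in both build flavors
      return 0;
    }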
+
 //
 // Here are the native methods on sun.invoke.MethodHandleImpl.
 // They are the private interface between this JVM and the HotSpot-specific
@@ -2207,26 +2752,22 @@
   ResourceMark rm;              // for error messages
 
   // This is the guy we are initializing:
-  if (mh_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); }
   Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh));
 
   // Early returns out of this method leave the DMH in an unfinished state.
   assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null");
 
   // which method are we really talking about?
-  if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
-  oop target_oop = JNIHandles::resolve_non_null(target_jh);
-  if (java_lang_invoke_MemberName::is_instance(target_oop) &&
-      java_lang_invoke_MemberName::vmindex(target_oop) == VM_INDEX_UNINITIALIZED) {
-    Handle mname(THREAD, target_oop);
-    MethodHandles::resolve_MemberName(mname, CHECK);
-    target_oop = mname(); // in case of GC
+  if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); }
+  Handle target(THREAD, JNIHandles::resolve_non_null(target_jh));
+  if (java_lang_invoke_MemberName::is_instance(target()) &&
+      java_lang_invoke_MemberName::vmindex(target()) == VM_INDEX_UNINITIALIZED) {
+    MethodHandles::resolve_MemberName(target, CHECK);
   }
 
-  int decode_flags = 0; klassOop receiver_limit = NULL;
-  methodHandle m(THREAD,
-                 MethodHandles::decode_method(target_oop,
-                                              receiver_limit, decode_flags));
+  KlassHandle receiver_limit; int decode_flags = 0;
+  methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags);
   if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "no such method"); }
 
   // The trusted Java code that calls this method should already have performed
@@ -2262,6 +2803,7 @@
   }
 
   MethodHandles::init_DirectMethodHandle(mh, m, (do_dispatch != JNI_FALSE), CHECK);
+  stress_method_handle_walk(mh, CHECK);
 }
 JVM_END
 
@@ -2271,34 +2813,35 @@
   ResourceMark rm;              // for error messages
 
   // This is the guy we are initializing:
-  if (mh_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); }
   Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh));
 
   // Early returns out of this method leave the BMH in an unfinished state.
   assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null");
 
-  if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); }
   Handle target(THREAD, JNIHandles::resolve_non_null(target_jh));
 
   if (!java_lang_invoke_MethodHandle::is_instance(target())) {
     // Target object is a reflective method.  (%%% Do we need this alternate path?)
     Untested("init_BMH of non-MH");
     if (argnum != 0) { THROW(vmSymbols::java_lang_InternalError()); }
-    int decode_flags = 0; klassOop receiver_limit_oop = NULL;
-    methodHandle m(THREAD,
-                   MethodHandles::decode_method(target(),
-                                                receiver_limit_oop,
-                                                decode_flags));
-    KlassHandle receiver_limit(THREAD, receiver_limit_oop);
+    KlassHandle receiver_limit; int decode_flags = 0;
+    methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags);
     MethodHandles::init_BoundMethodHandle_with_receiver(mh, m,
                                                        receiver_limit,
                                                        decode_flags,
                                                        CHECK);
-    return;
+  } else {
+    // Build a BMH on top of a DMH or another BMH:
+    MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK);
   }
 
-  // Build a BMH on top of a DMH or another BMH:
-  MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK);
+  if (StressMethodHandleWalk) {
+    if (mh->klass() == SystemDictionary::BoundMethodHandle_klass())
+      stress_method_handle_walk(mh, CHECK);
+    // else don't, since the subclass has not yet initialized its own fields
+  }
 }
 JVM_END
 
@@ -2306,9 +2849,8 @@
 JVM_ENTRY(void, MHN_init_AMH(JNIEnv *env, jobject igcls, jobject mh_jh,
                              jobject target_jh, int argnum)) {
   // This is the guy we are initializing:
-  if (mh_jh == NULL || target_jh == NULL) {
-    THROW(vmSymbols::java_lang_InternalError());
-  }
+  if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); }
+  if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); }
   Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh));
   Handle target(THREAD, JNIHandles::resolve_non_null(target_jh));
 
@@ -2316,6 +2858,7 @@
   assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null");
 
   MethodHandles::init_AdapterMethodHandle(mh, target, argnum, CHECK);
+  stress_method_handle_walk(mh, CHECK);
 }
 JVM_END
 
@@ -2362,6 +2905,8 @@
     return MethodHandles::stack_move_unit();
   case MethodHandles::GC_CONV_OP_IMPLEMENTED_MASK:
     return MethodHandles::adapter_conversion_ops_supported_mask();
+  case MethodHandles::GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS:
+    return MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS;
   }
   return 0;
 }
@@ -2369,8 +2914,12 @@
 
 #ifndef PRODUCT
 #define EACH_NAMED_CON(template) \
-    template(MethodHandles,GC_JVM_PUSH_LIMIT) \
-    template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) \
+  /* hold back this one until JDK stabilizes */ \
+  /* template(MethodHandles,GC_JVM_PUSH_LIMIT) */  \
+  /* hold back this one until JDK stabilizes */ \
+  /* template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) */ \
+  /* hold back this one until JDK stabilizes */ \
+  /* template(MethodHandles,GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS) */ \
     template(MethodHandles,ETF_HANDLE_OR_METHOD_NAME) \
     template(MethodHandles,ETF_DIRECT_HANDLE) \
     template(MethodHandles,ETF_METHOD_NAME) \
@@ -2394,9 +2943,8 @@
     template(java_lang_invoke_AdapterMethodHandle,OP_DROP_ARGS) \
     template(java_lang_invoke_AdapterMethodHandle,OP_COLLECT_ARGS) \
     template(java_lang_invoke_AdapterMethodHandle,OP_SPREAD_ARGS) \
-    template(java_lang_invoke_AdapterMethodHandle,OP_FLYBY) \
-    template(java_lang_invoke_AdapterMethodHandle,OP_RICOCHET) \
-    template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT) \
+      /* hold back this one until JDK stabilizes */ \
+      /*template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT)*/  \
     template(java_lang_invoke_AdapterMethodHandle,CONV_OP_MASK) \
     template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_MASK) \
     template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_SHIFT) \
@@ -2424,12 +2972,12 @@
 #ifndef PRODUCT
   if (which >= 0 && which < con_value_count) {
     int con = con_values[which];
-    objArrayOop box = (objArrayOop) JNIHandles::resolve(box_jh);
-    if (box != NULL && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) {
+    objArrayHandle box(THREAD, (objArrayOop) JNIHandles::resolve(box_jh));
+    if (box.not_null() && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) {
       const char* str = &con_names[0];
       for (int i = 0; i < which; i++)
         str += strlen(str) + 1;   // skip name and null
-      oop name = java_lang_String::create_oop_from_str(str, CHECK_0);
+      oop name = java_lang_String::create_oop_from_str(str, CHECK_0);  // possible safepoint
       box->obj_at_put(0, name);
     }
     return con;
@@ -2441,7 +2989,8 @@
 
 // void init(MemberName self, AccessibleObject ref)
 JVM_ENTRY(void, MHN_init_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jobject target_jh)) {
-  if (mname_jh == NULL || target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); }
+  if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
   oop target_oop = JNIHandles::resolve_non_null(target_jh);
   MethodHandles::init_MemberName(mname(), target_oop);
@@ -2450,7 +2999,7 @@
 
 // void expand(MemberName self)
 JVM_ENTRY(void, MHN_expand_Mem(JNIEnv *env, jobject igcls, jobject mname_jh)) {
-  if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
   MethodHandles::expand_MemberName(mname, 0, CHECK);
 }
@@ -2458,7 +3007,7 @@
 
 // void resolve(MemberName self, Class<?> caller)
 JVM_ENTRY(void, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) {
-  if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
 
   // The trusted Java code that calls this method should already have performed
@@ -2486,10 +3035,10 @@
                                jclass clazz_jh, jstring name_jh, jstring sig_jh,
                                int mflags, jclass caller_jh, jint skip, jobjectArray results_jh)) {
   if (clazz_jh == NULL || results_jh == NULL)  return -1;
-  klassOop k_oop = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh));
+  KlassHandle k(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh)));
 
-  objArrayOop results = (objArrayOop) JNIHandles::resolve(results_jh);
-  if (results == NULL || !results->is_objArray())       return -1;
+  objArrayHandle results(THREAD, (objArrayOop) JNIHandles::resolve(results_jh));
+  if (results.is_null() || !results->is_objArray())  return -1;
 
   TempNewSymbol name = NULL;
   TempNewSymbol sig = NULL;
@@ -2502,25 +3051,78 @@
     if (sig == NULL)  return 0; // a match is not possible
   }
 
-  klassOop caller = NULL;
+  KlassHandle caller;
   if (caller_jh != NULL) {
     oop caller_oop = JNIHandles::resolve_non_null(caller_jh);
     if (!java_lang_Class::is_instance(caller_oop))  return -1;
-    caller = java_lang_Class::as_klassOop(caller_oop);
+    caller = KlassHandle(THREAD, java_lang_Class::as_klassOop(caller_oop));
   }
 
-  if (name != NULL && sig != NULL && results != NULL) {
+  if (name != NULL && sig != NULL && results.not_null()) {
     // try a direct resolve
     // %%% TO DO
   }
 
-  int res = MethodHandles::find_MemberNames(k_oop, name, sig, mflags,
-                                            caller, skip, results);
+  int res = MethodHandles::find_MemberNames(k(), name, sig, mflags,
+                                            caller(), skip, results());
   // TO DO: expand at least some of the MemberNames, to avoid massive callbacks
   return res;
 }
 JVM_END
 
+methodOop MethodHandles::resolve_raise_exception_method(TRAPS) {
+  if (_raise_exception_method != NULL) {
+    // no need to do it twice
+    return raise_exception_method();
+  }
+  // LinkResolver::resolve_invokedynamic can reach this point
+  // because an invokedynamic has failed very early (7049415)
+  KlassHandle MHN_klass = SystemDictionaryHandles::MethodHandleNatives_klass();
+  if (MHN_klass.not_null()) {
+    TempNewSymbol raiseException_name = SymbolTable::new_symbol("raiseException", CHECK_NULL);
+    TempNewSymbol raiseException_sig = SymbolTable::new_symbol("(ILjava/lang/Object;Ljava/lang/Object;)V", CHECK_NULL);
+    methodOop raiseException_method  = instanceKlass::cast(MHN_klass->as_klassOop())
+                  ->find_method(raiseException_name, raiseException_sig);
+    if (raiseException_method != NULL && raiseException_method->is_static()) {
+      return raiseException_method;
+    }
+  }
+  // not found; let the caller deal with it
+  return NULL;
+}
+void MethodHandles::raise_exception(int code, oop actual, oop required, TRAPS) {
+  methodOop raiseException_method = resolve_raise_exception_method(CHECK);
+  if (raiseException_method != NULL &&
+      instanceKlass::cast(raiseException_method->method_holder())->is_not_initialized()) {
+    instanceKlass::cast(raiseException_method->method_holder())->initialize(CHECK);
+    // it had better be resolved by now, or maybe JSR 292 failed to load
+    raiseException_method = raise_exception_method();
+  }
+  if (raiseException_method == NULL) {
+    THROW_MSG(vmSymbols::java_lang_InternalError(), "no raiseException method");
+  }
+  JavaCallArguments args;
+  args.push_int(code);
+  args.push_oop(actual);
+  args.push_oop(required);
+  JavaValue result(T_VOID);
+  JavaCalls::call(&result, raiseException_method, &args, CHECK);
+}
+
+JVM_ENTRY(jobject, MH_invoke_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) {
+    TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL);
+    THROW_MSG_NULL(UOE_name, "MethodHandle.invoke cannot be invoked reflectively");
+    return NULL;
+}
+JVM_END
+
+JVM_ENTRY(jobject, MH_invokeExact_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) {
+    TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL);
+    THROW_MSG_NULL(UOE_name, "MethodHandle.invokeExact cannot be invoked reflectively");
+    return NULL;
+}
+JVM_END
+
 
 /// JVM_RegisterMethodHandleMethods
 
@@ -2559,6 +3161,12 @@
   {CC"getMembers",              CC"("CLS""STRG""STRG"I"CLS"I["MEM")I",  FN_PTR(MHN_getMembers)}
 };
 
+static JNINativeMethod invoke_methods[] = {
+  // reflective fallbacks for MethodHandle.invoke/invokeExact; both throw UnsupportedOperationException
+  {CC"invoke",                  CC"(["OBJ")"OBJ,                FN_PTR(MH_invoke_UOE)},
+  {CC"invokeExact",             CC"(["OBJ")"OBJ,                FN_PTR(MH_invokeExact_UOE)}
+};
+
 // This one function is exported, used by NativeLookup.
 
 JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) {
@@ -2575,6 +3183,12 @@
     ThreadToNativeFromVM ttnfv(thread);
 
     int status = env->RegisterNatives(MHN_class, methods, sizeof(methods)/sizeof(JNINativeMethod));
+    if (!env->ExceptionOccurred()) {
+      const char* L_MH_name = (JLINV "MethodHandle");
+      const char* MH_name = L_MH_name+1;
+      jclass MH_class = env->FindClass(MH_name);
+      status = env->RegisterNatives(MH_class, invoke_methods, sizeof(invoke_methods)/sizeof(JNINativeMethod));
+    }
     if (env->ExceptionOccurred()) {
       MethodHandles::set_enabled(false);
       warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
@@ -2584,19 +3198,11 @@
   }
 
   if (enable_MH) {
-    KlassHandle MHN_klass = SystemDictionaryHandles::MethodHandleNatives_klass();
-    if (MHN_klass.not_null()) {
-      TempNewSymbol raiseException_name = SymbolTable::new_symbol("raiseException", CHECK);
-      TempNewSymbol raiseException_sig = SymbolTable::new_symbol("(ILjava/lang/Object;Ljava/lang/Object;)V", CHECK);
-      methodOop raiseException_method  = instanceKlass::cast(MHN_klass->as_klassOop())
-                    ->find_method(raiseException_name, raiseException_sig);
-      if (raiseException_method != NULL && raiseException_method->is_static()) {
-        MethodHandles::set_raise_exception_method(raiseException_method);
-      } else {
-        warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
-        enable_MH = false;
-      }
+    methodOop raiseException_method = MethodHandles::resolve_raise_exception_method(CHECK);
+    if (raiseException_method != NULL) {
+      MethodHandles::set_raise_exception_method(raiseException_method);
     } else {
+      warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
       enable_MH = false;
     }
   }
--- a/src/share/vm/prims/methodHandles.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/methodHandles.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -66,8 +66,8 @@
     _adapter_drop_args     = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS,
     _adapter_collect_args  = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS,
     _adapter_spread_args   = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS,
-    _adapter_flyby         = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_FLYBY,
-    _adapter_ricochet      = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RICOCHET,
+    _adapter_fold_args     = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS,
+    _adapter_unused_13     = _adapter_mh_first + 13,  // hole in the CONV_OP enumeration
     _adapter_mh_last       = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT - 1,
 
     // Optimized adapter types
@@ -93,10 +93,99 @@
     _adapter_opt_unboxi,
     _adapter_opt_unboxl,
 
-    // spreading (array length cases 0, 1, >=2)
-    _adapter_opt_spread_0,
-    _adapter_opt_spread_1,
-    _adapter_opt_spread_more,
+    // %% Maybe tame the following with a VM_SYMBOLS_DO type macro?
+
+    // how a blocking adapter returns (platform-dependent)
+    _adapter_opt_return_ref,
+    _adapter_opt_return_int,
+    _adapter_opt_return_long,
+    _adapter_opt_return_float,
+    _adapter_opt_return_double,
+    _adapter_opt_return_void,
+    _adapter_opt_return_S0_ref,  // return ref to S=0 (last slot)
+    _adapter_opt_return_S1_ref,  // return ref to S=1 (2nd-to-last slot)
+    _adapter_opt_return_S2_ref,
+    _adapter_opt_return_S3_ref,
+    _adapter_opt_return_S4_ref,
+    _adapter_opt_return_S5_ref,
+    _adapter_opt_return_any,     // dynamically select r/i/l/f/d
+    _adapter_opt_return_FIRST = _adapter_opt_return_ref,
+    _adapter_opt_return_LAST  = _adapter_opt_return_any,
+
+    // spreading (array length cases 0, 1, ...)
+    _adapter_opt_spread_0,       // spread empty array to N=0 arguments
+    _adapter_opt_spread_1_ref,   // spread Object[] to N=1 argument
+    _adapter_opt_spread_2_ref,   // spread Object[] to N=2 arguments
+    _adapter_opt_spread_3_ref,   // spread Object[] to N=3 arguments
+    _adapter_opt_spread_4_ref,   // spread Object[] to N=4 arguments
+    _adapter_opt_spread_5_ref,   // spread Object[] to N=5 arguments
+    _adapter_opt_spread_ref,     // spread Object[] to N arguments
+    _adapter_opt_spread_byte,    // spread byte[] or boolean[] to N arguments
+    _adapter_opt_spread_char,    // spread char[], etc., to N arguments
+    _adapter_opt_spread_short,   // spread short[], etc., to N arguments
+    _adapter_opt_spread_int,     // spread int[], short[], etc., to N arguments
+    _adapter_opt_spread_long,    // spread long[] to N arguments
+    _adapter_opt_spread_float,   // spread float[] to N arguments
+    _adapter_opt_spread_double,  // spread double[] to N arguments
+    _adapter_opt_spread_FIRST = _adapter_opt_spread_0,
+    _adapter_opt_spread_LAST  = _adapter_opt_spread_double,
+
+    // blocking filter/collect conversions
+    // These collect N arguments and replace them (at slot S) by a return value
+    // which is passed to the final target, along with the unaffected arguments.
+    // collect_{N}_{T} collects N arguments at any position into a T value
+    // collect_{N}_S{S}_{T} collects N arguments at slot S into a T value
+    // collect_{T} collects any number of arguments at any position
+    // filter_S{S}_{T} is the same as collect_1_S{S}_{T} (a unary collection)
+    // (collect_2 is also usable as a filter, with long or double arguments)
+    _adapter_opt_collect_ref,    // combine N arguments, replace with a reference
+    _adapter_opt_collect_int,    // combine N arguments, replace with an int, short, etc.
+    _adapter_opt_collect_long,   // combine N arguments, replace with a long
+    _adapter_opt_collect_float,  // combine N arguments, replace with a float
+    _adapter_opt_collect_double, // combine N arguments, replace with a double
+    _adapter_opt_collect_void,   // combine N arguments, replace with nothing
+    // if there is a small fixed number to push, do so without a loop:
+    _adapter_opt_collect_0_ref,  // collect N=0 arguments, insert a reference
+    _adapter_opt_collect_1_ref,  // collect N=1 argument, replace with a reference
+    _adapter_opt_collect_2_ref,  // combine N=2 arguments, replace with a reference
+    _adapter_opt_collect_3_ref,  // combine N=3 arguments, replace with a reference
+    _adapter_opt_collect_4_ref,  // combine N=4 arguments, replace with a reference
+    _adapter_opt_collect_5_ref,  // combine N=5 arguments, replace with a reference
+    // filters are an important special case because they never move arguments:
+    _adapter_opt_filter_S0_ref,  // filter N=1 argument at S=0, replace with a reference
+    _adapter_opt_filter_S1_ref,  // filter N=1 argument at S=1, replace with a reference
+    _adapter_opt_filter_S2_ref,  // filter N=1 argument at S=2, replace with a reference
+    _adapter_opt_filter_S3_ref,  // filter N=1 argument at S=3, replace with a reference
+    _adapter_opt_filter_S4_ref,  // filter N=1 argument at S=4, replace with a reference
+    _adapter_opt_filter_S5_ref,  // filter N=1 argument at S=5, replace with a reference
+    // these move arguments, but they are important for boxing
+    _adapter_opt_collect_2_S0_ref,  // combine last N=2 arguments, replace with a reference
+    _adapter_opt_collect_2_S1_ref,  // combine N=2 arguments at S=1, replace with a reference
+    _adapter_opt_collect_2_S2_ref,  // combine N=2 arguments at S=2, replace with a reference
+    _adapter_opt_collect_2_S3_ref,  // combine N=2 arguments at S=3, replace with a reference
+    _adapter_opt_collect_2_S4_ref,  // combine N=2 arguments at S=4, replace with a reference
+    _adapter_opt_collect_2_S5_ref,  // combine N=2 arguments at S=5, replace with a reference
+    _adapter_opt_collect_FIRST = _adapter_opt_collect_ref,
+    _adapter_opt_collect_LAST  = _adapter_opt_collect_2_S5_ref,
+
+    // blocking folding conversions
+    // these are like collects, but retain all the N arguments for the final target
+    //_adapter_opt_fold_0_ref,   // same as _adapter_opt_collect_0_ref
+    // fold_{N}_{T} processes N arguments at any position into a T value, which it inserts
+    // fold_{T} processes any number of arguments at any position
+    _adapter_opt_fold_ref,       // process N arguments, prepend a reference
+    _adapter_opt_fold_int,       // process N arguments, prepend an int, short, etc.
+    _adapter_opt_fold_long,      // process N arguments, prepend a long
+    _adapter_opt_fold_float,     // process N arguments, prepend a float
+    _adapter_opt_fold_double,    // process N arguments, prepend a double
+    _adapter_opt_fold_void,      // process N arguments, but leave the list unchanged
+    _adapter_opt_fold_1_ref,     // process N=1 argument, prepend a reference
+    _adapter_opt_fold_2_ref,     // process N=2 arguments, prepend a reference
+    _adapter_opt_fold_3_ref,     // process N=3 arguments, prepend a reference
+    _adapter_opt_fold_4_ref,     // process N=4 arguments, prepend a reference
+    _adapter_opt_fold_5_ref,     // process N=5 arguments, prepend a reference
+    _adapter_opt_fold_FIRST = _adapter_opt_fold_ref,
+    _adapter_opt_fold_LAST  = _adapter_opt_fold_5_ref,
 
     _EK_LIMIT,
     _EK_FIRST = 0
@@ -110,6 +199,7 @@
   enum {  // import java_lang_invoke_AdapterMethodHandle::CONV_OP_*
     CONV_OP_LIMIT         = java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT,
     CONV_OP_MASK          = java_lang_invoke_AdapterMethodHandle::CONV_OP_MASK,
+    CONV_TYPE_MASK        = java_lang_invoke_AdapterMethodHandle::CONV_TYPE_MASK,
     CONV_VMINFO_MASK      = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_MASK,
     CONV_VMINFO_SHIFT     = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_SHIFT,
     CONV_OP_SHIFT         = java_lang_invoke_AdapterMethodHandle::CONV_OP_SHIFT,
@@ -123,10 +213,10 @@
   static MethodHandleEntry* _entries[_EK_LIMIT];
   static const char*        _entry_names[_EK_LIMIT+1];
   static jobject            _raise_exception_method;
+  static address            _adapter_return_handlers[CONV_TYPE_MASK+1];
 
   // Adapters.
   static MethodHandlesAdapterBlob* _adapter_code;
-  static int                       _adapter_code_size;
 
   static bool ek_valid(EntryKind ek)            { return (uint)ek < (uint)_EK_LIMIT; }
   static bool conv_op_valid(int op)             { return (uint)op < (uint)CONV_OP_LIMIT; }
@@ -147,39 +237,195 @@
   }
 
   // Some adapter helper functions.
-  static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
+  static EntryKind ek_original_kind(EntryKind ek) {
+    if (ek <= _adapter_mh_last)  return ek;
     switch (ek) {
-    case _bound_int_mh        : // fall-thru
-    case _bound_int_direct_mh : arg_type = T_INT;    arg_mask = _INSERT_INT_MASK;  break;
-    case _bound_long_mh       : // fall-thru
-    case _bound_long_direct_mh: arg_type = T_LONG;   arg_mask = _INSERT_LONG_MASK; break;
-    case _bound_ref_mh        : // fall-thru
-    case _bound_ref_direct_mh : arg_type = T_OBJECT; arg_mask = _INSERT_REF_MASK;  break;
-    default: ShouldNotReachHere();
+    case _adapter_opt_swap_1:
+    case _adapter_opt_swap_2:
+      return _adapter_swap_args;
+    case _adapter_opt_rot_1_up:
+    case _adapter_opt_rot_1_down:
+    case _adapter_opt_rot_2_up:
+    case _adapter_opt_rot_2_down:
+      return _adapter_rot_args;
+    case _adapter_opt_i2i:
+    case _adapter_opt_l2i:
+    case _adapter_opt_d2f:
+    case _adapter_opt_i2l:
+    case _adapter_opt_f2d:
+      return _adapter_prim_to_prim;
+    case _adapter_opt_unboxi:
+    case _adapter_opt_unboxl:
+      return _adapter_ref_to_prim;
     }
-    arg_slots = type2size[arg_type];
+    if (ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST)
+      return _adapter_spread_args;
+    if (ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST)
+      return _adapter_collect_args;
+    if (ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST)
+      return _adapter_fold_args;
+    if (ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST)
+      return _adapter_opt_return_any;
+    assert(false, "oob");
+    return _EK_LIMIT;
+  }
+
+  static bool ek_supported(MethodHandles::EntryKind ek);
+
+  static BasicType ek_bound_mh_arg_type(EntryKind ek) {
+    switch (ek) {
+    case _bound_int_mh         : // fall-thru
+    case _bound_int_direct_mh  : return T_INT;
+    case _bound_long_mh        : // fall-thru
+    case _bound_long_direct_mh : return T_LONG;
+    default                    : return T_OBJECT;
+    }
+  }
+
+  static int ek_adapter_opt_swap_slots(EntryKind ek) {
+    switch (ek) {
+    case _adapter_opt_swap_1        : return  1;
+    case _adapter_opt_swap_2        : return  2;
+    case _adapter_opt_rot_1_up      : return  1;
+    case _adapter_opt_rot_1_down    : return  1;
+    case _adapter_opt_rot_2_up      : return  2;
+    case _adapter_opt_rot_2_down    : return  2;
+    default : ShouldNotReachHere();   return -1;
+    }
+  }
+
+  static int ek_adapter_opt_swap_mode(EntryKind ek) {
+    switch (ek) {
+    case _adapter_opt_swap_1       : return  0;
+    case _adapter_opt_swap_2       : return  0;
+    case _adapter_opt_rot_1_up     : return  1;
+    case _adapter_opt_rot_1_down   : return -1;
+    case _adapter_opt_rot_2_up     : return  1;
+    case _adapter_opt_rot_2_down   : return -1;
+    default : ShouldNotReachHere();  return  0;
+    }
   }
 
-  static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
-    int swap_slots = 0;
+  static int ek_adapter_opt_collect_count(EntryKind ek) {
+    assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST ||
+           ek >= _adapter_opt_fold_FIRST    && ek <= _adapter_opt_fold_LAST, "");
     switch (ek) {
-    case _adapter_opt_swap_1:     swap_slots = 1; rotate =  0; break;
-    case _adapter_opt_swap_2:     swap_slots = 2; rotate =  0; break;
-    case _adapter_opt_rot_1_up:   swap_slots = 1; rotate =  1; break;
-    case _adapter_opt_rot_1_down: swap_slots = 1; rotate = -1; break;
-    case _adapter_opt_rot_2_up:   swap_slots = 2; rotate =  1; break;
-    case _adapter_opt_rot_2_down: swap_slots = 2; rotate = -1; break;
-    default: ShouldNotReachHere();
+    case _adapter_opt_collect_0_ref    : return  0;
+    case _adapter_opt_filter_S0_ref    :
+    case _adapter_opt_filter_S1_ref    :
+    case _adapter_opt_filter_S2_ref    :
+    case _adapter_opt_filter_S3_ref    :
+    case _adapter_opt_filter_S4_ref    :
+    case _adapter_opt_filter_S5_ref    :
+    case _adapter_opt_fold_1_ref       :
+    case _adapter_opt_collect_1_ref    : return  1;
+    case _adapter_opt_collect_2_S0_ref :
+    case _adapter_opt_collect_2_S1_ref :
+    case _adapter_opt_collect_2_S2_ref :
+    case _adapter_opt_collect_2_S3_ref :
+    case _adapter_opt_collect_2_S4_ref :
+    case _adapter_opt_collect_2_S5_ref :
+    case _adapter_opt_fold_2_ref       :
+    case _adapter_opt_collect_2_ref    : return  2;
+    case _adapter_opt_fold_3_ref       :
+    case _adapter_opt_collect_3_ref    : return  3;
+    case _adapter_opt_fold_4_ref       :
+    case _adapter_opt_collect_4_ref    : return  4;
+    case _adapter_opt_fold_5_ref       :
+    case _adapter_opt_collect_5_ref    : return  5;
+    default                            : return -1;  // sentinel value for "variable"
     }
-    // Return the size of the stack slots to move in bytes.
-    swap_bytes = swap_slots * Interpreter::stackElementSize;
+  }
+
+  static int ek_adapter_opt_collect_slot(EntryKind ek) {
+    assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST ||
+           ek >= _adapter_opt_fold_FIRST    && ek <= _adapter_opt_fold_LAST, "");
+    switch (ek) {
+    case _adapter_opt_collect_2_S0_ref  :
+    case _adapter_opt_filter_S0_ref     : return 0;
+    case _adapter_opt_collect_2_S1_ref  :
+    case _adapter_opt_filter_S1_ref     : return 1;
+    case _adapter_opt_collect_2_S2_ref  :
+    case _adapter_opt_filter_S2_ref     : return 2;
+    case _adapter_opt_collect_2_S3_ref  :
+    case _adapter_opt_filter_S3_ref     : return 3;
+    case _adapter_opt_collect_2_S4_ref  :
+    case _adapter_opt_filter_S4_ref     : return 4;
+    case _adapter_opt_collect_2_S5_ref  :
+    case _adapter_opt_filter_S5_ref     : return 5;
+    default                             : return -1;  // sentinel value for "variable"
+    }
   }
 
-  static int get_ek_adapter_opt_spread_info(EntryKind ek) {
+  static BasicType ek_adapter_opt_collect_type(EntryKind ek) {
+    assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST ||
+           ek >= _adapter_opt_fold_FIRST    && ek <= _adapter_opt_fold_LAST, "");
+    switch (ek) {
+    case _adapter_opt_fold_int          :
+    case _adapter_opt_collect_int       : return T_INT;
+    case _adapter_opt_fold_long         :
+    case _adapter_opt_collect_long      : return T_LONG;
+    case _adapter_opt_fold_float        :
+    case _adapter_opt_collect_float     : return T_FLOAT;
+    case _adapter_opt_fold_double       :
+    case _adapter_opt_collect_double    : return T_DOUBLE;
+    case _adapter_opt_fold_void         :
+    case _adapter_opt_collect_void      : return T_VOID;
+    default                             : return T_OBJECT;
+    }
+  }
+
+  static int ek_adapter_opt_return_slot(EntryKind ek) {
+    assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, "");
+    switch (ek) {
+    case _adapter_opt_return_S0_ref : return 0;
+    case _adapter_opt_return_S1_ref : return 1;
+    case _adapter_opt_return_S2_ref : return 2;
+    case _adapter_opt_return_S3_ref : return 3;
+    case _adapter_opt_return_S4_ref : return 4;
+    case _adapter_opt_return_S5_ref : return 5;
+    default                         : return -1;  // sentinel value for "variable"
+    }
+  }
+
+  static BasicType ek_adapter_opt_return_type(EntryKind ek) {
+    assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, "");
     switch (ek) {
-    case _adapter_opt_spread_0: return  0;
-    case _adapter_opt_spread_1: return  1;
-    default                   : return -1;
+    case _adapter_opt_return_int    : return T_INT;
+    case _adapter_opt_return_long   : return T_LONG;
+    case _adapter_opt_return_float  : return T_FLOAT;
+    case _adapter_opt_return_double : return T_DOUBLE;
+    case _adapter_opt_return_void   : return T_VOID;
+    case _adapter_opt_return_any    : return T_CONFLICT;  // sentinel value for "variable"
+    default                         : return T_OBJECT;
+    }
+  }
+
+  static int ek_adapter_opt_spread_count(EntryKind ek) {
+    assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, "");
+    switch (ek) {
+    case _adapter_opt_spread_0     : return  0;
+    case _adapter_opt_spread_1_ref : return  1;
+    case _adapter_opt_spread_2_ref : return  2;
+    case _adapter_opt_spread_3_ref : return  3;
+    case _adapter_opt_spread_4_ref : return  4;
+    case _adapter_opt_spread_5_ref : return  5;
+    default                        : return -1;  // sentinel value for "variable"
+    }
+  }
+
+  static BasicType ek_adapter_opt_spread_type(EntryKind ek) {
+    assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, "");
+    switch (ek) {
+    // (there is no _adapter_opt_spread_boolean; we use byte)
+    case _adapter_opt_spread_byte   : return T_BYTE;
+    case _adapter_opt_spread_char   : return T_CHAR;
+    case _adapter_opt_spread_short  : return T_SHORT;
+    case _adapter_opt_spread_int    : return T_INT;
+    case _adapter_opt_spread_long   : return T_LONG;
+    case _adapter_opt_spread_float  : return T_FLOAT;
+    case _adapter_opt_spread_double : return T_DOUBLE;
+    default                         : return T_OBJECT;
     }
   }
 
@@ -192,6 +438,9 @@
     assert(_raise_exception_method == NULL, "");
     _raise_exception_method = JNIHandles::make_global(Handle(rem));
   }
+  static methodOop resolve_raise_exception_method(TRAPS);
+  // call raise_exception_method from C code:
+  static void raise_exception(int code, oop actual, oop required, TRAPS);
 
   static jint adapter_conversion(int conv_op, BasicType src, BasicType dest,
                                  int stack_move = 0, int vminfo = 0) {
@@ -228,12 +477,21 @@
   // Bit mask of conversion_op values.  May vary by platform.
   static int adapter_conversion_ops_supported_mask();
 
+  static bool conv_op_supported(int conv_op) {
+    assert(conv_op_valid(conv_op), "");
+    return ((adapter_conversion_ops_supported_mask() & nth_bit(conv_op)) != 0);
+  }
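+
(conv_op_supported above is a one-bit probe against the platform capability mask. A standalone sketch of the same check follows, with a made-up mask in place of adapter_conversion_ops_supported_mask() and a simplified nth_bit.)

    #include <cassert>
    #include <cstdio>

    static inline int nth_bit(int n) { return 1 << n; }   // simplified form of HotSpot's nth_bit

    // Pretend the platform reports support for conv_ops 0..7 only (illustrative).
    static int supported_mask() { return 0xFF; }

    static bool conv_op_supported(int conv_op) {
      assert(conv_op >= 0 && conv_op < 32);
      return (supported_mask() & nth_bit(conv_op)) != 0;
    }

    int main() {
      std::printf("op 3: %d, op 12: %d\n",
                  (int)conv_op_supported(3), (int)conv_op_supported(12));
      return 0;
    }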
+
   // Offset in words that the interpreter stack pointer moves when an argument is pushed.
   // The stack_move value must always be a multiple of this.
   static int stack_move_unit() {
     return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords;
   }
 
+  // Adapter frame traversal.  (Implementation-specific.)
+  static frame ricochet_frame_sender(const frame& fr, RegisterMap* reg_map);
+  static void ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map);
+
   enum { CONV_VMINFO_SIGN_FLAG = 0x80 };
   // Shift values for prim-to-prim conversions.
   static int adapter_prim_to_prim_subword_vminfo(BasicType dest) {
@@ -265,13 +523,13 @@
   static inline address from_interpreted_entry(EntryKind ek);
 
   // helpers for decode_method.
-  static methodOop decode_methodOop(methodOop m, int& decode_flags_result);
-  static methodOop decode_vmtarget(oop vmtarget, int vmindex, oop mtype, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
+  static methodOop    decode_methodOop(methodOop m, int& decode_flags_result);
+  static methodHandle decode_vmtarget(oop vmtarget, int vmindex, oop mtype, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
 
   // Find out how many stack slots an mh pushes or pops.
   // The result is *not* reported as a multiple of stack_move_unit();
@@ -317,18 +575,24 @@
     _dmf_adapter_lsb    = 0x20,
     _DMF_ADAPTER_MASK   = (_dmf_adapter_lsb << CONV_OP_LIMIT) - _dmf_adapter_lsb
   };
-  static methodOop decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result);
   enum {
     // format of query to getConstant:
     GC_JVM_PUSH_LIMIT = 0,
     GC_JVM_STACK_MOVE_UNIT = 1,
     GC_CONV_OP_IMPLEMENTED_MASK = 2,
+    GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS = 3,
 
     // format of result from getTarget / encode_target:
     ETF_HANDLE_OR_METHOD_NAME = 0, // all available data (immediate MH or method)
     ETF_DIRECT_HANDLE         = 1, // ultimate method handle (will be a DMH, may be self)
     ETF_METHOD_NAME           = 2, // ultimate method as MemberName
-    ETF_REFLECT_METHOD        = 3  // ultimate method as java.lang.reflect object (sans refClass)
+    ETF_REFLECT_METHOD        = 3, // ultimate method as java.lang.reflect object (sans refClass)
+    ETF_FORCE_DIRECT_HANDLE   = 64,
+    ETF_COMPILE_DIRECT_HANDLE = 65,
+
+    // ad hoc constants
+    OP_ROT_ARGS_DOWN_LIMIT_BIAS = -1
   };
   static int get_named_constant(int which, Handle name_box, TRAPS);
   static oop encode_target(Handle mh, int format, TRAPS); // report vmtarget (to Java code)
@@ -429,6 +693,7 @@
 
   // Fill in the fields of an AdapterMethodHandle mh.  (MH.type must be pre-filled.)
   static void init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS);
+  static void ensure_vmlayout_field(Handle target, TRAPS);
 
 #ifdef ASSERT
   static bool spot_check_entry_names();
@@ -441,6 +706,8 @@
                                               KlassHandle receiver_klass,
                                               TRAPS);
 
+public:
+  static bool is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst);
   static bool same_basic_type_for_arguments(BasicType src, BasicType dst,
                                             bool raw = false,
                                             bool for_return = false);
@@ -448,12 +715,50 @@
     return same_basic_type_for_arguments(src, dst, raw, true);
   }
 
-  enum {                        // arg_mask values
+  static Symbol* convert_to_signature(oop type_str, bool polymorphic, TRAPS);
+
+#ifdef TARGET_ARCH_x86
+# include "methodHandles_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "methodHandles_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "methodHandles_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "methodHandles_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "methodHandles_ppc.hpp"
+#endif
+
+#ifdef TARGET_ARCH_NYI_6939861
+  // Here are some backward compatible declarations until the 6939861 ports are updated.
+  #define _adapter_flyby    (_EK_LIMIT + 10)
+  #define _adapter_ricochet (_EK_LIMIT + 11)
+  #define _adapter_opt_spread_1    _adapter_opt_spread_1_ref
+  #define _adapter_opt_spread_more _adapter_opt_spread_ref
+  enum {
     _INSERT_NO_MASK   = -1,
     _INSERT_REF_MASK  = 0,
     _INSERT_INT_MASK  = 1,
     _INSERT_LONG_MASK = 3
   };
+  static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
+    arg_type = ek_bound_mh_arg_type(ek);
+    arg_mask = 0;
+    arg_slots = type2size[arg_type];
+  }
+  static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
+    int swap_slots = ek_adapter_opt_swap_slots(ek);
+    rotate = ek_adapter_opt_swap_mode(ek);
+    swap_bytes = swap_slots * Interpreter::stackElementSize;
+  }
+  static int get_ek_adapter_opt_spread_info(EntryKind ek) {
+    return ek_adapter_opt_spread_count(ek);
+  }
+
   static void insert_arg_slots(MacroAssembler* _masm,
                                RegisterOrConstant arg_slots,
                                int arg_mask,
@@ -466,8 +771,7 @@
                                Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
 
   static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
-
-  static Symbol* convert_to_signature(oop type_str, bool polymorphic, TRAPS);
+#endif //TARGET_ARCH_NYI_6939861
 };
 
 
@@ -530,7 +834,7 @@
 //
 class MethodHandlesAdapterGenerator : public StubCodeGenerator {
 public:
-  MethodHandlesAdapterGenerator(CodeBuffer* code) : StubCodeGenerator(code) {}
+  MethodHandlesAdapterGenerator(CodeBuffer* code) : StubCodeGenerator(code, PrintMethodHandleStubs) {}
 
   void generate();
 };
--- a/src/share/vm/prims/unsafe.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/prims/unsafe.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -24,6 +24,9 @@
 
 #include "precompiled.hpp"
 #include "classfile/vmSymbols.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif // SERIALGC
 #include "memory/allocation.inline.hpp"
 #include "prims/jni.h"
 #include "prims/jvm.h"
@@ -193,7 +196,32 @@
   UnsafeWrapper("Unsafe_GetObject");
   if (obj == NULL)  THROW_0(vmSymbols::java_lang_NullPointerException());
   GET_OOP_FIELD(obj, offset, v)
-  return JNIHandles::make_local(env, v);
+  jobject ret = JNIHandles::make_local(env, v);
+#ifndef SERIALGC
+  // We could be accessing the referent field in a reference
+  // object. If G1 is enabled then we need to register a non-null
+  // referent with the SATB barrier.
+  if (UseG1GC) {
+    bool needs_barrier = false;
+
+    if (ret != NULL) {
+      if (offset == java_lang_ref_Reference::referent_offset) {
+        oop o = JNIHandles::resolve_non_null(obj);
+        klassOop k = o->klass();
+        if (instanceKlass::cast(k)->reference_type() != REF_NONE) {
+          assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
+          needs_barrier = true;
+        }
+      }
+    }
+
+    if (needs_barrier) {
+      oop referent = JNIHandles::resolve(ret);
+      G1SATBCardTableModRefBS::enqueue(referent);
+    }
+  }
+#endif // SERIALGC
+  return ret;
 UNSAFE_END
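 
(The pre-barrier above fires only when the Unsafe read targets Reference.referent on an actual Reference subclass while G1 is in use. A simplified standalone model of that decision follows; the types, the offset value, and the enqueue call are stand-ins for the HotSpot/G1 ones.)

    #include <cstdio>

    enum ReferenceType { REF_NONE, REF_SOFT, REF_WEAK, REF_PHANTOM };

    struct FieldRead {
      long          offset;           // field offset being read via Unsafe
      ReferenceType holder_ref_type;  // REF_NONE unless the holder is a Reference subclass
      const void*   value;            // the oop that was just loaded
    };

    // Stand-ins for the real values/calls used in Unsafe_GetObject.
    static const long referent_offset = 16;   // illustrative, not the real layout
    static void satb_enqueue(const void* p) { std::printf("enqueue %p\n", p); }

    static void maybe_pre_barrier(bool use_g1, const FieldRead& r) {
      // Enqueue only a non-null referent read from a java.lang.ref.Reference
      // instance, so the concurrent marker cannot lose it (SATB invariant).
      if (use_g1 && r.value != NULL &&
          r.offset == referent_offset && r.holder_ref_type != REF_NONE) {
        satb_enqueue(r.value);
      }
    }

    int main() {
      int dummy = 0;
      FieldRead weak_get = { referent_offset, REF_WEAK, &dummy };
      maybe_pre_barrier(true, weak_get);    // enqueues
      FieldRead plain    = { 24, REF_NONE, &dummy };
      maybe_pre_barrier(true, plain);       // no barrier for ordinary fields
      return 0;
    }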
 
 UNSAFE_ENTRY(void, Unsafe_SetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset, jobject x_h))
@@ -226,7 +254,32 @@
 UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
   UnsafeWrapper("Unsafe_GetObject");
   GET_OOP_FIELD(obj, offset, v)
-  return JNIHandles::make_local(env, v);
+  jobject ret = JNIHandles::make_local(env, v);
+#ifndef SERIALGC
+  // We could be accessing the referent field in a reference
+  // object. If G1 is enabled then we need to register a non-null
+  // referent with the SATB barrier.
+  if (UseG1GC) {
+    bool needs_barrier = false;
+
+    if (ret != NULL) {
+      if (offset == java_lang_ref_Reference::referent_offset && obj != NULL) {
+        oop o = JNIHandles::resolve(obj);
+        klassOop k = o->klass();
+        if (instanceKlass::cast(k)->reference_type() != REF_NONE) {
+          assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
+          needs_barrier = true;
+        }
+      }
+    }
+
+    if (needs_barrier) {
+      oop referent = JNIHandles::resolve(ret);
+      G1SATBCardTableModRefBS::enqueue(referent);
+    }
+  }
+#endif // SERIALGC
+  return ret;
 UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,7 +1,26 @@
 /*
-* Copyright (c) 2010, 2011 Oracle and/or its affiliates. All rights reserved.
-* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
-*/
+ * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
 
 #include "precompiled.hpp"
 #include "runtime/advancedThresholdPolicy.hpp"
--- a/src/share/vm/runtime/advancedThresholdPolicy.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,7 +1,26 @@
 /*
-* Copyright (c) 2010, 2011 Oracle and/or its affiliates. All rights reserved.
-* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
-*/
+ * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
 
 #ifndef SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP
 #define SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP
--- a/src/share/vm/runtime/arguments.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -246,6 +246,12 @@
   { "MaxLiveObjectEvacuationRatio",
                            JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
   { "ForceSharedSpaces",   JDK_Version::jdk_update(6,25), JDK_Version::jdk(8) },
+  { "UseParallelOldGCCompacting",
+                           JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
+  { "UseParallelDensePrefixUpdate",
+                           JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
+  { "UseParallelOldGCDensePrefix",
+                           JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
   { "AllowTransitionalJSR292",       JDK_Version::jdk(7), JDK_Version::jdk(8) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
@@ -807,26 +813,22 @@
 
   JDK_Version since = JDK_Version();
 
-  if (parse_argument(arg, origin)) {
-    // do nothing
-  } else if (is_newly_obsolete(arg, &since)) {
-    enum { bufsize = 256 };
-    char buffer[bufsize];
-    since.to_string(buffer, bufsize);
-    jio_fprintf(defaultStream::error_stream(),
-      "Warning: The flag %s has been EOL'd as of %s and will"
-      " be ignored\n", arg, buffer);
-  } else {
-    if (!ignore_unrecognized) {
-      jio_fprintf(defaultStream::error_stream(),
-                  "Unrecognized VM option '%s'\n", arg);
-      // allow for commandline "commenting out" options like -XX:#+Verbose
-      if (strlen(arg) == 0 || arg[0] != '#') {
-        return false;
-      }
-    }
+  if (parse_argument(arg, origin) || ignore_unrecognized) {
+    return true;
   }
-  return true;
+
+  const char * const argname = *arg == '+' || *arg == '-' ? arg + 1 : arg;
+  if (is_newly_obsolete(arg, &since)) {
+    char version[256];
+    since.to_string(version, sizeof(version));
+    warning("ignoring option %s; support was removed in %s", argname, version);
+    return true;
+  }
+
+  jio_fprintf(defaultStream::error_stream(),
+              "Unrecognized VM option '%s'\n", argname);
+  // allow for commandline "commenting out" options like -XX:#+Verbose
+  return arg[0] == '#';
 }
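 
(After this rewrite, process_argument accepts anything parseable, or everything when unrecognized options are ignored; it warns about and then accepts newly obsolete flags, and rejects unknown options unless they start with '#'. A compact standalone model of that control flow follows, with stubbed-in helpers in place of parse_argument and is_newly_obsolete.)

    #include <cstdio>
    #include <cstring>

    // Stubs standing in for parse_argument()/is_newly_obsolete().
    static bool parse_known(const char* arg) { return std::strcmp(arg, "+PrintGC") == 0; }
    static bool is_obsolete(const char* arg) { return std::strcmp(arg, "+ForceSharedSpaces") == 0; }

    static bool process_argument(const char* arg, bool ignore_unrecognized) {
      if (parse_known(arg) || ignore_unrecognized) return true;

      const char* argname = (*arg == '+' || *arg == '-') ? arg + 1 : arg;
      if (is_obsolete(arg)) {
        std::printf("warning: ignoring option %s; support was removed\n", argname);
        return true;
      }

      std::fprintf(stderr, "Unrecognized VM option '%s'\n", argname);
      return arg[0] == '#';   // -XX:#+Whatever counts as a "commented out" option
    }

    int main() {
      process_argument("+PrintGC", false);           // parses
      process_argument("+ForceSharedSpaces", false); // obsolete: warn, accept
      process_argument("#+Verbose", false);          // commented out: accept
      process_argument("+NoSuchFlag", false);        // rejected
      return 0;
    }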
 
 bool Arguments::process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized) {
@@ -964,7 +966,7 @@
   // Ensure Agent_OnLoad has the correct initial values.
   // This may not be the final mode; mode may change later in onload phase.
   PropertyList_unique_add(&_system_properties, "java.vm.info",
-                          (char*)Abstract_VM_Version::vm_info_string(), false);
+                          (char*)VM_Version::vm_info_string(), false);
 
   UseInterpreter             = true;
   UseCompiler                = true;
@@ -973,10 +975,10 @@
 #ifndef ZERO
   // Turn these off for mixed and comp.  Leave them on for Zero.
   if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) {
-    UseFastAccessorMethods = mode == _int;
+    UseFastAccessorMethods = (mode == _int);
   }
   if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) {
-    UseFastEmptyMethods = mode == _int;
+    UseFastEmptyMethods = (mode == _int);
   }
 #endif
 
@@ -1427,6 +1429,11 @@
       }
     }
   }
+  if (UseNUMA) {
+    if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
+      FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+    }
+  }
 }
 
 void Arguments::set_g1_gc_flags() {
@@ -1991,6 +1998,9 @@
   Arguments::_ClipInlining             = ClipInlining;
   Arguments::_BackgroundCompilation    = BackgroundCompilation;
 
+  // Set up flags for mixed mode, which is the default
+  set_mode_flags(_mixed);
+
   // Parse JAVA_TOOL_OPTIONS environment variable (if present)
   jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required);
   if (result != JNI_OK) {
@@ -2380,7 +2390,6 @@
       _gc_log_filename = strdup(tail);
       FLAG_SET_CMDLINE(bool, PrintGC, true);
       FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
-      FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
 
     // JNI hooks
     } else if (match_option(option, "-Xcheck", &tail)) {
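
The obsolete-flag table and the reworked handle_argument logic above amount to: accept the option if it parses or if unrecognized options are being ignored, warn and accept it if it appears in the obsolete table, and otherwise reject it unless it is a '#'-commented option. A rough standalone sketch of that decision order follows; the Version struct, the table contents, and handle_argument's parsed_ok parameter are illustrative stand-ins, not HotSpot's types.

    #include <cstdio>
    #include <cstring>

    // Hypothetical stand-ins for JDK_Version and the obsolete-flag table.
    struct Version { int major, update; };

    struct ObsoleteFlag {
      const char* name;
      Version     obsoleted_in;   // warn from this release on
    };

    static const ObsoleteFlag obsolete_flags[] = {
      { "UseParallelOldGCCompacting",  {6, 27} },
      { "UseParallelOldGCDensePrefix", {6, 27} },
      { NULL, {0, 0} }
    };

    // Strip a leading '+' or '-' the way the new code derives 'argname'.
    static const char* flag_name(const char* arg) {
      return (*arg == '+' || *arg == '-') ? arg + 1 : arg;
    }

    static bool is_obsolete(const char* arg, Version* since) {
      const char* name = flag_name(arg);
      for (const ObsoleteFlag* f = obsolete_flags; f->name != NULL; f++) {
        if (strcmp(name, f->name) == 0) { *since = f->obsoleted_in; return true; }
      }
      return false;
    }

    // Mirrors the control flow of the rewritten handle_argument():
    // parse, else ignore, else warn-if-obsolete, else reject (unless '#'-commented).
    static bool handle_argument(const char* arg, bool parsed_ok, bool ignore_unrecognized) {
      if (parsed_ok || ignore_unrecognized) return true;
      Version since;
      if (is_obsolete(arg, &since)) {
        std::printf("warning: ignoring option %s; support was removed in %d.%d\n",
                    flag_name(arg), since.major, since.update);
        return true;
      }
      std::printf("Unrecognized VM option '%s'\n", flag_name(arg));
      return arg[0] == '#';   // "commented out" options are tolerated
    }

    int main() {
      handle_argument("+UseParallelOldGCCompacting", /*parsed_ok=*/false, /*ignore=*/false);
      // An unknown, non-commented flag should be rejected (returns false).
      return handle_argument("+NoSuchFlag", false, false) ? 1 : 0;
    }
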
--- a/src/share/vm/runtime/deoptimization.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/deoptimization.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -90,12 +90,14 @@
 
 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
                                          int  caller_adjustment,
+                                         int  caller_actual_parameters,
                                          int  number_of_frames,
                                          intptr_t* frame_sizes,
                                          address* frame_pcs,
                                          BasicType return_type) {
   _size_of_deoptimized_frame = size_of_deoptimized_frame;
   _caller_adjustment         = caller_adjustment;
+  _caller_actual_parameters  = caller_actual_parameters;
   _number_of_frames          = number_of_frames;
   _frame_sizes               = frame_sizes;
   _frame_pcs                 = frame_pcs;
@@ -193,6 +195,10 @@
   assert(thread->deopt_nmethod() == NULL, "Pending deopt!");
   thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null());
 
+  if (VerifyStack) {
+    thread->validate_frame_layout();
+  }
+
   // Create a growable array of VFrames where each VFrame represents an inlined
   // Java frame.  This storage is allocated with the usual system arena.
   assert(deoptee.is_compiled_frame(), "Wrong frame type");
@@ -373,6 +379,28 @@
     popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
   }
 
+  // Find the current pc for the sender of the deoptee. Since the sender may have been deoptimized
+  // itself since the deoptee vframeArray was created, we must get a fresh value of the pc rather
+  // than simply use array->sender.pc(). This requires us to walk the current set of frames.
+  //
+  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
+  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
+
+  // It's possible that the number of parameters at the call site is
+  // different from the number of arguments in the callee when method
+  // handles are used.  If the caller is interpreted, get the real
+  // value so that the proper amount of space can be added to its
+  // frame.
+  int caller_actual_parameters = callee_parameters;
+  if (deopt_sender.is_interpreted_frame()) {
+    methodHandle method = deopt_sender.interpreter_frame_method();
+    Bytecode_invoke cur = Bytecode_invoke_check(method,
+                                                deopt_sender.interpreter_frame_bci());
+    Symbol* signature = method->constants()->signature_ref_at(cur.index());
+    ArgumentSizeComputer asc(signature);
+    caller_actual_parameters = asc.size() + (cur.has_receiver() ? 1 : 0);
+  }
+
   //
   // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
   // frame_sizes/frame_pcs[1] next oldest frame (int)
@@ -391,7 +419,13 @@
     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
-    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
+    int caller_parms = callee_parameters;
+    if (index == array->frames() - 1) {
+      // Use the value from the interpreted caller
+      caller_parms = caller_actual_parameters;
+    }
+    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms,
+                                                                                                    callee_parameters,
                                                                                                     callee_locals,
                                                                                                     index == 0,
                                                                                                     popframe_extra_args);
@@ -418,13 +452,6 @@
   // Compute information for handling adapters and adjusting the frame size of the caller.
   int caller_adjustment = 0;
 
-  // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
-  // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
-  // than simply use array->sender.pc(). This requires us to walk the current set of frames
-  //
-  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
-  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
-
   // Compute the amount the oldest interpreter frame will have to adjust
   // its caller's stack by. If the caller is a compiled frame then
   // we pretend that the callee has no parameters so that the
@@ -439,14 +466,13 @@
 
   if (deopt_sender.is_compiled_frame()) {
     caller_adjustment = last_frame_adjust(0, callee_locals);
-  } else if (callee_locals > callee_parameters) {
+  } else if (callee_locals > caller_actual_parameters) {
     // The caller frame may need extending to accommodate
     // non-parameter locals of the first unpacked interpreted frame.
     // Compute that adjustment.
-    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
+    caller_adjustment = last_frame_adjust(caller_actual_parameters, callee_locals);
   }
 
-
   // If the sender is deoptimized then we must retrieve the address of the handler
   // since the frame will "magically" show the original pc before the deopt
   // and we'd undo the deopt.
@@ -459,6 +485,7 @@
 
   UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                       caller_adjustment * BytesPerWord,
+                                      caller_actual_parameters,
                                       number_of_frames,
                                       frame_sizes,
                                       frame_pcs,
@@ -556,7 +583,7 @@
   UnrollBlock* info = array->unroll_block();
 
   // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
-  array->unpack_to_stack(stub_frame, exec_mode);
+  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
 
   BasicType bt = info->return_type();
 
@@ -573,6 +600,8 @@
   if (VerifyStack) {
     ResourceMark res_mark;
 
+    thread->validate_frame_layout();
+
     // Verify that the just-unpacked frames match the interpreter's
     // notions of expression stack and locals
     vframeArray* cur_array = thread->vframe_array_last();
@@ -1777,7 +1806,8 @@
   "constraint",
   "div0_check",
   "age",
-  "predicate"
+  "predicate",
+  "loop_limit_check"
 };
 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
   // Note:  Keep this in sync. with enum DeoptAction.
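
The caller_actual_parameters change above recomputes the argument size from the caller's invoke bytecode signature because, when method handles are involved, the slots pushed at the call site can differ from the callee's declared parameter count. As a rough standalone illustration of the slot arithmetic only (a hypothetical helper over a plain JVM descriptor string, not HotSpot's ArgumentSizeComputer):

    #include <cassert>
    #include <cstddef>
    #include <string>

    // Count the argument slots of a JVM method descriptor such as
    // "(IJLjava/lang/String;[D)V": long and double occupy two slots,
    // everything else (including array and object references) one.
    static int argument_slots(const std::string& descriptor, bool has_receiver) {
      int slots = has_receiver ? 1 : 0;          // the receiver, if any, is one slot
      size_t i = descriptor.find('(') + 1;
      while (i < descriptor.size() && descriptor[i] != ')') {
        bool is_array = false;
        while (descriptor[i] == '[') { is_array = true; i++; }  // array dims add nothing
        char c = descriptor[i];
        if (c == 'L') {
          i = descriptor.find(';', i) + 1;       // skip over the class name
          slots += 1;
        } else {
          slots += (!is_array && (c == 'J' || c == 'D')) ? 2 : 1;  // wide primitives
          i++;
        }
      }
      return slots;
    }

    int main() {
      // receiver(1) + int(1) + long(2) + String ref(1) + double[] ref(1) == 6
      assert(argument_slots("(IJLjava/lang/String;[D)V", /*has_receiver=*/true) == 6);
      // A method handle adapter may push a different number of slots than the
      // eventual callee declares, which is why the deoptimization code above
      // asks the caller's bytecode rather than trusting the callee.
      assert(argument_slots("()V", false) == 0);
      return 0;
    }
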
--- a/src/share/vm/runtime/deoptimization.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/deoptimization.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -56,6 +56,7 @@
     Reason_div0_check,            // a null_check due to division by zero
     Reason_age,                   // nmethod too old; tier threshold reached
     Reason_predicate,             // compiler generated predicate failed
+    Reason_loop_limit_check,      // compiler generated loop limit check failed
     Reason_LIMIT,
     // Note:  Keep this enum in sync. with _trap_reason_name.
     Reason_RECORDED_LIMIT = Reason_bimorphic  // some are not recorded per bc
@@ -78,7 +79,7 @@
 
   enum {
     _action_bits = 3,
-    _reason_bits = 4,
+    _reason_bits = 5,
     _action_shift = 0,
     _reason_shift = _action_shift+_action_bits,
     BC_CASE_LIMIT = PRODUCT_ONLY(1) NOT_PRODUCT(4) // for _deoptimization_hist
@@ -137,6 +138,9 @@
     intptr_t* _register_block;            // Block for storing callee-saved registers.
     BasicType _return_type;               // Tells if we have to restore double or long return value
     intptr_t  _initial_fp;                // FP of the sender frame
+    int       _caller_actual_parameters;  // The number of actual arguments at the
+                                          // interpreted caller of the deoptimized frame
+
     // The following fields are used as temps during the unpacking phase
     // (which is tight on registers, especially on x86). They really ought
     // to be PD variables but that involves moving this class into its own
@@ -148,6 +152,7 @@
     // Constructor
     UnrollBlock(int  size_of_deoptimized_frame,
                 int  caller_adjustment,
+                int  caller_actual_parameters,
                 int  number_of_frames,
                 intptr_t* frame_sizes,
                 address* frames_pcs,
@@ -167,6 +172,8 @@
 
     void set_initial_fp(intptr_t fp) { _initial_fp = fp; }
 
+    int caller_actual_parameters() const { return _caller_actual_parameters; }
+
     // Accessors used by the code generator for the unpack stub.
     static int size_of_deoptimized_frame_offset_in_bytes() { return offset_of(UnrollBlock, _size_of_deoptimized_frame); }
     static int caller_adjustment_offset_in_bytes()         { return offset_of(UnrollBlock, _caller_adjustment);         }
--- a/src/share/vm/runtime/frame.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/frame.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -33,6 +33,7 @@
 #include "oops/methodOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
+#include "prims/methodHandles.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
@@ -169,6 +170,11 @@
 }
 
 // type testers
+bool frame::is_ricochet_frame() const {
+  RicochetBlob* rcb = SharedRuntime::ricochet_blob();
+  return (_cb == rcb && rcb != NULL && rcb->returns_to_bounce_addr(_pc));
+}
+
 bool frame::is_deoptimized_frame() const {
   assert(_deopt_state != unknown, "not answerable");
   return _deopt_state == is_deoptimized;
@@ -341,12 +347,18 @@
 
 frame frame::real_sender(RegisterMap* map) const {
   frame result = sender(map);
-  while (result.is_runtime_frame()) {
+  while (result.is_runtime_frame() ||
+         result.is_ricochet_frame()) {
     result = result.sender(map);
   }
   return result;
 }
 
+frame frame::sender_for_ricochet_frame(RegisterMap* map) const {
+  assert(is_ricochet_frame(), "");
+  return MethodHandles::ricochet_frame_sender(*this, map);
+}
+
 // Note: called by profiler - NOT for current thread
 frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
 // If we don't recognize this frame, walk back up the stack until we do
@@ -529,6 +541,7 @@
 const char* frame::print_name() const {
   if (is_native_frame())      return "Native";
   if (is_interpreted_frame()) return "Interpreted";
+  if (is_ricochet_frame())    return "Ricochet";
   if (is_compiled_frame()) {
     if (is_deoptimized_frame()) return "Deoptimized";
     return "Compiled";
@@ -715,6 +728,8 @@
       st->print("v  ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
     } else if (_cb->is_deoptimization_stub()) {
       st->print("v  ~DeoptimizationBlob");
+    } else if (_cb->is_ricochet_stub()) {
+      st->print("v  ~RicochetBlob");
     } else if (_cb->is_exception_stub()) {
       st->print("v  ~ExceptionBlob");
     } else if (_cb->is_safepoint_stub()) {
@@ -978,6 +993,9 @@
 
 void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
   assert(_cb != NULL, "sanity check");
+  if (_cb == SharedRuntime::ricochet_blob()) {
+    oops_ricochet_do(f, reg_map);
+  }
   if (_cb->oop_maps() != NULL) {
     OopMapSet::oops_do(this, reg_map, f);
 
@@ -996,6 +1014,11 @@
     cf->do_code_blob(_cb);
 }
 
+void frame::oops_ricochet_do(OopClosure* f, const RegisterMap* map) {
+  assert(is_ricochet_frame(), "");
+  MethodHandles::ricochet_frame_oops_do(*this, f, map);
+}
+
 class CompiledArgumentOopFinder: public SignatureInfo {
  protected:
   OopClosure*     _f;
@@ -1308,6 +1331,72 @@
   guarantee((current - low_mark) % monitor_size  ==  0         , "Misaligned bottom of BasicObjectLock*");
   guarantee( current >= low_mark                               , "Current BasicObjectLock* below than low_mark");
 }
+
+
+void frame::describe(FrameValues& values, int frame_no) {
+  if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
+    // Label values common to most frames
+    values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
+    values.describe(-1, sp(), err_msg("sp for #%d", frame_no));
+    values.describe(-1, fp(), err_msg("fp for #%d", frame_no));
+  }
+  if (is_interpreted_frame()) {
+    methodOop m = interpreter_frame_method();
+    int bci = interpreter_frame_bci();
+
+    // Label the method and current bci
+    values.describe(-1, MAX2(sp(), fp()),
+                    FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
+    values.describe(-1, MAX2(sp(), fp()),
+                    err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
+    if (m->max_locals() > 0) {
+      intptr_t* l0 = interpreter_frame_local_at(0);
+      intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
+      values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1);
+      // Report each local and mark as owned by this frame
+      for (int l = 0; l < m->max_locals(); l++) {
+        intptr_t* l0 = interpreter_frame_local_at(l);
+        values.describe(frame_no, l0, err_msg("local %d", l));
+      }
+    }
+
+    // Compute the actual expression stack size
+    InterpreterOopMap mask;
+    OopMapCache::compute_one_oop_map(m, bci, &mask);
+    intptr_t* tos = NULL;
+    // Report each stack element and mark as owned by this frame
+    for (int e = 0; e < mask.expression_stack_size(); e++) {
+      tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
+      values.describe(frame_no, interpreter_frame_expression_stack_at(e),
+                      err_msg("stack %d", e));
+    }
+    if (tos != NULL) {
+      values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1);
+    }
+    if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
+      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
+      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
+    }
+  } else if (is_entry_frame()) {
+    // For now just label the frame
+    values.describe(-1, MAX2(sp(), fp()), err_msg("#%d entry frame", frame_no), 2);
+  } else if (is_compiled_frame()) {
+    // For now just label the frame
+    nmethod* nm = cb()->as_nmethod_or_null();
+    values.describe(-1, MAX2(sp(), fp()),
+                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
+                                       nm, nm->method()->name_and_sig_as_C_string(),
+                                       is_deoptimized_frame() ? " (deoptimized)" : ""), 2);
+  } else if (is_native_frame()) {
+    // For now just label the frame
+    nmethod* nm = cb()->as_nmethod_or_null();
+    values.describe(-1, MAX2(sp(), fp()),
+                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
+                                       nm, nm->method()->name_and_sig_as_C_string()), 2);
+  }
+  describe_pd(values, frame_no);
+}
+
 #endif
 
 
@@ -1319,3 +1408,84 @@
   _fr = thread->last_frame();
   _is_done = false;
 }
+
+
+#ifdef ASSERT
+
+void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
+  FrameValue fv;
+  fv.location = location;
+  fv.owner = owner;
+  fv.priority = priority;
+  fv.description = NEW_RESOURCE_ARRAY(char, strlen(description) + 1);
+  strcpy(fv.description, description);
+  _values.append(fv);
+}
+
+
+void FrameValues::validate() {
+  _values.sort(compare);
+  bool error = false;
+  FrameValue prev;
+  prev.owner = -1;
+  for (int i = _values.length() - 1; i >= 0; i--) {
+    FrameValue fv = _values.at(i);
+    if (fv.owner == -1) continue;
+    if (prev.owner == -1) {
+      prev = fv;
+      continue;
+    }
+    if (prev.location == fv.location) {
+      if (fv.owner != prev.owner) {
+        tty->print_cr("overlapping storage");
+        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", prev.location, *prev.location, prev.description);
+        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
+        error = true;
+      }
+    } else {
+      prev = fv;
+    }
+  }
+  assert(!error, "invalid layout");
+}
+
+
+void FrameValues::print() {
+  _values.sort(compare);
+  JavaThread* thread = JavaThread::current();
+
+  // Sometimes values like the fp can be invalid if the register map
+  // wasn't updated during the walk.  Trim out values that aren't
+  // actually in the stack of the thread.
+  int min_index = 0;
+  int max_index = _values.length() - 1;
+  intptr_t* v0 = _values.at(min_index).location;
+  while (!thread->is_in_stack((address)v0)) {
+    v0 = _values.at(++min_index).location;
+  }
+  intptr_t* v1 = _values.at(max_index).location;
+  while (!thread->is_in_stack((address)v1)) {
+    v1 = _values.at(--max_index).location;
+  }
+  intptr_t* min = MIN2(v0, v1);
+  intptr_t* max = MAX2(v0, v1);
+  intptr_t* cur = max;
+  intptr_t* last = NULL;
+  for (int i = max_index; i >= min_index; i--) {
+    FrameValue fv = _values.at(i);
+    while (cur > fv.location) {
+      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur);
+      cur--;
+    }
+    if (last == fv.location) {
+      const char* spacer = "          " LP64_ONLY("        ");
+      tty->print_cr(" %s  %s %s", spacer, spacer, fv.description);
+    } else {
+      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
+      last = fv.location;
+      cur--;
+    }
+  }
+}
+
+#endif
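
The new FrameValues helper above gathers (owner, location, description) notes from every frame, sorts them by address, and either prints an annotated stack dump or checks that no slot is claimed by two frames. A compact standalone sketch of the validation half, using made-up addresses instead of real stack locations:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // One annotated stack slot: which frame claims it (owner < 0 means a shared
    // label such as "sp for #1"), where it lives, and what it is.
    struct SlotNote {
      int         owner;
      intptr_t*   location;
      std::string description;
    };

    // Sort by address and report any slot claimed by two different frames,
    // mirroring the check done by FrameValues::validate() in the hunk above.
    static bool validate(std::vector<SlotNote> notes) {
      std::sort(notes.begin(), notes.end(),
                [](const SlotNote& a, const SlotNote& b) { return a.location < b.location; });
      bool ok = true;
      const SlotNote* prev = NULL;
      for (size_t i = 0; i < notes.size(); i++) {
        const SlotNote& cur = notes[i];
        if (cur.owner < 0) continue;             // pure labels never conflict
        if (prev != NULL && prev->location == cur.location && prev->owner != cur.owner) {
          std::printf("overlapping storage at %p: '%s' (frame %d) vs '%s' (frame %d)\n",
                      (void*)cur.location, prev->description.c_str(), prev->owner,
                      cur.description.c_str(), cur.owner);
          ok = false;
        }
        prev = &cur;
      }
      return ok;
    }

    int main() {
      intptr_t stack[4] = {0, 0, 0, 0};
      std::vector<SlotNote> notes = {
        {  0, &stack[1], "local 0"   },
        {  1, &stack[1], "stack 0"   },   // deliberately claimed by two frames
        { -1, &stack[3], "sp for #1" },
      };
      // The deliberate overlap should be reported and validate() should fail.
      return validate(notes) ? 1 : 0;
    }
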
--- a/src/share/vm/runtime/frame.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/frame.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -60,6 +60,7 @@
 typedef class BytecodeInterpreter* interpreterState;
 
 class CodeBlob;
+class FrameValues;
 class vframeArray;
 
 
@@ -134,6 +135,7 @@
   bool is_interpreted_frame()    const;
   bool is_java_frame()           const;
   bool is_entry_frame()          const;             // Java frame called from C?
+  bool is_ricochet_frame()       const;
   bool is_native_frame()         const;
   bool is_runtime_frame()        const;
   bool is_compiled_frame()       const;
@@ -174,6 +176,7 @@
   // Helper methods for better factored code in frame::sender
   frame sender_for_compiled_frame(RegisterMap* map) const;
   frame sender_for_entry_frame(RegisterMap* map) const;
+  frame sender_for_ricochet_frame(RegisterMap* map) const;
   frame sender_for_interpreter_frame(RegisterMap* map) const;
   frame sender_for_native_frame(RegisterMap* map) const;
 
@@ -381,6 +384,8 @@
  private:
   const char* print_name() const;
 
+  void describe_pd(FrameValues& values, int frame_no);
+
  public:
   void print_value() const { print_value_on(tty,NULL); }
   void print_value_on(outputStream* st, JavaThread *thread) const;
@@ -388,12 +393,16 @@
   void interpreter_frame_print_on(outputStream* st) const;
   void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const;
 
+  // Add annotated descriptions of memory locations belonging to this frame to values
+  void describe(FrameValues& values, int frame_no);
+
   // Conversion from an VMReg to physical stack location
   oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const;
 
   // Oops-do's
   void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f);
   void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true);
+  void oops_ricochet_do(OopClosure* f, const RegisterMap* map);
 
  private:
   void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f);
@@ -472,6 +481,41 @@
 
 };
 
+#ifdef ASSERT
+// A simple class to describe a location on the stack
+class FrameValue VALUE_OBJ_CLASS_SPEC {
+ public:
+  intptr_t* location;
+  char* description;
+  int owner;
+  int priority;
+};
+
+
+// A collection of described stack values that can print a symbolic
+// description of the stack memory.  Interpreter frame values can be
+// in the caller frames so all the values are collected first and then
+// sorted before being printed.
+class FrameValues {
+ private:
+  GrowableArray<FrameValue> _values;
+
+  static int compare(FrameValue* a, FrameValue* b) {
+    if (a->location == b->location) {
+      return a->priority - b->priority;
+    }
+    return a->location - b->location;
+  }
+
+ public:
+  // Used by frame functions to describe locations.
+  void describe(int owner, intptr_t* location, const char* description, int priority = 0);
+
+  void validate();
+  void print();
+};
+
+#endif
 
 //
 // StackFrameStream iterates through the frames of a thread starting from
--- a/src/share/vm/runtime/globals.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/globals.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -620,6 +620,9 @@
   product(bool, UseSSE42Intrinsics, false,                                  \
           "SSE4.2 versions of intrinsics")                                  \
                                                                             \
+  product(bool, UseCondCardMark, false,                                     \
+          "Check for already marked card before updating card table")       \
+                                                                            \
   develop(bool, TraceCallFixup, false,                                      \
           "traces all call fixups")                                         \
                                                                             \
@@ -1358,13 +1361,6 @@
   product(bool, UseParallelOldGC, false,                                    \
           "Use the Parallel Old garbage collector")                         \
                                                                             \
-  product(bool, UseParallelOldGCCompacting, true,                           \
-          "In the Parallel Old garbage collector use parallel compaction")  \
-                                                                            \
-  product(bool, UseParallelDensePrefixUpdate, true,                         \
-          "In the Parallel Old garbage collector use parallel dense"        \
-          " prefix update")                                                 \
-                                                                            \
   product(uintx, HeapMaximumCompactionInterval, 20,                         \
           "How often should we maximally compact the heap (not allowing "   \
           "any dead space)")                                                \
@@ -1384,9 +1380,6 @@
           "The standard deviation used by the par compact dead wood"        \
           "limiter (a number between 0-100).")                              \
                                                                             \
-  product(bool, UseParallelOldGCDensePrefix, true,                          \
-          "Use a dense prefix with the Parallel Old garbage collector")     \
-                                                                            \
   product(uintx, ParallelGCThreads, 0,                                      \
           "Number of parallel threads parallel gc will use")                \
                                                                             \
@@ -1470,8 +1463,10 @@
   product(intx, ParallelGCBufferWastePct, 10,                               \
           "wasted fraction of parallel allocation buffer.")                 \
                                                                             \
-  product(bool, ParallelGCRetainPLAB, true,                                 \
-          "Retain parallel allocation buffers across scavenges.")           \
+  diagnostic(bool, ParallelGCRetainPLAB, false,                             \
+             "Retain parallel allocation buffers across scavenges; "        \
+             " -- disabled because this currently conflicts with "          \
+             " parallel card scanning under certain conditions ")           \
                                                                             \
   product(intx, TargetPLABWastePct, 10,                                     \
           "target wasted space in last buffer as pct of overall allocation")\
@@ -1505,7 +1500,15 @@
   product(uintx, ParGCDesiredObjsFromOverflowList, 20,                      \
           "The desired number of objects to claim from the overflow list")  \
                                                                             \
-  product(uintx, CMSParPromoteBlocksToClaim, 16,                             \
+  diagnostic(intx, ParGCStridesPerThread, 2,                                \
+          "The number of strides per worker thread that we divide up the "  \
+          "card table scanning work into")                                  \
+                                                                            \
+  diagnostic(intx, ParGCCardsPerStrideChunk, 256,                           \
+          "The number of cards in each chunk of the parallel chunks used "  \
+          "during card table scanning")                                     \
+                                                                            \
+  product(uintx, CMSParPromoteBlocksToClaim, 16,                            \
           "Number of blocks to attempt to claim when refilling CMS LAB for "\
           "parallel GC.")                                                   \
                                                                             \
@@ -1837,7 +1840,7 @@
   develop(bool, VerifyBlockOffsetArray, false,                              \
           "Do (expensive!) block offset array verification")                \
                                                                             \
-  product(bool, BlockOffsetArrayUseUnallocatedBlock, false,                 \
+  diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false,              \
           "Maintain _unallocated_block in BlockOffsetArray"                 \
           " (currently applicable only to CMS collector)")                  \
                                                                             \
@@ -2892,7 +2895,7 @@
           "Max. no. of lines in the stack trace for Java exceptions "       \
           "(0 means all)")                                                  \
                                                                             \
-  NOT_EMBEDDED(develop(intx, GuaranteedSafepointInterval, 1000,             \
+  NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000,          \
           "Guarantee a safepoint (at least) every so many milliseconds "    \
           "(0 means none)"))                                                \
                                                                             \
@@ -2909,6 +2912,12 @@
   product(intx, NmethodSweepCheckInterval, 5,                               \
           "Compilers wake up every n seconds to possibly sweep nmethods")   \
                                                                             \
+  notproduct(bool, LogSweeper, false,                                       \
+            "Keep a ring buffer of sweeper activity")                       \
+                                                                            \
+  notproduct(intx, SweeperLogEntries, 1024,                                 \
+            "Number of records in the ring buffer of sweeper activity")     \
+                                                                            \
   notproduct(intx, MemProfilingInterval, 500,                               \
           "Time between each invocation of the MemProfiler")                \
                                                                             \
@@ -3709,6 +3718,9 @@
   diagnostic(intx, MethodHandlePushLimit, 3,                                \
           "number of additional stack slots a method handle may push")      \
                                                                             \
+  diagnostic(bool, PrintMethodHandleStubs, false,                           \
+          "Print generated stub code for method handles")                   \
+                                                                            \
   develop(bool, TraceMethodHandles, false,                                  \
           "trace internal method handle operations")                        \
                                                                             \
@@ -3718,10 +3730,17 @@
   diagnostic(bool, OptimizeMethodHandles, true,                             \
           "when constructing method handles, try to improve them")          \
                                                                             \
+  develop(bool, StressMethodHandleWalk, false,                              \
+          "Process all method handles with MethodHandleWalk")               \
+                                                                            \
+  diagnostic(bool, UseRicochetFrames, true,                                 \
+          "use ricochet stack frames for method handle combination, "       \
+          "if the platform supports them")                                  \
+                                                                            \
   experimental(bool, TrustFinalNonStaticFields, false,                      \
           "trust final non-static declarations for constant folding")       \
                                                                             \
-  experimental(bool, AllowInvokeGeneric, true,                              \
+  experimental(bool, AllowInvokeGeneric, false,                             \
           "accept MethodHandle.invoke and MethodHandle.invokeGeneric "      \
           "as equivalent methods")                                          \
                                                                             \
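
Among the flag changes above, UseCondCardMark makes the post-write barrier read a card before dirtying it, which is mainly a way to avoid redundant stores to card-table cache lines shared between processors. A minimal sketch of the idea, with illustrative card constants rather than the generated barrier code:

    #include <cstddef>
    #include <cstdint>

    // Illustrative constants: HotSpot uses 512-byte cards; the exact clean/dirty
    // encodings here are made up for the sketch.
    static const int     kCardShift = 9;
    static const uint8_t kDirty     = 0;
    static const uint8_t kClean     = 0xff;

    // Post-write barrier with conditional card marking: read the card first and
    // only store when it is not already dirty.  The extra load avoids repeated
    // stores to a card-table cache line that other CPUs are also updating, which
    // is what the new UseCondCardMark flag above enables.
    static void post_write_barrier(uint8_t* card_table, uintptr_t heap_base,
                                   uintptr_t field_addr) {
      size_t card_index = (field_addr - heap_base) >> kCardShift;
      if (card_table[card_index] != kDirty) {
        card_table[card_index] = kDirty;
      }
    }

    int main() {
      uint8_t cards[16];
      for (int i = 0; i < 16; i++) cards[i] = kClean;
      const uintptr_t heap_base = 0x100000;
      post_write_barrier(cards, heap_base, heap_base + 3 * 512 + 8);   // dirties card 3
      post_write_barrier(cards, heap_base, heap_base + 3 * 512 + 64);  // no store this time
      return cards[3] == kDirty ? 0 : 1;
    }
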
--- a/src/share/vm/runtime/javaCalls.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/javaCalls.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -408,7 +408,7 @@
   // to Java
   if (!os::stack_shadow_pages_available(THREAD, method)) {
     // Throw stack overflow exception with preinitialized exception.
-    Exceptions::throw_stack_overflow_exception(THREAD, __FILE__, __LINE__);
+    Exceptions::throw_stack_overflow_exception(THREAD, __FILE__, __LINE__, method);
     return;
   } else {
     // Touch pages checked if the OS needs them to be touched to be mapped.
--- a/src/share/vm/runtime/os.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/os.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -274,7 +274,7 @@
   static char*  reserve_memory_special(size_t size, char* addr = NULL,
                 bool executable = false);
   static bool   release_memory_special(char* addr, size_t bytes);
-  static bool   large_page_init();
+  static void   large_page_init();
   static size_t large_page_size();
   static bool   can_commit_large_page_memory();
   static bool   can_execute_large_page_memory();
--- a/src/share/vm/runtime/serviceThread.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/serviceThread.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -28,6 +28,7 @@
 #include "runtime/serviceThread.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "prims/jvmtiImpl.hpp"
+#include "services/gcNotifier.hpp"
 
 ServiceThread* ServiceThread::_instance = NULL;
 
@@ -81,6 +82,7 @@
   while (true) {
     bool sensors_changed = false;
     bool has_jvmti_events = false;
+    bool has_gc_notification_event = false;
     JvmtiDeferredEvent jvmti_event;
     {
       // Need state transition ThreadBlockInVM so that this thread
@@ -95,9 +97,10 @@
 
       MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
       while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) &&
-             !(has_jvmti_events = JvmtiDeferredEventQueue::has_events())) {
+             !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) &&
+              !(has_gc_notification_event = GCNotifier::has_event())) {
         // wait until one of the sensors has pending requests, or there is a
-        // pending JVMTI event to post
+        // pending JVMTI event or JMX GC notification to post
         Service_lock->wait(Mutex::_no_safepoint_check_flag);
       }
 
@@ -113,6 +116,10 @@
     if (sensors_changed) {
       LowMemoryDetector::process_sensor_changes(jt);
     }
+
+    if(has_gc_notification_event) {
+        GCNotifier::sendNotification(CHECK);
+    }
   }
 }
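
The serviceThread change above just adds one more pending-work flag (the JMX GC notification) to the wait condition and handles it outside the lock. A small standalone sketch of that wait-loop shape, using std::condition_variable in place of Service_lock (all names illustrative):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Shared state guarded by one lock, standing in for the Service_lock pattern
    // in the hunk above.
    static std::mutex              service_lock;
    static std::condition_variable service_cv;
    static bool pending_sensor  = false;
    static bool pending_jvmti   = false;
    static bool pending_gc_note = false;
    static bool shutting_down   = false;

    static void service_loop() {
      for (;;) {
        bool sensors, jvmti, gc_note;
        {
          std::unique_lock<std::mutex> ml(service_lock);
          // Sleep until at least one kind of work is pending -- the new GC
          // notification flag is just one more term in the wait condition.
          service_cv.wait(ml, [] {
            return pending_sensor || pending_jvmti || pending_gc_note || shutting_down;
          });
          // Snapshot and clear the flags while holding the lock...
          sensors = pending_sensor;  pending_sensor  = false;
          jvmti   = pending_jvmti;   pending_jvmti   = false;
          gc_note = pending_gc_note; pending_gc_note = false;
          if (!sensors && !jvmti && !gc_note && shutting_down) return;
        }
        // ...then do the actual work without it.
        if (sensors) std::puts("process sensor changes");
        if (jvmti)   std::puts("post deferred JVMTI event");
        if (gc_note) std::puts("send JMX GC notification");
      }
    }

    int main() {
      std::thread t(service_loop);
      { std::lock_guard<std::mutex> g(service_lock); pending_gc_note = true; }
      service_cv.notify_one();
      { std::lock_guard<std::mutex> g(service_lock); shutting_down = true; }
      service_cv.notify_one();
      t.join();
      return 0;
    }
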
 
--- a/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -80,6 +80,72 @@
 #include "c1/c1_Runtime1.hpp"
 #endif
 
+// Shared stub locations
+RuntimeStub*        SharedRuntime::_wrong_method_blob;
+RuntimeStub*        SharedRuntime::_ic_miss_blob;
+RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
+RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
+RuntimeStub*        SharedRuntime::_resolve_static_call_blob;
+
+DeoptimizationBlob* SharedRuntime::_deopt_blob;
+RicochetBlob*       SharedRuntime::_ricochet_blob;
+
+SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
+SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
+
+#ifdef COMPILER2
+UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
+#endif // COMPILER2
+
+
+//----------------------------generate_stubs-----------------------------------
+void SharedRuntime::generate_stubs() {
+  _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),         "wrong_method_stub");
+  _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
+  _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),  "resolve_opt_virtual_call");
+  _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),      "resolve_virtual_call");
+  _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),       "resolve_static_call");
+
+  _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), false);
+  _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true);
+
+  generate_ricochet_blob();
+  generate_deopt_blob();
+
+#ifdef COMPILER2
+  generate_uncommon_trap_blob();
+#endif // COMPILER2
+}
+
+//----------------------------generate_ricochet_blob---------------------------
+void SharedRuntime::generate_ricochet_blob() {
+  if (!EnableInvokeDynamic)  return;  // leave it as a null
+
+#ifndef TARGET_ARCH_NYI_6939861
+  // allocate space for the code
+  ResourceMark rm;
+  // setup code generation tools
+  CodeBuffer buffer("ricochet_blob", 256 LP64_ONLY(+ 256), 256);  // XXX x86 LP64L: 512, 512
+  MacroAssembler* masm = new MacroAssembler(&buffer);
+
+  int bounce_offset = -1, exception_offset = -1, frame_size_in_words = -1;
+  MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &bounce_offset, &exception_offset, &frame_size_in_words);
+
+  // -------------
+  // make sure all code is generated
+  masm->flush();
+
+  // failed to generate?
+  if (bounce_offset < 0 || exception_offset < 0 || frame_size_in_words < 0) {
+    assert(false, "bad ricochet blob");
+    return;
+  }
+
+  _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words);
+#endif
+}
+
+
 #include <math.h>
 
 HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
@@ -141,6 +207,7 @@
 int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
 
+
 void SharedRuntime::trace_ic_miss(address at) {
   for (int i = 0; i < _ICmiss_index; i++) {
     if (_ICmiss_at[i] == at) {
@@ -460,6 +527,10 @@
   if (Interpreter::contains(return_address)) {
     return Interpreter::rethrow_exception_entry();
   }
+  // Ricochet frame unwind code
+  if (SharedRuntime::ricochet_blob() != NULL && SharedRuntime::ricochet_blob()->returns_to_bounce_addr(return_address)) {
+    return SharedRuntime::ricochet_blob()->exception_addr();
+  }
 
   guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
@@ -708,6 +779,13 @@
   return (SharedRuntime::deopt_blob()->jmp_uncommon_trap());
 }
 
+JRT_ENTRY(void, SharedRuntime::throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual))
+  assert(thread == JavaThread::current() && required->is_oop() && actual->is_oop(), "bad args");
+  ResourceMark rm;
+  char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual);
+  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_invoke_WrongMethodTypeException(), message);
+JRT_END
+
 address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
                                                            address pc,
                                                            SharedRuntime::ImplicitExceptionKind exception_kind)
@@ -1205,6 +1283,7 @@
   assert(stub_frame.is_runtime_frame(), "sanity check");
   frame caller_frame = stub_frame.sender(&reg_map);
   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
+  assert(!caller_frame.is_ricochet_frame(), "unexpected frame");
 #endif /* ASSERT */
 
   methodHandle callee_method;
@@ -1253,6 +1332,7 @@
 
   if (caller_frame.is_interpreted_frame() ||
       caller_frame.is_entry_frame()       ||
+      caller_frame.is_ricochet_frame()    ||
       is_mh_invoke_via_adapter) {
     methodOop callee = thread->callee_target();
     guarantee(callee != NULL && callee->is_method(), "bad handshake");
@@ -1752,14 +1832,14 @@
         targetArity = ArgumentCount(target->signature()).size();
       }
     }
-    klassOop kignore; int dmf_flags = 0;
-    methodOop actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
+    KlassHandle kignore; int dmf_flags = 0;
+    methodHandle actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
     if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver |
                        MethodHandles::_dmf_does_dispatch |
                        MethodHandles::_dmf_from_interface)) != 0)
-      actual_method = NULL;  // MH does extra binds, drops, etc.
+      actual_method = methodHandle();  // MH does extra binds, drops, etc.
     bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0);
-    if (actual_method != NULL) {
+    if (actual_method.not_null()) {
       mhName = actual_method->signature()->as_C_string();
       mhArity = ArgumentCount(actual_method->signature()).size();
       if (!actual_method->is_static())  mhArity += 1;
--- a/src/share/vm/runtime/sharedRuntime.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -52,26 +52,33 @@
 
   // Shared stub locations
 
-  static RuntimeStub* _wrong_method_blob;
-  static RuntimeStub* _ic_miss_blob;
-  static RuntimeStub* _resolve_opt_virtual_call_blob;
-  static RuntimeStub* _resolve_virtual_call_blob;
-  static RuntimeStub* _resolve_static_call_blob;
+  static RuntimeStub*        _wrong_method_blob;
+  static RuntimeStub*        _ic_miss_blob;
+  static RuntimeStub*        _resolve_opt_virtual_call_blob;
+  static RuntimeStub*        _resolve_virtual_call_blob;
+  static RuntimeStub*        _resolve_static_call_blob;
 
-  static SafepointBlob* _polling_page_safepoint_handler_blob;
-  static SafepointBlob* _polling_page_return_handler_blob;
+  static DeoptimizationBlob* _deopt_blob;
+  static RicochetBlob*       _ricochet_blob;
+
+  static SafepointBlob*      _polling_page_safepoint_handler_blob;
+  static SafepointBlob*      _polling_page_return_handler_blob;
+
 #ifdef COMPILER2
-  static ExceptionBlob*       _exception_blob;
-  static UncommonTrapBlob*    _uncommon_trap_blob;
+  static UncommonTrapBlob*   _uncommon_trap_blob;
 #endif // COMPILER2
 
 #ifndef PRODUCT
-
   // Counters
   static int     _nof_megamorphic_calls;         // total # of megamorphic calls (through vtable)
+#endif // !PRODUCT
 
-#endif // !PRODUCT
+ private:
+  static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return);
+  static RuntimeStub*   generate_resolve_blob(address destination, const char* name);
+
  public:
+  static void generate_stubs(void);
 
   // max bytes for each dtrace string parameter
   enum { max_dtrace_string_size = 256 };
@@ -179,6 +186,7 @@
   static void    throw_NullPointerException_at_call(JavaThread* thread);
   static void    throw_StackOverflowError(JavaThread* thread);
   static address deoptimization_continuation(JavaThread* thread, address pc, nmethod* nm);
+  static void    throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual);
   static address continuation_for_implicit_exception(JavaThread* thread,
                                                      address faulting_pc,
                                                      ImplicitExceptionKind exception_kind);
@@ -214,6 +222,16 @@
     return _resolve_static_call_blob->entry_point();
   }
 
+  static RicochetBlob* ricochet_blob() {
+#ifdef X86
+    // Currently only implemented on x86
+    assert(!EnableInvokeDynamic || _ricochet_blob != NULL, "oops");
+#endif
+    return _ricochet_blob;
+  }
+
+  static void generate_ricochet_blob();
+
   static SafepointBlob* polling_page_return_handler_blob()     { return _polling_page_return_handler_blob; }
   static SafepointBlob* polling_page_safepoint_handler_blob()  { return _polling_page_safepoint_handler_blob; }
 
@@ -315,12 +333,9 @@
                                      bool is_virtual,
                                      bool is_optimized, TRAPS);
 
-  static void generate_stubs(void);
-
   private:
   // deopt blob
   static void generate_deopt_blob(void);
-  static DeoptimizationBlob* _deopt_blob;
 
   public:
   static DeoptimizationBlob* deopt_blob(void)      { return _deopt_blob; }
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/stubCodeGenerator.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/stubCodeGenerator.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -80,9 +80,10 @@
 
 // Implementation of StubCodeGenerator
 
-StubCodeGenerator::StubCodeGenerator(CodeBuffer* code) {
+StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, bool print_code) {
   _masm = new MacroAssembler(code);
   _first_stub = _last_stub = NULL;
+  _print_code = print_code;
 }
 
 extern "C" {
@@ -94,7 +95,7 @@
 }
 
 StubCodeGenerator::~StubCodeGenerator() {
-  if (PrintStubCode) {
+  if (PrintStubCode || _print_code) {
     CodeBuffer* cbuf = _masm->code();
     CodeBlob*   blob = CodeCache::find_blob_unsafe(cbuf->insts()->start());
     if (blob != NULL) {
--- a/src/share/vm/runtime/stubCodeGenerator.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/stubCodeGenerator.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -98,9 +98,10 @@
 
   StubCodeDesc* _first_stub;
   StubCodeDesc* _last_stub;
+  bool _print_code;
 
  public:
-  StubCodeGenerator(CodeBuffer* code);
+  StubCodeGenerator(CodeBuffer* code, bool print_code = false);
   ~StubCodeGenerator();
 
   MacroAssembler* assembler() const              { return _masm; }
--- a/src/share/vm/runtime/stubRoutines.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/stubRoutines.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -55,6 +55,7 @@
 address StubRoutines::_throw_NullPointerException_entry         = NULL;
 address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
 address StubRoutines::_throw_StackOverflowError_entry           = NULL;
+address StubRoutines::_throw_WrongMethodTypeException_entry     = NULL;
 address StubRoutines::_handler_for_unsafe_access_entry          = NULL;
 jint    StubRoutines::_verify_oop_count                         = 0;
 address StubRoutines::_verify_oop_subroutine_entry              = NULL;
--- a/src/share/vm/runtime/stubRoutines.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/stubRoutines.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -132,6 +132,7 @@
   static address _throw_NullPointerException_entry;
   static address _throw_NullPointerException_at_call_entry;
   static address _throw_StackOverflowError_entry;
+  static address _throw_WrongMethodTypeException_entry;
   static address _handler_for_unsafe_access_entry;
 
   static address _atomic_xchg_entry;
@@ -254,6 +255,7 @@
   static address throw_NullPointerException_entry()        { return _throw_NullPointerException_entry; }
   static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
   static address throw_StackOverflowError_entry()          { return _throw_StackOverflowError_entry; }
+  static address throw_WrongMethodTypeException_entry()    { return _throw_WrongMethodTypeException_entry; }
 
   // Exceptions during unsafe access - should throw Java exception rather
   // than crash.
--- a/src/share/vm/runtime/sweeper.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/sweeper.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -37,6 +37,94 @@
 #include "utilities/events.hpp"
 #include "utilities/xmlstream.hpp"
 
+#ifdef ASSERT
+
+#define SWEEP(nm) record_sweep(nm, __LINE__)
+// Sweeper logging code
+class SweeperRecord {
+ public:
+  int traversal;
+  int invocation;
+  int compile_id;
+  long traversal_mark;
+  int state;
+  const char* kind;
+  address vep;
+  address uep;
+  int line;
+
+  void print() {
+      tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
+                    PTR_FORMAT " state = %d traversal_mark = %ld line = %d",
+                    traversal,
+                    invocation,
+                    compile_id,
+                    kind == NULL ? "" : kind,
+                    uep,
+                    vep,
+                    state,
+                    traversal_mark,
+                    line);
+  }
+};
+
+static int _sweep_index = 0;
+static SweeperRecord* _records = NULL;
+
+void NMethodSweeper::report_events(int id, address entry) {
+  if (_records != NULL) {
+    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
+      if (_records[i].uep == entry ||
+          _records[i].vep == entry ||
+          _records[i].compile_id == id) {
+        _records[i].print();
+      }
+    }
+    for (int i = 0; i < _sweep_index; i++) {
+      if (_records[i].uep == entry ||
+          _records[i].vep == entry ||
+          _records[i].compile_id == id) {
+        _records[i].print();
+      }
+    }
+  }
+}
+
+void NMethodSweeper::report_events() {
+  if (_records != NULL) {
+    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
+      // skip empty records
+      if (_records[i].vep == NULL) continue;
+      _records[i].print();
+    }
+    for (int i = 0; i < _sweep_index; i++) {
+      // skip empty records
+      if (_records[i].vep == NULL) continue;
+      _records[i].print();
+    }
+  }
+}
+
+void NMethodSweeper::record_sweep(nmethod* nm, int line) {
+  if (_records != NULL) {
+    _records[_sweep_index].traversal = _traversals;
+    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
+    _records[_sweep_index].invocation = _invocations;
+    _records[_sweep_index].compile_id = nm->compile_id();
+    _records[_sweep_index].kind = nm->compile_kind();
+    _records[_sweep_index].state = nm->_state;
+    _records[_sweep_index].vep = nm->verified_entry_point();
+    _records[_sweep_index].uep = nm->entry_point();
+    _records[_sweep_index].line = line;
+
+    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
+  }
+}
+#else
+#define SWEEP(nm)
+#endif
+
+
 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
 nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
 int       NMethodSweeper::_seen = 0 ;        // No. of nmethods we have currently processed in current pass of CodeCache
@@ -137,6 +225,13 @@
     if (old != 0) {
       return;
     }
+#ifdef ASSERT
+    if (LogSweeper && _records == NULL) {
+      // Create the ring buffer for the logging code
+      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries);
+      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
+    }
+#endif
     if (_invocations > 0) {
       sweep_code_cache();
       _invocations--;
@@ -213,10 +308,29 @@
   }
 }
 
+class NMethodMarker: public StackObj {
+ private:
+  CompilerThread* _thread;
+ public:
+  NMethodMarker(nmethod* nm) {
+    _thread = CompilerThread::current();
+    _thread->set_scanned_nmethod(nm);
+  }
+  ~NMethodMarker() {
+    _thread->set_scanned_nmethod(NULL);
+  }
+};
+
 
 void NMethodSweeper::process_nmethod(nmethod *nm) {
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
+  // Make sure this nmethod doesn't get unloaded during the scan,
+  // since the locks acquired below might safepoint.
+  NMethodMarker nmm(nm);
+
+  SWEEP(nm);
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
@@ -224,8 +338,10 @@
       // Clean-up all inline caches that points to zombie/non-reentrant methods
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
+      SWEEP(nm);
     } else {
       _locked_seen++;
+      SWEEP(nm);
     }
     return;
   }
@@ -247,6 +363,7 @@
       }
       nm->mark_for_reclamation();
       _rescan = true;
+      SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
     // If there is no current activations of this method on the
@@ -257,6 +374,7 @@
       }
       nm->make_zombie();
       _rescan = true;
+      SWEEP(nm);
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
@@ -265,6 +383,7 @@
       // request a rescan.  If this method stays on the stack for a
       // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
+      SWEEP(nm);
     }
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
@@ -273,10 +392,12 @@
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      SWEEP(nm);
       nm->flush();
     } else {
       nm->make_zombie();
       _rescan = true;
+      SWEEP(nm);
     }
   } else {
     assert(nm->is_alive(), "should be alive");
@@ -293,6 +414,7 @@
     // Clean-up all inline caches that points to zombie/non-reentrant methods
     MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
+    SWEEP(nm);
   }
 }
 
--- a/src/share/vm/runtime/sweeper.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/sweeper.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -57,6 +57,13 @@
  public:
   static long traversal_count() { return _traversals; }
 
+#ifdef ASSERT
+  // Keep track of sweeper activity in the ring buffer
+  static void record_sweep(nmethod* nm, int line);
+  static void report_events(int id, address entry);
+  static void report_events();
+#endif
+
   static void scan_stacks();      // Invoked at the end of each safepoint
   static void sweep_code_cache(); // Concurrent part of sweep job
   static void possibly_sweep();   // Compiler threads call this to sweep
--- a/src/share/vm/runtime/thread.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/thread.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -32,6 +32,7 @@
 #include "graal/graalCompiler.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "interpreter/oopMapCache.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
@@ -2863,6 +2864,26 @@
 }
 
 
+#ifdef ASSERT
+// Print or validate the layout of stack frames
+void JavaThread::print_frame_layout(int depth, bool validate_only) {
+  ResourceMark rm;
+  PRESERVE_EXCEPTION_MARK;
+  FrameValues values;
+  int frame_no = 0;
+  for(StackFrameStream fst(this, false); !fst.is_done(); fst.next()) {
+    fst.current()->describe(values, ++frame_no);
+    if (depth == frame_no) break;
+  }
+  if (validate_only) {
+    values.validate();
+  } else {
+    tty->print_cr("[Describe stack layout]");
+    values.print();
+  }
+}
+#endif
+
 void JavaThread::trace_stack_from(vframe* start_vf) {
   ResourceMark rm;
   int vframe_no = 1;
@@ -2926,12 +2947,22 @@
   _counters = counters;
   _is_compiling = false;
   _buffer_blob = NULL;
+  _scanned_nmethod = NULL;
 
 #ifndef PRODUCT
   _ideal_graph_printer = NULL;
 #endif
 }
 
+void CompilerThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+  JavaThread::oops_do(f, cf);
+  if (_scanned_nmethod != NULL && cf != NULL) {
+    // Safepoints can occur when the sweeper is scanning an nmethod so
+    // process it here to make sure it isn't unloaded in the middle of
+    // a scan.
+    cf->do_code_blob(_scanned_nmethod);
+  }
+}
 
 // ======= Threads ========
 
--- a/src/share/vm/runtime/thread.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/thread.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -439,7 +439,7 @@
   // GC support
   // Apply "f->do_oop" to all root oops in "this".
   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  void oops_do(OopClosure* f, CodeBlobClosure* cf);
+  virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);
 
   // Handles the parallel case for the method below.
 private:
@@ -1384,6 +1384,12 @@
   void trace_stack_from(vframe* start_vf)        PRODUCT_RETURN;
   void trace_frames()                            PRODUCT_RETURN;
 
+  // Print an annotated view of the stack frames
+  void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
+  void validate_frame_layout() {
+    print_frame_layout(0, true);
+  }
+
   // Returns the number of stack frames on the stack
   int depth() const;
 
@@ -1697,6 +1703,8 @@
   bool          _is_compiling;
   BufferBlob*   _buffer_blob;
 
+  nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
+
  public:
 
   static CompilerThread* current();
@@ -1727,6 +1735,11 @@
     _log = log;
   }
 
+  // GC support
+  // Apply "f->do_oop" to all root oops in "this".
+  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
+  void oops_do(OopClosure* f, CodeBlobClosure* cf);
+
 #ifndef PRODUCT
 private:
   IdealGraphPrinter *_ideal_graph_printer;
@@ -1738,6 +1751,12 @@
   // Get/set the thread's current task
   CompileTask*  task()                           { return _task; }
   void          set_task(CompileTask* task)      { _task = task; }
+
+  // Track the nmethod currently being scanned by the sweeper
+  void          set_scanned_nmethod(nmethod* nm) {
+    assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
+    _scanned_nmethod = nm;
+  }
 };
 
 inline CompilerThread* CompilerThread::current() {
--- a/src/share/vm/runtime/vframeArray.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/vframeArray.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -164,7 +164,8 @@
 
 int unpack_counter = 0;
 
-void vframeArrayElement::unpack_on_stack(int callee_parameters,
+void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
+                                         int callee_parameters,
                                          int callee_locals,
                                          frame* caller,
                                          bool is_top_frame,
@@ -280,6 +281,7 @@
                                  temps + callee_parameters,
                                  popframe_preserved_args_size_in_words,
                                  locks,
+                                 caller_actual_parameters,
                                  callee_parameters,
                                  callee_locals,
                                  caller,
@@ -460,7 +462,8 @@
 
 }
 
-int vframeArrayElement::on_stack_size(int callee_parameters,
+int vframeArrayElement::on_stack_size(int caller_actual_parameters,
+                                      int callee_parameters,
                                       int callee_locals,
                                       bool is_top_frame,
                                       int popframe_extra_stack_expression_els) const {
@@ -471,6 +474,7 @@
                                       temps + callee_parameters,
                                       popframe_extra_stack_expression_els,
                                       locks,
+                                      caller_actual_parameters,
                                       callee_parameters,
                                       callee_locals,
                                       is_top_frame);
@@ -541,7 +545,7 @@
   }
 }
 
-void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode) {
+void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) {
   // stack picture
   //   unpack_frame
   //   [new interpreter frames ] (frames are skeletal but walkable)
@@ -570,7 +574,8 @@
   for (index = frames() - 1; index >= 0 ; index--) {
     int callee_parameters = index == 0 ? 0 : element(index-1)->method()->size_of_parameters();
     int callee_locals     = index == 0 ? 0 : element(index-1)->method()->max_locals();
-    element(index)->unpack_on_stack(callee_parameters,
+    element(index)->unpack_on_stack(caller_actual_parameters,
+                                    callee_parameters,
                                     callee_locals,
                                     &caller_frame,
                                     index == 0,
@@ -579,6 +584,7 @@
       Deoptimization::unwind_callee_save_values(element(index)->iframe(), this);
     }
     caller_frame = *element(index)->iframe();
+    caller_actual_parameters = callee_parameters;
   }
 
 
--- a/src/share/vm/runtime/vframeArray.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/vframeArray.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -83,13 +83,15 @@
 
   // Returns the on stack word size for this frame
   // callee_parameters is the number of callee locals residing inside this frame
-  int on_stack_size(int callee_parameters,
+  int on_stack_size(int caller_actual_parameters,
+                    int callee_parameters,
                     int callee_locals,
                     bool is_top_frame,
                     int popframe_extra_stack_expression_els) const;
 
   // Unpacks the element to skeletal interpreter frame
-  void unpack_on_stack(int callee_parameters,
+  void unpack_on_stack(int caller_actual_parameters,
+                       int callee_parameters,
                        int callee_locals,
                        frame* caller,
                        bool is_top_frame,
@@ -190,7 +192,7 @@
   int frame_size() const { return _frame_size; }
 
   // Unpack the array on the stack passed in stack interval
-  void unpack_to_stack(frame &unpack_frame, int exec_mode);
+  void unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters);
 
   // Deallocates monitor chunks allocated during deoptimization.
   // This should be called when the array is not used anymore.
--- a/src/share/vm/runtime/vmStructs.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/vmStructs.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -783,6 +783,7 @@
   nonstatic_field(nmethod,             _osr_link,                                     nmethod*)                              \
   nonstatic_field(nmethod,             _scavenge_root_link,                           nmethod*)                              \
   nonstatic_field(nmethod,             _scavenge_root_state,                          jbyte)                                 \
+  nonstatic_field(nmethod,             _state,                                        unsigned char)                         \
   nonstatic_field(nmethod,             _exception_offset,                             int)                                   \
   nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
   nonstatic_field(nmethod,             _orig_pc_offset,                               int)                                   \
@@ -800,6 +801,8 @@
   nonstatic_field(nmethod,             _osr_entry_point,                              address)                               \
   nonstatic_field(nmethod,             _lock_count,                                   jint)                                  \
   nonstatic_field(nmethod,             _stack_traversal_mark,                         long)                                  \
+  nonstatic_field(nmethod,             _compile_id,                                   int)                                   \
+  nonstatic_field(nmethod,             _marked_for_deoptimization,                    bool)                                  \
                                                                                                                                      \
   /********************************/                                                                                                 \
   /* JavaCalls (NOTE: incomplete) */                                                                                                 \
@@ -1310,11 +1313,13 @@
                                                                           \
   declare_toplevel_type(CodeBlob)                                         \
   declare_type(BufferBlob,            CodeBlob)                           \
-  declare_type(nmethod,       CodeBlob)                           \
+  declare_type(AdapterBlob,           BufferBlob)                         \
+  declare_type(nmethod,               CodeBlob)                           \
   declare_type(RuntimeStub,           CodeBlob)                           \
   declare_type(SingletonBlob,         CodeBlob)                           \
   declare_type(SafepointBlob,         SingletonBlob)                      \
   declare_type(DeoptimizationBlob,    SingletonBlob)                      \
+  declare_type(RicochetBlob,          SingletonBlob)                      \
   declare_c2_type(ExceptionBlob,      SingletonBlob)                      \
   declare_c2_type(UncommonTrapBlob,   CodeBlob)                           \
                                                                           \
--- a/src/share/vm/runtime/vmThread.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -291,7 +291,9 @@
     // Among other things, this ensures that Eden top is correct.
     Universe::heap()->prepare_for_verify();
     os::check_heap();
-    Universe::verify(true, true); // Silent verification to not polute normal output
+    // Silent verification so as not to pollute normal output,
+    // unless we really asked for it.
+    Universe::verify(true, !(PrintGCDetails || Verbose));
   }
 
   CompileBroker::set_should_block();
--- a/src/share/vm/services/g1MemoryPool.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/g1MemoryPool.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,10 +34,10 @@
                                      size_t init_size,
                                      bool support_usage_threshold) :
   _g1h(g1h), CollectedMemoryPool(name,
-                                 MemoryPool::Heap,
-                                 init_size,
-                                 undefined_max(),
-                                 support_usage_threshold) {
+                                   MemoryPool::Heap,
+                                   init_size,
+                                   undefined_max(),
+                                   support_usage_threshold) {
   assert(UseG1GC, "sanity");
 }
 
@@ -48,44 +48,27 @@
 
 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
-  size_t young_list_length = g1h->young_list()->length();
-  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
-  size_t survivor_used = survivor_space_used(g1h);
-  eden_used = subtract_up_to_zero(eden_used, survivor_used);
-  return eden_used;
+  return g1h->g1mm()->eden_space_used();
 }
 
 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
-  return MAX2(survivor_space_used(g1h), (size_t) HeapRegion::GrainBytes);
+  return g1h->g1mm()->survivor_space_committed();
 }
 
 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
-  size_t survivor_num = g1h->g1_policy()->recorded_survivor_regions();
-  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
-  return survivor_used;
+  return g1h->g1mm()->survivor_space_used();
 }
 
 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
-  size_t committed = overall_committed(g1h);
-  size_t eden_committed = eden_space_committed(g1h);
-  size_t survivor_committed = survivor_space_committed(g1h);
-  committed = subtract_up_to_zero(committed, eden_committed);
-  committed = subtract_up_to_zero(committed, survivor_committed);
-  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
-  return committed;
+  return g1h->g1mm()->old_space_committed();
 }
 
 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
-  size_t used = overall_used(g1h);
-  size_t eden_used = eden_space_used(g1h);
-  size_t survivor_used = survivor_space_used(g1h);
-  used = subtract_up_to_zero(used, eden_used);
-  used = subtract_up_to_zero(used, survivor_used);
-  return used;
+  return g1h->g1mm()->old_space_used();
 }
 
 G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
--- a/src/share/vm/services/g1MemoryPool.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/g1MemoryPool.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,68 +46,9 @@
 // get, as this does affect the performance and behavior of G1. Which
 // is why we introduce the three memory pools implemented here.
 //
-// The above approach inroduces a couple of challenging issues in the
-// implementation of the three memory pools:
-//
-// 1) The used space calculation for a pool is not necessarily
-// independent of the others. We can easily get from G1 the overall
-// used space in the entire heap, the number of regions in the young
-// generation (includes both eden and survivors), and the number of
-// survivor regions. So, from that we calculate:
-//
-//  survivor_used = survivor_num * region_size
-//  eden_used     = young_region_num * region_size - survivor_used
-//  old_gen_used  = overall_used - eden_used - survivor_used
-//
-// Note that survivor_used and eden_used are upper bounds. To get the
-// actual value we would have to iterate over the regions and add up
-// ->used(). But that'd be expensive. So, we'll accept some lack of
-// accuracy for those two. But, we have to be careful when calculating
-// old_gen_used, in case we subtract from overall_used more then the
-// actual number and our result goes negative.
-//
-// 2) Calculating the used space is straightforward, as described
-// above. However, how do we calculate the committed space, given that
-// we allocate space for the eden, survivor, and old gen out of the
-// same pool of regions? One way to do this is to use the used value
-// as also the committed value for the eden and survivor spaces and
-// then calculate the old gen committed space as follows:
-//
-//  old_gen_committed = overall_committed - eden_committed - survivor_committed
+// See comments in g1MonitoringSupport.hpp for additional details
+// on this model.
 //
-// Maybe a better way to do that would be to calculate used for eden
-// and survivor as a sum of ->used() over their regions and then
-// calculate committed as region_num * region_size (i.e., what we use
-// to calculate the used space now). This is something to consider
-// in the future.
-//
-// 3) Another decision that is again not straightforward is what is
-// the max size that each memory pool can grow to. One way to do this
-// would be to use the committed size for the max for the eden and
-// survivors and calculate the old gen max as follows (basically, it's
-// a similar pattern to what we use for the committed space, as
-// described above):
-//
-//  old_gen_max = overall_max - eden_max - survivor_max
-//
-// Unfortunately, the above makes the max of each pool fluctuate over
-// time and, even though this is allowed according to the spec, it
-// broke several assumptions in the M&M framework (there were cases
-// where used would reach a value greater than max). So, for max we
-// use -1, which means "undefined" according to the spec.
-//
-// 4) Now, there is a very subtle issue with all the above. The
-// framework will call get_memory_usage() on the three pools
-// asynchronously. As a result, each call might get a different value
-// for, say, survivor_num which will yield inconsistent values for
-// eden_used, survivor_used, and old_gen_used (as survivor_num is used
-// in the calculation of all three). This would normally be
-// ok. However, it's possible that this might cause the sum of
-// eden_used, survivor_used, and old_gen_used to go over the max heap
-// size and this seems to sometimes cause JConsole (and maybe other
-// clients) to get confused. There's not a really an easy / clean
-// solution to this problem, due to the asynchrounous nature of the
-// framework.
 
 
 // This class is shared by the three G1 memory pool classes
@@ -116,22 +57,6 @@
 // (see comment above), we put the calculations in this class so that
 // we can easily share them among the subclasses.
 class G1MemoryPoolSuper : public CollectedMemoryPool {
-private:
-  // It returns x - y if x > y, 0 otherwise.
-  // As described in the comment above, some of the inputs to the
-  // calculations we have to do are obtained concurrently and hence
-  // may be inconsistent with each other. So, this provides a
-  // defensive way of performing the subtraction and avoids the value
-  // going negative (which would mean a very large result, given that
-  // the parameter are size_t).
-  static size_t subtract_up_to_zero(size_t x, size_t y) {
-    if (x > y) {
-      return x - y;
-    } else {
-      return 0;
-    }
-  }
-
 protected:
   G1CollectedHeap* _g1h;
 
@@ -148,13 +73,6 @@
     return (size_t) -1;
   }
 
-  static size_t overall_committed(G1CollectedHeap* g1h) {
-    return g1h->capacity();
-  }
-  static size_t overall_used(G1CollectedHeap* g1h) {
-    return g1h->used_unlocked();
-  }
-
   static size_t eden_space_committed(G1CollectedHeap* g1h);
   static size_t eden_space_used(G1CollectedHeap* g1h);
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/gcNotifier.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/java.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "services/gcNotifier.hpp"
+#include "services/management.hpp"
+#include "services/memoryService.hpp"
+#include "memoryManager.hpp"
+#include "memory/oopFactory.hpp"
+
+GCNotificationRequest *GCNotifier::first_request = NULL;
+GCNotificationRequest *GCNotifier::last_request = NULL;
+
+void GCNotifier::pushNotification(GCMemoryManager *mgr, const char *action, const char *cause) {
+  // Make a copy of the last GC statistics
+  // GC may occur between now and the creation of the notification
+  int num_pools = MemoryService::num_memory_pools();
+  GCStatInfo* stat = new GCStatInfo(num_pools);
+  mgr->get_last_gc_stat(stat);
+  GCNotificationRequest *request = new GCNotificationRequest(os::javaTimeMillis(),mgr,action,cause,stat);
+  addRequest(request);
+ }
+
+void GCNotifier::addRequest(GCNotificationRequest *request) {
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  if(first_request == NULL) {
+    first_request = request;
+  } else {
+    last_request->next = request;
+  }
+  last_request = request;
+  Service_lock->notify_all();
+}
+
+GCNotificationRequest *GCNotifier::getRequest() {
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  GCNotificationRequest *request = first_request;
+  if(first_request != NULL) {
+    first_request = first_request->next;
+  }
+  return request;
+}
+
+bool GCNotifier::has_event() {
+  return first_request != NULL;
+}
+
+static Handle getGcInfoBuilder(GCMemoryManager *gcManager,TRAPS) {
+
+  klassOop k = Management::sun_management_GarbageCollectorImpl_klass(CHECK_NH);
+  instanceKlassHandle gcMBeanKlass (THREAD, k);
+
+  instanceOop i = gcManager->get_memory_manager_instance(THREAD);
+  instanceHandle ih(THREAD, i);
+
+  JavaValue result(T_OBJECT);
+  JavaCallArguments args(ih);
+
+  JavaCalls::call_virtual(&result,
+                          gcMBeanKlass,
+                          vmSymbols::getGcInfoBuilder_name(),
+                          vmSymbols::getGcInfoBuilder_signature(),
+                          &args,
+                          CHECK_NH);
+  return Handle(THREAD,(oop)result.get_jobject());
+
+}
+
+static Handle createGcInfo(GCMemoryManager *gcManager, GCStatInfo *gcStatInfo,TRAPS) {
+
+  // Fill the arrays of MemoryUsage objects with before and after GC
+  // per pool memory usage
+
+  klassOop muKlass = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);
+  objArrayOop bu = oopFactory::new_objArray(muKlass, MemoryService::num_memory_pools(), CHECK_NH);
+  objArrayHandle usage_before_gc_ah(THREAD, bu);
+  objArrayOop au = oopFactory::new_objArray(muKlass,MemoryService::num_memory_pools(), CHECK_NH);
+  objArrayHandle usage_after_gc_ah(THREAD, au);
+
+  for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
+    Handle before_usage = MemoryService::create_MemoryUsage_obj(gcStatInfo->before_gc_usage_for_pool(i), CHECK_NH);
+    Handle after_usage;
+
+    MemoryUsage u = gcStatInfo->after_gc_usage_for_pool(i);
+    if (u.max_size() == 0 && u.used() > 0) {
+      // If max size == 0, this pool is a survivor space.
+      // Set max size = -1 since the pools will be swapped after GC.
+      MemoryUsage usage(u.init_size(), u.used(), u.committed(), (size_t)-1);
+      after_usage = MemoryService::create_MemoryUsage_obj(usage, CHECK_NH);
+    } else {
+      after_usage = MemoryService::create_MemoryUsage_obj(u, CHECK_NH);
+    }
+    usage_before_gc_ah->obj_at_put(i, before_usage());
+    usage_after_gc_ah->obj_at_put(i, after_usage());
+  }
+
+  // Current implementation only has 1 attribute (number of GC threads)
+  // The type is 'I'
+  objArrayOop extra_args_array = oopFactory::new_objArray(SystemDictionary::Integer_klass(), 1, CHECK_NH);
+  objArrayHandle extra_array (THREAD, extra_args_array);
+  klassOop itKlass= SystemDictionary::Integer_klass();
+  instanceKlassHandle intK(THREAD, itKlass);
+
+  instanceHandle extra_arg_val = intK->allocate_instance_handle(CHECK_NH);
+
+  {
+    JavaValue res(T_VOID);
+    JavaCallArguments argsInt;
+    argsInt.push_oop(extra_arg_val);
+    argsInt.push_int(gcManager->num_gc_threads());
+
+    JavaCalls::call_special(&res,
+                            intK,
+                            vmSymbols::object_initializer_name(),
+                            vmSymbols::int_void_signature(),
+                            &argsInt,
+                            CHECK_NH);
+  }
+  extra_array->obj_at_put(0,extra_arg_val());
+
+  klassOop gcInfoklass = Management::com_sun_management_GcInfo_klass(CHECK_NH);
+  instanceKlassHandle ik (THREAD,gcInfoklass);
+
+  Handle gcInfo_instance = ik->allocate_instance_handle(CHECK_NH);
+
+  JavaValue constructor_result(T_VOID);
+  JavaCallArguments constructor_args(16);
+  constructor_args.push_oop(gcInfo_instance);
+  constructor_args.push_oop(getGcInfoBuilder(gcManager,THREAD));
+  constructor_args.push_long(gcStatInfo->gc_index());
+  constructor_args.push_long(gcStatInfo->start_time());
+  constructor_args.push_long(gcStatInfo->end_time());
+  constructor_args.push_oop(usage_before_gc_ah);
+  constructor_args.push_oop(usage_after_gc_ah);
+  constructor_args.push_oop(extra_array);
+
+  JavaCalls::call_special(&constructor_result,
+                          ik,
+                          vmSymbols::object_initializer_name(),
+                          vmSymbols::com_sun_management_GcInfo_constructor_signature(),
+                          &constructor_args,
+                          CHECK_NH);
+
+  return Handle(gcInfo_instance());
+}
+
+void GCNotifier::sendNotification(TRAPS) {
+  ResourceMark rm(THREAD);
+  GCNotificationRequest *request = getRequest();
+  if(request != NULL) {
+    Handle objGcInfo = createGcInfo(request->gcManager,request->gcStatInfo,THREAD);
+
+    Handle objName = java_lang_String::create_from_platform_dependent_str(request->gcManager->name(), CHECK);
+    Handle objAction = java_lang_String::create_from_platform_dependent_str(request->gcAction, CHECK);
+    Handle objCause = java_lang_String::create_from_platform_dependent_str(request->gcCause, CHECK);
+
+    klassOop k = Management::sun_management_GarbageCollectorImpl_klass(CHECK);
+    instanceKlassHandle gc_mbean_klass (THREAD, k);
+
+    instanceOop gc_mbean = request->gcManager->get_memory_manager_instance(THREAD);
+    instanceHandle gc_mbean_h(THREAD, gc_mbean);
+    if (!gc_mbean_h->is_a(k)) {
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+                "This GCMemoryManager doesn't have a GarbageCollectorMXBean");
+    }
+
+    JavaValue result(T_VOID);
+    JavaCallArguments args(gc_mbean_h);
+    args.push_long(request->timestamp);
+    args.push_oop(objName);
+    args.push_oop(objAction);
+    args.push_oop(objCause);
+    args.push_oop(objGcInfo);
+
+    JavaCalls::call_virtual(&result,
+                            gc_mbean_klass,
+                            vmSymbols::createGCNotification_name(),
+                            vmSymbols::createGCNotification_signature(),
+                            &args,
+                            CHECK);
+    if (HAS_PENDING_EXCEPTION) {
+      CLEAR_PENDING_EXCEPTION;
+    }
+
+    delete request;
+  }
+}
+
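The GcInfo object assembled by createGcInfo() above (collection id, start/end times, and per-pool MemoryUsage before and after the collection) is what a Java client eventually sees as com.sun.management.GcInfo. A minimal consumer sketch, assuming JDK 7's com.sun.management API on a HotSpot VM; the class name LastGcInfoDump is made up for illustration, and the cast to com.sun.management.GarbageCollectorMXBean is a HotSpot-specific assumption:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.util.Map;

import com.sun.management.GarbageCollectorMXBean;
import com.sun.management.GcInfo;

public class LastGcInfoDump {
    public static void main(String[] args) {
        System.gc(); // make it likely that at least one collection has been recorded
        for (java.lang.management.GarbageCollectorMXBean b :
                 ManagementFactory.getGarbageCollectorMXBeans()) {
            if (!(b instanceof GarbageCollectorMXBean)) continue; // HotSpot extension only
            GcInfo info = ((GarbageCollectorMXBean) b).getLastGcInfo();
            if (info == null) continue; // this manager has not collected yet
            System.out.println(b.getName() + " #" + info.getId()
                               + " took " + info.getDuration() + " ms");
            // Per-pool usage before and after the collection, mirroring the
            // usage_before_gc / usage_after_gc arrays built in createGcInfo().
            for (Map.Entry<String, MemoryUsage> e : info.getMemoryUsageAfterGc().entrySet()) {
                MemoryUsage before = info.getMemoryUsageBeforeGc().get(e.getKey());
                System.out.println("  " + e.getKey() + ": " + before.getUsed()
                                   + " -> " + e.getValue().getUsed() + " bytes used");
            }
        }
    }
}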
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/gcNotifier.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_GCNOTIFIER_HPP
+#define SHARE_VM_SERVICES_GCNOTIFIER_HPP
+
+#include "memory/allocation.hpp"
+#include "services/memoryPool.hpp"
+#include "services/memoryService.hpp"
+#include "services/memoryManager.hpp"
+
+class GCNotificationRequest : public CHeapObj {
+  friend class GCNotifier;
+  GCNotificationRequest *next;
+  jlong timestamp;
+  GCMemoryManager *gcManager;
+  const char *gcAction;
+  const char *gcCause;
+  GCStatInfo *gcStatInfo;
+public:
+  GCNotificationRequest(jlong ts, GCMemoryManager *manager, const char *action, const char *cause, GCStatInfo *info) {
+    next = NULL;
+    timestamp = ts;
+    gcManager = manager;
+    gcAction = action;
+    gcCause = cause;
+    gcStatInfo = info;
+  }
+
+  ~GCNotificationRequest() {
+    delete gcStatInfo;
+  }
+};
+
+class GCNotifier : public AllStatic {
+  friend class ServiceThread;
+private:
+  static GCNotificationRequest *first_request;
+  static GCNotificationRequest *last_request;
+  static void addRequest(GCNotificationRequest *request);
+  static GCNotificationRequest *getRequest();
+public:
+  static void pushNotification(GCMemoryManager *manager, const char *action, const char *cause);
+  static bool has_event();
+  static void sendNotification(TRAPS);
+};
+
+#endif // SHARE_VM_SERVICES_GCNOTIFIER_HPP
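The queue managed by GCNotifier (pushNotification from GCMemoryManager::gc_end, drained later via sendNotification) surfaces on the Java side as JMX notifications on each GarbageCollectorMXBean. A hedged registration sketch, again assuming JDK 7's com.sun.management classes; the assumption that each HotSpot GC bean implements NotificationEmitter, and that adding a listener ends up toggling the new SetGCNotificationEnabled JMM entry shown further below, concerns JDK-side plumbing outside this changeset. GcNotificationListener is a made-up class name:

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

import javax.management.Notification;
import javax.management.NotificationEmitter;
import javax.management.NotificationListener;
import javax.management.openmbean.CompositeData;

import com.sun.management.GarbageCollectionNotificationInfo;

public class GcNotificationListener {
    public static void main(String[] args) throws Exception {
        NotificationListener listener = new NotificationListener() {
            public void handleNotification(Notification n, Object handback) {
                if (!GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION
                        .equals(n.getType())) {
                    return;
                }
                GarbageCollectionNotificationInfo info =
                    GarbageCollectionNotificationInfo.from((CompositeData) n.getUserData());
                // gcAction is the "end of major GC" / "end of minor GC" string pushed by
                // GCMemoryManager::gc_end; gcCause comes from GCCause::to_string().
                System.out.println(info.getGcName() + ": " + info.getGcAction()
                                   + " (" + info.getGcCause() + "), "
                                   + info.getGcInfo().getDuration() + " ms");
            }
        };
        for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
            // Assumes the HotSpot GC beans are NotificationEmitters once this change is in.
            ((NotificationEmitter) gc).addNotificationListener(listener, null, null);
        }
        System.gc();        // trigger at least one collection
        Thread.sleep(1000); // give the delivery thread time to run
    }
}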
--- a/src/share/vm/services/heapDumper.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/heapDumper.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1649,6 +1649,9 @@
         if (fr->is_entry_frame()) {
           last_entry_frame = fr;
         }
+        if (fr->is_ricochet_frame()) {
+          fr->oops_ricochet_do(&blk, vf->register_map());
+        }
       }
       vf = vf->sender();
     }
--- a/src/share/vm/services/jmm.h	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/jmm.h	Wed Jul 27 17:32:44 2011 -0700
@@ -48,7 +48,7 @@
   JMM_VERSION_1_0 = 0x20010000,
   JMM_VERSION_1_1 = 0x20010100, // JDK 6
   JMM_VERSION_1_2 = 0x20010200, // JDK 7
-  JMM_VERSION     = 0x20010200
+  JMM_VERSION     = 0x20010201
 };
 
 typedef struct {
@@ -293,6 +293,9 @@
                                                   jlongArray ids,
                                                   jboolean lockedMonitors,
                                                   jboolean lockedSynchronizers);
+  void         (JNICALL *SetGCNotificationEnabled) (JNIEnv *env,
+                                                    jobject mgr,
+                                                    jboolean enabled);
 } JmmInterface;
 
 #ifdef __cplusplus
--- a/src/share/vm/services/management.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/management.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -42,6 +42,7 @@
 #include "services/classLoadingService.hpp"
 #include "services/heapDumper.hpp"
 #include "services/lowMemoryDetector.hpp"
+#include "services/gcNotifier.hpp"
 #include "services/management.hpp"
 #include "services/memoryManager.hpp"
 #include "services/memoryPool.hpp"
@@ -60,6 +61,8 @@
 klassOop Management::_memoryManagerMXBean_klass = NULL;
 klassOop Management::_garbageCollectorMXBean_klass = NULL;
 klassOop Management::_managementFactory_klass = NULL;
+klassOop Management::_garbageCollectorImpl_klass = NULL;
+klassOop Management::_gcInfo_klass = NULL;
 
 jmmOptionalSupport Management::_optional_support = {0};
 TimeStamp Management::_stamp;
@@ -179,6 +182,8 @@
   f->do_oop((oop*) &_memoryManagerMXBean_klass);
   f->do_oop((oop*) &_garbageCollectorMXBean_klass);
   f->do_oop((oop*) &_managementFactory_klass);
+  f->do_oop((oop*) &_garbageCollectorImpl_klass);
+  f->do_oop((oop*) &_gcInfo_klass);
 }
 
 klassOop Management::java_lang_management_ThreadInfo_klass(TRAPS) {
@@ -230,6 +235,20 @@
   return _managementFactory_klass;
 }
 
+klassOop Management::sun_management_GarbageCollectorImpl_klass(TRAPS) {
+  if (_garbageCollectorImpl_klass == NULL) {
+    _garbageCollectorImpl_klass = load_and_initialize_klass(vmSymbols::sun_management_GarbageCollectorImpl(), CHECK_NULL);
+  }
+  return _garbageCollectorImpl_klass;
+}
+
+klassOop Management::com_sun_management_GcInfo_klass(TRAPS) {
+  if (_gcInfo_klass == NULL) {
+    _gcInfo_klass = load_and_initialize_klass(vmSymbols::com_sun_management_GcInfo(), CHECK_NULL);
+  }
+  return _gcInfo_klass;
+}
+
 static void initialize_ThreadInfo_constructor_arguments(JavaCallArguments* args, ThreadSnapshot* snapshot, TRAPS) {
   Handle snapshot_thread(THREAD, snapshot->threadObj());
 
@@ -2056,6 +2075,13 @@
   }
 JVM_END
 
+JVM_ENTRY(void, jmm_SetGCNotificationEnabled(JNIEnv *env, jobject obj, jboolean enabled))
+  ResourceMark rm(THREAD);
+  // Get the GCMemoryManager
+  GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(obj, CHECK);
+  mgr->set_notification_enabled(enabled?true:false);
+JVM_END
+
 // Dump heap - Returns 0 if succeeds.
 JVM_ENTRY(jint, jmm_DumpHeap0(JNIEnv *env, jstring outputfile, jboolean live))
 #ifndef SERVICES_KERNEL
@@ -2122,7 +2148,8 @@
   jmm_FindDeadlockedThreads,
   jmm_SetVMGlobal,
   NULL,
-  jmm_DumpThreads
+  jmm_DumpThreads,
+  jmm_SetGCNotificationEnabled
 };
 
 void* Management::get_jmm_interface(int version) {
--- a/src/share/vm/services/management.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/management.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -49,6 +49,8 @@
   static klassOop           _memoryManagerMXBean_klass;
   static klassOop           _garbageCollectorMXBean_klass;
   static klassOop           _managementFactory_klass;
+  static klassOop           _garbageCollectorImpl_klass;
+  static klassOop           _gcInfo_klass;
 
   static klassOop load_and_initialize_klass(Symbol* sh, TRAPS);
 
@@ -86,6 +88,8 @@
   static klassOop java_lang_management_GarbageCollectorMXBean_klass(TRAPS);
   static klassOop sun_management_Sensor_klass(TRAPS);
   static klassOop sun_management_ManagementFactory_klass(TRAPS);
+  static klassOop sun_management_GarbageCollectorImpl_klass(TRAPS);
+  static klassOop com_sun_management_GcInfo_klass(TRAPS);
 
   static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, TRAPS);
   static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, objArrayHandle monitors_array, typeArrayHandle depths_array, objArrayHandle synchronizers_array, TRAPS);
--- a/src/share/vm/services/memoryManager.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/memoryManager.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -33,6 +33,7 @@
 #include "services/memoryManager.hpp"
 #include "services/memoryPool.hpp"
 #include "services/memoryService.hpp"
+#include "services/gcNotifier.hpp"
 #include "utilities/dtrace.hpp"
 
 HS_DTRACE_PROBE_DECL8(hotspot, mem__pool__gc__begin, char*, int, char*, int,
@@ -202,6 +203,7 @@
   _last_gc_lock = new Mutex(Mutex::leaf, "_last_gc_lock", true);
   _current_gc_stat = NULL;
   _num_gc_threads = 1;
+  _notification_enabled = false;
 }
 
 GCMemoryManager::~GCMemoryManager() {
@@ -250,7 +252,8 @@
 // to ensure the current gc stat is placed in _last_gc_stat.
 void GCMemoryManager::gc_end(bool recordPostGCUsage,
                              bool recordAccumulatedGCTime,
-                             bool recordGCEndTime, bool countCollection) {
+                             bool recordGCEndTime, bool countCollection,
+                             GCCause::Cause cause) {
   if (recordAccumulatedGCTime) {
     _accumulated_timer.stop();
   }
@@ -283,6 +286,11 @@
       pool->set_last_collection_usage(usage);
       LowMemoryDetector::detect_after_gc_memory(pool);
     }
+    if(is_notification_enabled()) {
+      bool isMajorGC = this == MemoryService::get_major_gc_manager();
+      GCNotifier::pushNotification(this, isMajorGC ? "end of major GC" : "end of minor GC",
+                                   GCCause::to_string(cause));
+    }
   }
   if (countCollection) {
     _num_collections++;
--- a/src/share/vm/services/memoryManager.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/memoryManager.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -166,6 +166,7 @@
   Mutex*       _last_gc_lock;
   GCStatInfo*  _current_gc_stat;
   int          _num_gc_threads;
+  volatile bool _notification_enabled;
 public:
   GCMemoryManager();
   ~GCMemoryManager();
@@ -181,7 +182,7 @@
   void   gc_begin(bool recordGCBeginTime, bool recordPreGCUsage,
                   bool recordAccumulatedGCTime);
   void   gc_end(bool recordPostGCUsage, bool recordAccumulatedGCTime,
-                bool recordGCEndTime, bool countCollection);
+                bool recordGCEndTime, bool countCollection, GCCause::Cause cause);
 
   void        reset_gc_stat()   { _num_collections = 0; _accumulated_timer.reset(); }
 
@@ -189,6 +190,8 @@
   // the collection count. Zero signifies no gc has taken place.
   size_t get_last_gc_stat(GCStatInfo* dest);
 
+  void set_notification_enabled(bool enabled) { _notification_enabled = enabled; }
+  bool is_notification_enabled() { return _notification_enabled; }
   virtual MemoryManager::Name kind() = 0;
 };
 
--- a/src/share/vm/services/memoryService.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/memoryService.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -565,7 +565,8 @@
 
 void MemoryService::gc_end(bool fullGC, bool recordPostGCUsage,
                            bool recordAccumulatedGCTime,
-                           bool recordGCEndTime, bool countCollection) {
+                           bool recordGCEndTime, bool countCollection,
+                           GCCause::Cause cause) {
 
   GCMemoryManager* mgr;
   if (fullGC) {
@@ -577,7 +578,7 @@
 
   // register the GC end statistics and memory usage
   mgr->gc_end(recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime,
-              countCollection);
+              countCollection, cause);
 }
 
 void MemoryService::oops_do(OopClosure* f) {
@@ -633,7 +634,7 @@
 // gc manager (so _fullGC is set to false ) and for other generation kinds
 // doing mark-sweep-compact uses major gc manager (so _fullGC is set
 // to true).
-TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind) {
+TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause) {
   switch (kind) {
     case Generation::DefNew:
 #ifndef SERIALGC
@@ -654,9 +655,10 @@
   }
   // this has to be called in a stop the world pause and represent
   // an entire gc pause, start to finish:
-  initialize(_fullGC, true, true, true, true, true, true, true);
+  initialize(_fullGC, cause, true, true, true, true, true, true, true);
 }
 TraceMemoryManagerStats::TraceMemoryManagerStats(bool fullGC,
+                                                 GCCause::Cause cause,
                                                  bool recordGCBeginTime,
                                                  bool recordPreGCUsage,
                                                  bool recordPeakUsage,
@@ -664,7 +666,7 @@
                                                  bool recordAccumulatedGCTime,
                                                  bool recordGCEndTime,
                                                  bool countCollection) {
-  initialize(fullGC, recordGCBeginTime, recordPreGCUsage, recordPeakUsage,
+  initialize(fullGC, cause, recordGCBeginTime, recordPreGCUsage, recordPeakUsage,
              recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime,
              countCollection);
 }
@@ -672,6 +674,7 @@
 // for a subclass to create then initialize an instance before invoking
 // the MemoryService
 void TraceMemoryManagerStats::initialize(bool fullGC,
+                                         GCCause::Cause cause,
                                          bool recordGCBeginTime,
                                          bool recordPreGCUsage,
                                          bool recordPeakUsage,
@@ -687,6 +690,7 @@
   _recordAccumulatedGCTime = recordAccumulatedGCTime;
   _recordGCEndTime = recordGCEndTime;
   _countCollection = countCollection;
+  _cause = cause;
 
   MemoryService::gc_begin(_fullGC, _recordGCBeginTime, _recordAccumulatedGCTime,
                           _recordPreGCUsage, _recordPeakUsage);
@@ -694,6 +698,6 @@
 
 TraceMemoryManagerStats::~TraceMemoryManagerStats() {
   MemoryService::gc_end(_fullGC, _recordPostGCUsage, _recordAccumulatedGCTime,
-                        _recordGCEndTime, _countCollection);
+                        _recordGCEndTime, _countCollection, _cause);
 }
 
--- a/src/share/vm/services/memoryService.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/services/memoryService.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -29,6 +29,7 @@
 #include "memory/generation.hpp"
 #include "runtime/handles.hpp"
 #include "services/memoryUsage.hpp"
+#include "gc_interface/gcCause.hpp"
 
 // Forward declaration
 class MemoryPool;
@@ -162,7 +163,8 @@
                        bool recordPreGCUsage, bool recordPeakUsage);
   static void gc_end(bool fullGC, bool recordPostGCUsage,
                      bool recordAccumulatedGCTime,
-                     bool recordGCEndTime, bool countCollection);
+                     bool recordGCEndTime, bool countCollection,
+                     GCCause::Cause cause);
 
 
   static void oops_do(OopClosure* f);
@@ -172,6 +174,14 @@
 
   // Create an instance of java/lang/management/MemoryUsage
   static Handle create_MemoryUsage_obj(MemoryUsage usage, TRAPS);
+
+  static const GCMemoryManager* get_minor_gc_manager() {
+      return _minor_gc_manager;
+  }
+
+  static const GCMemoryManager* get_major_gc_manager() {
+      return _major_gc_manager;
+  }
 };
 
 class TraceMemoryManagerStats : public StackObj {
@@ -184,10 +194,11 @@
   bool         _recordAccumulatedGCTime;
   bool         _recordGCEndTime;
   bool         _countCollection;
-
+  GCCause::Cause _cause;
 public:
   TraceMemoryManagerStats() {}
   TraceMemoryManagerStats(bool fullGC,
+                          GCCause::Cause cause,
                           bool recordGCBeginTime = true,
                           bool recordPreGCUsage = true,
                           bool recordPeakUsage = true,
@@ -197,6 +208,7 @@
                           bool countCollection = true);
 
   void initialize(bool fullGC,
+                  GCCause::Cause cause,
                   bool recordGCBeginTime,
                   bool recordPreGCUsage,
                   bool recordPeakUsage,
@@ -205,7 +217,7 @@
                   bool recordGCEndTime,
                   bool countCollection);
 
-  TraceMemoryManagerStats(Generation::Name kind);
+  TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause);
   ~TraceMemoryManagerStats();
 };
 
--- a/src/share/vm/utilities/debug.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/utilities/debug.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -469,6 +469,7 @@
 extern "C" void pp(void* p) {
   Command c("pp");
   FlagSetting fl(PrintVMMessages, true);
+  FlagSetting f2(DisplayVMOutput, true);
   if (Universe::heap()->is_in(p)) {
     oop obj = oop(p);
     obj->print();
@@ -507,6 +508,17 @@
 
 }
 
+extern "C" void pfl() {
+  // print frame layout
+  Command c("pfl");
+  JavaThread* p = JavaThread::active();
+  tty->print(" for thread: ");
+  p->print();
+  tty->cr();
+  if (p->has_last_Java_frame()) {
+    p->print_frame_layout();
+  }
+}
 
 extern "C" void psf() { // print stack frames
   {
--- a/src/share/vm/utilities/elfFile.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/utilities/elfFile.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -29,6 +29,7 @@
 #include <string.h>
 #include <stdio.h>
 #include <limits.h>
+#include <new>
 
 #include "memory/allocation.inline.hpp"
 #include "utilities/decoder.hpp"
@@ -46,7 +47,7 @@
   m_status = Decoder::no_error;
 
   int len = strlen(filepath) + 1;
-  m_filepath = NEW_C_HEAP_ARRAY(char, len);
+  m_filepath = (const char*)os::malloc(len * sizeof(char));
   if (m_filepath != NULL) {
     strcpy((char*)m_filepath, filepath);
     m_file = fopen(filepath, "r");
@@ -74,7 +75,7 @@
   }
 
   if (m_filepath != NULL) {
-    FREE_C_HEAP_ARRAY(char, m_filepath);
+    os::free((void*)m_filepath);
   }
 
   if (m_next != NULL) {
@@ -120,14 +121,14 @@
       }
       // string table
       if (shdr.sh_type == SHT_STRTAB) {
-        ElfStringTable* table = new ElfStringTable(m_file, shdr, index);
+        ElfStringTable* table = new (std::nothrow) ElfStringTable(m_file, shdr, index);
         if (table == NULL) {
           m_status = Decoder::out_of_memory;
           return false;
         }
         add_string_table(table);
       } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) {
-        ElfSymbolTable* table = new ElfSymbolTable(m_file, shdr);
+        ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(m_file, shdr);
         if (table == NULL) {
           m_status = Decoder::out_of_memory;
           return false;
--- a/src/share/vm/utilities/elfStringTable.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/utilities/elfStringTable.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -27,6 +27,7 @@
 #ifndef _WINDOWS
 
 #include "memory/allocation.inline.hpp"
+#include "runtime/os.hpp"
 #include "utilities/elfStringTable.hpp"
 
 // We will try to load whole string table into memory if we can.
@@ -41,14 +42,14 @@
 
   // try to load the string table
   long cur_offset = ftell(file);
-  m_table = (char*)NEW_C_HEAP_ARRAY(char, shdr.sh_size);
+  m_table = (char*)os::malloc(sizeof(char) * shdr.sh_size);
   if (m_table != NULL) {
     // if there is an error, mark the error
     if (fseek(file, shdr.sh_offset, SEEK_SET) ||
       fread((void*)m_table, shdr.sh_size, 1, file) != 1 ||
       fseek(file, cur_offset, SEEK_SET)) {
       m_status = Decoder::file_invalid;
-      FREE_C_HEAP_ARRAY(char, m_table);
+      os::free((void*)m_table);
       m_table = NULL;
     }
   } else {
@@ -58,7 +59,7 @@
 
 ElfStringTable::~ElfStringTable() {
   if (m_table != NULL) {
-    FREE_C_HEAP_ARRAY(char, m_table);
+    os::free((void*)m_table);
   }
 
   if (m_next != NULL) {
--- a/src/share/vm/utilities/exceptions.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/utilities/exceptions.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -209,7 +209,7 @@
 }
 
 
-void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file, int line) {
+void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file, int line, methodHandle method) {
   Handle exception;
   if (!THREAD->has_pending_exception()) {
     klassOop k = SystemDictionary::StackOverflowError_klass();
@@ -217,13 +217,13 @@
     exception = Handle(THREAD, e);  // fill_in_stack trace does gc
     assert(instanceKlass::cast(k)->is_initialized(), "need to increase min_stack_allowed calculation");
     if (StackTraceInThrowable) {
-      java_lang_Throwable::fill_in_stack_trace(exception);
+      java_lang_Throwable::fill_in_stack_trace(exception, method());
     }
   } else {
     // if prior exception, throw that one instead
     exception = Handle(THREAD, THREAD->pending_exception());
   }
-  _throw_oop(THREAD, file, line, exception());
+  _throw(THREAD, file, line, exception);
 }
 
 void Exceptions::fthrow(Thread* thread, const char* file, int line, Symbol* h_name, const char* format, ...) {
--- a/src/share/vm/utilities/exceptions.hpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/utilities/exceptions.hpp	Wed Jul 27 17:32:44 2011 -0700
@@ -144,7 +144,7 @@
                              const char* message,
                              ExceptionMsgToUtf8Mode to_utf8_safe = safe_to_utf8);
 
-  static void throw_stack_overflow_exception(Thread* thread, const char* file, int line);
+  static void throw_stack_overflow_exception(Thread* thread, const char* file, int line, methodHandle method);
 
   // for AbortVMOnException flag
   NOT_PRODUCT(static void debug_check_abort(Handle exception, const char* message = NULL);)
--- a/src/share/vm/utilities/yieldingWorkgroup.cpp	Wed Jul 27 17:24:11 2011 -0700
+++ b/src/share/vm/utilities/yieldingWorkgroup.cpp	Wed Jul 27 17:32:44 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test5091921.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2011 Hewlett-Packard Company. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 5091921
+ * @summary Sign flip issues in loop optimizer
+ *
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test5091921 -XX:MaxInlineSize=1 Test5091921
+ */
+
+public class Test5091921 {
+  private static int result = 0;
+
+
+  /* Test for the bug of transforming indx >= MININT to indx > MININT-1 */
+  public static int test_ge1(int limit) {
+    int indx;
+    int sum = 0;
+    for (indx = 500; indx >= limit; indx -= 2) {
+      sum += 2000 / indx;
+      result = sum;
+    }
+    return sum;
+  }
+
+  /* Test for the bug of transforming indx <= MAXINT to indx < MAXINT+1 */
+  public static int test_le1(int limit) {
+    int indx;
+    int sum = 0;
+    for (indx = -500; indx <= limit; indx += 2)
+    {
+      sum += 3000 / indx;
+      result = sum;
+    }
+    return sum;
+  }
+
+  /* Run with -Xcomp -XX:CompileOnly=wrap1.test1 -XX:MaxInlineSize=1 */
+  /* limit reset to ((limit-init+stride-1)/stride)*stride+init */
+  /* Calculation may overflow */
+  public static volatile int c = 1;
+  public static int test_wrap1(int limit)
+  {
+    int indx;
+    int sum = 0;
+    for (indx = 0xffffffff; indx < limit; indx += 0x20000000)
+    {
+      sum += c;
+    }
+    return sum;
+  }
+
+  /* Test for range check elimination with bit flip issue for
+     scale*i+offset<limit where offset is not 0 */
+  static int[] box5 = {1,2,3,4,5,6,7,8,9};
+  public static int test_rce5(int[] b, int limit)
+  {
+    int indx;
+    int sum = b[1];
+    result = sum;
+    for (indx = 0x80000000; indx < limit; ++indx)
+    {
+      if (indx > 0x80000000)
+      {
+        // this test is not issued in pre-loop but issued in main loop
+        // trick rce into thinking expression is false when indx >= 0
+        // in fact it is false when indx==0x80000001
+        if (indx - 9 < -9)
+        {
+          sum += indx;
+          result = sum;
+          sum ^= b[indx & 7];
+          result = sum;
+        }
+        else
+          break;
+      }
+      else
+      {
+        sum += b[indx & 3];
+        result = sum;
+      }
+    }
+    return sum;
+  }
+
+  /* Test for range check elimination with bit flip issue for
+     scale*i<limit where scale > 1 */
+  static int[] box6 = {1,2,3,4,5,6,7,8,9};
+  public static int test_rce6(int[] b, int limit)
+  {
+    int indx;
+    int sum = b[1];
+    result = sum;
+    for (indx = 0x80000000; indx < limit; ++indx)
+    {
+      if (indx > 0x80000000)
+      {
+        // harmless rce target
+        if (indx < 0)
+        {
+          sum += result;
+          result = sum;
+        }
+        else
+          break;
+        // this test is not issued in pre-loop but issued in main loop
+        // trick rce into thinking expression is false when indx >= 0
+        // in fact it is false when indx==0x80000001
+        // Compilers that transform mulI to shiftI may mask this issue.
+        if (indx * 28 + 1 < 0)
+        {
+          sum += indx;
+          result = sum;
+          sum ^= b[indx & 7];
+          result = sum;
+        }
+        else
+          break;
+      }
+      else
+      {
+        sum += b[indx & 3];
+        result = sum;
+      }
+    }
+    return sum;
+  }
+
+  /* Test for range check elimination with i <= limit */
+  static int[] box7 = {1,2,3,4,5,6,7,8,9,0x7fffffff};
+  public static int test_rce7(int[] b)
+  {
+    int indx;
+    int max = b[9];
+    int sum = b[7];
+    result = sum;
+    for (indx = 0; indx < b.length; ++indx)
+    {
+      if (indx <= max)
+      {
+        sum += (indx ^ 15) + ((result != 0) ? 0 : sum);
+        result = sum;
+      }
+      else
+        throw new RuntimeException();
+    }
+    for (indx = -7; indx < b.length; ++indx)
+    {
+      if (indx <= 9)
+      {
+        sum += (sum ^ 15) + ((result != 0) ? 0 : sum);
+        result = sum;
+      }
+      else
+        throw new RuntimeException();
+    }
+    return sum;
+  }
+
+  /* Test for range check elimination with i >= limit */
+  static int[] box8 = {-1,0,1,2,3,4,5,6,7,8,0x80000000};
+  public static int test_rce8(int[] b)
+  {
+    int indx;
+    int sum = b[5];
+    int min = b[10];
+    result = sum;
+    for (indx = b.length-1; indx >= 0; --indx)
+    {
+      if (indx >= min)
+      {
+        sum += (sum ^ 9) + ((result != 0) ? 0 :sum);
+        result = sum;
+      }
+      else
+        throw new RuntimeException();
+    }
+    return sum;
+  }
+
+  public static void main(String[] args)
+  {
+    result=1;
+    int r = 0;
+    try {
+      r = test_ge1(0x80000000);
+      System.out.println(result);
+      System.out.println("test_ge1 FAILED");
+      System.exit(1);
+    }
+    catch (ArithmeticException e1) {
+      System.out.println("test_ge1: Expected exception caught");
+      if (result != 5986) {
+        System.out.println(result);
+        System.out.println("test_ge1 FAILED");
+        System.exit(97);
+      }
+    }
+    System.out.println("test_ge1 WORKED");
+
+    result=0;
+    try
+    {
+      r = test_le1(0x7fffffff);
+      System.out.println(result);
+      System.out.println("test_le1 FAILED");
+      System.exit(1);
+    }
+    catch (ArithmeticException e1)
+    {
+      System.out.println("test_le1: Expected exception caught");
+      if (result != -9039)
+      {
+        System.out.println(result);
+        System.out.println("test_le1 FAILED");
+        System.exit(97);
+      }
+    }
+    System.out.println("test_le1 WORKED");
+
+    result=0;
+    r = test_wrap1(0x7fffffff);
+    if (r != 4)
+    {
+      System.out.println(result);
+      System.out.println("test_wrap1 FAILED");
+      System.exit(97);
+    }
+    else
+    {
+      System.out.println("test_wrap1 WORKED");
+    }
+
+    result=0;
+    r = test_rce5(box5,0x80000100);
+    if (result != 3)
+    {
+      System.out.println(result);
+      System.out.println("test_rce5 FAILED");
+      System.exit(97);
+    }
+    else
+    {
+      System.out.println("test_rce5 WORKED");
+    }
+
+    result=0;
+    r = test_rce6(box6,0x80000100);
+    if (result != 6)
+    {
+      System.out.println(result);
+      System.out.println("test_rce6 FAILED");
+      System.exit(97);
+    }
+    else
+    {
+      System.out.println("test_rce6 WORKED");
+    }
+
+    result=0;
+    r = test_rce7(box7);
+    if (result != 14680079)
+    {
+      System.out.println(result);
+      System.out.println("test_rce7 FAILED");
+      System.exit(97);
+    }
+    else
+    {
+      System.out.println("test_rce7 WORKED");
+    }
+
+    result=0;
+    r = test_rce8(box8);
+    if (result != 16393)
+    {
+      System.out.println(result);
+      System.out.println("test_rce8 FAILED");
+      System.exit(97);
+    }
+    else
+    {
+      System.out.println("test_rce8 WORKED");
+    }
+  }
+}
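For test_wrap1 above, the expected result of 4 follows from plain wrap-around int arithmetic: starting at 0xffffffff (-1) and stepping by 0x20000000 toward 0x7fffffff, the loop body runs exactly four times before indx lands on Integer.MAX_VALUE. A standalone sketch of just that arithmetic (Wrap1Iterations is a hypothetical helper, not part of the test suite):

public class Wrap1Iterations {
    public static void main(String[] args) {
        int indx = 0xffffffff;   // -1 as a signed 32-bit int
        int limit = 0x7fffffff;  // Integer.MAX_VALUE
        int iterations = 0;
        while (indx < limit) {
            iterations++;
            indx += 0x20000000;  // -1 -> 0x1fffffff -> 0x3fffffff -> 0x5fffffff -> 0x7fffffff
        }
        System.out.println(iterations); // prints 4, matching the r == 4 check in main()
    }
}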
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6186134.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6186134
+ * @summary Server virtual machine produces/executes incorrect code.
+ *
+ * @run main Test6186134 100000
+ */
+import java.util.ArrayList;
+
+public class Test6186134 {
+
+  int num = 0;
+
+  public Test6186134(int n) {
+    num = n;
+  }
+
+  public boolean more() {
+    return num-- > 0;
+  }
+
+  public ArrayList test1() {
+    ArrayList res = new ArrayList();
+    int maxResults = Integer.MAX_VALUE;
+    int n = 0;
+    boolean more = more();
+    while ((n++ < maxResults) && more) {
+      res.add(new Object());
+      more = more();
+    }
+    return res;
+  }
+
+  public static void main(String[] pars) {
+    int n = Integer.parseInt(pars[0]);
+    for (int i=0; i<n; i++) {
+      Test6186134 t = new Test6186134(10);
+      int size = t.test1().size();
+      if (size != 10) {
+        System.out.println("wrong size: " + size +", should be 10");
+        System.exit(97);
+      }
+    }
+    System.out.println("Passed");
+  }
+}
+
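As a side note for readers of the new regression tests: Test6186134 exercises a counted loop whose bound is Integer.MAX_VALUE combined with a short-circuited second condition. A minimal standalone sketch of the same loop shape (class and variable names here are illustrative, not part of the changeset):

    public class MaxBoundLoopSketch {
        public static void main(String[] args) {
            int n = 0;
            int budget = 10;   // stands in for the more() calls in the test
            int count = 0;
            // The bound n < Integer.MAX_VALUE is effectively always true here,
            // so termination depends entirely on the second condition.
            while ((n++ < Integer.MAX_VALUE) && (budget-- > 0)) {
                count++;
            }
            System.out.println(count);   // expected: 10
        }
    }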
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6196102.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6196102
+ * @summary Integer seems to be greater than Integer.MAX_VALUE
+ *
+ * @run main Test6196102
+ */
+
+public class Test6196102 {
+    static public void main(String[] args) {
+        int i1 = 0;
+        int i2 = Integer.MAX_VALUE;
+
+        while (i1 >= 0) {
+            i1++;
+            if (i1 > i2) {
+                System.out.println("E R R O R: " + i1);
+                System.exit(97);
+            }
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6357214.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6357214
+ * @summary Hotspot server compiler gets integer comparison wrong
+ *
+ * @run main/othervm/timeout=60 -DshowAll=ffo -DeventID=444 Test6357214
+ */
+
+// Before the fix, the test hangs after a few iterations, so it fails if it times out.
+class MyResult {
+        public boolean next() {
+                return true;
+        }
+
+        public String getString(String in) {
+                if (in.equals("id"))
+                        return "idFoo";
+                if (in.equals("contentKey"))
+                        return "ckFoo";
+                return "Foo";
+        }
+
+        public int getInt(String in) {
+                if (in.equals("processingComplete"))
+                        return 0;
+                return 1;
+        }
+
+        public byte[] getBytes(String in) {
+                byte[] arr = null;
+                if (in.equals("content")) {
+                        arr = new byte[65536];
+                        byte j = 32;
+                        for (int i=0; i<65536; i++) {
+                                arr[i] = j;
+                                if (++j == 127)
+                                        j=32;
+                        }
+                }
+                return arr;
+        }
+}
+
+public class Test6357214 {
+        public static volatile boolean bollocks = true;
+    public String create(String context) throws Exception {
+
+        //
+        // Extract HTTP parameters
+        //
+
+        boolean showAll = System.getProperty("showAll") != null;
+          String eventID = System.getProperty("eventID");
+          String eventContentKey = System.getProperty("cKey");
+        //
+        // Build ContentStaging query based on eventID or eventContentKey
+        //
+
+        String sql = "select id, processingComplete, contentKey, content "
+                   + "from   ContentStaging cs, ContentStagingKey csk "
+                   + "where  cs.eventContentKey = csk.eventContentKey ";
+
+        if (eventID != null) {
+            sql += "and id = " + eventID;
+        }
+        else if (eventContentKey != null) {
+            sql += "and cs.eventContentKey = '"
+                +  eventContentKey
+                +  "' having id = max(id)";
+        }
+        else {
+            throw new Exception("Need eventID or eventContentKey");
+        }
+
+        //
+        // This factory builds a static panel; there is no JSP
+        //
+
+        StringBuffer html = new StringBuffer();
+
+        try {
+
+                MyResult result = new MyResult();
+            if (result.next()) {
+
+                eventID = result.getString("id");
+                int processingComplete = result.getInt("processingComplete");
+                String contentKey = result.getString("contentKey");
+                byte[] bytes = result.getBytes("content");
+
+                //
+                // Print content status and associated controls
+                //
+
+                html.append("<br/><font class=\"small\">");
+                html.append("Status: ");
+                switch (processingComplete) {
+                    case  0 :
+                    case  1 : html.append("PENDING"); break;
+                    case  2 : html.append(contentKey); break;
+                    case  3 : html.append(eventID); break;
+                    default : html.append("UNKNOWN");
+                }
+                html.append("</font><br/>");
+
+                //
+                // Print at most 20Kb of content unless "showAll" is set
+                //
+
+                int limit = showAll ? Integer.MAX_VALUE : 1024 * 20;
+                System.out.println(limit);
+                html.append("<pre>");
+                for (int i = 0; bytes != null && i < bytes.length; i++) {
+                    char c = (char) bytes[i];
+                    switch (c) {
+                        case '<' : html.append("&lt;");  break;
+                        case '>' : html.append("&gt;");  break;
+                        case '&' : html.append("&amp;"); break;
+                        default  : html.append(c);
+                    }
+
+                    if (i > limit) {
+                        while (bollocks);
+                        // System.out.println("i is " + i);
+                        // System.out.println("limit is " + limit);
+                        html.append("...\n</pre>");
+                        html.append(eventID);
+                        html.append("<pre>");
+                        break;
+                    }
+                }
+                html.append("</pre>");
+            }
+        }
+        catch (Exception exception) {
+            throw exception;
+        }
+        finally {
+            html.append("Oof!!");
+        }
+        String ret = html.toString();
+        System.out.println("Returning string length = "+ ret.length());
+        return ret;
+    }
+
+    public static void main(String[] args) throws Exception {
+        int length = 0;
+
+        for (int i = 0; i < 100; i++) {
+            length = new Test6357214().create("boo").length();
+            System.out.println(length);
+        }
+    }
+}
+
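A small aside on what Test6357214 relies on: once limit is Integer.MAX_VALUE, the comparison (i > limit) can never be true for an int i, so the busy-wait it guards must be unreachable. A minimal sketch of that invariant (names are illustrative only, not part of the changeset):

    public class NeverGreaterSketch {
        public static void main(String[] args) {
            int limit = Integer.MAX_VALUE;
            int[] samples = { 0, 1 << 30, Integer.MAX_VALUE - 1, Integer.MAX_VALUE };
            for (int i : samples) {
                // No int value is strictly greater than Integer.MAX_VALUE.
                System.out.println(i + " > limit ? " + (i > limit));   // always false
            }
        }
    }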
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6559156.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6559156
+ * @summary Server compiler generates bad code for "<= Integer.MAX_VALUE" expression
+ *
+ * @run main Test6559156
+ */
+
+public class Test6559156 {
+
+    static final int N_TESTS = 1000000;
+
+    public static void main(String[] args) throws Exception {
+
+        /*
+         * If MAX_VALUE is changed to MAX_VALUE - 1 below, the test passes
+         * because (apparently) bad code is only generated when comparing
+         * <= MAX_VALUE in the doTest method.
+         */
+        Test6559156 test = new Test6559156();
+        for (int i = 0; i < N_TESTS; i += 1) {
+            test.doTest1(10, Integer.MAX_VALUE, i);
+            test.doTest2(10, Integer.MAX_VALUE, i);
+        }
+        System.out.println("No failure");
+    }
+
+    void doTest1(int expected, int max, int i) {
+        int counted;
+        for (counted = 0;
+             (counted <= max) && (counted < expected);
+             counted += 1) {
+        }
+        if (counted != expected) {
+            throw new RuntimeException("Failed test1 iteration=" + i +
+                                       " max=" + max +
+                                       " counted=" + counted +
+                                       " expected=" + expected);
+        }
+    }
+
+    void doTest2(int expected, int max, int i) {
+        int counted;
+        for (counted = 0;
+             // change test sequence.
+             (counted < expected) && (counted <= max);
+             counted += 1) {
+        }
+        if (counted != expected) {
+            throw new RuntimeException("Failed test2 iteration=" + i +
+                                       " max=" + max +
+                                       " counted=" + counted +
+                                       " expected=" + expected);
+        }
+    }
+}
+
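For context on Test6559156: (counted <= Integer.MAX_VALUE) is a tautology for an int, so the loop must exit through the (counted < expected) term, and the bug was the optimizer mishandling exactly that pattern. A minimal sketch (illustrative names, not part of the changeset):

    public class LeMaxValueSketch {
        public static void main(String[] args) {
            int counted = 0;
            // The first condition is always true for an int; only the second one
            // can terminate the loop.
            while ((counted <= Integer.MAX_VALUE) && (counted < 10)) {
                counted++;
            }
            System.out.println(counted);   // expected: 10
        }
    }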
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6753639.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6753639
+ * @summary Strange optimisation in for loop with cyclic integer condition
+ *
+ * @run main/othervm -Xbatch Test6753639
+ */
+
+public class Test6753639 {
+    public static void main(String[] args) throws InterruptedException {
+        int END = Integer.MAX_VALUE;
+        int count = 0;
+        for(int i = Integer.MAX_VALUE - 5; i <= END; i++) {
+            count++;
+            if (count > 100000) {
+                System.out.println("Passed");
+                System.exit(95);
+            }
+        }
+        System.out.println("broken " + count);
+        System.out.println("FAILED");
+        System.exit(97);
+    }
+}
+
+
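For context on Test6753639: i <= Integer.MAX_VALUE never becomes false for an int, so the loop can only end from inside its body (in the test, System.exit after 100000 iterations); once i reaches Integer.MAX_VALUE the increment wraps it to Integer.MIN_VALUE and the loop keeps running. A minimal sketch of that wraparound (illustrative only):

    public class CyclicConditionSketch {
        public static void main(String[] args) {
            int iterations = 0;
            for (int i = Integer.MAX_VALUE - 5; i <= Integer.MAX_VALUE; i++) {
                iterations++;
                if (iterations > 10) {
                    break;   // the loop condition alone would never stop the loop
                }
            }
            System.out.println(iterations);   // 11, not 6: the index wrapped around
        }
    }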
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6850611.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6850611
+ * @summary int / long arithmetic seems to be broken in 1.6.0_14 HotSpot Server VM (Win XP)
+ *
+ * @run main Test6850611
+ */
+
+public class Test6850611 {
+
+    public static void main(String[] args) {
+        test();
+    }
+
+    private static void test() {
+        for (int j = 0; j < 5; ++j) {
+            long x = 0;
+            for (int i = Integer.MIN_VALUE; i < Integer.MAX_VALUE; ++i) {
+                x += i;
+            }
+            System.out.println("sum: " + x);
+            if (x != -4294967295l) {
+                System.out.println("FAILED");
+                System.exit(97);
+            }
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6890943.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6890943
+ * @summary JVM mysteriously gives wrong result on 64-bit 1.6 VMs in hotspot mode.
+ *
+ * @run shell Test6890943.sh
+ */
+import java.util.*;
+import java.io.*;
+import java.util.regex.*;
+
+public class Test6890943 {
+  public static final boolean AIR = true, ROCK = false;
+  public static void main(String[] args) {
+    new Test6890943().go();
+  }
+
+  int r, c, f, t;
+  boolean[][] grid;
+
+  public void go() {
+    Scanner s = new Scanner(System.in);
+    s.useDelimiter("\\s+");
+    int T = s.nextInt();
+    for (t = 0 ; t < T ; t++) {
+      r = s.nextInt(); c = s.nextInt(); f = s.nextInt();
+      grid = new boolean[r][c];
+      for (int x = 0 ; x < r ; x++) {
+        String line = s.next();
+        for (int y = 0 ; y < c ; y++) grid[x][y] = line.charAt(y) == '.';
+      }
+      int digs = solve();
+      String res = digs == -1 ? "No" : "Yes " + digs;
+      System.out.printf("Case #%d: %s\n", t+1, res);
+    }
+  }
+
+  Map<Integer, Integer> M = new HashMap<Integer, Integer>();
+
+  private int solve() {
+    M = new HashMap<Integer, Integer>();
+    M.put(calcWalkingRange(0, 0), 0);
+    for (int digDown = 0 ; digDown < r ; digDown++) {
+      Map<Integer, Integer> tries = new HashMap<Integer, Integer>();
+      for (Map.Entry<Integer, Integer> m : M.entrySet()) {
+        int q = m.getKey();
+        if (depth(q) != (digDown)) continue;
+        if (stuck(q)) continue;
+        tries.put(q, m.getValue());
+      }
+
+      for (Map.Entry<Integer, Integer> m : tries.entrySet()) {
+        int q = m.getKey();
+        int fallLeftDelta = 0, fallRightDelta = 0;
+        //fall left
+        int fallLeft = fall(digDown, start(q));
+        if (fallLeft > 0) {
+          fallLeftDelta = 1;
+          if (fallLeft <= f) addToM(calcWalkingRange(digDown+fallLeft, start(q)), m.getValue());
+        }
+
+        //fall right
+        int fallRight = fall(digDown, end(q));
+        if (fallRight > 0) {
+          fallRightDelta = 1;
+
+          if (fallRight <= f) addToM(calcWalkingRange(digDown+fallRight, end(q)), m.getValue());
+        }
+
+        for (int p = start(q) + fallLeftDelta ; p <= end(q) - fallRightDelta ; p++) {
+          //goLeft
+          for (int digSpot = p ; digSpot > start(q) +fallLeftDelta ; digSpot--) {
+            int fallDown = 1+fall(digDown+1, digSpot);
+            if (fallDown <= f) {
+              if (fallDown == 1) {
+                addToM(calcWalkingRange(digDown + 1, digSpot, digSpot, p), m.getValue() + Math.abs(digSpot-p)+1);
+              } else {
+                addToM(calcWalkingRange(digDown + fallDown, digSpot), m.getValue() + Math.abs(digSpot-p)+1);
+              }
+            }
+          }
+
+          //goRight
+          for (int digSpot = p ; digSpot < end(q)-fallRightDelta ;digSpot++) {
+            int fallDown = 1+fall(digDown+1, digSpot);
+            if (fallDown <= f) {
+              if (fallDown == 1) {
+                addToM(calcWalkingRange(digDown + 1, digSpot, p, digSpot), m.getValue() + Math.abs(digSpot-p)+1);
+              } else {
+                addToM(calcWalkingRange(digDown + fallDown, digSpot), m.getValue() + Math.abs(digSpot-p)+1);
+              }
+            }
+          }
+        }
+      }
+    }
+
+    int result = Integer.MAX_VALUE;
+    for (Map.Entry<Integer, Integer> m : M.entrySet()) {
+      if (depth(m.getKey()) == r-1) result = Math.min(m.getValue(), result);
+    }
+
+    if (result == Integer.MAX_VALUE) return -1;
+    return result;
+  }
+
+  private void addToM(int q, int i) {
+    Integer original = M.get(q);
+    if ( original == null ) M.put(q, i);
+    else M.put(q, Math.min(original, i));
+  }
+
+  private int fall(int row, int column) {
+    int res = 0;
+    for ( int p = row+1 ; p < r ; p++) {
+      if (grid[p][column] == AIR) res++;
+      else break;
+    }
+    return res;
+  }
+
+  private boolean stuck(int q) {
+    return start(q) == end(q);
+  }
+
+  private int depth(int q) {
+    return q % 50;
+  }
+
+  private int start(int q) {
+    return q / (50*50);
+  }
+
+  private int end(int q) {
+    return (q / 50) % 50;
+  }
+
+  private int calcWalkingRange(int depth, int pos) {
+    return calcWalkingRange(depth, pos, Integer.MAX_VALUE, Integer.MIN_VALUE);
+  }
+
+  private int calcWalkingRange(int depth, int pos, int airOverrideStart, int airOverrideEnd) {
+    int left = pos, right = pos;
+    if (depth >= r) return (c-1)*50 + depth;
+
+    while (left > 0) {
+      if (grid[depth][left-1] == ROCK && (left-1 < airOverrideStart || left-1 > airOverrideEnd)) break;
+      if (depth < r-1 && grid[depth+1][left-1] == AIR) {
+        left--;
+        break;
+      }
+      left--;
+    }
+    while (right < c-1) {
+      if (grid[depth][right+1] == ROCK && (right+1 < airOverrideStart || right+1 > airOverrideEnd)) break;
+      if (depth < r-1 && grid[depth+1][right+1] == AIR) {
+        right++;
+        break;
+      }
+      right++;
+    }
+
+    return left *50*50 + right*50 + depth;
+  }
+}
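One non-obvious detail in Test6890943: each search state packs (start, end, depth), each kept below 50 by the puzzle limits, into a single int as start*50*50 + end*50 + depth, and start(), end() and depth() unpack it again with division and modulo. A minimal sketch of that encoding (hypothetical class name, same arithmetic as the test):

    public class PackedStateSketch {
        static int pack(int start, int end, int depth) {
            return start * 50 * 50 + end * 50 + depth;
        }
        static int start(int q) { return q / (50 * 50); }
        static int end(int q)   { return (q / 50) % 50; }
        static int depth(int q) { return q % 50; }

        public static void main(String[] args) {
            int q = pack(7, 23, 11);
            System.out.println(start(q) + " " + end(q) + " " + depth(q));   // 7 23 11
        }
    }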
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6890943.sh	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,67 @@
+#!/bin/sh
+# 
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+# 
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+# 
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+# 
+# 
+
+if [ "${TESTSRC}" = "" ]
+then
+  echo "TESTSRC not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTSRC=${TESTSRC}"
+if [ "${TESTJAVA}" = "" ]
+then
+  echo "TESTJAVA not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTJAVA=${TESTJAVA}"
+if [ "${TESTCLASSES}" = "" ]
+then
+  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTCLASSES=${TESTCLASSES}"
+echo "CLASSPATH=${CLASSPATH}"
+
+set -x
+
+cp ${TESTSRC}/Test6890943.java .
+cp ${TESTSRC}/input6890943.txt .
+cp ${TESTSRC}/output6890943.txt .
+cp ${TESTSRC}/Test6890943.sh .
+
+${TESTJAVA}/bin/javac -d . Test6890943.java
+
+${TESTJAVA}/bin/java -XX:-PrintVMOptions ${TESTVMOPTS} Test6890943 < input6890943.txt > test.out 2>&1
+
+diff output6890943.txt test.out
+
+result=$?
+if [ $result -eq 0 ]
+then
+  echo "Passed"
+  exit 0
+else
+  echo "Failed"
+  exit 1
+fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6897150.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6897150
+ * @summary Hotspot optimises away a valid loop
+ *
+ * @run main Test6897150
+ */
+
+// Should be compiled with javac from JDK1.3 to get bytecode which shows the problem.
+public class Test6897150 {
+    public static void main(String[] args) {
+        // This works
+        loopAndPrint(Integer.MAX_VALUE -1);
+        // This doesn't
+        loopAndPrint(Integer.MAX_VALUE);
+    }
+
+    static void verify(int max, int a) {
+        if ( a != (max - 1)) {
+            System.out.println("Expected: " + (max - 1));
+            System.out.println("Actual  : " + a);
+            System.exit(97);
+        }
+    }
+    static void loopAndPrint(int max) {
+        int a = -1;
+        int i = 1;
+        for (; i < max; i++) {
+            a = i;
+        }
+        verify(max, a);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6905845.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6905845
+ * @summary Server VM improperly optimizing away loop.
+ *
+ * @run main Test6905845
+ */
+
+public class Test6905845 {
+
+   public static void main(String[] args){
+      for (int asdf = 0; asdf < 5; asdf++){
+         //test block
+         {
+            StringBuilder strBuf1 = new StringBuilder(65);
+            long          start   = System.currentTimeMillis();
+            int           count   = 0;
+
+            for (int i = Integer.MIN_VALUE; i < (Integer.MAX_VALUE - 80); i += 79){
+               strBuf1.append(i);
+               count++;
+               strBuf1.delete(0, 65);
+            }
+
+            System.out.println(count);
+            if (count != 54366674) {
+              System.out.println("wrong count: " + count +", should be 54366674");
+              System.exit(97);
+            }
+         }
+         //test block
+         {
+            StringBuilder strBuf1 = new StringBuilder(65);
+            long          start   = System.currentTimeMillis();
+            int           count   = 0;
+
+            for (int i = Integer.MIN_VALUE; i < (Integer.MAX_VALUE - 80); i += 79){
+               strBuf1.append(i);
+               count++;
+               strBuf1.delete(0, 65);
+            }
+
+            System.out.println(count);
+            if (count != 54366674) {
+              System.out.println("wrong count: " + count +", should be 54366674");
+              System.exit(97);
+            }
+         }
+      }
+   }
+}
+
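For readers checking the magic constant in Test6905845: 54366674 is simply the number of steps of size 79 that fit in the range the loop walks, from Integer.MIN_VALUE up to (but excluding) Integer.MAX_VALUE - 80. A quick way to verify it with overflow-free long arithmetic (illustrative only):

    public class CountCheckSketch {
        public static void main(String[] args) {
            long lo = Integer.MIN_VALUE;
            long hi = Integer.MAX_VALUE - 80;     // exclusive bound of the test loop
            long count = (hi - lo + 78) / 79;     // ceil((hi - lo) / 79)
            System.out.println(count);            // 54366674
        }
    }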
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6931567.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6931567
+ * @summary JIT Error (on class file compiled with eclipse) on JVM x64 (but not on x32!).
+ *
+ * @run main Test6931567
+ */
+
+// Should be compiled with javac from JDK1.3 to get bytecode which shows the problem.
+public class Test6931567 {
+
+    public static void main(final String[] args) {
+        booleanInvert(Integer.MAX_VALUE);
+        booleanInvert(Integer.MAX_VALUE - 1);
+    }
+
+    private static void booleanInvert(final int max) {
+        boolean test1 = false;
+        boolean test2 = false;
+
+        for (int i = 0; i < max; i++) {
+            test1 = !test1;
+        }
+
+        for (int i = 0; i < max; i++) {
+            test2 ^= true;
+        }
+
+        if (test1 != test2) {
+            System.out.println("ERROR: Boolean invert\n\ttest1=" + test1
+                    + "\n\ttest2=" + test2);
+            System.exit(97);
+        } else {
+            System.out.println("Passed!");
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6935022.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6935022
+ * @summary Server VM incorrectly breaks out of while loop
+ *
+ * @run main Test6935022
+ */
+
+public class Test6935022 {
+    public static final void main(String[] args) throws Exception {
+        Test6935022 test = new Test6935022();
+
+        int cnt = 0;
+
+        while (cnt < 10000) {
+            try {
+                ++cnt;
+                if ((cnt&1023) == 0)
+                  System.out.println("Thread="+Thread.currentThread().getName() + " iteration: " + cnt);
+                test.loop(2147483647, (cnt&1023));
+            }
+
+            catch (Exception e) {
+                System.out.println("Caught on iteration " + cnt);
+                e.printStackTrace();
+                System.exit(97);
+            }
+        }
+    }
+
+    private void loop(int endingRow, int mask) throws Exception {
+        int rows = 1;
+        boolean next = true;
+
+        while(rows <= endingRow && next) {
+            rows++;
+            if (rows == mask)
+              System.out.println("Rows="+rows+", end="+endingRow+", next="+next);
+            next = next(rows);
+        }
+
+        if (next)
+            throw new Exception("Ended on rows(no rs): " + rows);
+    }
+
+    private boolean next(int rows) {
+        return rows < 12;
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6959129.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6959129
+ * @summary COMPARISON WITH INTEGER.MAX_INT DOES NOT WORK CORRECTLY IN THE CLIENT VM.
+ *
+ * @run main/othervm -ea Test6959129
+ */
+
+public class Test6959129 {
+
+  public static void main(String[] args) {
+    long start  = System.currentTimeMillis();
+    int min = Integer.MAX_VALUE-30000;
+    int max = Integer.MAX_VALUE;
+    long maxmoves = 0;
+    try {
+      maxmoves = maxMoves(min, max);
+    } catch (AssertionError e) {
+      System.out.println("Passed");
+      System.exit(95);
+    }
+    System.out.println("maxMove:" + maxmoves);
+    System.out.println("FAILED");
+    System.exit(97);
+  }
+  /**
+   * Imperative implementation that returns the number of hailstone moves
+   * for a given number.
+   */
+  public static long hailstoneLengthImp(long n) {
+    long moves = 0;
+    while (n != 1) {
+      assert n > 1;
+      if (isEven(n)) {
+        n = n / 2;
+      } else {
+        n = 3 * n + 1;
+      }
+      ++moves;
+    }
+    return moves;
+  }
+
+  private static boolean isEven(long n) {
+    return n % 2 == 0;
+  }
+
+  /**
+   * Returns the maximum length of the hailstone sequence for numbers
+   * between min and max.
+   *
+   * For rec1 - Assume that min is bigger than max.
+   */
+  public static long maxMoves(int min, int max) {
+    long maxmoves = 0;
+    for (int n = min; n <= max; n++) {
+      if ((n & 1023) == 0) System.out.println(n);
+      long moves = hailstoneLengthImp(n);
+      if (moves > maxmoves) {
+        maxmoves = moves;
+      }
+    }
+    return maxmoves;
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6985295.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6985295
+ * @summary JVM fails to evaluate condition randomly
+ *
+ * @run main/othervm -Xbatch Test6985295
+ */
+
+public class Test6985295 {
+
+    public static void main(String[] args) {
+        int min = Integer.MAX_VALUE-50000;
+        int max = Integer.MAX_VALUE;
+        System.out.println("max = " + max);
+        long counter = 0;
+        int i;
+        for(i = min; i <= max; i++) {
+            counter++;
+            if (counter > 1000000) {
+              System.out.println("Passed");
+              System.exit(95);
+            }
+        }
+        System.out.println("iteration went " + counter + " times (" + i + ")");
+        System.out.println("FAILED");
+        System.exit(97);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test6992759.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6992759
+ * @summary Bad code generated for integer <= comparison, fails for Integer.MAX_VALUE
+ *
+ * @run main Test6992759
+ */
+
+public class Test6992759 {
+
+    static final int N_TESTS = 1000000000;
+
+    public static void main(String[] args) throws Exception {
+
+        /*
+         * If MAX_VALUE is changed to MAX_VALUE - 1 below, the test passes
+         * because (apparently) bad code is only generated when comparing
+         * <= MAX_VALUE in the doTest method.
+         */
+        Test6992759 test = new Test6992759();
+        for (int i = 0; i < N_TESTS; i += 1) {
+            test.doTest(10, Integer.MAX_VALUE, i);
+            //test.doTest(10, Integer.MAX_VALUE - 1, i);
+        }
+        System.out.println("No failure");
+    }
+
+    void doTest(int expected, int max, int i) {
+        int counted;
+        for (counted = 0;
+             (counted <= max) && (counted < expected);
+             counted += 1) {
+        }
+        if (counted != expected) {
+            throw new RuntimeException("Failed test iteration=" + i +
+                                       " max=" + max +
+                                       " counted=" + counted +
+                                       " expected=" + expected);
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test7005594.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7005594
+ * @summary Array overflow not handled correctly with loop optimizations
+ *
+ * @run shell Test7005594.sh
+ */
+
+public class Test7005594 {
+
+      static int test(byte a[]){
+          int result=0;
+          for( int i=0; i<a.length; i+=((0x7fffffff>>1)+1) ){
+              result += a[i];
+          }
+          return result;
+      }
+
+      public static void main(String [] args){
+          byte a[]=new byte[(0x7fffffff>>1)+2];
+          int result = 0;
+          try {
+              result = test(a);
+          } catch (ArrayIndexOutOfBoundsException e) {
+              e.printStackTrace(System.out);
+              System.out.println("Passed");
+              System.exit(95);
+          }
+          System.out.println(result);
+          System.out.println("FAILED");
+          System.exit(97);
+      }
+
+}
+
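For context on Test7005594: the stride (0x7fffffff>>1)+1 is 0x40000000, so the third index computed by the loop overflows to Integer.MIN_VALUE; that negative value is still less than a.length, which is why the access must end in an ArrayIndexOutOfBoundsException rather than a normal loop exit. The index arithmetic can be checked without allocating the huge array (illustrative only):

    public class StrideOverflowSketch {
        public static void main(String[] args) {
            int length = (0x7fffffff >> 1) + 2;   // array length used by the test
            int stride = (0x7fffffff >> 1) + 1;   // loop increment used by the test
            int i = 0;
            i += stride;                           // 0x40000000, still a valid index
            i += stride;                           // wraps to Integer.MIN_VALUE
            System.out.println(i);                 // -2147483648
            System.out.println(i < length);        // true: the bound check still passes
        }
    }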
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test7005594.sh	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,80 @@
+#!/bin/sh
+# 
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+# 
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+# 
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+# 
+# 
+
+if [ "${TESTSRC}" = "" ]
+then
+  echo "TESTSRC not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTSRC=${TESTSRC}"
+if [ "${TESTJAVA}" = "" ]
+then
+  echo "TESTJAVA not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTJAVA=${TESTJAVA}"
+if [ "${TESTCLASSES}" = "" ]
+then
+  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTCLASSES=${TESTCLASSES}"
+echo "CLASSPATH=${CLASSPATH}"
+
+set -x
+
+cp ${TESTSRC}/Test7005594.java .
+cp ${TESTSRC}/Test7005594.sh .
+
+${TESTJAVA}/bin/javac -d . Test7005594.java
+
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Xms1600m -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1
+
+result=$?
+
+cat test.out
+
+if [ $result -eq 95 ]
+then
+  echo "Passed"
+  exit 0
+fi
+
+if [ $result -eq 97 ]
+then
+  echo "Failed"
+  exit 1
+fi
+
+# The test should pass when there is not enough space for the object heap
+grep "Could not reserve enough space for object heap" test.out
+if [ $? = 0 ]
+then
+  echo "Passed"
+  exit 0
+else
+  echo "Failed"
+  exit 1
+fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test7020614.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7020614
+ * @summary "-server" mode optimizer makes code hang
+ *
+ * @run main/othervm/timeout=30 -Xbatch Test7020614
+ */
+
+public class Test7020614 {
+
+    private static final int ITERATIONS = 1000;
+    private static int doNotOptimizeOut = 0;
+
+    public static long bitCountShort() {
+        long t0 = System.currentTimeMillis();
+        int sum = 0;
+        for (int it = 0; it < ITERATIONS; ++it) {
+            short value = 0;
+            do {
+                sum += Integer.bitCount(value);
+            } while (++value != 0);
+        }
+        doNotOptimizeOut += sum;
+        return System.currentTimeMillis() - t0;
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 4; ++i) {
+            System.out.println((i + 1) + ": " + bitCountShort());
+        }
+        System.out.println("doNotOptimizeOut value: " + doNotOptimizeOut);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/input6890943.txt	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,1543 @@
+50
+4 5 1
+.....
+#####
+..###
+.#...
+4 5 1
+.....
+#####
+###..
+...#.
+5 4 2
+....
+####
+..##
+.###
+.#.#
+6 10 5
+..........
+####.#####
+####.#####
+####.#####
+####.#####
+####.#####
+6 10 4
+..........
+#....#####
+#....#####
+#....#####
+#....#####
+#....#####
+6 10 1
+..........
+####.#####
+####.#####
+####.#####
+####.#####
+####.#####
+6 10 2
+..........
+####.#####
+####.#####
+####.#####
+####.#####
+####.#####
+6 11 2
+.....######
+###########
+###.......#
+###.#.#...#
+###.#.##..#
+###.#.###.#
+6 11 1
+.....######
+###########
+###.......#
+###.#.#...#
+###.#.##..#
+###.#.###.#
+6 11 2
+.......####
+###########
+###.......#
+###.#.#...#
+###.#.##..#
+###.#.###.#
+7 11 1
+..#........
+##.#.......
+###.#......
+####.#.....
+#####.#....
+######.#...
+#########..
+13 16 2
+................
+#.#.#.#.#.#.#.#.
+................
+.#.#.#.#.#.#.#.#
+................
+#.#.#.#.#.#.#.#.
+................
+.#.#.#.#.#.#.#.#
+................
+#.#.#.#.#.#.#.#.
+................
+.#.#.#.#.#.#.#.#
+................
+4 16 3
+................
+#.#.#.#.#.#.#.#.
+.#.#.#.#.#.#.#.#
+................
+50 50 1
+..................................................
+################################################.#
+.#............#....#.......................#....##
+..#.....#......#....#.....................#....#..
+...#.......#....#....#...................#....#...
+#...#....#.......#....#.................#....#....
+##...#.......#....#....#...............#....#.....
+###...#....#.......#....#.............#....#......
+####...#.......#....#....#...........#....#.......
+.####...#....#.......#....#.........#....#........
+..####...#.......#....#....#.......#....#.........
+...####...#....#.......#....#.....#....#..........
+....####...#.......#....#....#...#....#...###.....
+.....####...#....#.......#....#.#....#..##..###...
+......####...#.......#....#....#.............##...
+.......####...#....#.......#...............##.....
+........####...#.......#....#............##.......
+.........####...#....#.......#.........#######....
+..###.....####...#.......#....#...................
+.#..###....####...#....#.......#.........####.....
+##...###....####...#.......#....#.......##..##....
+##...........####...#....#.......#......##..##....
+##..####......####...#.......#....##....##..##....
+##....##.......####...#....#......##....##..##....
+.###.##.........####.............##.....##..##....
+..###............#######........##.......####.....
+.........###......######.......##.................
+.......##..##..........#......##.........####.....
+......##....##........#......##.........##..##....
+......##.............#......##..........##..##....
+......##............#......##...........##..##....
+......##....#......#......##............##..##....
+.......##..##.....#....########.........##..##....
+........####.....#.....###.#...#.........####.....
+.#####..........#.....#..##.#...#.................
+...##..........#.....#....##.#...#.........####...
+...##.........#.....#..#...##.#...#.......##..##..
+...##........#.....#..##....##.#...#......##..##..
+...##.......#.....#....##....##.#...#.....##..##..
+##.##......#.....#...###......##.#...#.....#####..
+.###......#.....#..##..........##.#...#.......##..
+.........#.....#...##..####.....##.#...#.....##...
+........#.....#...###..#.##......##.#...#...##....
+.......#.....#....##.....###......##.#...#........
+......#.....#..#.##...###..........##.#...#.......
+.....#.....#...##..#..#..##.##.#....##.#...#......
+....#.....#........###.....##........##.#...#.....
+...#.....#.......##...####..###.......##.#...#....
+..#.....#......##.#.#..#.#..##.........##.#...#...
+.#..............#.#.#.#.#.#.#...........##.#...#..
+50 50 13
+..................................................
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+##################################################
+20 49 5
+.................................................
+#################################################
+#################################################
+##################################.##############
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+############################.####################
+#################################################
+######.##########################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+49 49 48
+.................................................
+#################################################
+################################################.
+#################################################
+####################################.############
+#################################################
+##########.######################################
+#######.#########################################
+#################################################
+#################################################
+#################################################
+#######################################.#########
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+###########################################.#####
+#################################################
+#################################################
+###.#############################################
+###############.#################################
+#################################################
+##.##############################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#####################################.###########
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#####.###########################################
+#####################.###########################
+#################################################
+###.#############################################
+#################################################
+#################################################
+#################################################
+#############.###########.#######################
+###.##############.####.#########################
+#########################################.#######
+#################################################
+########################################.########
+40 49 10
+.................................................
+#################################################
+..#..............#...............................
+.......................#.......................#.
+..#....#...................#.................#...
+..........#...............................#......
+............#..................#.......#.........
+........#............#............#..............
+...................#.....#.......................
+....#..........#........................#........
+....#.................................#......#...
+...........#.....................................
+....................#............................
+.##........#................#....................
+..............#..................................
+........#......................................#.
+.................................#....#........#.
+.................................................
+.............................#..##...............
+...........................................#.#...
+........#..........#........#.............#.....#
+..........#.#....................................
+.............................#............#......
+.......#.......................#.........#.......
+..............................#..................
+......#..........................................
+.............#................#..................
+......#.............................#............
+#.................#..................#......#....
+..............#..................................
+...........#...........................#.....#...
+............#............#...#...................
+.......................#.......................#.
+............................#....................
+..........................#......................
+...........................#.............#...#...
+.#...............................................
+..................#..............................
+...#..............................#..............
+.....#..................#........................
+49 49 20
+.........................................##..#..#
+########################################.##...#.#
+#.##..#....#####..####...#..#.##.###.#..#..#.....
+..#...#.##...##.#....#...##..#.#..###...###......
+..#..##.#.###......#.....##.....#.......##.#.#.#.
+..##......###.#.##.....##.#..#.##.###..##.#.#####
+#.#.##....#..#..#.....#.#.##.#.#.##.#.....###.#.#
+..###.#..#..###...###..#.#.....#..######.#.#....#
+..#...#...##.#..##.#.#.#..###.#..#..#...#....##..
+.#..#.#..##.#...##........##.##..#..........#.#..
+#.#.###.####...#.#...#..###..#.##....#...........
+#..#.#######.#....#.#.##...#.#.............##....
+###..###....#..#....#.#.###...#..##.#.#..##.#####
+....####...#.#...........##.#.#.#..#.#.#.##.#.#..
+##.#...##..#..#...#..##..####.##.#...#.....#...#.
+...###.##.#..##.....#.#.##..#..###.#.###.#.#..#.#
+###....##...#.#.##.##..#.#...#...##....#.###.....
+##..#....###..#.....#..#....#.#.#.##.......##.###
+.#.#....#.....####..##....##...##...#.##..##.#...
+##....#....#.#.###.##...#..##.##...##....#.######
+..#....#..##.....##.##.#.........##..##...#.#....
+#..#..#.#....##.#.#...#.###..#...#..#.##.#.#....#
+.....#..#...###.....##...###....###.##.....#...#.
+#..#.#...###..#....#..####....#.#......#..##....#
+.....#.#...###...###.#..#.#.#.........#.#.#..#.##
+.#..##..##..#..#.#....##.........#..#.##.#..##...
+##.#.#.....##.##..###...#.#.#..#.#.####.#.###.#..
+..#.#.......#.#.#...#####..#.##.#....#...#.#.....
+..########.####.....#..#.........#..#####.##.#...
+.#......##.####..###..#.####........#....#....#.#
+#.....#....#...#...#..###......#.##..#..#...#.###
+...#..###.....#....#..#..#......#.....#.#.#.#..##
+....#.##..####.#..###...#...#...#######..#..#....
+.#..#...##...#...#......##...#####.##...#..##....
+..#.#.#######.#....#.#.###....#.##...#..#.##..#..
+#..#.##.#.#.##..###....#.##.#..#..#...##....##..#
+.###.#.#..##.###...#..##.#.#...#.#.####....#..###
+#.#......#...##.##...#.#.....##..#..##....#.##...
+#.#...#.#.##...##.###.#..##..##..####..##.#.#...#
+....#....#..####.##.....#.#....#..##..##....#..#.
+....#...###.....##..#..###....#........###..##..#
+.###....##...........#....#........####.#.####..#
+.#........##...#.###..###.#...##.##..###..###..##
+#.#######.#....###...#..##..#...#....##....#....#
+#....#..#......#..#...##.....##.#.#.#..#......##.
+#.##.##.#.#.##..#..##..######.##.###.#.#..#....#.
+.###.##.....#.##.#..###....###..##....#.#..#.....
+#####..#.#....#.#......##..##.#...........####.#.
+..#..#.#..#...##....###.##.#.#...#..#..#....#..##
+49 49 5
+.........................................#....###
+#########################################..#.....
+.....#.###...#...#...........##...#...#....#...#.
+....#.....#..#....#....##...#..###.#...#..##.#.##
+....#...###.##...........#.#.##.......#.#.#.#..#.
+...........#..###..#..##.....#.........#.....#...
+.....#......#.##.#..##.###.....#...###.#....#...#
+.#......##.#.......##...##.....###..#......#.##..
+#.....#.#..#.#...#####...#.###.##.....#..#.......
+........##....#....#....#.#.#....####....#.##.###
+....#.#......#.##.....#..........##.............#
+#..##....#.#....#..#.....#.#.#....#.#.#....##...#
+...#..##..#...#....#.#....#..#....#..#..#.......#
+......#...##.#.####.....####.###.#..#........####
+##..........#...........#..#.##......##......#.##
+....###....##...#.####.....#............#..#.....
+...#....#....##.....##.....#.#.....#..#.#..#..#..
+...........#...#.##..##..##.#......#..#.#..##....
+.#........##......#.........##..#..........#.#...
+#....##.#.##..#.....#.....##.....##....#.........
+.#.......###.......#..#.....#.....#..##.##...#..#
+###......##.....##...##..#.#....#...####.##......
+....#..#....##...#.##.##.#.#...#...#.#..#.##.#..#
+..........#.#..#####..####....##..#.#.#.........#
+........##.#.###..###....#...#........##.#......#
+...........##.##..#.....#.#.#..#....#.###..#.###.
+#.#....#.#...##............#.#.....#....#...#.#..
+..###.#.##...#....#..##.##...#........#..........
+...........#...##...#..#..##.....#..#.......#..#.
+#.#.#......##.#.....#...#.#.#.#...#..#######.#.##
+...##..#.......###..#..#.##....#......####.#.....
+....##......#.#......#....##..#...##.......#..#..
+#..###.##....##..##.#..#.###.##.....#...#........
+#......#....####....#.........#.........#.##.##..
+.#.#..###......#...##.#.##..#.#.....#...#.#......
+......#.#...##.#....#..#.#.......#.#.....#.#.###.
+###..#.....#....#.#...##..#.#.....#.#.....#.#.#..
+..........#........#....#...#..#...#...#.#.#.#...
+#..##..#...###....#.#..#....##....#...#..##....#.
+.#.#.#....#..##.#...#.....#.##..#....##........#.
+..#.##....###...#...#..#..#.....#..#..###..#...#.
+##....#........#..#..#..#......#...#.##....#.#.##
+....#.............#..#...#...#.#.#...##....#..#.#
+...#.........#....###.....#.#..#..#...#..#...#...
+.#.#.###..###..##.#.##...#...#..#................
+.#......#..#..#.#.#...#.....................#..#.
+..#.....#.......#..##.......#...#...##.#.....#.#.
+#..####.#....#......#.........#.#...###...#....#.
+#.#.#.#....#......#..#.#........###..#....##.....
+31 47 7
+.........................#.#..##.#..#...#.#...#
+#########################.#.#...#...#.#........
+...#...........#...#...#...##...#..#.#.#...#.##
+.#........#.......#.#.....#......#........#....
+#.#....###...#...#....#....#......##...#.......
+#.#......##..##...#.....#.##.#.........#.......
+.......#.....##........#..#.......#.##...#.....
+..#.......##.#...#.#..#.#..#....#......#......#
+#.##.........#####..###.......#........#......#
+.#..#.#.#...#....####...#...........#....#.....
+##..##..#..###..###....##......#....#..##...#.#
+#......#........##...#.#.#..#..#....#..#...##.#
+.#.#.##..........#.#..#...##.##................
+##.##.#...#....#.............#.#.....###...#.#.
+..#..#.#..#..#.#....#....#............#.##..#..
+......###......#..##..#.#...##.........#.#.###.
+..#.##.#..#......##....#.#........#....##..#..#
+.........#.#.#.....##...#.#...#.##.....##.##...
+.#........#...#.###.........#.#.#..............
+....##........#.....#....###....###.#..#....##.
+..#..#....#....#.#.......#.#.#..........###....
+.#..###.#...#.###...##...#....#...#............
+##.....#..##.#.##.##.......#.##.....###...##.#.
+.###.......#...#.....#.....###.........#...#...
+#.....#..####.....##...#........####..........#
+#..#...........##.#.#.#..............#....##...
+..#.#..........#..##.#.##...##..##.#.#..##..#..
+#..........................###......#....##....
+.....#..........#..#......#...#.#..#....#...#..
+#......#....##.........#..#.......#..#.......#.
+.#......#...###...##....##.#..........#...#..#.
+44 35 13
+...........................##..####
+############################.....##
+..###.#.##.#...#.##.####.###.###.##
+#.##.#.#.#.#..###.#..#...###.##..##
+.#.##.#.##..#.###.###.##.#.#...#.#.
+##....##.#..#####.#.####..##.##...#
+####.##..##.###.##.#.#...#.##.###.#
+#.#######.#.#.#..#.#..#..#...###.#.
+##.###.#..#.####.##.#.#########.##.
+#.##.#######..#.#.####.#..#..######
+.#.##..########...##.###..#..##.##.
+#.#######.###...##.#...#.####.#..#.
+.###..###....#.#.##.###..##.##.##.#
+...#.###.#.##.#.####.##.####.#.#...
+#..#......##....#.##.#.#.###.#..#..
+...##.###.###.#.####..#.#.#..###.#.
+.#####.#..#..##.#.#...##.#.#.##...#
+..##..#.#....##.#.#.###.##.##...#..
+###.#..###....#######..#.#.###.##.#
+##.##.#.#.##..#.#.#.#.#..##.####...
+##.###...#.###.#.#.#..#####.###.#..
+...#.#...##......#.##..##.##.#.#.##
+##..#..####..###...###.#........###
+##..#....#..#.#..##.#####..###.#...
+##...#.#####.###.##....###...####..
+#.####.#..#.##.#.#...#.###.#...##..
+####.#####.###.#.##...##...#...#.##
+#..##.##....###..#..####.##..#.#.##
+..#.###.##....####.##.#..###.#....#
+#.#.###.#..#.##.##...###.##..######
+##.#.##.###.#..#...###.####..##.###
+.########.#....#..#........#..##..#
+####..#.##.#.##.####..#.###...#####
+#..##..#..##.###....####.#.#...#.#.
+.#...##.##.###.###...##..##..###...
+###.##...#.##...####.#.#.##..#.####
+##.###..##.#....#.###..##.#...###.#
+##..##.###..#..#.####.#.....##.####
+.#....##...#####.....####...#.##.#.
+##.#.#.....##...#..#...#....#....##
+#..#.#..#####.##..###.#.###########
+.#.###.#..###.##.###.#.###.##.#.###
+.###..####.#..##......#..##.######.
+...##..###.#.....##.#.#..##......#.
+6 10 2
+.......#..
+##########
+##.#...#..
+.#..###...
+#.##.#####
+.####..#.#
+7 9 4
+......##.
+#########
+.#.###.##
+..#...#..
+.....##.#
+####...##
+.#..#.#.#
+10 9 2
+.........
+#########
+..###.###
+#..###.##
+.....#.#.
+..##.....
+###.#....
+.##..#...
+##...#..#
+#..##..##
+10 7 1
+....#..
+####..#
+####..#
+..#.#..
+...##.#
+#....##
+..#..##
+#.#....
+.##.#..
+###...#
+6 10 2
+.......#..
+#######.#.
+..##.#.##.
+..#..#....
+.#.#....##
+..#....#..
+40 40 5
+........................................
+###############################..#######
+#..##############.....##################
+........................................
+...........................##########...
+#####...################..##############
+##################.#####################
+.............................#####......
+#.............................##########
+...............................####.....
+.........#################..............
+..........................###...........
+........................................
+....................#####...............
+##########....................##########
+.......##################...............
+########........###############..#######
+........................................
+..........#########################.....
+#####...................................
+.........................####...........
+......................#####...######....
+######..................################
+........#########.......................
+############.........................###
+........####################............
+......................##################
+....................######......#.......
+............################............
+........................................
+......................#########.........
+#######............#####################
+........###############.................
+........................................
+.........###################............
+.............................#..........
+######................................##
+........................#############...
+......##................................
+........................................
+30 20 11
+....................
+####################
+......##.##.........
+#.....####....######
+...##...............
+........#######.###.
+.......##...........
+....#####..##.......
+........##.#........
+.....###...........#
+..##########.....##.
+..#........#.#......
+......##...##.......
+....##...###..###...
+.####...#####...####
+###.................
+.......##...........
+..........###..####.
+..####..#####.#####.
+..###....#.......##.
+......##....##......
+....##.###..........
+.##.....#####..#....
+....................
+......###...........
+####..###.#########.
+.......#######......
+....###.............
+.........###.#####..
+................####
+50 20 17
+....................
+###################.
+#..##...............
+....................
+............####....
+........#####.......
+..............#..#..
+.........####...####
+.............####...
+.....#....###.......
+####...............#
+....................
+.....######.........
+......#.............
+.####......#####....
+.............##.....
+#####....#####......
+..#####.............
+##..####....###..###
+....................
+....................
+........######.###..
+....####....#####...
+....########........
+...#####............
+.###................
+...............###..
+.......#########....
+..................##
+.......####.........
+..#####.............
+...####.............
+.##..........#####..
+....................
+...#.##.......###...
+######.....##......#
+......####..........
+......#.............
+....................
+....................
+....................
+.....#..............
+.....####....###....
+......#.........####
+.......######...###.
+....................
+....................
+##...........###...#
+.###................
+......#######.......
+45 25 10
+.........................
+#########################
+#...................#..##
+....................#....
+.........................
+.........................
+...................#####.
+##.....................##
+.#.....................#.
+##.....................##
+.........................
+.....####................
+.....##.#................
+.....##.#................
+.....##.#............####
+.....##.#.......###..####
+.....##.#.......#.#......
+.....##.######..#.#......
+.....####....#..#.#..###.
+........#....#..#.#..###.
+...##...#....#..#.#......
+...##...#....#..#.#......
+...##...#....#..#.#......
+...##...#....#..#.#......
+...########..#..###......
+##.##...#....#........###
+.#.##...######..#####.#..
+.#.##...........#...#.#..
+.#.##...........#...#.#..
+.#..............#...#.#..
+.#..............#...#.#..
+.#.####.........#...#.#..
+.#.#..#.........#...#.#..
+.#.#..#.........#####.#..
+##.#.##########.......###
+...#.####.....#..........
+..#####.#.....#..####....
+..#####.#.....#..#..#....
+..#..#..#.....#..#..#....
+..#..#..#.....#..#..#....
+#.#..#..#.....#..#..#..##
+#.#..#..#######..#..#..#.
+#.####.......#####..#..#.
+#............#..##..#..#.
+#............########..#.
+25 45 5
+.............................................
+#############################################
+..........#...###################.#...#......
+#########.#....##############################
+#########.#.............##################...
+####################....#####################
+#########..........#........#################
+####################........#################
+..................####################.....#.
+####..............#...........###############
+######################################.######
+#########################.....#...###########
+###.###################.......#.....#########
+#####.#.........#.....#.....#################
+#############################################
+###.#############...####............#########
+......############################...#....#..
+....#########....................#...######..
+##########.##..............##################
+#####################......##################
+#############################################
+#############################################
+#######################################...###
+#############################################
+##########....###############################
+40 40 5
+...............................#########
+########################################
+########################################
+########################################
+#########....###########################
+########################################
+########################################
+########################################
+########################################
+########################################
+########################################
+..#######.######################........
+########################################
+########################################
+########################################
+...........#####################........
+####.................###################
+########################################
+########################################
+########################################
+########################################
+#####................###################
+###################...........##########
+########################################
+########################################
+.......##############################...
+########################################
+########################################
+########################################
+##############..............############
+########################################
+########################################
+########################################
+########################################
+########################################
+########################################
+########################################
+########################################
+########################################
+########################################
+30 20 2
+....................
+####################
+#############.....##
+##...###############
+####################
+####################
+####################
+....##.....#...#....
+####################
+##########.#########
+###....#############
+#########....#######
+###....#############
+####################
+....#######.....###.
+####################
+########.###########
+####################
+#############.....##
+....#####..#########
+####################
+##..########.##.####
+########.###########
+#########..#########
+#........###########
+#########..######.##
+###.################
+####################
+####################
+##############...###
+50 20 7
+...................#
+####################
+#####...############
+####################
+####################
+####################
+####################
+####################
+####################
+.....##############.
+####################
+####################
+.........########...
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+#########.##########
+####################
+####################
+####################
+....###########.....
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+##........##########
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+####################
+....##########......
+####################
+49 49 3
+.................................................
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+.................................................
+.................................................
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+........##########...............................
+#################################################
+###########..................####################
+#################################################
+#################################################
+#################################################
+#################################################
+.................................................
+#################################################
+#################################################
+#################################################
+#################################################
+#################################################
+.......#####.....................................
+.................................................
+#################################################
+................................################.
+..................................###########....
+#################################################
+#################################################
+#################################################
+#################################################
+45 25 4
+.........................
+#########################
+########............#####
+#########################
+.........................
+###############.#######.#
+###############.#######.#
+############....#######.#
+.........................
+############.#.##########
+############...##########
+#########################
+.................######..
+################.######.#
+.........................
+....................##...
+.........................
+#########################
+#########################
+#########################
+#########################
+#########################
+#########################
+..........####...........
+##.......................
+##.......................
+#########.####.##########
+..........####...........
+#########################
+#########################
+#########################
+#########################
+#########################
+#########################
+.........................
+##################.######
+##################.######
+##################.######
+.........................
+#########################
+#########################
+#########################
+.........................
+.........................
+.........................
+25 45 5
+.............................................
+#############################################
+####################################.########
+###########.#################################
+###########.#################################
+###########.#################################
+#############################################
+#############################################
+#############################################
+#############################################
+#############################################
+#############################################
+#############################################
+#############################################
+#############################################
+#############################...#############
+#############################.#.#############
+#############################...#############
+#############################################
+#############################################
+#############################################
+###..########################################
+###..########################################
+#########################################....
+####################################.####.##.
+50 50 18
+..................................................
+##################################################
+..##..##..##..##..##..##..##..##..##..##..##..##..
+.###.###.###.###.###.###.###.###.###.###.###.###.#
+....####....####....####....####....####....####..
+.#.#####.#.#####.#.#####.#.#####.#.#####.#.#####.#
+..######..######..######..######..######..######..
+.#######.#######.#######.#######.#######.#######.#
+........########........########........########..
+.#.#.#.#########.#.#.#.#########.#.#.#.#########.#
+..##..##########..##..##########..##..##########..
+.###.###########.###.###########.###.###########.#
+....############....############....############..
+.#.#############.#.#############.#.#############.#
+..##############..##############..##############..
+.###############.###############.###############.#
+................################................##
+.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.#.###
+..##..##..##..##################..##..##..##..####
+.###.###.###.###################.###.###.###.#####
+....####....####################....####....######
+.#.#####.#.#####################.#.#####.#.#######
+..######..######################..######..########
+.#######.#######################.#######.#########
+........########################........##########
+.#.#.#.#########################.#.#.#.###########
+..##..##########################..##..############
+.###.###########################.###.#############
+....############################....##############
+.#.#############################.#.###############
+..##############################..################
+.###############################.#################
+................................##################
+.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.###################
+..##..##..##..##..##..##..##..####################
+.###.###.###.###.###.###.###.#####################
+....####....####....####....######################
+.#.#####.#.#####.#.#####.#.#######################
+..######..######..######..########################
+.#######.#######.#######.#########################
+........########........##########################
+.#.#.#.#########.#.#.#.###########################
+..##..##########..##..############################
+.###.###########.###.#############################
+....############....##############################
+.#.#############.#.###############################
+..##############..################################
+.###############.#################################
+................##################################
+.#.#.#.#.#.#.#.###################################
+50 50 19
+..................................................
+##################################################
+..##..##..##..##..##..##..##..##..##..##..##..##..
+.###.###.###.###.###.###.###.###.###.###.###.###.#
+....####....####....####....####....####....####..
+.#.#####.#.#####.#.#####.#.#####.#.#####.#.#####.#
+..######..######..######..######..######..######..
+.#######.#######.#######.#######.#######.#######.#
+........########........########........########..
+.#.#.#.#########.#.#.#.#########.#.#.#.#########.#
+..##..##########..##..##########..##..##########..
+.###.###########.###.###########.###.###########.#
+....############....############....############..
+.#.#############.#.#############.#.#############.#
+..##############..##############..##############..
+.###############.###############.###############.#
+................################................##
+.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.#.###
+..##..##..##..##################..##..##..##..####
+.###.###.###.###################.###.###.###.#####
+....####....####################....####....######
+.#.#####.#.#####################.#.#####.#.#######
+..######..######################..######..########
+.#######.#######################.#######.#########
+........########################........##########
+.#.#.#.#########################.#.#.#.###########
+..##..##########################..##..############
+.###.###########################.###.#############
+....############################....##############
+.#.#############################.#.###############
+..##############################..################
+.###############################.#################
+................................##################
+.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.###################
+..##..##..##..##..##..##..##..####################
+.###.###.###.###.###.###.###.#####################
+....####....####....####....######################
+.#.#####.#.#####.#.#####.#.#######################
+..######..######..######..########################
+.#######.#######.#######.#########################
+........########........##########################
+.#.#.#.#########.#.#.#.###########################
+..##..##########..##..############################
+.###.###########.###.#############################
+....############....##############################
+.#.#############.#.###############################
+..##############..################################
+.###############.#################################
+................##################################
+.#.#.#.#.#.#.#.###################################
+50 50 20
+..................................................
+##################################################
+..##..##..##..##..##..##..##..##..##..##..##..##..
+.###.###.###.###.###.###.###.###.###.###.###.###.#
+....####....####....####....####....####....####..
+.#.#####.#.#####.#.#####.#.#####.#.#####.#.#####.#
+..######..######..######..######..######..######..
+.#######.#######.#######.#######.#######.#######.#
+........########........########........########..
+.#.#.#.#########.#.#.#.#########.#.#.#.#########.#
+..##..##########..##..##########..##..##########..
+.###.###########.###.###########.###.###########.#
+....############....############....############..
+.#.#############.#.#############.#.#############.#
+..##############..##############..##############..
+.###############.###############.###############.#
+................################................##
+.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.#.###
+..##..##..##..##################..##..##..##..####
+.###.###.###.###################.###.###.###.#####
+....####....####################....####....######
+.#.#####.#.#####################.#.#####.#.#######
+..######..######################..######..########
+.#######.#######################.#######.#########
+........########################........##########
+.#.#.#.#########################.#.#.#.###########
+..##..##########################..##..############
+.###.###########################.###.#############
+....############################....##############
+.#.#############################.#.###############
+..##############################..################
+.###############################.#################
+................................##################
+.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.###################
+..##..##..##..##..##..##..##..####################
+.###.###.###.###.###.###.###.#####################
+....####....####....####....######################
+.#.#####.#.#####.#.#####.#.#######################
+..######..######..######..########################
+.#######.#######.#######.#########################
+........########........##########################
+.#.#.#.#########.#.#.#.###########################
+..##..##########..##..############################
+.###.###########.###.#############################
+....############....##############################
+.#.#############.#.###############################
+..##############..################################
+.###############.#################################
+................##################################
+.#.#.#.#.#.#.#.###################################
+49 48 5
+................................................
+################################################
+################################.###############
+###############################..##############.
+##############################.#.#############.#
+#############################....############...
+############################.###.###########.###
+###########################..##..##########..##.
+##########################.#.#.#.#########.#.#.#
+#########################........########.......
+########################.#######.#######.#######
+#######################..######..######..######.
+######################.#.#####.#.#####.#.#####.#
+#####################....####....####....####...
+####################.###.###.###.###.###.###.###
+###################..##..##..##..##..##..##..##.
+##################.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#
+#################...............................
+################.###############################
+###############..##############################.
+##############.#.#############################.#
+#############....############################...
+############.###.###########################.###
+###########..##..##########################..##.
+##########.#.#.#.#########################.#.#.#
+#########........########################.......
+########.#######.#######################.#######
+#######..######..######################..######.
+######.#.#####.#.#####################.#.#####.#
+#####....####....####################....####...
+####.###.###.###.###################.###.###.###
+###..##..##..##..##################..##..##..##.
+##.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.#
+#................################...............
+.###############.###############.###############
+.##############..##############..##############.
+.#############.#.#############.#.#############.#
+.############....############....############...
+.###########.###.###########.###.###########.###
+.##########..##..##########..##..##########..##.
+.#########.#.#.#.#########.#.#.#.#########.#.#.#
+.########........########........########.......
+.#######.#######.#######.#######.#######.#######
+.######..######..######..######..######..######.
+.#####.#.#####.#.#####.#.#####.#.#####.#.#####.#
+.####....####....####....####....####....####...
+.###.###.###.###.###.###.###.###.###.###.###.###
+.##..##..##..##..##..##..##..##..##..##..##..##.
+.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#
+49 48 11
+................................................
+################################################
+################################.###############
+###############################..##############.
+##############################.#.#############.#
+#############################....############...
+############################.###.###########.###
+###########################..##..##########..##.
+##########################.#.#.#.#########.#.#.#
+#########################........########.......
+########################.#######.#######.#######
+#######################..######..######..######.
+######################.#.#####.#.#####.#.#####.#
+#####################....####....####....####...
+####################.###.###.###.###.###.###.###
+###################..##..##..##..##..##..##..##.
+##################.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#
+#################...............................
+################.###############################
+###############..##############################.
+##############.#.#############################.#
+#############....############################...
+############.###.###########################.###
+###########..##..##########################..##.
+##########.#.#.#.#########################.#.#.#
+#########........########################.......
+########.#######.#######################.#######
+#######..######..######################..######.
+######.#.#####.#.#####################.#.#####.#
+#####....####....####################....####...
+####.###.###.###.###################.###.###.###
+###..##..##..##..##################..##..##..##.
+##.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.#
+#................################...............
+.###############.###############.###############
+.##############..##############..##############.
+.#############.#.#############.#.#############.#
+.############....############....############...
+.###########.###.###########.###.###########.###
+.##########..##..##########..##..##########..##.
+.#########.#.#.#.#########.#.#.#.#########.#.#.#
+.########........########........########.......
+.#######.#######.#######.#######.#######.#######
+.######..######..######..######..######..######.
+.#####.#.#####.#.#####.#.#####.#.#####.#.#####.#
+.####....####....####....####....####....####...
+.###.###.###.###.###.###.###.###.###.###.###.###
+.##..##..##..##..##..##..##..##..##..##..##..##.
+.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#
+49 48 30
+................................................
+################################################
+################################.###############
+###############################..##############.
+##############################.#.#############.#
+#############################....############...
+############################.###.###########.###
+###########################..##..##########..##.
+##########################.#.#.#.#########.#.#.#
+#########################........########.......
+########################.#######.#######.#######
+#######################..######..######..######.
+######################.#.#####.#.#####.#.#####.#
+#####################....####....####....####...
+####################.###.###.###.###.###.###.###
+###################..##..##..##..##..##..##..##.
+##################.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#
+#################...............................
+################.###############################
+###############..##############################.
+##############.#.#############################.#
+#############....############################...
+############.###.###########################.###
+###########..##..##########################..##.
+##########.#.#.#.#########################.#.#.#
+#########........########################.......
+########.#######.#######################.#######
+#######..######..######################..######.
+######.#.#####.#.#####################.#.#####.#
+#####....####....####################....####...
+####.###.###.###.###################.###.###.###
+###..##..##..##..##################..##..##..##.
+##.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.#
+#................################...............
+.###############.###############.###############
+.##############..##############..##############.
+.#############.#.#############.#.#############.#
+.############....############....############...
+.###########.###.###########.###.###########.###
+.##########..##..##########..##..##########..##.
+.#########.#.#.#.#########.#.#.#.#########.#.#.#
+.########........########........########.......
+.#######.#######.#######.#######.#######.#######
+.######..######..######..######..######..######.
+.#####.#.#####.#.#####.#.#####.#.#####.#.#####.#
+.####....####....####....####....####....####...
+.###.###.###.###.###.###.###.###.###.###.###.###
+.##..##..##..##..##..##..##..##..##..##..##..##.
+.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#
+50 10 4
+..........
+#####.....
+#####.....
+.####.....
+#.###.....
+##.##.....
+..........
+#####.....
+#####.....
+####......
+###.#.....
+##.##.....
+..........
+#####.....
+#####.....
+.####.....
+#.###.....
+##.##.....
+..........
+#####.....
+#.........
+.#........
+..#.......
+...#......
+..........
+....#.....
+...#......
+..#.......
+.#........
+..........
+#####.....
+#####.....
+.####.....
+#.###.....
+##.##.....
+..........
+#####.....
+#####.....
+####......
+###.#.....
+##.##.....
+..........
+#####.....
+#####.....
+#####.....
+#####.....
+#####.....
+..........
+#####.....
+#####.....
+50 10 5
+..........
+#####.....
+...##.....
+..........
+..........
+..........
+#####.....
+##........
+..........
+..........
+..........
+#####.....
+##........
+..........
+..........
+..........
+#####.....
+#.........
+.#........
+..#.......
+...#......
+..........
+....#.....
+...#......
+..#.......
+.#........
+..........
+#####.....
+#####.....
+####......
+###.#.....
+##.##.....
+..........
+#####.....
+#####.....
+.####.....
+#.###.....
+##.##.....
+..........
+#####.....
+#.........
+.#........
+..#.......
+...#......
+..........
+....#.....
+...#......
+..#.......
+.#........
+..........
+50 10 4
+..........
+#####.....
+#.........
+.#........
+..#.......
+...#......
+..........
+....#.....
+...#......
+..#.......
+.#........
+..........
+#####.....
+#####.....
+####......
+###.#.....
+##.##.....
+..........
+#####.....
+#####.....
+#####.....
+#####.....
+#####.....
+..........
+#####.....
+##........
+..........
+..........
+..........
+#####.....
+#####.....
+#####.....
+#####.....
+#####.....
+..........
+#####.....
+##........
+..........
+..........
+..........
+#####.....
+##........
+..........
+..........
+..........
+#####.....
+#####.....
+####......
+###.#.....
+##.##.....
+50 10 5
+..........
+#####.....
+##........
+..........
+..........
+..........
+#####.....
+#####.....
+#####.....
+#####.....
+#####.....
+..........
+#####.....
+##........
+..........
+..........
+..........
+#####.....
+#####.....
+.####.....
+#.###.....
+##.##.....
+..........
+#####.....
+#####.....
+####......
+###.#.....
+##.##.....
+..........
+#####.....
+...##.....
+..........
+..........
+..........
+#####.....
+#####.....
+#####.....
+#####.....
+#####.....
+..........
+#####.....
+#####.....
+####......
+###.#.....
+##.##.....
+..........
+#####.....
+#.........
+.#........
+..#.......
+35 41 11
+........................................#
+#########################################
+##.......####.#..######.##.###...#####.##
+.##.#.#...#.###...##...#..#.#..##..######
+.#...##....#.###...##.#.##.#.###...####..
+.###...##.##..####..##.#.#####.#...#.#...
+..#...#.##..#...##..###..##...###...#.#..
+.#.####.##.##.###.....#..#..##.###..#.##.
+##..##..#...##.###.#...####...#..##....#.
+#....#..##.#.#.#......####.#.....#...#.#.
+#.##.#####......####......###.###..###.#.
+##..######...######.##.#.##.......#...#..
+.#.....###......#####...#..#.#.###...##.#
+...##.##.##..##...####.#.###...#..#.##..#
+....###.#.#..#...###..###.###..#####...##
+....##.##..#.#.#.#.#####...##..######....
+#.#.##.##.#...#####....##.#.#...#.##.#..#
+#.##.##.##.#...#.#.####...#..#.......##.#
+.##.#..###..####.#..###...#...###.##.##..
+.####.#.#######.#......##....#######..##.
+.#..#...#.#.####..#.######.#.#..##.#.####
+...#.###..#.##.#.###.#.#....#.###.#.#...#
+..#.#.####....###...#..##..#####.#.######
+#......####.#..##.....#####.##...###.....
+##..##..####......#.#.##..##...###.#.....
+#.#.####.####.......##......####.###..###
+##...###.#...#.####.##.#........##..#.###
+....#..#####.#....#.##...###..#####.#.###
+####.#.###.........####..###.#..######.##
+.#..########..###..#####.######.###.#...#
+.##..#.##..#....####...#.###.....##.#...#
+##.#..##.##..##.##...##.##.##.#.##.######
+..#..#..####..##...###.#...#.....###..#..
+####.#..####.###...##..#.#.###.#..#..####
+#...#..#..#.#...#...#.#.##.##.#...###.#.#
+31 41 12
+.........................................
+#################################.....#..
+.#..........#.......#....##....#...#.....
+.........#......#.#...#...#..#........#..
+#......#.#......#...#.........#.........#
+.................#....#...#...##.........
+.......#.#..#.....#..#.....##........#..#
+.....#..#..#......................#..#...
+..............#....##....#...#..#..#....#
+...#.#.#........##.#..#..........#......#
+...#......#..#......#....#....#....#.....
+......##.#...#.##..........#.............
+.......##.#.#..#...#.....#.#..#..........
+.........#..........#.................#..
+.#....#..#......#.......#.#..#..####.##..
+#...#................##...#..........#...
+..........#...#.#..#..###..#...#.........
+........##.......#.....##.#......#...#.#.
+.#.....#.#..#.....#.#..##.#.#...........#
+.......####...#.#.........#...#.#........
+.##.................#.#.#................
+.....###.#...#..#.#..............#.......
+.....#...#.....#........#....##.......#..
+.........#...........##.#..#.....##......
+...#....#.........#...#...#.#............
+.....#..............#..............#....#
+#.##...#.............#....#.#..#......#..
+........#...#...##.............#.#.......
+.......#.......#..............#..........
+.....#.........#.........#..#...#..#....#
+####..#...#.#.....##...........#.#.#.#.#.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/output6890943.txt	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,50 @@
+Case #1: Yes 2
+Case #2: Yes 2
+Case #3: Yes 1
+Case #4: Yes 0
+Case #5: No
+Case #6: No
+Case #7: Yes 6
+Case #8: Yes 6
+Case #9: No
+Case #10: Yes 1
+Case #11: Yes 6
+Case #12: Yes 0
+Case #13: No
+Case #14: Yes 22
+Case #15: Yes 1225
+Case #16: Yes 178
+Case #17: No
+Case #18: Yes 1
+Case #19: Yes 7
+Case #20: Yes 2
+Case #21: Yes 1
+Case #22: No
+Case #23: Yes 3
+Case #24: Yes 1
+Case #25: Yes 7
+Case #26: No
+Case #27: Yes 2
+Case #28: Yes 4
+Case #29: Yes 2
+Case #30: Yes 1
+Case #31: Yes 2
+Case #32: Yes 20
+Case #33: Yes 161
+Case #34: Yes 48
+Case #35: No
+Case #36: Yes 218
+Case #37: Yes 51
+Case #38: Yes 247
+Case #39: Yes 32
+Case #40: Yes 31
+Case #41: Yes 31
+Case #42: Yes 25
+Case #43: Yes 17
+Case #44: Yes 2
+Case #45: Yes 61
+Case #46: Yes 25
+Case #47: No
+Case #48: No
+Case #49: Yes 8
+Case #50: Yes 0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6796786/Test6796786.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6796786
+ * @summary invalid FP identity transform - (a - b) -> b - a
+ *
+ * @run main/othervm -Xbatch Test6796786
+ */
+
+public class Test6796786 {
+    static volatile float d1;
+    static volatile float d2;
+
+    public static void main(String[] args) {
+        int total = 0;
+        for (int i = 0; i < 100000; i++) {
+            if (Float.floatToRawIntBits(- (d1 - d2)) == Float.floatToRawIntBits(-0.0f)) {
+                total++;
+            }
+        }
+        if (total != 100000) {
+            throw new InternalError();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6956668/Test6956668.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6956668
+ * @summary misbehavior of XOR operator (^) with int
+ *
+ * @run main/othervm -Xbatch Test6956668
+ */
+
+
+public class Test6956668 {
+
+   public static int bitTest() {
+      int result = 0;
+
+      int testValue = 73;
+      int bitCount = Integer.bitCount(testValue);
+
+      if (testValue != 0) {
+         int gap = Long.numberOfTrailingZeros(testValue);
+         testValue >>>= gap;
+
+         while (testValue != 0) {
+            result++;
+
+            if ((testValue ^= 0x1) != 0) {
+               gap = Long.numberOfTrailingZeros(testValue);
+               testValue >>>= gap;
+            }
+         }
+      }
+
+      if (bitCount != result) {
+         System.out.println("ERROR: " + bitCount + " != " + result);
+         System.exit(97);
+      }
+
+      return (result);
+   }
+
+   public static void main(String[] args) {
+      for (int i = 0; i < 100000; i++) {
+         int ct = bitTest();
+      }
+   }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7041100/Test7041100.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7041100
+ * @summary The load in String.equals intrinsic executed before null check
+ *
+ * @run main/othervm -Xbatch Test7041100 abc def
+ */
+
+public class Test7041100 {
+
+    static String n = null;
+    public static void main(String[] args) throws Exception {
+        for (int i = 0; i < 10000; i++) {
+            stringEQ(args[0], args[1]);
+            stringEQ(args[0], args[0]);
+            stringEQ(args[0], n);
+            stringEQ(n, args[0]);
+        }
+    }
+
+    public static boolean stringEQ(String a, String b) {
+        if (a == b)
+            return true;
+        if (a == null || b == null)
+            return false;
+        else
+            return a.equals(b);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7042153/Test7042153.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7042153
+ * @summary Bad folding of IfOps with unloaded constant arguments in C1
+ *
+ * @run main/othervm -Xcomp Test7042153
+ */
+
+import java.lang.reflect.*;
+
+public class Test7042153 {
+  static public class Bar { }
+  static public class Foo { }
+
+  static volatile boolean z;
+  public static void main(String [] args) {
+    Class cx = Bar.class;
+    Class cy = Foo.class;
+    z = (cx == cy);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7047069/Test7047069.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7047069
+ * @summary Array can dynamically change size when assigned to an object field
+ *
+ * @run main/othervm -Xbatch Test7047069
+ */
+
+import java.util.*;
+import java.awt.geom.*;
+
+public class Test7047069 {
+    static boolean verbose;
+
+    static final int GROW_SIZE = 24;    // Multiple of cubic & quad curve size
+
+    float squareflat;               // Square of the flatness parameter
+                                    // for testing against squared lengths
+
+    int limit;                      // Maximum number of recursion levels
+
+    float hold[] = new float[14];   // The cache of interpolated coords
+                                    // Note that this must be long enough
+                                    // to store a full cubic segment and
+                                    // a relative cubic segment to avoid
+                                    // aliasing when copying the coords
+                                    // of a curve to the end of the array.
+                                    // This is also serendipitously equal
+                                    // to the size of a full quad segment
+                                    // and 2 relative quad segments.
+
+    int holdEnd;                    // The index of the last curve segment
+                                    // being held for interpolation
+
+    int holdIndex;                  // The index of the curve segment
+                                    // that was last interpolated.  This
+                                    // is the curve segment ready to be
+                                    // returned in the next call to
+                                    // currentSegment().
+
+    int levels[];                   // The recursion level at which
+                                    // each curve being held in storage
+                                    // was generated.
+
+    int levelIndex;                 // The index of the entry in the
+                                    // levels array of the curve segment
+                                    // at the holdIndex
+
+    public static void subdivide(float src[], int srcoff,
+                                 float left[], int leftoff,
+                                 float right[], int rightoff)
+    {
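+        // Midpoint (de Casteljau) subdivision of the quadratic segment stored
+        // in src[srcoff..srcoff+5] (x1, y1, ctrlx, ctrly, x2, y2); the two
+        // halves are written to left[leftoff..] and right[rightoff..].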
+        float x1 = src[srcoff + 0];
+        float y1 = src[srcoff + 1];
+        float ctrlx = src[srcoff + 2];
+        float ctrly = src[srcoff + 3];
+        float x2 = src[srcoff + 4];
+        float y2 = src[srcoff + 5];
+        if (left != null) {
+            left[leftoff + 0] = x1;
+            left[leftoff + 1] = y1;
+        }
+        if (right != null) {
+            right[rightoff + 4] = x2;
+            right[rightoff + 5] = y2;
+        }
+        x1 = (x1 + ctrlx) / 2f;
+        y1 = (y1 + ctrly) / 2f;
+        x2 = (x2 + ctrlx) / 2f;
+        y2 = (y2 + ctrly) / 2f;
+        ctrlx = (x1 + x2) / 2f;
+        ctrly = (y1 + y2) / 2f;
+        if (left != null) {
+            left[leftoff + 2] = x1;
+            left[leftoff + 3] = y1;
+            left[leftoff + 4] = ctrlx;
+            left[leftoff + 5] = ctrly;
+        }
+        if (right != null) {
+            right[rightoff + 0] = ctrlx;
+            right[rightoff + 1] = ctrly;
+            right[rightoff + 2] = x2;
+            right[rightoff + 3] = y2;
+        }
+    }
+
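+    // Squared "flatness" of the quad segment at (coords, offset): the squared
+    // distance of the control point from the line segment joining the two
+    // endpoints.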
+    public static double getFlatnessSq(float coords[], int offset) {
+        return Line2D.ptSegDistSq(coords[offset + 0], coords[offset + 1],
+                                  coords[offset + 4], coords[offset + 5],
+                                  coords[offset + 2], coords[offset + 3]);
+    }
+
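+    // Builds a flattener over a single random quadratic segment stored at the
+    // tail of hold[]; next() then subdivides it until it is flat enough or the
+    // recursion limit is reached.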
+    public Test7047069() {
+        this.squareflat = .0001f * .0001f;
+        holdIndex = hold.length - 6;
+        holdEnd = hold.length - 2;
+        hold[holdIndex + 0] = (float) (Math.random() * 100);
+        hold[holdIndex + 1] = (float) (Math.random() * 100);
+        hold[holdIndex + 2] = (float) (Math.random() * 100);
+        hold[holdIndex + 3] = (float) (Math.random() * 100);
+        hold[holdIndex + 4] = (float) (Math.random() * 100);
+        hold[holdIndex + 5] = (float) (Math.random() * 100);
+        levelIndex = 0;
+        this.limit = 10;
+        this.levels = new int[limit + 1];
+    }
+
+    /*
+     * Ensures that the hold array can hold up to (want) more values.
+     * It is currently holding (hold.length - holdIndex) values.
+     */
+    void ensureHoldCapacity(int want) {
+        if (holdIndex - want < 0) {
+            int have = hold.length - holdIndex;
+            int newsize = hold.length + GROW_SIZE;
+            float newhold[] = new float[newsize];
+            System.arraycopy(hold, holdIndex,
+                     newhold, holdIndex + GROW_SIZE,
+                     have);
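+            // The prints below bracket the field assignment "hold = newhold";
+            // under the bug summarized above, the length of the array observed
+            // through the field could appear to change across that assignment.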
+            if (verbose) System.err.println("old hold = "+hold+"["+hold.length+"]");
+            if (verbose) System.err.println("replacement hold = "+newhold+"["+newhold.length+"]");
+            hold = newhold;
+            if (verbose) System.err.println("new hold = "+hold+"["+hold.length+"]");
+            if (verbose) System.err.println("replacement hold still = "+newhold+"["+newhold.length+"]");
+            holdIndex += GROW_SIZE;
+            holdEnd += GROW_SIZE;
+        }
+    }
+
+    private boolean next() {
+        if (holdIndex >= holdEnd) {
+            return false;
+        }
+
+        int level = levels[levelIndex];
+        while (level < limit) {
+            if (getFlatnessSq(hold, holdIndex) < squareflat) {
+                break;
+            }
+
+            ensureHoldCapacity(4);
+            subdivide(hold, holdIndex,
+                      hold, holdIndex - 4,
+                      hold, holdIndex);
+            holdIndex -= 4;
+
+            // Now that we have subdivided, we have constructed
+            // two curves of one depth lower than the original
+            // curve.  One of those curves is in the place of
+            // the former curve and one of them is in the next
+            // set of held coordinate slots.  We now set both
+            // curves' level values to the next higher level.
+            level++;
+            levels[levelIndex] = level;
+            levelIndex++;
+            levels[levelIndex] = level;
+        }
+
+        // This curve segment is flat enough, or it is too deep
+        // in recursion levels to try to flatten any more.  The
+        // two coordinates at holdIndex+4 and holdIndex+5 now
+            // contain the endpoint of the curve, which can be the
+        // endpoint of an approximating line segment.
+        holdIndex += 4;
+        levelIndex--;
+        return true;
+    }
+
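+    // Repeat enough iterations that -Xbatch compiles next() and
+    // ensureHoldCapacity(), so the array-growth path runs in compiled code.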
+    public static void main(String argv[]) {
+        verbose = (argv.length > 0);
+        for (int i = 0; i < 100000; i++) {
+            Test7047069 st = new Test7047069();
+            while (st.next()) {}
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7048332/Test7048332.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7048332
+ * @summary Cadd_cmpLTMask doesn't handle 64-bit tmp register properly
+ *
+ * @run main/othervm -Xbatch Test7048332
+ */
+
+
+public class Test7048332 {
+
+  static int capacity = 2;
+  static int first = 1;
+  static int last = 2;
+
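+  // The subtract-then-conditionally-add pattern below is intended to be
+  // matched by C2's Cadd_cmpLTMask node (per the bug summary); the unused
+  // arguments presumably vary how registers get assigned.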
+  static int test(int i1, int i2, int i3, int i4, int i5, int i6) {
+    final int result;
+    if (last >= first) {
+      result = last - first;
+    } else {
+      result = last - first + capacity;
+    }
+    return result;
+  }
+
+  public static void main(String [] args) {
+    for (int i = 0; i < 11000; i++) {
+      last = (i & 1) << 1; // 0 or 2
+      int k = test(1, 2, 3, 4, 5, 6);
+      if (k != 1) {
+        System.out.println("FAILED: " + k + " != 1");
+        System.exit(97);
+      }
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7052494/Test7052494.java	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7052494
+ * @summary Eclipse test fails on JDK 7 b142
+ *
+ * @run main/othervm -Xbatch Test7052494
+ */
+
+
+public class Test7052494 {
+
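+  // In test1-test4 the while condition (i++ != 0 / i-- != 0) only becomes
+  // false once the counter wraps around, so it is the break that actually
+  // bounds each loop; that is the shape these methods give the compiler.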
+  static int test1(int i, int limit) {
+    int result = 0;
+    while (i++ != 0) {
+      if (result >= limit)
+        break;
+      result = i*2;
+    }
+    return result;
+  }
+
+  static int test2(int i, int limit) {
+    int result = 0;
+    while (i-- != 0) {
+      if (result <= limit)
+        break;
+      result = i*2;
+    }
+    return result;
+  }
+
+  static void test3(int i, int limit, int arr[]) {
+    while (i++ != 0) {
+      if (arr[i-1] >= limit)
+        break;
+      arr[i] = i*2;
+    }
+  }
+
+  static void test4(int i, int limit, int arr[]) {
+    while (i-- != 0) {
+      if (arr[arr.length + i + 1] <= limit)
+        break;
+      arr[arr.length + i] = i*2;
+    }
+  }
+
+  // Empty loop rolls through MAXINT if i > 0
+  static int test5(int i) {
+    int result = 0;
+    while (i++ != 0) {
+      result = i*2;
+    }
+    return result;
+  }
+
+  // Empty loop rolls through MININT if i < 0
+  static int test6(int i) {
+    int result = 0;
+    while (i-- != 0) {
+      result = i*2;
+    }
+    return result;
+  }
+
+  public static void main(String [] args) {
+    boolean failed = false;
+    int[] arr = new int[8];
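+    // ar3/ar4 below are the expected contents of arr after test3(1, 10, arr)
+    // and test4(-1, -10, arr) when the break is taken exactly at the limit.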
+    int[] ar3 = { 0, 0, 4, 6, 8, 10, 0, 0 };
+    int[] ar4 = { 0, 0, 0, -10, -8, -6, -4, 0 };
+    for (int i = 0; i < 11000; i++) {
+      int k = test1(1, 10);
+      if (k != 10) {
+        System.out.println("FAILED: " + k + " != 10");
+        failed = true;
+        break;
+      }
+    }
+    for (int i = 0; i < 11000; i++) {
+      int k = test2(-1, -10);
+      if (k != -10) {
+        System.out.println("FAILED: " + k + " != -10");
+        failed = true;
+        break;
+      }
+    }
+    for (int i = 0; i < 11000; i++) {
+      java.util.Arrays.fill(arr, 0);
+      test3(1, 10, arr);
+      if (!java.util.Arrays.equals(arr,ar3)) {
+        System.out.println("FAILED: arr = { " + arr[0] + ", "
+                                              + arr[1] + ", "
+                                              + arr[2] + ", "
+                                              + arr[3] + ", "
+                                              + arr[4] + ", "
+                                              + arr[5] + ", "
+                                              + arr[6] + ", "
+                                              + arr[7] + " }");
+        failed = true;
+        break;
+      }
+    }
+    for (int i = 0; i < 11000; i++) {
+      java.util.Arrays.fill(arr, 0);
+      test4(-1, -10, arr);
+      if (!java.util.Arrays.equals(arr,ar4)) {
+        System.out.println("FAILED: arr = { " + arr[0] + ", "
+                                              + arr[1] + ", "
+                                              + arr[2] + ", "
+                                              + arr[3] + ", "
+                                              + arr[4] + ", "
+                                              + arr[5] + ", "
+                                              + arr[6] + ", "
+                                              + arr[7] + " }");
+        failed = true;
+        break;
+      }
+    }
+    for (int i = 0; i < 11000; i++) {
+      int k = test5(1);
+      if (k != 0) {
+        System.out.println("FAILED: " + k + " != 0");
+        failed = true;
+        break;
+      }
+    }
+    for (int i = 0; i < 11000; i++) {
+      int k = test6(-1);
+      if (k != 0) {
+        System.out.println("FAILED: " + k + " != 0");
+        failed = true;
+        break;
+      }
+    }
+    if (failed)
+      System.exit(97);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/7020373/Test7020373.sh	Wed Jul 27 17:32:44 2011 -0700
@@ -0,0 +1,86 @@
+#!/bin/sh
+
+##
+## @test
+## @bug 7020373 7055247
+## @key cte_test
+## @summary JSR rewriting can overflow memory address size variables
+## @ignore Ignore it until 7053586 fixed
+## @run shell Test7020373.sh
+##
+
+if [ "${TESTSRC}" = "" ]
+then TESTSRC=.
+fi
+
+if [ "${TESTJAVA}" = "" ]
+then
+  PARENT=`dirname \`which java\``
+  TESTJAVA=`dirname ${PARENT}`
+  echo "TESTJAVA not set, selecting " ${TESTJAVA}
+  echo "If this is incorrect, try setting the variable manually."
+fi
+
+if [ "${TESTCLASSES}" = "" ]
+then
+  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+
+BIT_FLAG=""
+
+# set platform-dependent variables
+OS=`uname -s`
+case "$OS" in
+  SunOS | Linux )
+    NULL=/dev/null
+    PS=":"
+    FS="/"
+    ## for solaris, linux it's HOME
+    FILE_LOCATION=$HOME
+    if [ -f ${FILE_LOCATION}${FS}JDK64BIT -a ${OS} = "SunOS" ]
+    then
+        BIT_FLAG=`cat ${FILE_LOCATION}${FS}JDK64BIT | grep -v '^#'`
+    fi
+    ;;
+  Windows_* )
+    NULL=NUL
+    PS=";"
+    FS="\\"
+    ;;
+  * )
+    echo "Unrecognized system!"
+    exit 1;
+    ;;
+esac
+
+JEMMYPATH=${CPAPPEND}
+CLASSPATH=.${PS}${TESTCLASSES}${PS}${JEMMYPATH} ; export CLASSPATH
+
+THIS_DIR=`pwd`
+
+${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} -version
+
+${TESTJAVA}${FS}bin${FS}jar xvf ${TESTSRC}${FS}testcase.jar
+
+${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} OOMCrashClass4000_1 > test.out 2>&1
+
+cat test.out
+
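+# OOMCrashClass4000_1 (extracted from testcase.jar above) is meant to exercise
+# the JSR-rewriting overflow from the summary; the checks below pass the test
+# only if the VM rejects the class with a java.lang.LinkageError instead of
+# crashing (SIGSEGV / "An unexpected error has been detected").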
+egrep "SIGSEGV|An unexpected error has been detected" test.out
+
+if [ $? = 0 ]
+then
+    echo "Test Failed"
+    exit 1
+else
+    grep "java.lang.LinkageError" test.out
+    if [ $? = 0 ]
+    then
+        echo "Test Passed"
+        exit 0
+    else
+        echo "Test Failed"
+        exit 1
+    fi
+fi
Binary file test/runtime/7020373/testcase.jar has changed