changeset 17835:5186bc5047c1 hs25.20-b09

Merge
author amurillo
date Fri, 04 Apr 2014 09:49:19 -0700
parents d5f0404d965f b257acb35d3e
children 4d73f1e99f97
diffstat 32 files changed, 634 insertions(+), 198 deletions(-)
--- a/make/hotspot_version	Wed Apr 02 09:31:10 2014 -0700
+++ b/make/hotspot_version	Fri Apr 04 09:49:19 2014 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=20
-HS_BUILD_NUMBER=08
+HS_BUILD_NUMBER=09
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/linux/Makefile	Wed Apr 02 09:31:10 2014 -0700
+++ b/make/linux/Makefile	Fri Apr 04 09:49:19 2014 -0700
@@ -66,8 +66,8 @@
     FORCE_TIERED=1
   endif
 endif
-# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
-ifneq (,$(filter $(ARCH),ppc64 pp64le))
+# C1 is not ported on ppc64, so we cannot build a tiered VM:
+ifeq ($(ARCH),ppc64)
   FORCE_TIERED=0
 endif
 
--- a/make/linux/makefiles/defs.make	Wed Apr 02 09:31:10 2014 -0700
+++ b/make/linux/makefiles/defs.make	Fri Apr 04 09:49:19 2014 -0700
@@ -33,6 +33,11 @@
 # ARCH can be set explicitly in spec.gmk
 ifndef ARCH
   ARCH := $(shell uname -m)
+  # Fold little endian PowerPC64 into big-endian (if ARCH is set in
+  # hotspot-spec.gmk, this will be done by the configure script).
+  ifeq ($(ARCH),ppc64le)
+    ARCH := ppc64
+  endif
 endif
 
 PATH_SEP ?= :
--- a/make/linux/makefiles/ppc64.make	Wed Apr 02 09:31:10 2014 -0700
+++ b/make/linux/makefiles/ppc64.make	Fri Apr 04 09:49:19 2014 -0700
@@ -26,14 +26,26 @@
 # make c code know it is on a 64 bit platform.
 CFLAGS += -D_LP64=1
 
-# fixes `relocation truncated to fit' error for gcc 4.1.
-CFLAGS += -mminimal-toc
+ifeq ($(origin OPENJDK_TARGET_CPU_ENDIAN),undefined)
+  # This can happen during hotspot standalone build. Set endianness from
+  # uname. We assume build and target machines are the same.
+  OPENJDK_TARGET_CPU_ENDIAN:=$(if $(filter ppc64le,$(shell uname -m)),little,big)
+endif
 
-# finds use ppc64 instructions, but schedule for power5
-CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+ifeq ($(filter $(OPENJDK_TARGET_CPU_ENDIAN),big little),)
+  $(error OPENJDK_TARGET_CPU_ENDIAN value should be 'big' or 'little')
+endif
 
-# let linker find external 64 bit libs.
-LFLAGS_VM += -L/lib64
+ifeq ($(OPENJDK_TARGET_CPU_ENDIAN),big)
+  # fixes `relocation truncated to fit' error for gcc 4.1.
+  CFLAGS += -mminimal-toc
 
-# specify lib format.
-LFLAGS_VM +=  -Wl,-melf64ppc
+  # finds use ppc64 instructions, but schedule for power5
+  CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+else
+  # Little endian machine uses ELFv2 ABI.
+  CFLAGS += -DVM_LITTLE_ENDIAN -DABI_ELFv2
+
+  # Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
+  CFLAGS += -mcpu=power7 -mtune=power8 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+endif
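
The little-endian branch above passes -DVM_LITTLE_ENDIAN (and -DABI_ELFv2) to every HotSpot compile, and the rest of this changeset keys off that macro. As an illustration only (not part of the changeset), a tiny C++ program built with or without -DVM_LITTLE_ENDIAN can confirm that the flag agrees with the byte order of the machine it actually runs on; the helper name host_is_little_endian is made up for the sketch:

    // Illustration: check that -DVM_LITTLE_ENDIAN (as handed down by
    // ppc64.make) matches the byte order of the host this runs on.
    #include <cstdint>
    #include <cstdio>

    static bool host_is_little_endian() {
      const uint16_t probe = 0x0102;
      // On a little-endian host the low-order byte 0x02 is stored first.
      return *reinterpret_cast<const unsigned char*>(&probe) == 0x02;
    }

    int main() {
    #if defined(VM_LITTLE_ENDIAN)
      const bool flag_says_little = true;
    #else
      const bool flag_says_little = false;
    #endif
      std::printf("flag: %s, host: %s -> %s\n",
                  flag_says_little ? "little" : "big",
                  host_is_little_endian() ? "little" : "big",
                  flag_says_little == host_is_little_endian() ? "consistent" : "MISMATCH");
      return 0;
    }
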
--- a/make/windows/makefiles/defs.make	Wed Apr 02 09:31:10 2014 -0700
+++ b/make/windows/makefiles/defs.make	Fri Apr 04 09:49:19 2014 -0700
@@ -260,7 +260,6 @@
       EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.map
     endif
   endif
-  EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
 endif
 ifeq ($(JVM_VARIANT_CLIENT),true)
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
@@ -275,6 +274,8 @@
   endif
 endif
 
+EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
+
 ifeq ($(BUILD_WIN_SA), 1)
   EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.$(LIBRARY_SUFFIX)
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
--- a/src/cpu/ppc/vm/assembler_ppc.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/cpu/ppc/vm/assembler_ppc.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -1025,15 +1025,14 @@
   }
 
   static void set_imm(int* instr, short s) {
-    short* p = ((short *)instr) + 1;
-    *p = s;
+    // imm is always in the lower 16 bits of the instruction,
+    // so this is endian-neutral. Same for the get_imm below.
+    uint32_t w = *(uint32_t *)instr;
+    *instr = (int)((w & ~0x0000FFFF) | (s & 0x0000FFFF));
   }
 
   static int get_imm(address a, int instruction_number) {
-    short imm;
-    short *p =((short *)a)+2*instruction_number+1;
-    imm = *p;
-    return (int)imm;
+    return (short)((int *)a)[instruction_number];
   }
 
   static inline int hi16_signed(  int x) { return (int)(int16_t)(x >> 16); }
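
The rewritten set_imm()/get_imm() above operate on the whole 32-bit instruction word and mask or extract its low 16 bits, so the result no longer depends on how the host orders the halfwords in memory; the old code patched through a short* at offset +1 and silently assumed big-endian layout. A standalone sketch of the same masking idea, with simplified types that are not the HotSpot accessors:

    // Standalone illustration: patch a 16-bit immediate into a 32-bit PPC
    // instruction word in an endian-neutral way.
    #include <cstdint>
    #include <cstdio>

    // Replace the low 16 bits of *instr with s, leaving the opcode bits intact.
    // Reading and writing the whole 32-bit word makes the result independent
    // of how the host lays out that word's bytes in memory.
    static void set_imm(uint32_t* instr, int16_t s) {
      uint32_t w = *instr;
      *instr = (w & ~0x0000FFFFu) | (static_cast<uint16_t>(s) & 0x0000FFFFu);
    }

    // Read the immediate back, sign-extended, from the low 16 bits.
    static int get_imm(const uint32_t* code, int instruction_number) {
      return static_cast<int16_t>(static_cast<uint16_t>(code[instruction_number] & 0xFFFFu));
    }

    int main() {
      uint32_t code[1] = { 0x38600000u };  // li r3, 0 (addi r3, r0, 0)
      set_imm(&code[0], -42);
      std::printf("instr = 0x%08x, imm = %d\n", code[0], get_imm(code, 0));
      return 0;
    }
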
--- a/src/cpu/ppc/vm/bytes_ppc.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/cpu/ppc/vm/bytes_ppc.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -35,6 +35,126 @@
 
   // Can I count on address always being a pointer to an unsigned char? Yes.
 
+#if defined(VM_LITTLE_ENDIAN)
+
+  // Returns true, if the byte ordering used by Java is different from the native byte ordering
+  // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
+  static inline bool is_Java_byte_ordering_different() { return true; }
+
+  // Forward declarations of the compiler-dependent implementation
+  static inline u2 swap_u2(u2 x);
+  static inline u4 swap_u4(u4 x);
+  static inline u8 swap_u8(u8 x);
+
+  static inline u2   get_native_u2(address p) {
+    return (intptr_t(p) & 1) == 0
+             ?   *(u2*)p
+             :   ( u2(p[1]) << 8 )
+               | ( u2(p[0])      );
+  }
+
+  static inline u4   get_native_u4(address p) {
+    switch (intptr_t(p) & 3) {
+     case 0:  return *(u4*)p;
+
+     case 2:  return (  u4( ((u2*)p)[1] ) << 16  )
+                   | (  u4( ((u2*)p)[0] )        );
+
+    default:  return ( u4(p[3]) << 24 )
+                   | ( u4(p[2]) << 16 )
+                   | ( u4(p[1]) <<  8 )
+                   |   u4(p[0]);
+    }
+  }
+
+  static inline u8   get_native_u8(address p) {
+    switch (intptr_t(p) & 7) {
+      case 0:  return *(u8*)p;
+
+      case 4:  return (  u8( ((u4*)p)[1] ) << 32  )
+                    | (  u8( ((u4*)p)[0] )        );
+
+      case 2:  return (  u8( ((u2*)p)[3] ) << 48  )
+                    | (  u8( ((u2*)p)[2] ) << 32  )
+                    | (  u8( ((u2*)p)[1] ) << 16  )
+                    | (  u8( ((u2*)p)[0] )        );
+
+     default:  return ( u8(p[7]) << 56 )
+                    | ( u8(p[6]) << 48 )
+                    | ( u8(p[5]) << 40 )
+                    | ( u8(p[4]) << 32 )
+                    | ( u8(p[3]) << 24 )
+                    | ( u8(p[2]) << 16 )
+                    | ( u8(p[1]) <<  8 )
+                    |   u8(p[0]);
+    }
+  }
+
+
+
+  static inline void put_native_u2(address p, u2 x) {
+    if ( (intptr_t(p) & 1) == 0 )  *(u2*)p = x;
+    else {
+      p[1] = x >> 8;
+      p[0] = x;
+    }
+  }
+
+  static inline void put_native_u4(address p, u4 x) {
+    switch ( intptr_t(p) & 3 ) {
+    case 0:  *(u4*)p = x;
+              break;
+
+    case 2:  ((u2*)p)[1] = x >> 16;
+             ((u2*)p)[0] = x;
+             break;
+
+    default: ((u1*)p)[3] = x >> 24;
+             ((u1*)p)[2] = x >> 16;
+             ((u1*)p)[1] = x >>  8;
+             ((u1*)p)[0] = x;
+             break;
+    }
+  }
+
+  static inline void put_native_u8(address p, u8 x) {
+    switch ( intptr_t(p) & 7 ) {
+    case 0:  *(u8*)p = x;
+             break;
+
+    case 4:  ((u4*)p)[1] = x >> 32;
+             ((u4*)p)[0] = x;
+             break;
+
+    case 2:  ((u2*)p)[3] = x >> 48;
+             ((u2*)p)[2] = x >> 32;
+             ((u2*)p)[1] = x >> 16;
+             ((u2*)p)[0] = x;
+             break;
+
+    default: ((u1*)p)[7] = x >> 56;
+             ((u1*)p)[6] = x >> 48;
+             ((u1*)p)[5] = x >> 40;
+             ((u1*)p)[4] = x >> 32;
+             ((u1*)p)[3] = x >> 24;
+             ((u1*)p)[2] = x >> 16;
+             ((u1*)p)[1] = x >>  8;
+             ((u1*)p)[0] = x;
+    }
+  }
+
+  // Efficient reading and writing of unaligned unsigned data in Java byte ordering
+  // (i.e. big-endian ordering). Since this is the little-endian build, a byte-order
+  // reversal via swap_uN is required on top of the native accessors above.
+  static inline u2   get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
+  static inline u4   get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
+  static inline u8   get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }
+
+  static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, swap_u2(x)); }
+  static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, swap_u4(x)); }
+  static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, swap_u8(x)); }
+
+#else // !defined(VM_LITTLE_ENDIAN)
+
   // Returns true, if the byte ordering used by Java is different from the nativ byte ordering
   // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
   static inline bool is_Java_byte_ordering_different() { return false; }
@@ -150,6 +270,12 @@
   static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, x); }
   static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, x); }
   static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, x); }
+
+#endif // VM_LITTLE_ENDIAN
 };
 
+#if defined(TARGET_OS_ARCH_linux_ppc)
+#include "bytes_linux_ppc.inline.hpp"
+#endif
+
 #endif // CPU_PPC_VM_BYTES_PPC_HPP
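
On the little-endian build the new branch of bytes_ppc.hpp composes two steps: an alignment-aware native access and a byte swap (the swap_uN routines come from the new bytes_linux_ppc.inline.hpp added below), so Java's big-endian stream order is preserved. A standalone sketch of that composition for a 32-bit value, assuming a little-endian Linux host and using memcpy for the unaligned load instead of the switch on the low address bits:

    // Sketch (not the HotSpot classes): on a little-endian host, a Java
    // (big-endian) value is an unaligned native load followed by a byte swap,
    // mirroring get_Java_u4 = swap_u4(get_native_u4).
    #include <byteswap.h>   // glibc bswap_32
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    typedef uint32_t u4;

    // Unaligned native load; memcpy avoids alignment traps.
    static inline u4 get_native_u4(const unsigned char* p) {
      u4 x;
      std::memcpy(&x, p, sizeof(x));
      return x;
    }

    // Java byte order is big-endian, so the little-endian host must swap.
    static inline u4 get_Java_u4(const unsigned char* p) {
      return bswap_32(get_native_u4(p));
    }

    int main() {
      // A class-file style big-endian encoding of 0xCAFEBABE at an odd offset.
      unsigned char buf[5] = { 0x00, 0xCA, 0xFE, 0xBA, 0xBE };
      std::printf("0x%08x\n", get_Java_u4(buf + 1));  // prints 0xcafebabe on LE hosts
      return 0;
    }
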
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -801,7 +801,13 @@
         if (UseCompressedOops && !wide) {
           __ movl(as_Address(addr), (int32_t)NULL_WORD);
         } else {
+#ifdef _LP64
+          __ xorptr(rscratch1, rscratch1);
+          null_check_here = code_offset();
+          __ movptr(as_Address(addr), rscratch1);
+#else
           __ movptr(as_Address(addr), NULL_WORD);
+#endif
         }
       } else {
         if (is_literal_address(addr)) {
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -59,9 +59,9 @@
 static const int stub_size = 600;
 
 extern "C" {
-  typedef void (*getPsrInfo_stub_t)(void*);
+  typedef void (*get_cpu_info_stub_t)(void*);
 }
-static getPsrInfo_stub_t getPsrInfo_stub = NULL;
+static get_cpu_info_stub_t get_cpu_info_stub = NULL;
 
 
 class VM_Version_StubGenerator: public StubCodeGenerator {
@@ -69,7 +69,7 @@
 
   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
 
-  address generate_getPsrInfo() {
+  address generate_get_cpu_info() {
     // Flags to test CPU type.
     const uint32_t HS_EFL_AC           = 0x40000;
     const uint32_t HS_EFL_ID           = 0x200000;
@@ -81,13 +81,13 @@
     Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
     Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;
 
-    StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
+    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
 #   define __ _masm->
 
     address start = __ pc();
 
     //
-    // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
+    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
     //
     // LP64: rcx and rdx are first and second argument registers on windows
 
@@ -385,6 +385,14 @@
 };
 
 
+void VM_Version::get_cpu_info_wrapper() {
+  get_cpu_info_stub(&_cpuid_info);
+}
+
+#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
+  #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
+#endif
+
 void VM_Version::get_processor_features() {
 
   _cpu = 4; // 486 by default
@@ -395,7 +403,11 @@
 
   if (!Use486InstrsOnly) {
     // Get raw processor info
-    getPsrInfo_stub(&_cpuid_info);
+
+    // Some platforms (like Win*) need a wrapper around here
+    // in order to properly handle SEGV for YMM registers test.
+    CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper);
+
     assert_is_initialized();
     _cpu = extended_cpu_family();
     _model = extended_cpu_model();
@@ -986,14 +998,14 @@
   ResourceMark rm;
   // Making this stub must be FIRST use of assembler
 
-  stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
+  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
   if (stub_blob == NULL) {
-    vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
+    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
   }
   CodeBuffer c(stub_blob);
   VM_Version_StubGenerator g(&c);
-  getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
-                                   g.generate_getPsrInfo());
+  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
+                                     g.generate_get_cpu_info());
 
   get_processor_features();
 }
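
get_processor_features() now reaches the CPUID stub through CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED, so Windows can interpose its structured-exception wrapper for the YMM register probe while every other platform falls back to a plain call via the default macro defined just above. A minimal sketch of that "use the platform wrapper if one was provided, otherwise call directly" pattern, with made-up function names:

    // Sketch of the optional-wrapper macro pattern (hypothetical names).
    // A platform-specific header may define the macro first to route the call
    // through an exception-handling wrapper; everyone else gets a direct call.
    #include <cstdio>

    // e.g. a Windows-only header could have defined:
    // #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) my_platform_wrapper(f)
    #ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
      #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
    #endif

    static void probe_cpu() {
      // Stand-in for the CPUID probe that may fault while testing YMM state.
      std::printf("probing cpu features\n");
    }

    int main() {
      CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(probe_cpu);
      return 0;
    }
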
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -507,6 +507,7 @@
   // The value used to check ymm register after signal handle
   static int ymm_test_value()    { return 0xCAFEBABE; }
 
+  static void get_cpu_info_wrapper();
   static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
   static bool  is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; }
   static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
--- a/src/os/linux/vm/os_linux.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -1963,7 +1963,11 @@
     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
+#if defined(VM_LITTLE_ENDIAN)
+    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"},
+#else
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
+#endif
     {EM_ARM,         EM_ARM,     ELFCLASS32,   ELFDATA2LSB, (char*)"ARM"},
     {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
     {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
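
The extra table entry makes os_linux.cpp accept ELFDATA2LSB for EM_PPC64 when the VM itself was built little-endian; this table is what the library-loading code compares against a shared object's ELF identification bytes. A small sketch (not HotSpot code) that dumps the same EI_CLASS/EI_DATA bytes from an arbitrary ELF file, e.g. to confirm that a ppc64le .so reports ELFCLASS64 plus ELFDATA2LSB:

    // Sketch: print the ELF class and data-encoding bytes of a file.
    #include <elf.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      if (argc < 2) { std::fprintf(stderr, "usage: %s <elf-file>\n", argv[0]); return 1; }
      std::FILE* f = std::fopen(argv[1], "rb");
      if (f == NULL) { std::perror("fopen"); return 1; }
      unsigned char ident[EI_NIDENT];
      if (std::fread(ident, 1, EI_NIDENT, f) != EI_NIDENT) { std::fclose(f); return 1; }
      std::fclose(f);
      std::printf("class: %s, data: %s\n",
                  ident[EI_CLASS] == ELFCLASS64 ? "ELFCLASS64" : "ELFCLASS32",
                  ident[EI_DATA]  == ELFDATA2LSB ? "ELFDATA2LSB (little-endian)"
                                                 : "ELFDATA2MSB (big-endian)");
      return 0;
    }
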
--- a/src/os/windows/vm/os_windows.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -2716,7 +2716,6 @@
 }
 #endif
 
-#ifndef PRODUCT
 void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
   // Install a win32 structured exception handler around the test
   // function call so the VM can generate an error dump if needed.
@@ -2727,7 +2726,6 @@
     // Nothing to do.
   }
 }
-#endif
 
 // Virtual Memory
 
--- a/src/os/windows/vm/os_windows.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/os/windows/vm/os_windows.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -97,9 +97,7 @@
   static address fast_jni_accessor_wrapper(BasicType);
 #endif
 
-#ifndef PRODUCT
   static void call_test_func_with_wrapper(void (*funcPtr)(void));
-#endif
 
   // filter function to ignore faults on serializations page
   static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
--- a/src/os/windows/vm/os_windows.inline.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/os/windows/vm/os_windows.inline.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -107,9 +107,7 @@
   return ::close(fd);
 }
 
-#ifndef PRODUCT
-  #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
-            os::win32::call_test_func_with_wrapper(f)
-#endif
+#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
+        os::win32::call_test_func_with_wrapper(f)
 
 #endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/bytes_linux_ppc.inline.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2014 Google Inc.  All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+#define OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+
+#if defined(VM_LITTLE_ENDIAN)
+#include <byteswap.h>
+
+// Efficient swapping of data bytes from Java byte
+// ordering to native byte ordering and vice versa.
+inline u2 Bytes::swap_u2(u2 x) { return bswap_16(x); }
+inline u4 Bytes::swap_u4(u4 x) { return bswap_32(x); }
+inline u8 Bytes::swap_u8(u8 x) { return bswap_64(x); }
+#endif // VM_LITTLE_ENDIAN
+
+#endif // OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/g1/dirtyCardQueue.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -3547,6 +3547,29 @@
   }
 };
 
+bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
+                                       const HeapRegion* hr,
+                                       const VerifyOption vo) const {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
+  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  default:                            ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
+bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
+                                       const VerifyOption vo) const {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
+  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  default:                            ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
 void G1CollectedHeap::print_on(outputStream* st) const {
   st->print(" %-20s", "garbage-first heap");
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -706,19 +706,7 @@
   // This is a fast test on whether a reference points into the
   // collection set or not. Assume that the reference
   // points into the heap.
-  bool in_cset_fast_test(oop obj) {
-    assert(_in_cset_fast_test != NULL, "sanity");
-    assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
-    // no need to subtract the bottom of the heap from obj,
-    // _in_cset_fast_test is biased
-    uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-    bool ret = _in_cset_fast_test[index];
-    // let's make sure the result is consistent with what the slower
-    // test returns
-    assert( ret || !obj_in_cs(obj), "sanity");
-    assert(!ret ||  obj_in_cs(obj), "sanity");
-    return ret;
-  }
+  inline bool in_cset_fast_test(oop obj);
 
   void clear_cset_fast_test() {
     assert(_in_cset_fast_test_base != NULL, "sanity");
@@ -1264,9 +1252,7 @@
     }
   }
 
-  void old_set_remove(HeapRegion* hr) {
-    _old_set.remove(hr);
-  }
+  inline void old_set_remove(HeapRegion* hr);
 
   size_t non_young_capacity_bytes() {
     return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
@@ -1357,7 +1343,7 @@
   void heap_region_iterate(HeapRegionClosure* blk) const;
 
   // Return the region with the given index. It assumes the index is valid.
-  HeapRegion* region_at(uint index) const { return _hrs.at(index); }
+  inline HeapRegion* region_at(uint index) const;
 
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
@@ -1486,10 +1472,7 @@
     return true;
   }
 
-  bool is_in_young(const oop obj) {
-    HeapRegion* hr = heap_region_containing(obj);
-    return hr != NULL && hr->is_young();
-  }
+  inline bool is_in_young(const oop obj);
 
 #ifdef ASSERT
   virtual bool is_in_partial_collection(const void* p);
@@ -1502,9 +1485,7 @@
   // pre-value that needs to be remembered; for the remembered-set
   // update logging post-barrier, we don't maintain remembered set
   // information for young gen objects.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
-    return is_in_young(new_obj);
-  }
+  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
 
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
@@ -1598,23 +1579,9 @@
 
   // Added if it is NULL it isn't dead.
 
-  bool is_obj_dead(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_dead(obj, hr);
-  }
+  inline bool is_obj_dead(const oop obj) const;
 
-  bool is_obj_ill(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_ill(obj, hr);
-  }
+  inline bool is_obj_ill(const oop obj) const;
 
   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
@@ -1708,26 +1675,10 @@
 
   bool is_obj_dead_cond(const oop obj,
                         const HeapRegion* hr,
-                        const VerifyOption vo) const {
-    switch (vo) {
-    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
-    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
-    default:                            ShouldNotReachHere();
-    }
-    return false; // keep some compilers happy
-  }
+                        const VerifyOption vo) const;
 
   bool is_obj_dead_cond(const oop obj,
-                        const VerifyOption vo) const {
-    switch (vo) {
-    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
-    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
-    default:                            ShouldNotReachHere();
-    }
-    return false; // keep some compilers happy
-  }
+                        const VerifyOption vo) const;
 
   // Printing
 
@@ -1821,11 +1772,7 @@
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
 
-  template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
-    if (!from->is_survivor()) {
-      _g1_rem->par_write_ref(from, p, tid);
-    }
-  }
+  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
 
   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
     // If the new value of the field points to the same region or
@@ -1867,13 +1814,7 @@
     refs()->push(ref);
   }
 
-  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
-    if (G1DeferredRSUpdate) {
-      deferred_rs_update(from, p, tid);
-    } else {
-      immediate_rs_update(from, p, tid);
-    }
-  }
+  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
 
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
     HeapWord* obj = NULL;
@@ -1997,54 +1938,7 @@
     return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
   }
 
-  void do_oop_partial_array(oop* p) {
-    assert(has_partial_array_mask(p), "invariant");
-    oop from_obj = clear_partial_array_mask(p);
-
-    assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-    assert(from_obj->is_objArray(), "must be obj array");
-    objArrayOop from_obj_array = objArrayOop(from_obj);
-    // The from-space object contains the real length.
-    int length                 = from_obj_array->length();
-
-    assert(from_obj->is_forwarded(), "must be forwarded");
-    oop to_obj                 = from_obj->forwardee();
-    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-    objArrayOop to_obj_array   = objArrayOop(to_obj);
-    // We keep track of the next start index in the length field of the
-    // to-space object.
-    int next_index             = to_obj_array->length();
-    assert(0 <= next_index && next_index < length,
-           err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-    int start                  = next_index;
-    int end                    = length;
-    int remainder              = end - start;
-    // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-    if (remainder > 2 * ParGCArrayScanChunk) {
-      end = start + ParGCArrayScanChunk;
-      to_obj_array->set_length(end);
-      // Push the remainder before we process the range in case another
-      // worker has run out of things to do and can steal it.
-      oop* from_obj_p = set_partial_array_mask(from_obj);
-      push_on_queue(from_obj_p);
-    } else {
-      assert(length == end, "sanity");
-      // We'll process the final range for this object. Restore the length
-      // so that the heap remains parsable in case of evacuation failure.
-      to_obj_array->set_length(end);
-    }
-    _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
-    // Process indexes [start,end). It will also process the header
-    // along with the first chunk (i.e., the chunk with start == 0).
-    // Note that at this point the length field of to_obj_array is not
-    // correct given that we are using it to keep track of the next
-    // start index. oop_iterate_range() (thankfully!) ignores the length
-    // field and only relies on the start / end parameters.  It does
-    // however return the size of the object which will be incorrect. So
-    // we have to ignore it even if we wanted to use it.
-    to_obj_array->oop_iterate_range(&_scanner, start, end);
-  }
+  inline void do_oop_partial_array(oop* p);
 
   // This method is applied to the fields of the objects that have just been copied.
   template <class T> void do_oop_evac(T* p, HeapRegion* from) {
@@ -2074,26 +1968,9 @@
 
   oop copy_to_survivor_space(oop const obj);
 
-  template <class T> void deal_with_reference(T* ref_to_scan) {
-    if (!has_partial_array_mask(ref_to_scan)) {
-      // Note: we can use "raw" versions of "region_containing" because
-      // "obj_to_scan" is definitely in the heap, and is not in a
-      // humongous region.
-      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-      do_oop_evac(ref_to_scan, r);
-    } else {
-      do_oop_partial_array((oop*)ref_to_scan);
-    }
-  }
+  template <class T> inline void deal_with_reference(T* ref_to_scan);
 
-  void deal_with_reference(StarTask ref) {
-    assert(verify_task(ref), "sanity");
-    if (ref.is_narrow()) {
-      deal_with_reference((narrowOop*)ref);
-    } else {
-      deal_with_reference((oop*)ref);
-    }
-  }
+  inline void deal_with_reference(StarTask ref);
 
 public:
   void trim_queue();
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@@ -36,6 +37,9 @@
 
 // Inline functions for G1CollectedHeap
 
+// Return the region with the given index. It assumes the index is valid.
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
+
 template <class T>
 inline HeapRegion*
 G1CollectedHeap::heap_region_containing(const T addr) const {
@@ -55,6 +59,10 @@
   return res;
 }
 
+inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
+  _old_set.remove(hr);
+}
+
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
@@ -151,6 +159,24 @@
   return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
+
+// This is a fast test on whether a reference points into the
+// collection set or not. Assume that the reference
+// points into the heap.
+inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
+  assert(_in_cset_fast_test != NULL, "sanity");
+  assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
+  // no need to subtract the bottom of the heap from obj,
+  // _in_cset_fast_test is biased
+  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
+  bool ret = _in_cset_fast_test[index];
+  // let's make sure the result is consistent with what the slower
+  // test returns
+  assert( ret || !obj_in_cs(obj), "sanity");
+  assert(!ret ||  obj_in_cs(obj), "sanity");
+  return ret;
+}
+
 #ifndef PRODUCT
 // Support for G1EvacuationFailureALot
 
@@ -224,4 +250,121 @@
 }
 #endif  // #ifndef PRODUCT
 
+inline bool G1CollectedHeap::is_in_young(const oop obj) {
+  HeapRegion* hr = heap_region_containing(obj);
+  return hr != NULL && hr->is_young();
+}
+
+// We don't need barriers for initializing stores to objects
+// in the young gen: for the SATB pre-barrier, there is no
+// pre-value that needs to be remembered; for the remembered-set
+// update logging post-barrier, we don't maintain remembered set
+// information for young gen objects.
+inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
+  return is_in_young(new_obj);
+}
+
+inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
+  const HeapRegion* hr = heap_region_containing(obj);
+  if (hr == NULL) {
+    if (obj == NULL) return false;
+    else return true;
+  }
+  else return is_obj_dead(obj, hr);
+}
+
+inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
+  const HeapRegion* hr = heap_region_containing(obj);
+  if (hr == NULL) {
+    if (obj == NULL) return false;
+    else return true;
+  }
+  else return is_obj_ill(obj, hr);
+}
+
+template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
+  if (!from->is_survivor()) {
+    _g1_rem->par_write_ref(from, p, tid);
+  }
+}
+
+template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
+  if (G1DeferredRSUpdate) {
+    deferred_rs_update(from, p, tid);
+  } else {
+    immediate_rs_update(from, p, tid);
+  }
+}
+
+
+inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
+  assert(has_partial_array_mask(p), "invariant");
+  oop from_obj = clear_partial_array_mask(p);
+
+  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+  assert(from_obj->is_objArray(), "must be obj array");
+  objArrayOop from_obj_array = objArrayOop(from_obj);
+  // The from-space object contains the real length.
+  int length                 = from_obj_array->length();
+
+  assert(from_obj->is_forwarded(), "must be forwarded");
+  oop to_obj                 = from_obj->forwardee();
+  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
+  objArrayOop to_obj_array   = objArrayOop(to_obj);
+  // We keep track of the next start index in the length field of the
+  // to-space object.
+  int next_index             = to_obj_array->length();
+  assert(0 <= next_index && next_index < length,
+         err_msg("invariant, next index: %d, length: %d", next_index, length));
+
+  int start                  = next_index;
+  int end                    = length;
+  int remainder              = end - start;
+  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
+  if (remainder > 2 * ParGCArrayScanChunk) {
+    end = start + ParGCArrayScanChunk;
+    to_obj_array->set_length(end);
+    // Push the remainder before we process the range in case another
+    // worker has run out of things to do and can steal it.
+    oop* from_obj_p = set_partial_array_mask(from_obj);
+    push_on_queue(from_obj_p);
+  } else {
+    assert(length == end, "sanity");
+    // We'll process the final range for this object. Restore the length
+    // so that the heap remains parsable in case of evacuation failure.
+    to_obj_array->set_length(end);
+  }
+  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+  // Process indexes [start,end). It will also process the header
+  // along with the first chunk (i.e., the chunk with start == 0).
+  // Note that at this point the length field of to_obj_array is not
+  // correct given that we are using it to keep track of the next
+  // start index. oop_iterate_range() (thankfully!) ignores the length
+  // field and only relies on the start / end parameters.  It does
+  // however return the size of the object which will be incorrect. So
+  // we have to ignore it even if we wanted to use it.
+  to_obj_array->oop_iterate_range(&_scanner, start, end);
+}
+
+template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
+  if (!has_partial_array_mask(ref_to_scan)) {
+    // Note: we can use "raw" versions of "region_containing" because
+    // "obj_to_scan" is definitely in the heap, and is not in a
+    // humongous region.
+    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+    do_oop_evac(ref_to_scan, r);
+  } else {
+    do_oop_partial_array((oop*)ref_to_scan);
+  }
+}
+
+inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
+  assert(verify_task(ref), "sanity");
+  if (ref.is_narrow()) {
+    deal_with_reference((narrowOop*)ref);
+  } else {
+    deal_with_reference((oop*)ref);
+  }
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
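
The G1 changes above are a mechanical move: the bodies of frequently used accessors leave g1CollectedHeap.hpp for g1CollectedHeap.inline.hpp, and sparsePRT.hpp now includes only the plain header, which keeps the declaration header cheap to include and avoids dragging g1RemSet.inline.hpp into everything. A generic sketch of that .hpp/.inline.hpp split, using hypothetical names rather than the G1 classes; the three sections would normally be three files, shown here concatenated so the sketch compiles as one unit:

    // --- foo.hpp ---
    // Declarations only; inline bodies live in foo.inline.hpp so that
    // including foo.hpp stays cheap and free of include cycles.
    class Foo {
      int _x;
     public:
      Foo() : _x(0) {}
      inline int x() const;        // declared here, defined in foo.inline.hpp
      void set_x(int v) { _x = v; }
    };

    // --- foo.inline.hpp ---
    // #include "foo.hpp"
    inline int Foo::x() const { return _x; }

    // --- user.cpp ---
    // Only translation units that need the fast path include foo.inline.hpp.
    #include <cstdio>
    int main() {
      Foo f;
      f.set_x(7);
      std::printf("%d\n", f.x());
      return 0;
    }
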
--- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
 
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "memory/allocation.hpp"
 #include "memory/cardTableModRefBS.hpp"
--- a/src/share/vm/prims/jni.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/prims/jni.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -5208,7 +5208,7 @@
     }
 
 #ifndef PRODUCT
-  #ifndef TARGET_OS_FAMILY_windows
+  #ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
     #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
   #endif
 
--- a/src/share/vm/runtime/arguments.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -1880,24 +1880,22 @@
 // check if do gclog rotation
 // +UseGCLogFileRotation is a must,
 // no gc log rotation when log file not supplied or
-// NumberOfGCLogFiles is 0, or GCLogFileSize is 0
+// NumberOfGCLogFiles is 0
 void check_gclog_consistency() {
   if (UseGCLogFileRotation) {
-    if ((Arguments::gc_log_filename() == NULL) ||
-        (NumberOfGCLogFiles == 0)  ||
-        (GCLogFileSize == 0)) {
+    if ((Arguments::gc_log_filename() == NULL) || (NumberOfGCLogFiles == 0)) {
       jio_fprintf(defaultStream::output_stream(),
-                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>[k|K|m|M|g|G]\n"
-                  "where num_of_file > 0 and num_of_size > 0\n"
+                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files>\n"
+                  "where num_of_file > 0\n"
                   "GC log rotation is turned off\n");
       UseGCLogFileRotation = false;
     }
   }
 
-  if (UseGCLogFileRotation && GCLogFileSize < 8*K) {
-        FLAG_SET_CMDLINE(uintx, GCLogFileSize, 8*K);
-        jio_fprintf(defaultStream::output_stream(),
-                    "GCLogFileSize changed to minimum 8K\n");
+  if (UseGCLogFileRotation && (GCLogFileSize != 0) && (GCLogFileSize < 8*K)) {
+    FLAG_SET_CMDLINE(uintx, GCLogFileSize, 8*K);
+    jio_fprintf(defaultStream::output_stream(),
+                "GCLogFileSize changed to minimum 8K\n");
   }
 }
 
--- a/src/share/vm/runtime/globals.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/runtime/globals.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -325,7 +325,7 @@
     else st->print("%-16s", "");
   }
 
-  st->print("%-20");
+  st->print("%-20s", " ");
   print_kind(st);
 
   if (withComments) {
--- a/src/share/vm/runtime/globals.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/runtime/globals.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -2422,9 +2422,9 @@
           "Number of gclog files in rotation "                              \
           "(default: 0, no rotation)")                                      \
                                                                             \
-  product(uintx, GCLogFileSize, 0,                                          \
-          "GC log file size (default: 0 bytes, no rotation). "              \
-          "It requires UseGCLogFileRotation")                               \
+  product(uintx, GCLogFileSize, 8*K,                                        \
+          "GC log file size, requires UseGCLogFileRotation. "               \
+          "Set to 0 to only trigger rotation via jcmd")                     \
                                                                             \
   /* JVMTI heap profiling */                                                \
                                                                             \
--- a/src/share/vm/runtime/safepoint.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/runtime/safepoint.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -535,7 +535,7 @@
 
   // rotate log files?
   if (UseGCLogFileRotation) {
-    gclog_or_tty->rotate_log();
+    gclog_or_tty->rotate_log(false);
   }
 
   if (MemTracker::is_on()) {
--- a/src/share/vm/runtime/vm_operations.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/runtime/vm_operations.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -94,6 +94,7 @@
   template(JFRCheckpoint)                         \
   template(Exit)                                  \
   template(LinuxDllLoad)                          \
+  template(RotateGCLog)                           \
 
 class VM_Operation: public CHeapObj<mtInternal> {
  public:
@@ -397,4 +398,15 @@
   void doit();
 };
 
+
+class VM_RotateGCLog: public VM_Operation {
+ private:
+  outputStream* _out;
+
+ public:
+  VM_RotateGCLog(outputStream* st) : _out(st) {}
+  VMOp_Type type() const { return VMOp_RotateGCLog; }
+  void doit() { gclog_or_tty->rotate_log(true, _out); }
+};
+
 #endif // SHARE_VM_RUNTIME_VM_OPERATIONS_HPP
--- a/src/share/vm/services/diagnosticCommand.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/services/diagnosticCommand.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -53,6 +53,7 @@
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassStatsDCmd>(full_export, true, false));
 #endif // INCLUDE_SERVICES
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RotateGCLogDCmd>(full_export, true, false));
 
   // Enhanced JMX Agent Support
   // These commands won't be exported via the DiagnosticCommandMBean until an
@@ -650,3 +651,11 @@
     JavaCalls::call_static(&result, ik, vmSymbols::stopRemoteAgent_name(), vmSymbols::void_method_signature(), CHECK);
 }
 
+void RotateGCLogDCmd::execute(DCmdSource source, TRAPS) {
+  if (UseGCLogFileRotation) {
+    VM_RotateGCLog rotateop(output());
+    VMThread::execute(&rotateop);
+  } else {
+    output()->print_cr("Target VM does not support GC log file rotation.");
+  }
+}
--- a/src/share/vm/services/diagnosticCommand.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/services/diagnosticCommand.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -360,4 +360,21 @@
   virtual void execute(DCmdSource source, TRAPS);
 };
 
+class RotateGCLogDCmd : public DCmd {
+public:
+  RotateGCLogDCmd(outputStream* output, bool heap) : DCmd(output, heap) {}
+  static const char* name() { return "GC.rotate_log"; }
+  static const char* description() {
+    return "Force the GC log file to be rotated.";
+  }
+  static const char* impact() { return "Low"; }
+  virtual void execute(DCmdSource source, TRAPS);
+  static int num_arguments() { return 0; }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "control", NULL};
+    return p;
+  }
+};
+
 #endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_HPP
--- a/src/share/vm/utilities/ostream.cpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/utilities/ostream.cpp	Fri Apr 04 09:49:19 2014 -0700
@@ -662,13 +662,13 @@
 // write to gc log file at safepoint. If in future, changes made for mutator threads or
 // concurrent GC threads to run parallel with VMThread at safepoint, write and rotate_log
 // must be synchronized.
-void gcLogFileStream::rotate_log() {
+void gcLogFileStream::rotate_log(bool force, outputStream* out) {
   char time_msg[FILENAMEBUFLEN];
   char time_str[EXTRACHARLEN];
   char current_file_name[FILENAMEBUFLEN];
   char renamed_file_name[FILENAMEBUFLEN];
 
-  if (_bytes_written < (jlong)GCLogFileSize) {
+  if (!should_rotate(force)) {
     return;
   }
 
@@ -685,6 +685,11 @@
     jio_snprintf(time_msg, sizeof(time_msg), "File  %s rotated at %s\n",
                  _file_name, os::local_time_string((char *)time_str, sizeof(time_str)));
     write(time_msg, strlen(time_msg));
+
+    if (out != NULL) {
+      out->print(time_msg);
+    }
+
     dump_loggc_header();
     return;
   }
@@ -706,12 +711,18 @@
                  _file_name, _cur_file_num);
     jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
                  _file_name, _cur_file_num);
-    jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file has reached the"
-                           " maximum size. Saved as %s\n",
-                           os::local_time_string((char *)time_str, sizeof(time_str)),
-                           renamed_file_name);
+
+    const char* msg = force ? "GC log rotation request has been received."
+                            : "GC log file has reached the maximum size.";
+    jio_snprintf(time_msg, sizeof(time_msg), "%s %s Saved as %s\n",
+                     os::local_time_string((char *)time_str, sizeof(time_str)),
+                                                         msg, renamed_file_name);
     write(time_msg, strlen(time_msg));
 
+    if (out != NULL) {
+      out->print(time_msg);
+    }
+
     fclose(_file);
     _file = NULL;
 
@@ -752,6 +763,11 @@
                            os::local_time_string((char *)time_str, sizeof(time_str)),
                            current_file_name);
     write(time_msg, strlen(time_msg));
+
+    if (out != NULL) {
+      out->print(time_msg);
+    }
+
     dump_loggc_header();
     // remove the existing file
     if (access(current_file_name, F_OK) == 0) {
--- a/src/share/vm/utilities/ostream.hpp	Wed Apr 02 09:31:10 2014 -0700
+++ b/src/share/vm/utilities/ostream.hpp	Fri Apr 04 09:49:19 2014 -0700
@@ -115,7 +115,7 @@
    // flushing
    virtual void flush() {}
    virtual void write(const char* str, size_t len) = 0;
-   virtual void rotate_log() {} // GC log rotation
+   virtual void rotate_log(bool force, outputStream* out = NULL) {} // GC log rotation
    virtual ~outputStream() {}   // close properly on deletion
 
    void dec_cr() { dec(); cr(); }
@@ -240,8 +240,15 @@
   gcLogFileStream(const char* file_name);
   ~gcLogFileStream();
   virtual void write(const char* c, size_t len);
-  virtual void rotate_log();
+  virtual void rotate_log(bool force, outputStream* out = NULL);
   void dump_loggc_header();
+
+  /* If "force" sets true, force log file rotation from outside JVM */
+  bool should_rotate(bool force) {
+    return force ||
+             ((GCLogFileSize != 0) && ((uintx)_bytes_written >= GCLogFileSize));
+  }
+
 };
 
 #ifndef PRODUCT
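
rotate_log() now takes a force flag and delegates the decision to should_rotate(): rotation happens when it is explicitly forced (the jcmd GC.rotate_log path) or when a non-zero GCLogFileSize has been reached, which is also why GCLogFileSize may now legitimately be 0. A standalone sketch of that predicate with a few spot checks; the parameter names are simplified stand-ins for the gcLogFileStream members:

    // Standalone sketch of the rotation trigger: rotate when explicitly forced
    // (jcmd GC.rotate_log) or when a non-zero size limit has been reached.
    #include <cassert>
    #include <cstdint>

    static bool should_rotate(bool force, uint64_t bytes_written, uint64_t log_file_size) {
      return force || ((log_file_size != 0) && (bytes_written >= log_file_size));
    }

    int main() {
      assert( should_rotate(true,      0, 0));        // forced rotation always wins
      assert(!should_rotate(false, 4096, 0));         // size 0: only jcmd can rotate
      assert( should_rotate(false, 9000, 8 * 1024));  // limit reached
      assert(!should_rotate(false, 1024, 8 * 1024));  // below limit
      return 0;
    }
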
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/codegen/C1NullCheckOfNullStore.java	Fri Apr 04 09:49:19 2014 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8039043
+ * @summary Null check is placed in a wrong place when storing a null to an object field on x64 with compressed oops off
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CompileCommand=compileonly,C1NullCheckOfNullStore::test -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-UseCompressedOops C1NullCheckOfNullStore
+ *
+ */
+
+public class C1NullCheckOfNullStore {
+  private static class Foo {
+    Object bar;
+  }
+  static private void test(Foo x) {
+    x.bar = null;
+  }
+  static public void main(String args[]) {
+    Foo x = new Foo();
+    for (int i = 0; i < 10000; i++) {
+      test(x);
+    }
+    boolean gotNPE = false;
+    try {
+      for (int i = 0; i < 10000; i++) {
+        test(null);
+      }
+    }
+    catch(NullPointerException e) {
+      gotNPE = true;
+    }
+    if (!gotNPE) {
+      throw new Error("Expecting a NullPointerException");
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/TestGCLogRotationViaJcmd.java	Fri Apr 04 09:49:19 2014 -0700
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestGCLogRotationViaJcmd.java
+ * @bug 7090324
+ * @summary test for gc log rotation via jcmd
+ * @library /testlibrary
+ * @run main/othervm -Xloggc:test.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=3 TestGCLogRotationViaJcmd
+ *
+ */
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+import java.io.FilenameFilter;
+
+public class TestGCLogRotationViaJcmd {
+
+    static final File currentDirectory = new File(".");
+    static final String LOG_FILE_NAME = "test.log";
+    static final int NUM_LOGS = 3;
+
+    static FilenameFilter logFilter = new FilenameFilter() {
+        @Override
+        public boolean accept(File dir, String name) {
+            return name.startsWith(LOG_FILE_NAME);
+        }
+    };
+
+    public static void main(String[] args) throws Exception {
+        // Grab the pid from the current java process
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        // Create a JDKToolLauncher
+        JDKToolLauncher jcmd = JDKToolLauncher.create("jcmd")
+                                              .addToolArg(pid)
+                                              .addToolArg("GC.rotate_log");
+
+        for (int times = 1; times < NUM_LOGS; times++) {
+            // Run jcmd <pid> GC.rotate_log
+            ProcessBuilder pb = new ProcessBuilder(jcmd.getCommand());
+
+            // Make sure we didn't crash
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
+        // GC log check
+        File[] logs = currentDirectory.listFiles(logFilter);
+        if (logs.length != NUM_LOGS) {
+            throw new Error("There are only " + logs.length
+                                              + " logs instead " + NUM_LOGS);
+        }
+
+    }
+
+}
+