Mercurial > hg > graal-compiler
comparison src/share/vm/memory/universe.cpp @ 12110:4c84d351cca9
8007074: SIGSEGV at ParMarkBitMap::verify_clear()
Summary: Replace the broken large pages implementation on Linux. New flag: -XX:+UseTransparentHugePages - Linux specific flag to turn on transparent huge page hinting with madvise(..., MADV_HUGEPAGE). Changed behavior: -XX:+UseLargePages - tries to use -XX:+UseTransparentHugePages before trying other large pages implementations (on Linux). Changed behavior: -XX:+UseHugeTLBFS - Use upfront allocation of Large Pages instead of using the broken implementation to dynamically commit large pages. Changed behavior: -XX:LargePageSizeInBytes - Turned off the ability to use this flag on Linux and provides warning to user if set to a value different than the OS chosen large page size. Changed behavior: Setting no large page size - Now defaults to use -XX:+UseTransparentHugePages if the OS supports it. Previously, -XX:+UseHugeTLBFS was chosen if the OS was configured to use large pages.
Reviewed-by: tschatzl, dcubed, brutisso
author | stefank |
---|---|
date | Fri, 16 Aug 2013 13:22:32 +0200 |
parents | 1a8fb39bdbc4 |
children | 7944aba7ba41 69f26e8e09f9 e2722a66aba7 |
comparison
equal
deleted
inserted
replaced
12108:badf4244ceae | 12110:4c84d351cca9 |
---|---|
679 // 4Gb | 679 // 4Gb |
680 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1); | 680 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1); |
681 // 32Gb | 681 // 32Gb |
682 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes; | 682 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes; |
683 | 683 |
684 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) { | 684 char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) { |
685 assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be"); | |
686 assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be"); | |
687 assert(is_size_aligned(heap_size, alignment), "Must be"); | |
688 | |
689 uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment); | |
690 | |
685 size_t base = 0; | 691 size_t base = 0; |
686 #ifdef _LP64 | 692 #ifdef _LP64 |
687 if (UseCompressedOops) { | 693 if (UseCompressedOops) { |
688 assert(mode == UnscaledNarrowOop || | 694 assert(mode == UnscaledNarrowOop || |
689 mode == ZeroBasedNarrowOop || | 695 mode == ZeroBasedNarrowOop || |
690 mode == HeapBasedNarrowOop, "mode is invalid"); | 696 mode == HeapBasedNarrowOop, "mode is invalid"); |
691 const size_t total_size = heap_size + HeapBaseMinAddress; | 697 const size_t total_size = heap_size + heap_base_min_address_aligned; |
692 // Return specified base for the first request. | 698 // Return specified base for the first request. |
693 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) { | 699 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) { |
694 base = HeapBaseMinAddress; | 700 base = heap_base_min_address_aligned; |
695 | 701 |
696 // If the total size is small enough to allow UnscaledNarrowOop then | 702 // If the total size is small enough to allow UnscaledNarrowOop then |
697 // just use UnscaledNarrowOop. | 703 // just use UnscaledNarrowOop. |
698 } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) { | 704 } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) { |
699 if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) && | 705 if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) && |
740 } | 746 } |
741 #endif // _WIN64 | 747 #endif // _WIN64 |
742 } | 748 } |
743 } | 749 } |
744 #endif | 750 #endif |
751 | |
752 assert(is_ptr_aligned((char*)base, alignment), "Must be"); | |
745 return (char*)base; // also return NULL (don't care) for 32-bit VM | 753 return (char*)base; // also return NULL (don't care) for 32-bit VM |
746 } | 754 } |
747 | 755 |
748 jint Universe::initialize_heap() { | 756 jint Universe::initialize_heap() { |
749 | 757 |
865 // Reserve the Java heap, which is now the same for all GCs. | 873 // Reserve the Java heap, which is now the same for all GCs. |
866 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) { | 874 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) { |
867 size_t total_reserved = align_size_up(heap_size, alignment); | 875 size_t total_reserved = align_size_up(heap_size, alignment); |
868 assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())), | 876 assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())), |
869 "heap size is too big for compressed oops"); | 877 "heap size is too big for compressed oops"); |
870 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); | 878 |
871 | 879 bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size()); |
872 ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr); | 880 assert(!UseLargePages |
881 || UseParallelOldGC | |
882 || use_large_pages, "Wrong alignment to use large pages"); | |
883 | |
884 char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop); | |
885 | |
886 ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr); | |
873 | 887 |
874 if (UseCompressedOops) { | 888 if (UseCompressedOops) { |
875 if (addr != NULL && !total_rs.is_reserved()) { | 889 if (addr != NULL && !total_rs.is_reserved()) { |
876 // Failed to reserve at specified address - the requested memory | 890 // Failed to reserve at specified address - the requested memory |
877 // region is taken already, for example, by 'java' launcher. | 891 // region is taken already, for example, by 'java' launcher. |
878 // Try again to reserver heap higher. | 892 // Try again to reserver heap higher. |
879 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); | 893 addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop); |
880 | 894 |
881 ReservedHeapSpace total_rs0(total_reserved, alignment, | 895 ReservedHeapSpace total_rs0(total_reserved, alignment, |
882 UseLargePages, addr); | 896 use_large_pages, addr); |
883 | 897 |
884 if (addr != NULL && !total_rs0.is_reserved()) { | 898 if (addr != NULL && !total_rs0.is_reserved()) { |
885 // Failed to reserve at specified address again - give up. | 899 // Failed to reserve at specified address again - give up. |
886 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); | 900 addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop); |
887 assert(addr == NULL, ""); | 901 assert(addr == NULL, ""); |
888 | 902 |
889 ReservedHeapSpace total_rs1(total_reserved, alignment, | 903 ReservedHeapSpace total_rs1(total_reserved, alignment, |
890 UseLargePages, addr); | 904 use_large_pages, addr); |
891 total_rs = total_rs1; | 905 total_rs = total_rs1; |
892 } else { | 906 } else { |
893 total_rs = total_rs0; | 907 total_rs = total_rs0; |
894 } | 908 } |
895 } | 909 } |