annotate src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 10374:87c64c0438fb

6976350: G1: deal with fragmentation while copying objects during GC
Summary: Create G1ParGCAllocBufferContainer to hold two buffers instead of the single buffer used previously, so that the first-priority buffer is kept alive longer. When a large object does not fit into the free space left in the first-priority buffer, it can fall back to the second-priority buffer, while the first-priority buffer keeps getting chances to satisfy smaller allocations. Overall, this improves heap space efficiency.
Reviewed-by: johnc, jmasa, brutisso
Contributed-by: tamao <tao.mao@oracle.com>
author tamao
date Mon, 03 Jun 2013 14:37:13 -0700
parents ca9580859cf4
children de6a9e811145
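
The summary above describes a two-buffer scheme. The sketch below is illustrative only: the type and member names (SketchBuffer, TwoPriorityBufferContainer, retire_and_add) are hypothetical and not the actual G1ParGCAllocBufferContainer API from this changeset; it merely shows the idea of trying the first-priority buffer before falling back to the second, and of retiring only the older buffer on refill.

// Illustrative sketch of the two-priority allocation-buffer idea (hypothetical
// names, not HotSpot code).
#include <cstddef>

struct SketchBuffer {                       // stand-in for a per-thread PLAB
  char* _top;
  char* _end;
  void* allocate(size_t bytes) {            // bump-pointer allocation, or NULL
    if (_top + bytes > _end) return NULL;
    void* obj = _top;
    _top += bytes;
    return obj;
  }
  void retire() { /* fill the unused tail with a dummy object */ }
};

class TwoPriorityBufferContainer {
  static const int _priority_max = 2;
  SketchBuffer* _buf[_priority_max];        // _buf[0] is the first-priority buffer
 public:
  void* allocate(size_t bytes) {
    for (int pr = 0; pr < _priority_max; pr++) {
      void* obj = _buf[pr]->allocate(bytes);  // try the older buffer first;
      if (obj != NULL) return obj;            // large objects may land in _buf[1]
    }
    return NULL;                              // both are full: caller refills
  }
  // On refill, retire only the first-priority buffer, promote the second one,
  // and install the freshly allocated buffer as the new fallback.
  void retire_and_add(SketchBuffer* fresh) {
    _buf[0]->retire();
    _buf[0] = _buf[1];
    _buf[1] = fresh;
  }
};
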
/*
 * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.inline.hpp"

MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
  _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_unused_area_complete() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_region(MemRegion mr) {
  // This method should do nothing because numa spaces are not mangled.
}
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
  assert(false, "Do not mangle MutableNUMASpace's");
}
void MutableNUMASpace::set_top_for_allocations() {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area_complete() {
  // This method should do nothing.
}
#endif // NOT_PRODUCT

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
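// ensure_parsability() below walks the per-lgrp spaces that lie below the
// global top() and fills the gap between each space's top() and end() with
// dummy objects, at most CollectedHeap::filler_array_max_size() words at a
// time. On systems without static NUMA binding, pages touched while writing
// the filler headers may be recorded as invalid regions so that their
// placement can be re-verified later.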
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 err_msg("Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                         words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                   touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If the object header crossed a small page boundary, we mark the
              // area as invalid, rounding it to page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}

size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}


size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location, and the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}


size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
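// update_layout() reconciles the set of LGRPSpaces with the locality groups
// the OS currently reports: a space is appended for every lgrp id that has
// appeared and deleted for every lgrp id that has disappeared. If the
// topology really changed, each thread's cached lgrp id is reset to -1 so it
// is re-resolved on that thread's next allocation.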
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology had changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids, mtGC);

    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}

// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start() % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start() % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
  }
}

// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    // And clear the alloc-rate statistics.
    // In future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      MutableSpace *s = ls->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
      ls->clear_alloc_rate();
    }
    // A NUMA space is never mangled
    initialize(region(),
               SpaceDecorator::Clear,
               SpaceDecorator::DontMangle);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      // A NUMA space is never mangled
      initialize(region(),
                 SpaceDecorator::Clear,
                 SpaceDecorator::DontMangle);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}

// Scan pages. Free pages that have a smaller size or the wrong placement.
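// The page budget passed to scan_pages() (NUMAPageScanRate when called from
// update()) is split evenly among the lgrp spaces; if there are fewer pages
// than spaces, nothing is scanned in this round. Each LGRPSpace then examines
// at most pages_per_chunk of its pages on this call, freeing ones that ended
// up undersized or on the wrong node so they can be re-placed on first touch.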
void MutableNUMASpace::scan_pages(size_t page_count)
{
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
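// Note that base_space_size() is measured in pages. For example (illustrative
// numbers only): with base_space_size() == 1024 pages, a 4K page_size() and
// 4 locality groups, each lgrp's default chunk is 1024 / 4 * 4K = 1M.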
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}

// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on a sequence of i's from 0 to
// lgrp_spaces()->length().
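// The candidate size for chunk i is the share of the still-unassigned pages
// proportional to lgrp i's average allocation rate among lgrps i..n-1:
//   chunk_size = alloc_rate(i) / sum(alloc_rate(i..n-1)) * pages_available * page_size()
// As an illustration (made-up numbers): with 300 pages left and the two
// remaining lgrps allocating at rates 2 and 1, lgrp i is offered about 200
// pages, before the growth/shrink limit handling below is applied.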
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      size_t upper_bound = pages_available * page_size();
      if (upper_bound > limit &&
          current_chunk_size(i) < upper_bound - limit) {
        // The resulting upper bound should not exceed the available
        // amount of memory (pages_available * page_size()).
        upper_bound = current_chunk_size(i) + limit;
      }
      chunk_size = MIN2(chunk_size, upper_bound);
    } else {
      size_t lower_bound = page_size();
      if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
        lower_bound = current_chunk_size(i) - limit;
      }
      chunk_size = MAX2(chunk_size, lower_bound);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}


// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
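// The intersection is the part of new_region that is already backed by this
// lgrp's current chunk; bottom_region and top_region are the leftover tails
// that still have to be set up (the caller re-biases just those). With large
// pages enabled the intersection may be trimmed below to alignment()
// boundaries so the tails can be committed with the large page size.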
a61af66fc99e Initial load
duke
parents:
diff changeset
452 void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
a61af66fc99e Initial load
duke
parents:
diff changeset
453 MemRegion* bottom_region, MemRegion *top_region) {
a61af66fc99e Initial load
duke
parents:
diff changeset
454 // Is there bottom?
a61af66fc99e Initial load
duke
parents:
diff changeset
455 if (new_region.start() < intersection.start()) { // Yes
a61af66fc99e Initial load
duke
parents:
diff changeset
456 // Try to coalesce small pages into a large one.
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
457 if (UseLargePages && page_size() >= alignment()) {
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
458 HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
0
a61af66fc99e Initial load
duke
parents:
diff changeset
459 if (new_region.contains(p)
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
460 && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
461 if (intersection.contains(p)) {
a61af66fc99e Initial load
duke
parents:
diff changeset
462 intersection = MemRegion(p, intersection.end());
a61af66fc99e Initial load
duke
parents:
diff changeset
463 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
464 intersection = MemRegion(p, p);
a61af66fc99e Initial load
duke
parents:
diff changeset
465 }
a61af66fc99e Initial load
duke
parents:
diff changeset
466 }
a61af66fc99e Initial load
duke
parents:
diff changeset
467 }
a61af66fc99e Initial load
duke
parents:
diff changeset
468 *bottom_region = MemRegion(new_region.start(), intersection.start());
a61af66fc99e Initial load
duke
parents:
diff changeset
469 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
470 *bottom_region = MemRegion();
a61af66fc99e Initial load
duke
parents:
diff changeset
471 }
a61af66fc99e Initial load
duke
parents:
diff changeset
472
a61af66fc99e Initial load
duke
parents:
diff changeset
473 // Is there top?
a61af66fc99e Initial load
duke
parents:
diff changeset
474 if (intersection.end() < new_region.end()) { // Yes
a61af66fc99e Initial load
duke
parents:
diff changeset
475 // Try to coalesce small pages into a large one.
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
476 if (UseLargePages && page_size() >= alignment()) {
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
477 HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
0
a61af66fc99e Initial load
duke
parents:
diff changeset
478 if (new_region.contains(p)
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
479 && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
480 if (intersection.contains(p)) {
a61af66fc99e Initial load
duke
parents:
diff changeset
481 intersection = MemRegion(intersection.start(), p);
a61af66fc99e Initial load
duke
parents:
diff changeset
482 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
483 intersection = MemRegion(p, p);
a61af66fc99e Initial load
duke
parents:
diff changeset
484 }
a61af66fc99e Initial load
duke
parents:
diff changeset
485 }
a61af66fc99e Initial load
duke
parents:
diff changeset
486 }
a61af66fc99e Initial load
duke
parents:
diff changeset
487 *top_region = MemRegion(intersection.end(), new_region.end());
a61af66fc99e Initial load
duke
parents:
diff changeset
488 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
489 *top_region = MemRegion();
a61af66fc99e Initial load
duke
parents:
diff changeset
490 }
a61af66fc99e Initial load
duke
parents:
diff changeset
491 }
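
// A minimal sketch of the alignment arithmetic used by the two tails above,
// with simplified stand-ins for round_to()/round_down(); the helper names and
// the example constant are hypothetical, not HotSpot symbols.
#include <cassert>
#include <cstdint>

static inline uintptr_t align_down(uintptr_t p, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return p & ~(alignment - 1);                      // clear the low-order bits
}

static inline uintptr_t align_up(uintptr_t p, uintptr_t alignment) {
  return align_down(p + alignment - 1, alignment);  // bump past the boundary, then truncate
}

// Usage: with a 2M large page, the bottom tail ends at align_up(intersection
// start) and the top tail starts at align_down(intersection end), so only
// whole large pages remain inside the intersection.
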
a61af66fc99e Initial load
duke
parents:
diff changeset
492
a61af66fc99e Initial load
duke
parents:
diff changeset
493 // Try to merge the invalid region with the bottom or top region by decreasing
a61af66fc99e Initial load
duke
parents:
diff changeset
494 // the intersection area. If the invalid region lies strictly inside the intersection,
a61af66fc99e Initial load
duke
parents:
diff changeset
495 // it is returned non-empty and aligned to the page_size() boundary;
a61af66fc99e Initial load
duke
parents:
diff changeset
496 // otherwise an empty invalid_region is returned.
a61af66fc99e Initial load
duke
parents:
diff changeset
497 // |------------------new_region---------------------------------|
a61af66fc99e Initial load
duke
parents:
diff changeset
498 // |----------------|-------invalid---|--------------------------|
a61af66fc99e Initial load
duke
parents:
diff changeset
499 // |----bottom_region--|---intersection---|------top_region------|
a61af66fc99e Initial load
duke
parents:
diff changeset
500 void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
a61af66fc99e Initial load
duke
parents:
diff changeset
501 MemRegion *invalid_region) {
a61af66fc99e Initial load
duke
parents:
diff changeset
502 if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
a61af66fc99e Initial load
duke
parents:
diff changeset
503 *intersection = MemRegion(invalid_region->end(), intersection->end());
a61af66fc99e Initial load
duke
parents:
diff changeset
504 *invalid_region = MemRegion();
a61af66fc99e Initial load
duke
parents:
diff changeset
505 } else
a61af66fc99e Initial load
duke
parents:
diff changeset
506 if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
a61af66fc99e Initial load
duke
parents:
diff changeset
507 *intersection = MemRegion(intersection->start(), invalid_region->start());
a61af66fc99e Initial load
duke
parents:
diff changeset
508 *invalid_region = MemRegion();
a61af66fc99e Initial load
duke
parents:
diff changeset
509 } else
a61af66fc99e Initial load
duke
parents:
diff changeset
510 if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
a61af66fc99e Initial load
duke
parents:
diff changeset
511 *intersection = MemRegion(new_region.start(), new_region.start());
a61af66fc99e Initial load
duke
parents:
diff changeset
512 *invalid_region = MemRegion();
a61af66fc99e Initial load
duke
parents:
diff changeset
513 } else
a61af66fc99e Initial load
duke
parents:
diff changeset
514 if (intersection->contains(invalid_region)) {
a61af66fc99e Initial load
duke
parents:
diff changeset
515 // This is the only case in which we have to make an additional bias_region() call.
a61af66fc99e Initial load
duke
parents:
diff changeset
516 HeapWord* start = invalid_region->start();
a61af66fc99e Initial load
duke
parents:
diff changeset
517 HeapWord* end = invalid_region->end();
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
518 if (UseLargePages && page_size() >= alignment()) {
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
519 HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
0
a61af66fc99e Initial load
duke
parents:
diff changeset
520 if (new_region.contains(p)) {
a61af66fc99e Initial load
duke
parents:
diff changeset
521 start = p;
a61af66fc99e Initial load
duke
parents:
diff changeset
522 }
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
523 p = (HeapWord*)round_to((intptr_t) end, alignment());
0
a61af66fc99e Initial load
duke
parents:
diff changeset
524 if (new_region.contains(end)) {
a61af66fc99e Initial load
duke
parents:
diff changeset
525 end = p;
a61af66fc99e Initial load
duke
parents:
diff changeset
526 }
a61af66fc99e Initial load
duke
parents:
diff changeset
527 }
a61af66fc99e Initial load
duke
parents:
diff changeset
528 if (intersection->start() > start) {
a61af66fc99e Initial load
duke
parents:
diff changeset
529 *intersection = MemRegion(start, intersection->end());
a61af66fc99e Initial load
duke
parents:
diff changeset
530 }
a61af66fc99e Initial load
duke
parents:
diff changeset
531 if (intersection->end() < end) {
a61af66fc99e Initial load
duke
parents:
diff changeset
532 *intersection = MemRegion(intersection->start(), end);
a61af66fc99e Initial load
duke
parents:
diff changeset
533 }
a61af66fc99e Initial load
duke
parents:
diff changeset
534 *invalid_region = MemRegion(start, end);
a61af66fc99e Initial load
duke
parents:
diff changeset
535 }
a61af66fc99e Initial load
duke
parents:
diff changeset
536 }
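
// A minimal sketch of the merge cases handled above, using a trivial half-open
// [start, end) Region struct; Region and merge() are hypothetical simplifications,
// not the HotSpot MemRegion API.
#include <cstddef>
#include <cstdio>

struct Region {
  size_t start, end;
  bool contains(size_t p) const        { return p >= start && p < end; }
  bool contains(const Region& r) const { return start <= r.start && r.end <= end; }
};

// Shrinks the intersection so that the invalid range falls into the bottom or
// top tail; 'invalid' is cleared unless it lies strictly inside the intersection,
// in which case the caller page-aligns and frees it separately.
static Region merge(Region intersection, Region* invalid) {
  if (intersection.start >= invalid->start && intersection.contains(invalid->end)) {
    intersection.start = invalid->end;        // invalid overlaps the bottom edge
    *invalid = Region{0, 0};
  } else if (intersection.end <= invalid->end && intersection.contains(invalid->start)) {
    intersection.end = invalid->start;        // invalid overlaps the top edge
    *invalid = Region{0, 0};
  } else if (invalid->contains(intersection)) {
    intersection = Region{0, 0};              // nothing valid remains
    *invalid = Region{0, 0};
  }
  return intersection;                        // else: invalid is strictly inside and stays
}

int main() {
  Region inter{100, 200}, invalid{150, 220};
  inter = merge(inter, &invalid);
  std::printf("intersection: [%zu, %zu)\n", inter.start, inter.end);  // [100, 150)
  return 0;
}
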
a61af66fc99e Initial load
duke
parents:
diff changeset
537
263
12eea04c8b06 6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents: 190
diff changeset
538 void MutableNUMASpace::initialize(MemRegion mr,
12eea04c8b06 6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents: 190
diff changeset
539 bool clear_space,
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
540 bool mangle_space,
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
541 bool setup_pages) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
542 assert(clear_space, "Reallocation will destroy data!");
a61af66fc99e Initial load
duke
parents:
diff changeset
543 assert(lgrp_spaces()->length() > 0, "There should be at least one space");
a61af66fc99e Initial load
duke
parents:
diff changeset
544
a61af66fc99e Initial load
duke
parents:
diff changeset
545 MemRegion old_region = region(), new_region;
a61af66fc99e Initial load
duke
parents:
diff changeset
546 set_bottom(mr.start());
a61af66fc99e Initial load
duke
parents:
diff changeset
547 set_end(mr.end());
263
12eea04c8b06 6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents: 190
diff changeset
548 // Must always clear the space
12eea04c8b06 6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents: 190
diff changeset
549 clear(SpaceDecorator::DontMangle);
0
a61af66fc99e Initial load
duke
parents:
diff changeset
550
a61af66fc99e Initial load
duke
parents:
diff changeset
551 // Compute chunk sizes
a61af66fc99e Initial load
duke
parents:
diff changeset
552 size_t prev_page_size = page_size();
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
553 set_page_size(UseLargePages ? alignment() : os::vm_page_size());
0
a61af66fc99e Initial load
duke
parents:
diff changeset
554 HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
a61af66fc99e Initial load
duke
parents:
diff changeset
555 HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
a61af66fc99e Initial load
duke
parents:
diff changeset
556 size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
a61af66fc99e Initial load
duke
parents:
diff changeset
557
a61af66fc99e Initial load
duke
parents:
diff changeset
558 // Try small pages if the chunk size is too small
a61af66fc99e Initial load
duke
parents:
diff changeset
559 if (base_space_size_pages / lgrp_spaces()->length() == 0
a61af66fc99e Initial load
duke
parents:
diff changeset
560 && page_size() > (size_t)os::vm_page_size()) {
a61af66fc99e Initial load
duke
parents:
diff changeset
561 set_page_size(os::vm_page_size());
a61af66fc99e Initial load
duke
parents:
diff changeset
562 rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
a61af66fc99e Initial load
duke
parents:
diff changeset
563 rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
a61af66fc99e Initial load
duke
parents:
diff changeset
564 base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
a61af66fc99e Initial load
duke
parents:
diff changeset
565 }
a61af66fc99e Initial load
duke
parents:
diff changeset
566 guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
a61af66fc99e Initial load
duke
parents:
diff changeset
567 set_base_space_size(base_space_size_pages);
a61af66fc99e Initial load
duke
parents:
diff changeset
568
a61af66fc99e Initial load
duke
parents:
diff changeset
569 // Handle space resize
a61af66fc99e Initial load
duke
parents:
diff changeset
570 MemRegion top_region, bottom_region;
a61af66fc99e Initial load
duke
parents:
diff changeset
571 if (!old_region.equals(region())) {
a61af66fc99e Initial load
duke
parents:
diff changeset
572 new_region = MemRegion(rounded_bottom, rounded_end);
a61af66fc99e Initial load
duke
parents:
diff changeset
573 MemRegion intersection = new_region.intersection(old_region);
a61af66fc99e Initial load
duke
parents:
diff changeset
574 if (intersection.start() == NULL ||
a61af66fc99e Initial load
duke
parents:
diff changeset
575 intersection.end() == NULL ||
a61af66fc99e Initial load
duke
parents:
diff changeset
576 prev_page_size > page_size()) { // If the page size got smaller we have to change
a61af66fc99e Initial load
duke
parents:
diff changeset
577 // the page size preference for the whole space.
a61af66fc99e Initial load
duke
parents:
diff changeset
578 intersection = MemRegion(new_region.start(), new_region.start());
a61af66fc99e Initial load
duke
parents:
diff changeset
579 }
a61af66fc99e Initial load
duke
parents:
diff changeset
580 select_tails(new_region, intersection, &bottom_region, &top_region);
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
581 bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
582 bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
0
a61af66fc99e Initial load
duke
parents:
diff changeset
583 }
a61af66fc99e Initial load
duke
parents:
diff changeset
584
a61af66fc99e Initial load
duke
parents:
diff changeset
585 // Check if the space layout has changed significantly.
a61af66fc99e Initial load
duke
parents:
diff changeset
586 // This happens when the space has been resized so that either head or tail
a61af66fc99e Initial load
duke
parents:
diff changeset
587 // chunk became smaller than a page.
a61af66fc99e Initial load
duke
parents:
diff changeset
588 bool layout_valid = UseAdaptiveNUMAChunkSizing &&
a61af66fc99e Initial load
duke
parents:
diff changeset
589 current_chunk_size(0) > page_size() &&
a61af66fc99e Initial load
duke
parents:
diff changeset
590 current_chunk_size(lgrp_spaces()->length() - 1) > page_size();
a61af66fc99e Initial load
duke
parents:
diff changeset
591
a61af66fc99e Initial load
duke
parents:
diff changeset
592
a61af66fc99e Initial load
duke
parents:
diff changeset
593 for (int i = 0; i < lgrp_spaces()->length(); i++) {
a61af66fc99e Initial load
duke
parents:
diff changeset
594 LGRPSpace *ls = lgrp_spaces()->at(i);
a61af66fc99e Initial load
duke
parents:
diff changeset
595 MutableSpace *s = ls->space();
a61af66fc99e Initial load
duke
parents:
diff changeset
596 old_region = s->region();
a61af66fc99e Initial load
duke
parents:
diff changeset
597
a61af66fc99e Initial load
duke
parents:
diff changeset
598 size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
a61af66fc99e Initial load
duke
parents:
diff changeset
599 if (i < lgrp_spaces()->length() - 1) {
a61af66fc99e Initial load
duke
parents:
diff changeset
600 if (!UseAdaptiveNUMAChunkSizing ||
a61af66fc99e Initial load
duke
parents:
diff changeset
601 (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
a61af66fc99e Initial load
duke
parents:
diff changeset
602 samples_count() < AdaptiveSizePolicyReadyThreshold) {
a61af66fc99e Initial load
duke
parents:
diff changeset
603 // No adaptation. Divide the space equally.
a61af66fc99e Initial load
duke
parents:
diff changeset
604 chunk_byte_size = default_chunk_size();
a61af66fc99e Initial load
duke
parents:
diff changeset
605 } else
a61af66fc99e Initial load
duke
parents:
diff changeset
606 if (!layout_valid || NUMASpaceResizeRate == 0) {
a61af66fc99e Initial load
duke
parents:
diff changeset
607 // Fast adaptation. If no space resize rate is set, resize
a61af66fc99e Initial load
duke
parents:
diff changeset
608 // the chunks instantly.
a61af66fc99e Initial load
duke
parents:
diff changeset
609 chunk_byte_size = adaptive_chunk_size(i, 0);
a61af66fc99e Initial load
duke
parents:
diff changeset
610 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
611 // Slow adaptation. Resize the chunks moving no more than
a61af66fc99e Initial load
duke
parents:
diff changeset
612 // NUMASpaceResizeRate bytes per collection.
a61af66fc99e Initial load
duke
parents:
diff changeset
613 size_t limit = NUMASpaceResizeRate /
a61af66fc99e Initial load
duke
parents:
diff changeset
614 (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
a61af66fc99e Initial load
duke
parents:
diff changeset
615 chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
a61af66fc99e Initial load
duke
parents:
diff changeset
616 }
a61af66fc99e Initial load
duke
parents:
diff changeset
617
a61af66fc99e Initial load
duke
parents:
diff changeset
618 assert(chunk_byte_size >= page_size(), "Chunk size too small");
a61af66fc99e Initial load
duke
parents:
diff changeset
619 assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
a61af66fc99e Initial load
duke
parents:
diff changeset
620 }
a61af66fc99e Initial load
duke
parents:
diff changeset
621
a61af66fc99e Initial load
duke
parents:
diff changeset
622 if (i == 0) { // Bottom chunk
a61af66fc99e Initial load
duke
parents:
diff changeset
623 if (i != lgrp_spaces()->length() - 1) {
a61af66fc99e Initial load
duke
parents:
diff changeset
624 new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
a61af66fc99e Initial load
duke
parents:
diff changeset
625 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
626 new_region = MemRegion(bottom(), end());
a61af66fc99e Initial load
duke
parents:
diff changeset
627 }
a61af66fc99e Initial load
duke
parents:
diff changeset
628 } else
a61af66fc99e Initial load
duke
parents:
diff changeset
629 if (i < lgrp_spaces()->length() - 1) { // Middle chunks
a61af66fc99e Initial load
duke
parents:
diff changeset
630 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
a61af66fc99e Initial load
duke
parents:
diff changeset
631 new_region = MemRegion(ps->end(),
a61af66fc99e Initial load
duke
parents:
diff changeset
632 ps->end() + (chunk_byte_size >> LogHeapWordSize));
a61af66fc99e Initial load
duke
parents:
diff changeset
633 } else { // Top chunk
a61af66fc99e Initial load
duke
parents:
diff changeset
634 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
a61af66fc99e Initial load
duke
parents:
diff changeset
635 new_region = MemRegion(ps->end(), end());
a61af66fc99e Initial load
duke
parents:
diff changeset
636 }
a61af66fc99e Initial load
duke
parents:
diff changeset
637 guarantee(region().contains(new_region), "Region invariant");
a61af66fc99e Initial load
duke
parents:
diff changeset
638
a61af66fc99e Initial load
duke
parents:
diff changeset
639
a61af66fc99e Initial load
duke
parents:
diff changeset
640 // The general case:
a61af66fc99e Initial load
duke
parents:
diff changeset
641 // |---------------------|--invalid---|--------------------------|
a61af66fc99e Initial load
duke
parents:
diff changeset
642 // |------------------new_region---------------------------------|
a61af66fc99e Initial load
duke
parents:
diff changeset
643 // |----bottom_region--|---intersection---|------top_region------|
a61af66fc99e Initial load
duke
parents:
diff changeset
644 // |----old_region----|
a61af66fc99e Initial load
duke
parents:
diff changeset
645 // The intersection part has all its pages in place; we don't need to migrate them.
a61af66fc99e Initial load
duke
parents:
diff changeset
646 // Pages for the top and bottom part should be freed and then reallocated.
a61af66fc99e Initial load
duke
parents:
diff changeset
647
a61af66fc99e Initial load
duke
parents:
diff changeset
648 MemRegion intersection = old_region.intersection(new_region);
a61af66fc99e Initial load
duke
parents:
diff changeset
649
a61af66fc99e Initial load
duke
parents:
diff changeset
650 if (intersection.start() == NULL || intersection.end() == NULL) {
a61af66fc99e Initial load
duke
parents:
diff changeset
651 intersection = MemRegion(new_region.start(), new_region.start());
a61af66fc99e Initial load
duke
parents:
diff changeset
652 }
a61af66fc99e Initial load
duke
parents:
diff changeset
653
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
654 if (!os::numa_has_static_binding()) {
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
655 MemRegion invalid_region = ls->invalid_region().intersection(new_region);
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
656 // The invalid region is a range of memory that could have
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
657 // been allocated on another node. That is relevant only on Solaris, where
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
658 // there is no static memory binding.
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
659 if (!invalid_region.is_empty()) {
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
660 merge_regions(new_region, &intersection, &invalid_region);
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
661 free_region(invalid_region);
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
662 ls->set_invalid_region(MemRegion());
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
663 }
0
a61af66fc99e Initial load
duke
parents:
diff changeset
664 }
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
665
0
a61af66fc99e Initial load
duke
parents:
diff changeset
666 select_tails(new_region, intersection, &bottom_region, &top_region);
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
667
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
668 if (!os::numa_has_static_binding()) {
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
669 // On a system with the first-touch policy it is enough
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
670 // to free the pages.
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
671 free_region(bottom_region);
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
672 free_region(top_region);
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
673 } else {
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
674 // In a system with static binding we have to change the bias whenever
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
675 // we reshape the heap.
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
676 bias_region(bottom_region, ls->lgrp_id());
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
677 bias_region(top_region, ls->lgrp_id());
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
678 }
0
a61af66fc99e Initial load
duke
parents:
diff changeset
679
263
12eea04c8b06 6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents: 190
diff changeset
680 // Clear space (set top = bottom) but never mangle.
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
681 s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
0
a61af66fc99e Initial load
duke
parents:
diff changeset
682
a61af66fc99e Initial load
duke
parents:
diff changeset
683 set_adaptation_cycles(samples_count());
a61af66fc99e Initial load
duke
parents:
diff changeset
684 }
a61af66fc99e Initial load
duke
parents:
diff changeset
685 }
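
// A small sketch of the slow-adaptation budget computed above: the per-collection
// budget (NUMASpaceResizeRate) is split into n*(n+1)/2 shares so that chunk i may
// move at most (i+1) shares, but never less than one page. The names resize_rate,
// n_chunks and the 8M figure are illustrative only.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t per_chunk_limit(size_t resize_rate, int n_chunks, int i, size_t page_size) {
  size_t limit = resize_rate / (size_t(n_chunks) * (n_chunks + 1) / 2);  // one "share"
  return std::max(limit * size_t(i + 1), page_size);
}

int main() {
  const size_t rate = 8u * 1024 * 1024;  // hypothetical 8M budget per collection
  const size_t page = 4096;
  for (int i = 0; i < 4; i++) {
    std::printf("chunk %d may move up to %zu bytes\n", i, per_chunk_limit(rate, 4, i, page));
  }
  return 0;
}
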
a61af66fc99e Initial load
duke
parents:
diff changeset
686
a61af66fc99e Initial load
duke
parents:
diff changeset
687 // Set the top of the whole space.
a61af66fc99e Initial load
duke
parents:
diff changeset
688 // Mark the holes in chunks below the top() as invalid.
a61af66fc99e Initial load
duke
parents:
diff changeset
689 void MutableNUMASpace::set_top(HeapWord* value) {
a61af66fc99e Initial load
duke
parents:
diff changeset
690 bool found_top = false;
190
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
691 for (int i = 0; i < lgrp_spaces()->length();) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
692 LGRPSpace *ls = lgrp_spaces()->at(i);
a61af66fc99e Initial load
duke
parents:
diff changeset
693 MutableSpace *s = ls->space();
a61af66fc99e Initial load
duke
parents:
diff changeset
694 HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
a61af66fc99e Initial load
duke
parents:
diff changeset
695
a61af66fc99e Initial load
duke
parents:
diff changeset
696 if (s->contains(value)) {
190
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
697 // Check if setting the chunk's top to a given value would create a hole less than
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
698 // a minimal object; if this is the last chunk we don't care.
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
699 if (i < lgrp_spaces()->length() - 1) {
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
700 size_t remainder = pointer_delta(s->end(), value);
481
7d7a7c599c17 6578152: fill_region_with_object has usability and safety issues
jcoomes
parents: 462
diff changeset
701 const size_t min_fill_size = CollectedHeap::min_fill_size();
7d7a7c599c17 6578152: fill_region_with_object has usability and safety issues
jcoomes
parents: 462
diff changeset
702 if (remainder < min_fill_size && remainder > 0) {
7d7a7c599c17 6578152: fill_region_with_object has usability and safety issues
jcoomes
parents: 462
diff changeset
703 // Add a minimum-size filler object; it will cross the chunk boundary.
7d7a7c599c17 6578152: fill_region_with_object has usability and safety issues
jcoomes
parents: 462
diff changeset
704 CollectedHeap::fill_with_object(value, min_fill_size);
7d7a7c599c17 6578152: fill_region_with_object has usability and safety issues
jcoomes
parents: 462
diff changeset
705 value += min_fill_size;
190
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
706 assert(!s->contains(value), "Should be in the next chunk");
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
707 // Restart the loop from the same chunk, since the value has moved
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
708 // to the next one.
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
709 continue;
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
710 }
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
711 }
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
712
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
713 if (!os::numa_has_static_binding() && top < value && top < s->end()) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
714 ls->add_invalid_region(MemRegion(top, value));
a61af66fc99e Initial load
duke
parents:
diff changeset
715 }
a61af66fc99e Initial load
duke
parents:
diff changeset
716 s->set_top(value);
a61af66fc99e Initial load
duke
parents:
diff changeset
717 found_top = true;
a61af66fc99e Initial load
duke
parents:
diff changeset
718 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
719 if (found_top) {
a61af66fc99e Initial load
duke
parents:
diff changeset
720 s->set_top(s->bottom());
a61af66fc99e Initial load
duke
parents:
diff changeset
721 } else {
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
722 if (!os::numa_has_static_binding() && top < s->end()) {
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
723 ls->add_invalid_region(MemRegion(top, s->end()));
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
724 }
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
725 s->set_top(s->end());
0
a61af66fc99e Initial load
duke
parents:
diff changeset
726 }
a61af66fc99e Initial load
duke
parents:
diff changeset
727 }
190
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
728 i++;
0
a61af66fc99e Initial load
duke
parents:
diff changeset
729 }
a61af66fc99e Initial load
duke
parents:
diff changeset
730 MutableSpace::set_top(value);
a61af66fc99e Initial load
duke
parents:
diff changeset
731 }
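
// A minimal sketch of the hole check above: if the requested top would leave a
// tail smaller than the smallest parsable object, a filler object is placed and
// top effectively moves into the next chunk. All sizes are in words;
// min_fill_words stands in for CollectedHeap::min_fill_size().
#include <cstddef>

static size_t adjust_top_for_min_fill(size_t chunk_end, size_t requested_top,
                                      size_t min_fill_words) {
  size_t remainder = chunk_end - requested_top;     // words left in this chunk
  if (remainder > 0 && remainder < min_fill_words) {
    // A fill_with_object(requested_top, min_fill_words) call would go here; the
    // filler crosses the chunk boundary, so the returned top is in the next chunk.
    requested_top += min_fill_words;
  }
  return requested_top;
}
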
a61af66fc99e Initial load
duke
parents:
diff changeset
732
263
12eea04c8b06 6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents: 190
diff changeset
733 void MutableNUMASpace::clear(bool mangle_space) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
734 MutableSpace::set_top(bottom());
a61af66fc99e Initial load
duke
parents:
diff changeset
735 for (int i = 0; i < lgrp_spaces()->length(); i++) {
263
12eea04c8b06 6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents: 190
diff changeset
736 // Never mangle NUMA spaces because the mangling will
12eea04c8b06 6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents: 190
diff changeset
737 // bind the memory to a possibly unwanted lgroup.
12eea04c8b06 6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents: 190
diff changeset
738 lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
0
a61af66fc99e Initial load
duke
parents:
diff changeset
739 }
a61af66fc99e Initial load
duke
parents:
diff changeset
740 }
a61af66fc99e Initial load
duke
parents:
diff changeset
741
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
742 /*
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
743 Linux supports static memory binding, therefore most of the
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
744 logic dealing with the possible invalid page allocation is effectively
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
745 disabled. Besides, there is no notion of a home node in Linux. A
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
746 thread is allowed to migrate freely, although the scheduler is rather
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
747 reluctant to move threads between nodes. We check for the current
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
748 node on every allocation, and with high probability a thread stays on
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
749 the same node for some time, allowing local access to recently allocated
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
750 objects.
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
751 */
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
752
0
a61af66fc99e Initial load
duke
parents:
diff changeset
753 HeapWord* MutableNUMASpace::allocate(size_t size) {
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
754 Thread* thr = Thread::current();
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
755 int lgrp_id = thr->lgrp_id();
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
756 if (lgrp_id == -1 || !os::numa_has_group_homing()) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
757 lgrp_id = os::numa_get_group_id();
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
758 thr->set_lgrp_id(lgrp_id);
0
a61af66fc99e Initial load
duke
parents:
diff changeset
759 }
a61af66fc99e Initial load
duke
parents:
diff changeset
760
a61af66fc99e Initial load
duke
parents:
diff changeset
761 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
a61af66fc99e Initial load
duke
parents:
diff changeset
762
a61af66fc99e Initial load
duke
parents:
diff changeset
763 // It is possible that a new CPU has been hotplugged and
a61af66fc99e Initial load
duke
parents:
diff changeset
764 // we haven't reshaped the space accordingly.
a61af66fc99e Initial load
duke
parents:
diff changeset
765 if (i == -1) {
a61af66fc99e Initial load
duke
parents:
diff changeset
766 i = os::random() % lgrp_spaces()->length();
a61af66fc99e Initial load
duke
parents:
diff changeset
767 }
a61af66fc99e Initial load
duke
parents:
diff changeset
768
373
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
769 LGRPSpace* ls = lgrp_spaces()->at(i);
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
770 MutableSpace *s = ls->space();
0
a61af66fc99e Initial load
duke
parents:
diff changeset
771 HeapWord *p = s->allocate(size);
a61af66fc99e Initial load
duke
parents:
diff changeset
772
144
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
773 if (p != NULL) {
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
774 size_t remainder = s->free_in_words();
1571
2d127394260e 6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents: 628
diff changeset
775 if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
144
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
776 s->set_top(s->top() - size);
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
777 p = NULL;
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
778 }
0
a61af66fc99e Initial load
duke
parents:
diff changeset
779 }
a61af66fc99e Initial load
duke
parents:
diff changeset
780 if (p != NULL) {
a61af66fc99e Initial load
duke
parents:
diff changeset
781 if (top() < s->top()) { // Keep _top updated.
a61af66fc99e Initial load
duke
parents:
diff changeset
782 MutableSpace::set_top(s->top());
a61af66fc99e Initial load
duke
parents:
diff changeset
783 }
a61af66fc99e Initial load
duke
parents:
diff changeset
784 }
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
785 // Make the page allocation happen here if there is no static binding.
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
786 if (p != NULL && !os::numa_has_static_binding()) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
787 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
a61af66fc99e Initial load
duke
parents:
diff changeset
788 *(int*)i = 0;
a61af66fc99e Initial load
duke
parents:
diff changeset
789 }
a61af66fc99e Initial load
duke
parents:
diff changeset
790 }
373
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
791 if (p == NULL) {
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
792 ls->set_allocation_failed();
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
793 }
0
a61af66fc99e Initial load
duke
parents:
diff changeset
794 return p;
a61af66fc99e Initial load
duke
parents:
diff changeset
795 }
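
// A sketch of the touch loop above: on systems without static binding the
// allocating thread writes one word per page so the OS commits those pages on
// the thread's current node (first-touch placement). first_touch() and
// page_bytes are illustrative stand-ins, not HotSpot functions.
#include <cstddef>

static void first_touch(void* start, size_t bytes, size_t page_bytes) {
  char* p = static_cast<char*>(start);
  for (size_t off = 0; off < bytes; off += page_bytes) {
    // Touching the page is what matters; the value written is irrelevant.
    *reinterpret_cast<volatile int*>(p + off) = 0;
  }
}
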
a61af66fc99e Initial load
duke
parents:
diff changeset
796
a61af66fc99e Initial load
duke
parents:
diff changeset
797 // This version is lock-free.
a61af66fc99e Initial load
duke
parents:
diff changeset
798 HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
799 Thread* thr = Thread::current();
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
800 int lgrp_id = thr->lgrp_id();
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
801 if (lgrp_id == -1 || !os::numa_has_group_homing()) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
802 lgrp_id = os::numa_get_group_id();
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
803 thr->set_lgrp_id(lgrp_id);
0
a61af66fc99e Initial load
duke
parents:
diff changeset
804 }
a61af66fc99e Initial load
duke
parents:
diff changeset
805
a61af66fc99e Initial load
duke
parents:
diff changeset
806 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
a61af66fc99e Initial load
duke
parents:
diff changeset
807 // It is possible that a new CPU has been hotplugged and
a61af66fc99e Initial load
duke
parents:
diff changeset
808 // we haven't reshaped the space accordingly.
a61af66fc99e Initial load
duke
parents:
diff changeset
809 if (i == -1) {
a61af66fc99e Initial load
duke
parents:
diff changeset
810 i = os::random() % lgrp_spaces()->length();
a61af66fc99e Initial load
duke
parents:
diff changeset
811 }
373
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
812 LGRPSpace *ls = lgrp_spaces()->at(i);
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
813 MutableSpace *s = ls->space();
0
a61af66fc99e Initial load
duke
parents:
diff changeset
814 HeapWord *p = s->cas_allocate(size);
144
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
815 if (p != NULL) {
190
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
816 size_t remainder = pointer_delta(s->end(), p + size);
1571
2d127394260e 6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents: 628
diff changeset
817 if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
144
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
818 if (s->cas_deallocate(p, size)) {
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
819 // We were the last to allocate and created a fragment smaller than
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
820 // a minimal object.
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
821 p = NULL;
190
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
822 } else {
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
823 guarantee(false, "Deallocation should always succeed");
144
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
824 }
0
a61af66fc99e Initial load
duke
parents:
diff changeset
825 }
a61af66fc99e Initial load
duke
parents:
diff changeset
826 }
a61af66fc99e Initial load
duke
parents:
diff changeset
827 if (p != NULL) {
a61af66fc99e Initial load
duke
parents:
diff changeset
828 HeapWord* cur_top, *cur_chunk_top = p + size;
a61af66fc99e Initial load
duke
parents:
diff changeset
829 while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
a61af66fc99e Initial load
duke
parents:
diff changeset
830 if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
a61af66fc99e Initial load
duke
parents:
diff changeset
831 break;
a61af66fc99e Initial load
duke
parents:
diff changeset
832 }
a61af66fc99e Initial load
duke
parents:
diff changeset
833 }
a61af66fc99e Initial load
duke
parents:
diff changeset
834 }
a61af66fc99e Initial load
duke
parents:
diff changeset
835
141
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
836 // Make the page allocation happen here if there is no static binding.
fcbfc50865ab 6684395: Port NUMA-aware allocator to linux
iveresov
parents: 0
diff changeset
837 if (p != NULL && !os::numa_has_static_binding()) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
838 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
a61af66fc99e Initial load
duke
parents:
diff changeset
839 *(int*)i = 0;
a61af66fc99e Initial load
duke
parents:
diff changeset
840 }
a61af66fc99e Initial load
duke
parents:
diff changeset
841 }
373
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
842 if (p == NULL) {
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
843 ls->set_allocation_failed();
06df86c2ec37 6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents: 269
diff changeset
844 }
0
a61af66fc99e Initial load
duke
parents:
diff changeset
845 return p;
a61af66fc99e Initial load
duke
parents:
diff changeset
846 }
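
// A sketch of the lock-free top update above, written with std::atomic instead of
// HotSpot's Atomic::cmpxchg_ptr: advance the shared top only while our chunk's top
// is ahead of it, and stop once a CAS succeeds or another thread has already
// published a higher value. publish_top() is an illustrative stand-in.
#include <atomic>
#include <cstdint>

static void publish_top(std::atomic<uintptr_t>& shared_top, uintptr_t cur_chunk_top) {
  uintptr_t observed = shared_top.load(std::memory_order_relaxed);
  while (observed < cur_chunk_top &&
         !shared_top.compare_exchange_weak(observed, cur_chunk_top,
                                           std::memory_order_release,
                                           std::memory_order_relaxed)) {
    // compare_exchange_weak reloads 'observed' on failure, so the loop exits as
    // soon as the shared top reaches or passes cur_chunk_top.
  }
}
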
a61af66fc99e Initial load
duke
parents:
diff changeset
847
a61af66fc99e Initial load
duke
parents:
diff changeset
848 void MutableNUMASpace::print_short_on(outputStream* st) const {
a61af66fc99e Initial load
duke
parents:
diff changeset
849 MutableSpace::print_short_on(st);
a61af66fc99e Initial load
duke
parents:
diff changeset
850 st->print(" (");
a61af66fc99e Initial load
duke
parents:
diff changeset
851 for (int i = 0; i < lgrp_spaces()->length(); i++) {
a61af66fc99e Initial load
duke
parents:
diff changeset
852 st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
a61af66fc99e Initial load
duke
parents:
diff changeset
853 lgrp_spaces()->at(i)->space()->print_short_on(st);
a61af66fc99e Initial load
duke
parents:
diff changeset
854 if (i < lgrp_spaces()->length() - 1) {
a61af66fc99e Initial load
duke
parents:
diff changeset
855 st->print(", ");
a61af66fc99e Initial load
duke
parents:
diff changeset
856 }
a61af66fc99e Initial load
duke
parents:
diff changeset
857 }
a61af66fc99e Initial load
duke
parents:
diff changeset
858 st->print(")");
a61af66fc99e Initial load
duke
parents:
diff changeset
859 }
a61af66fc99e Initial load
duke
parents:
diff changeset
860
a61af66fc99e Initial load
duke
parents:
diff changeset
861 void MutableNUMASpace::print_on(outputStream* st) const {
a61af66fc99e Initial load
duke
parents:
diff changeset
862 MutableSpace::print_on(st);
a61af66fc99e Initial load
duke
parents:
diff changeset
863 for (int i = 0; i < lgrp_spaces()->length(); i++) {
a61af66fc99e Initial load
duke
parents:
diff changeset
864 LGRPSpace *ls = lgrp_spaces()->at(i);
a61af66fc99e Initial load
duke
parents:
diff changeset
865 st->print(" lgrp %d", ls->lgrp_id());
a61af66fc99e Initial load
duke
parents:
diff changeset
866 ls->space()->print_on(st);
a61af66fc99e Initial load
duke
parents:
diff changeset
867 if (NUMAStats) {
144
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
868 for (int i = 0; i < lgrp_spaces()->length(); i++) {
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
869 lgrp_spaces()->at(i)->accumulate_statistics(page_size());
e3729351c946 6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents: 141
diff changeset
870 }
0
a61af66fc99e Initial load
duke
parents:
diff changeset
871 st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
a61af66fc99e Initial load
duke
parents:
diff changeset
872 ls->space_stats()->_local_space / K,
a61af66fc99e Initial load
duke
parents:
diff changeset
873 ls->space_stats()->_remote_space / K,
a61af66fc99e Initial load
duke
parents:
diff changeset
874 ls->space_stats()->_unbiased_space / K,
a61af66fc99e Initial load
duke
parents:
diff changeset
875 ls->space_stats()->_uncommited_space / K,
a61af66fc99e Initial load
duke
parents:
diff changeset
876 ls->space_stats()->_large_pages,
a61af66fc99e Initial load
duke
parents:
diff changeset
877 ls->space_stats()->_small_pages);
a61af66fc99e Initial load
duke
parents:
diff changeset
878 }
a61af66fc99e Initial load
duke
parents:
diff changeset
879 }
a61af66fc99e Initial load
duke
parents:
diff changeset
880 }
a61af66fc99e Initial load
duke
parents:
diff changeset
881
6008
b632e80fc9dc 4988100: oop_verify_old_oop appears to be dead
brutisso
parents: 5965
diff changeset
882 void MutableNUMASpace::verify() {
190
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
883 // This can be called after setting an arbitrary value to the space's top,
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
884 // so an object can cross the chunk boundary. We ensure the parsability
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
885 // of the space and just walk the objects in a linear fashion.
d1635bf93939 6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents: 144
diff changeset
886 ensure_parsability();
6008
b632e80fc9dc 4988100: oop_verify_old_oop appears to be dead
brutisso
parents: 5965
diff changeset
887 MutableSpace::verify();
0
a61af66fc99e Initial load
duke
parents:
diff changeset
888 }
a61af66fc99e Initial load
duke
parents:
diff changeset
889
a61af66fc99e Initial load
duke
parents:
diff changeset
890 // Scan pages and gather stats about page placement and size.
a61af66fc99e Initial load
duke
parents:
diff changeset
891 void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
a61af66fc99e Initial load
duke
parents:
diff changeset
892 clear_space_stats();
a61af66fc99e Initial load
duke
parents:
diff changeset
893 char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
a61af66fc99e Initial load
duke
parents:
diff changeset
894 char* end = (char*)round_down((intptr_t) space()->end(), page_size);
a61af66fc99e Initial load
duke
parents:
diff changeset
895 if (start < end) {
a61af66fc99e Initial load
duke
parents:
diff changeset
896 for (char *p = start; p < end;) {
a61af66fc99e Initial load
duke
parents:
diff changeset
897 os::page_info info;
a61af66fc99e Initial load
duke
parents:
diff changeset
898 if (os::get_page_info(p, &info)) {
a61af66fc99e Initial load
duke
parents:
diff changeset
899 if (info.size > 0) {
a61af66fc99e Initial load
duke
parents:
diff changeset
900 if (info.size > (size_t)os::vm_page_size()) {
a61af66fc99e Initial load
duke
parents:
diff changeset
901 space_stats()->_large_pages++;
a61af66fc99e Initial load
duke
parents:
diff changeset
902 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
903 space_stats()->_small_pages++;
a61af66fc99e Initial load
duke
parents:
diff changeset
904 }
a61af66fc99e Initial load
duke
parents:
diff changeset
905 if (info.lgrp_id == lgrp_id()) {
a61af66fc99e Initial load
duke
parents:
diff changeset
906 space_stats()->_local_space += info.size;
a61af66fc99e Initial load
duke
parents:
diff changeset
907 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
908 space_stats()->_remote_space += info.size;
a61af66fc99e Initial load
duke
parents:
diff changeset
909 }
a61af66fc99e Initial load
duke
parents:
diff changeset
910 p += info.size;
a61af66fc99e Initial load
duke
parents:
diff changeset
911 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
912 p += os::vm_page_size();
a61af66fc99e Initial load
duke
parents:
diff changeset
913 space_stats()->_uncommited_space += os::vm_page_size();
a61af66fc99e Initial load
duke
parents:
diff changeset
914 }
a61af66fc99e Initial load
duke
parents:
diff changeset
915 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
916 return;
a61af66fc99e Initial load
duke
parents:
diff changeset
917 }
a61af66fc99e Initial load
duke
parents:
diff changeset
918 }
a61af66fc99e Initial load
duke
parents:
diff changeset
919 }
a61af66fc99e Initial load
duke
parents:
diff changeset
920 space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
a61af66fc99e Initial load
duke
parents:
diff changeset
921 pointer_delta(space()->end(), end, sizeof(char));
a61af66fc99e Initial load
duke
parents:
diff changeset
922
a61af66fc99e Initial load
duke
parents:
diff changeset
923 }
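
// A sketch of the per-page classification above: each committed page counts as
// local or remote depending on whether it lives on this space's lgroup, and pages
// the OS reports with size 0 count as uncommitted. PageInfo, PageStats and tally()
// are hypothetical simplifications of os::page_info and the stats fields.
#include <cstddef>

struct PageInfo  { size_t size; int lgrp_id; };
struct PageStats { size_t local, remote, uncommitted, large_pages, small_pages; };

static void tally(PageStats& st, const PageInfo& info, int home_lgrp, size_t small_page) {
  if (info.size == 0) {                       // page not committed yet
    st.uncommitted += small_page;
    return;
  }
  (info.size > small_page ? st.large_pages : st.small_pages)++;
  (info.lgrp_id == home_lgrp ? st.local : st.remote) += info.size;
}
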
a61af66fc99e Initial load
duke
parents:
diff changeset
924
a61af66fc99e Initial load
duke
parents:
diff changeset
925 // Scan page_count pages and verify that they have the right size and placement.
a61af66fc99e Initial load
duke
parents:
diff changeset
926 // If invalid pages are found, they are freed in the hope that subsequent reallocation
a61af66fc99e Initial load
duke
parents:
diff changeset
927 // will be more successful.
a61af66fc99e Initial load
duke
parents:
diff changeset
928 void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
a61af66fc99e Initial load
duke
parents:
diff changeset
929 {
a61af66fc99e Initial load
duke
parents:
diff changeset
930 char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
a61af66fc99e Initial load
duke
parents:
diff changeset
931 char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);
a61af66fc99e Initial load
duke
parents:
diff changeset
932
a61af66fc99e Initial load
duke
parents:
diff changeset
933 if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
a61af66fc99e Initial load
duke
parents:
diff changeset
934 set_last_page_scanned(range_start);
a61af66fc99e Initial load
duke
parents:
diff changeset
935 }
a61af66fc99e Initial load
duke
parents:
diff changeset
936
a61af66fc99e Initial load
duke
parents:
diff changeset
937 char *scan_start = last_page_scanned();
a61af66fc99e Initial load
duke
parents:
diff changeset
938 char* scan_end = MIN2(scan_start + page_size * page_count, range_end);
a61af66fc99e Initial load
duke
parents:
diff changeset
939
a61af66fc99e Initial load
duke
parents:
diff changeset
940 os::page_info page_expected, page_found;
a61af66fc99e Initial load
duke
parents:
diff changeset
941 page_expected.size = page_size;
a61af66fc99e Initial load
duke
parents:
diff changeset
942 page_expected.lgrp_id = lgrp_id();
a61af66fc99e Initial load
duke
parents:
diff changeset
943
a61af66fc99e Initial load
duke
parents:
diff changeset
944 char *s = scan_start;
a61af66fc99e Initial load
duke
parents:
diff changeset
945 while (s < scan_end) {
a61af66fc99e Initial load
duke
parents:
diff changeset
946 char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
a61af66fc99e Initial load
duke
parents:
diff changeset
947 if (e == NULL) {
a61af66fc99e Initial load
duke
parents:
diff changeset
948 break;
a61af66fc99e Initial load
duke
parents:
diff changeset
949 }
a61af66fc99e Initial load
duke
parents:
diff changeset
950 if (e != scan_end) {
8739
ca9580859cf4 8004697: SIGSEGV on Solaris sparc with -XX:+UseNUMA
stefank
parents: 7180
diff changeset
951 assert(e < scan_end, err_msg("e: " PTR_FORMAT " scan_end: " PTR_FORMAT, e, scan_end));
ca9580859cf4 8004697: SIGSEGV on Solaris sparc with -XX:+UseNUMA
stefank
parents: 7180
diff changeset
952
0
a61af66fc99e Initial load
duke
parents:
diff changeset
953 if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
a61af66fc99e Initial load
duke
parents:
diff changeset
954 && page_expected.size != 0) {
4734
20bfb6d15a94 7124829: NUMA: memory leak on Linux with large pages
iveresov
parents: 3960
diff changeset
955 os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
0
a61af66fc99e Initial load
duke
parents:
diff changeset
956 }
a61af66fc99e Initial load
duke
parents:
diff changeset
957 page_expected = page_found;
a61af66fc99e Initial load
duke
parents:
diff changeset
958 }
a61af66fc99e Initial load
duke
parents:
diff changeset
959 s = e;
a61af66fc99e Initial load
duke
parents:
diff changeset
960 }
a61af66fc99e Initial load
duke
parents:
diff changeset
961
a61af66fc99e Initial load
duke
parents:
diff changeset
962 set_last_page_scanned(scan_end);
a61af66fc99e Initial load
duke
parents:
diff changeset
963 }
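
// A sketch of the incremental scan window above: resume from where the last scan
// stopped, cover at most pages_per_call pages, and remember the stopping point so
// repeated calls sweep the whole range over time. ScanWindow and next_window()
// are illustrative stand-ins.
#include <algorithm>
#include <cstddef>
#include <cstdint>

struct ScanWindow { uintptr_t start, end; };

static ScanWindow next_window(uintptr_t range_start, uintptr_t range_end,
                              uintptr_t& last_scanned, uintptr_t page_bytes,
                              size_t pages_per_call) {
  if (last_scanned < range_start || last_scanned >= range_end) {
    last_scanned = range_start;               // wrap back to the start of the range
  }
  ScanWindow w{last_scanned,
               std::min<uintptr_t>(last_scanned + page_bytes * pages_per_call, range_end)};
  last_scanned = w.end;                       // the next call resumes here
  return w;
}
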