annotate src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 5965:cc74fa5a91a9

7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
Summary: Make sure that MutableNUMASpace::ensure_parsability() only calls CollectedHeap::fill_with_object() with valid sizes and make sure CollectedHeap::filler_array_max_size() returns a value that can be converted to an int without overflow
Reviewed-by: azeemj, jmasa, iveresov
author brutisso
date Fri, 23 Mar 2012 15:28:24 +0100
parents 20bfb6d15a94
children b632e80fc9dc
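
The gist of the fix summarized above: instead of handing CollectedHeap::fill_with_object() an arbitrarily large hole, ensure_parsability() now fills each hole in chunks that never exceed CollectedHeap::filler_array_max_size(). A minimal sketch of that capping loop follows (the free-standing fill_hole() helper is hypothetical and for illustration only; the real loop is inside ensure_parsability() in the listing below):

    // Sketch only: fill [top, end) with dead filler objects, never asking
    // fill_with_object() for more than one filler array can describe.
    static void fill_hole(HeapWord* top, HeapWord* end) {
      size_t words_left = pointer_delta(end, top);
      while (words_left > 0) {
        size_t words = MIN2(words_left, CollectedHeap::filler_array_max_size());
        assert(words >= CollectedHeap::min_fill_size(), "too small to fill");
        CollectedHeap::fill_with_object(top, words);
        top += words;
        words_left -= words;
      }
    }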
/*
 * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif

MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
  _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_unused_area_complete() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_region(MemRegion mr) {
  // This method should do nothing because numa spaces are not mangled.
}
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
  assert(false, "Do not mangle MutableNUMASpace's");
}
void MutableNUMASpace::set_top_for_allocations() {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area_complete() {
  // This method should do nothing.
}
#endif  // NOT_PRODUCT

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 err_msg("Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                         words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                   touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If object header crossed a small page boundary we mark the area
              // as invalid rounding it to a page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}

size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}

size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location, the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}


size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology had changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}

// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
  }
}

// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    // And clear the alloc-rate statistics.
    // In future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      MutableSpace *s = ls->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
      ls->clear_alloc_rate();
    }
    // A NUMA space is never mangled
    initialize(region(),
               SpaceDecorator::Clear,
               SpaceDecorator::DontMangle);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      // A NUMA space is never mangled
      initialize(region(),
                 SpaceDecorator::Clear,
                 SpaceDecorator::DontMangle);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}

// Scan pages. Free pages that have smaller size or wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count)
{
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}

// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on a sequence of i's from 0 to
// lgrp_spaces()->length().
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      size_t upper_bound = pages_available * page_size();
      if (upper_bound > limit &&
          current_chunk_size(i) < upper_bound - limit) {
        // The resulting upper bound should not exceed the available
        // amount of memory (pages_available * page_size()).
        upper_bound = current_chunk_size(i) + limit;
      }
      chunk_size = MIN2(chunk_size, upper_bound);
    } else {
      size_t lower_bound = page_size();
      if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
        lower_bound = current_chunk_size(i) - limit;
      }
      chunk_size = MAX2(chunk_size, lower_bound);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}

461 // Return the bottom_region and the top_region. Align them to page_size() boundary.
a61af66fc99e Initial load
duke
parents:
diff changeset
462 // |------------------new_region---------------------------------|
a61af66fc99e Initial load
duke
parents:
diff changeset
463 // |----bottom_region--|---intersection---|------top_region------|
a61af66fc99e Initial load
duke
parents:
diff changeset
464 void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
a61af66fc99e Initial load
duke
parents:
diff changeset
465 MemRegion* bottom_region, MemRegion *top_region) {
a61af66fc99e Initial load
duke
parents:
diff changeset
466 // Is there bottom?
a61af66fc99e Initial load
duke
parents:
diff changeset
467 if (new_region.start() < intersection.start()) { // Yes
a61af66fc99e Initial load
duke
parents:
diff changeset
468 // Try to coalesce small pages into a large one.
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
469 if (UseLargePages && page_size() >= alignment()) {
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
470 HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
0
a61af66fc99e Initial load
duke
parents:
diff changeset
471 if (new_region.contains(p)
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
472 && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
473 if (intersection.contains(p)) {
a61af66fc99e Initial load
duke
parents:
diff changeset
474 intersection = MemRegion(p, intersection.end());
a61af66fc99e Initial load
duke
parents:
diff changeset
475 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
476 intersection = MemRegion(p, p);
a61af66fc99e Initial load
duke
parents:
diff changeset
477 }
a61af66fc99e Initial load
duke
parents:
diff changeset
478 }
a61af66fc99e Initial load
duke
parents:
diff changeset
479 }
a61af66fc99e Initial load
duke
parents:
diff changeset
480 *bottom_region = MemRegion(new_region.start(), intersection.start());
a61af66fc99e Initial load
duke
parents:
diff changeset
481 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
482 *bottom_region = MemRegion();
a61af66fc99e Initial load
duke
parents:
diff changeset
483 }
a61af66fc99e Initial load
duke
parents:
diff changeset
484
a61af66fc99e Initial load
duke
parents:
diff changeset
485 // Is there top?
a61af66fc99e Initial load
duke
parents:
diff changeset
486 if (intersection.end() < new_region.end()) { // Yes
a61af66fc99e Initial load
duke
parents:
diff changeset
487 // Try to coalesce small pages into a large one.
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
488 if (UseLargePages && page_size() >= alignment()) {
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
489 HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
0
a61af66fc99e Initial load
duke
parents:
diff changeset
490 if (new_region.contains(p)
535
4e400c36026f 6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents: 481
diff changeset
491 && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
0
a61af66fc99e Initial load
duke
parents:
diff changeset
492 if (intersection.contains(p)) {
a61af66fc99e Initial load
duke
parents:
diff changeset
493 intersection = MemRegion(intersection.start(), p);
a61af66fc99e Initial load
duke
parents:
diff changeset
494 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
495 intersection = MemRegion(p, p);
a61af66fc99e Initial load
duke
parents:
diff changeset
496 }
a61af66fc99e Initial load
duke
parents:
diff changeset
497 }
a61af66fc99e Initial load
duke
parents:
diff changeset
498 }
a61af66fc99e Initial load
duke
parents:
diff changeset
499 *top_region = MemRegion(intersection.end(), new_region.end());
a61af66fc99e Initial load
duke
parents:
diff changeset
500 } else {
a61af66fc99e Initial load
duke
parents:
diff changeset
501 *top_region = MemRegion();
a61af66fc99e Initial load
duke
parents:
diff changeset
502 }
a61af66fc99e Initial load
duke
parents:
diff changeset
503 }
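
// A standalone sketch (not VM code): a simplified version of the split that
// select_tails() performs, expressed on raw addresses. A new range becomes
// [bottom | intersection | top]; with large pages the intersection start is
// rounded up to an alignment boundary so the bottom tail can be re-committed
// as large pages (the top tail mirrors this with a round-down). The helpers
// below are local stand-ins for the VM's round_to()/round_down(); 'align' is
// assumed to be a power of two.
#include <cstdint>
namespace numa_sketch {
  inline uintptr_t round_up_to(uintptr_t p, uintptr_t align)   { return (p + align - 1) & ~(align - 1); }
  inline uintptr_t round_down_to(uintptr_t p, uintptr_t align) { return p & ~(align - 1); }

  struct Range { uintptr_t start, end; };              // half-open [start, end)

  inline Range bottom_tail(Range new_r, Range inter, uintptr_t align) {
    if (new_r.start >= inter.start) {
      Range empty = { new_r.start, new_r.start };      // no bottom tail
      return empty;
    }
    uintptr_t p = round_up_to(inter.start, align);     // candidate large-page boundary
    if (p <= inter.end && p - new_r.start >= align) {
      inter.start = p;                                 // grow the bottom tail up to p
    }
    Range tail = { new_r.start, inter.start };
    return tail;
  }
}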

// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. If the invalid region lies strictly inside the
// intersection, return it non-empty and aligned to the page_size() boundary.
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else
  if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
    *intersection = MemRegion(intersection->start(), invalid_region->start());
    *invalid_region = MemRegion();
  } else
  if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
    *intersection = MemRegion(new_region.start(), new_region.start());
    *invalid_region = MemRegion();
  } else
  if (intersection->contains(invalid_region)) {
    // That's the only case when we have to make an additional bias_region() call.
    HeapWord* start = invalid_region->start();
    HeapWord* end = invalid_region->end();
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
      if (new_region.contains(p)) {
        start = p;
      }
      p = (HeapWord*)round_to((intptr_t) end, alignment());
      if (new_region.contains(end)) {
        end = p;
      }
    }
    if (intersection->start() > start) {
      *intersection = MemRegion(start, intersection->end());
    }
    if (intersection->end() < end) {
      *intersection = MemRegion(intersection->start(), end);
    }
    *invalid_region = MemRegion(start, end);
  }
}

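
// A standalone sketch (not VM code) of the case analysis in merge_regions(),
// on half-open integer intervals: the invalid interval is folded into the
// bottom or top tail by shrinking the intersection, dropped together with the
// whole intersection when it covers it, or kept when it lies strictly inside
// (the page alignment of that last case is omitted here).
namespace numa_sketch {
  struct Ival { long lo, hi; };                        // half-open [lo, hi)

  inline void merge(Ival whole, Ival* inter, Ival* invalid) {
    Ival none = { 0, 0 };
    if (invalid->lo <= inter->lo && invalid->hi > inter->lo && invalid->hi <= inter->hi) {
      inter->lo = invalid->hi;  *invalid = none;       // overlaps the bottom of the intersection
    } else if (invalid->hi >= inter->hi && invalid->lo >= inter->lo && invalid->lo < inter->hi) {
      inter->hi = invalid->lo;  *invalid = none;       // overlaps the top of the intersection
    } else if (invalid->lo <= inter->lo && invalid->hi >= inter->hi) {
      inter->lo = inter->hi = whole.lo;  *invalid = none;  // covers the intersection entirely
    }
    // else: strictly inside the intersection -- the caller frees/re-biases it separately.
  }
}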
void MutableNUMASpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space,
                                  bool setup_pages) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  // Must always clear the space
  clear(SpaceDecorator::DontMangle);

  // Compute chunk sizes
  size_t prev_page_size = page_size();
  set_page_size(UseLargePages ? alignment() : os::vm_page_size());
  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();

  // Try small pages if the chunk size is too small
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    set_page_size(os::vm_page_size());
    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL   ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
  }

  // Check if the space layout has changed significantly.
  // This happens when the space has been resized so that either head or tail
  // chunk became less than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();

  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing ||
          (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
          samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else
      if (!layout_valid || NUMASpaceResizeRate == 0) {
        // Fast adaptation. If no space resize rate is set, resize
        // the chunks instantly.
        chunk_byte_size = adaptive_chunk_size(i, 0);
      } else {
        // Slow adaptation. Resize the chunks moving no more than
        // NUMASpaceResizeRate bytes per collection.
        size_t limit = NUMASpaceResizeRate /
                       (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
        chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
      }

      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
    }

    if (i == 0) { // Bottom chunk
      if (i != lgrp_spaces()->length() - 1) {
        new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
      } else {
        new_region = MemRegion(bottom(), end());
      }
    } else
    if (i < lgrp_spaces()->length() - 1) { // Middle chunks
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(),
                             ps->end() + (chunk_byte_size >> LogHeapWordSize));
    } else { // Top chunk
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(), end());
    }
    guarantee(region().contains(new_region), "Region invariant");

    // The general case:
    // |---------------------|--invalid---|--------------------------|
    // |------------------new_region---------------------------------|
    // |----bottom_region--|---intersection---|------top_region------|
    // |----old_region----|
    // The intersection part has all pages in place; we don't need to migrate them.
    // Pages for the top and bottom part should be freed and then reallocated.

    MemRegion intersection = old_region.intersection(new_region);

    if (intersection.start() == NULL || intersection.end() == NULL) {
      intersection = MemRegion(new_region.start(), new_region.start());
    }

    if (!os::numa_has_static_binding()) {
      MemRegion invalid_region = ls->invalid_region().intersection(new_region);
      // The invalid region is a range of memory that could've possibly
      // been allocated on the other node. That's relevant only on Solaris where
      // there is no static memory binding.
      if (!invalid_region.is_empty()) {
        merge_regions(new_region, &intersection, &invalid_region);
        free_region(invalid_region);
        ls->set_invalid_region(MemRegion());
      }
    }

    select_tails(new_region, intersection, &bottom_region, &top_region);

    if (!os::numa_has_static_binding()) {
      // If that's a system with the first-touch policy then it's enough
      // to free the pages.
      free_region(bottom_region);
      free_region(top_region);
    } else {
      // In a system with static binding we have to change the bias whenever
      // we reshape the heap.
      bias_region(bottom_region, ls->lgrp_id());
      bias_region(top_region, ls->lgrp_id());
    }

    // Clear space (set top = bottom) but never mangle.
    s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);

    set_adaptation_cycles(samples_count());
  }
}

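
// A standalone sketch (not VM code): the "slow adaptation" budget used in
// initialize() above. With n lgroup chunks the NUMASpaceResizeRate budget is
// divided by 1 + 2 + ... + n, and chunk i may move at most (i + 1) of those
// shares per collection, but never less than one page.
#include <cstddef>
namespace numa_sketch {
  inline size_t resize_cap(size_t resize_rate, int n, int i, size_t page) {
    size_t limit = resize_rate / (size_t(n) * (n + 1) / 2);
    size_t cap = limit * size_t(i + 1);
    return cap > page ? cap : page;
  }
}
// Example: resize_rate = 1M, n = 4, page = 4K gives limit = 104857 bytes, so
// chunk 0 may move at most ~102K per collection and chunk 2 at most ~307K.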
// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length();) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      // Check if setting the chunk's top to a given value would create a hole less than
      // a minimal object; assuming that's not the last chunk in which case we don't care.
      if (i < lgrp_spaces()->length() - 1) {
        size_t remainder = pointer_delta(s->end(), value);
        const size_t min_fill_size = CollectedHeap::min_fill_size();
        if (remainder < min_fill_size && remainder > 0) {
          // Add a minimum size filler object; it will cross the chunk boundary.
          CollectedHeap::fill_with_object(value, min_fill_size);
          value += min_fill_size;
          assert(!s->contains(value), "Should be in the next chunk");
          // Restart the loop from the same chunk, since the value has moved
          // to the next one.
          continue;
        }
      }

      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (!os::numa_has_static_binding() && top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
    i++;
  }
  MutableSpace::set_top(value);
}

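
// A standalone sketch (not VM code) of the boundary rule set_top() enforces:
// if cutting a chunk at 'value' would leave a tail smaller than the minimum
// heap object, a filler of exactly the minimum size is planted at 'value' (it
// then crosses into the next chunk) and the cut point moves past it.
// plant_filler is a hypothetical callback standing in for
// CollectedHeap::fill_with_object().
#include <cstddef>
namespace numa_sketch {
  template <typename FillFn>
  inline size_t cut_chunk(size_t value, size_t chunk_end, size_t min_fill, FillFn plant_filler) {
    size_t remainder = chunk_end - value;
    if (remainder > 0 && remainder < min_fill) {
      plant_filler(value, min_fill);                   // filler crosses the chunk boundary
      value += min_fill;                               // the new top lies in the next chunk
    }
    return value;
  }
}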
void MutableNUMASpace::clear(bool mangle_space) {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    // Never mangle NUMA spaces because the mangling will
    // bind the memory to a possibly unwanted lgroup.
    lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
  }
}

/*
   Linux supports static memory binding, so most of the logic dealing with
   possibly invalid page allocation is effectively disabled. Besides, there is
   no notion of a home node in Linux: a thread is allowed to migrate freely,
   although the scheduler is rather reluctant to move threads between nodes.
   We therefore check for the current node on every allocation, and with high
   probability a thread stays on the same node for some time, allowing local
   access to recently allocated objects.
 */

HeapWord* MutableNUMASpace::allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);

  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }

  LGRPSpace* ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->allocate(size);

  if (p != NULL) {
    size_t remainder = s->free_in_words();
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      s->set_top(s->top() - size);
      p = NULL;
    }
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

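
// A standalone sketch (not VM code): the first-touch loop used by allocate()
// above. On systems without static memory binding the OS places each page on
// the node of the thread that first touches it, so writing one word per OS
// page from the allocating thread biases the block to that thread's lgroup.
#include <cstddef>
namespace numa_sketch {
  inline void first_touch(char* block, size_t bytes, size_t os_page) {
    for (char* p = block; p < block + bytes; p += os_page) {
      *reinterpret_cast<volatile int*>(p) = 0;         // touch (and zero) one word per page
    }
  }
}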
// This version is lock-free.
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  LGRPSpace *ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL) {
    size_t remainder = pointer_delta(s->end(), p + size);
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      if (s->cas_deallocate(p, size)) {
        // We were the last to allocate and created a fragment less than
        // a minimal object.
        p = NULL;
      } else {
        guarantee(false, "Deallocation should always succeed");
      }
    }
  }
  if (p != NULL) {
    HeapWord* cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

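
// A standalone sketch (not VM code): the lock-free "keep _top updated" step of
// cas_allocate(), written with std::atomic instead of Atomic::cmpxchg_ptr().
// The CAS only has to win if no other thread has already published a top at or
// beyond the end of this allocation.
#include <atomic>
#include <cstdint>
namespace numa_sketch {
  inline void publish_top(std::atomic<uintptr_t>* top, uintptr_t chunk_top) {
    uintptr_t cur = top->load();
    while (cur < chunk_top && !top->compare_exchange_weak(cur, chunk_top)) {
      // compare_exchange_weak reloaded 'cur'; retry until top >= chunk_top.
    }
  }
}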
void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print(" lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        lgrp_spaces()->at(i)->accumulate_statistics(page_size());
      }
      st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}

void MutableNUMASpace::verify(bool allow_dirty) {
  // This can be called after setting an arbitrary value to the space's top,
  // so an object can cross the chunk boundary. We ensure the parsability
  // of the space and just walk the objects in a linear fashion.
  ensure_parsability();
  MutableSpace::verify(allow_dirty);
}

// Scan pages and gather stats about page placement and size.
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));
}

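
// A standalone sketch (not VM code) of the per-page tally in
// accumulate_statistics(): the OS query is abstracted into a hypothetical
// callback that reports the page size and home node of an address (size 0
// meaning the page is not committed), mirroring the _local/_remote/_uncommited
// counters above.
#include <cstddef>
namespace numa_sketch {
  struct PageTally { size_t local, remote, uncommitted; };

  template <typename QueryFn>   // query(addr, &bytes, &node) stands in for os::get_page_info()
  inline PageTally tally_pages(char* start, char* end, size_t os_page, int home_node, QueryFn query) {
    PageTally t = { 0, 0, 0 };
    for (char* p = start; p < end; ) {
      size_t bytes = 0; int node = -1;
      if (!query(p, &bytes, &node)) break;             // cannot examine further
      if (bytes == 0) { t.uncommitted += os_page; p += os_page; continue; }
      if (node == home_node) t.local += bytes; else t.remote += bytes;
      p += bytes;
    }
    return t;
  }
}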
// Scan page_count pages and verify that they have the right size and placement.
// If invalid pages are found they are freed in the hope that subsequent
// reallocation will be more successful.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}
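
// A standalone sketch (not VM code): the incremental scan window used by
// scan_pages(). Each call inspects at most pages_per_call pages starting where
// the previous call stopped, and the cursor wraps back to the start of the
// biased range when it falls outside of it.
#include <cstddef>
namespace numa_sketch {
  inline char* next_window(char* range_start, char* range_end, char* cursor,
                           size_t page, size_t pages_per_call, char** window_end) {
    if (cursor < range_start || cursor >= range_end) {
      cursor = range_start;                            // wrap around
    }
    char* limit = cursor + page * pages_per_call;
    *window_end = (limit < range_end) ? limit : range_end;
    return cursor;                                     // scan covers [cursor, *window_end)
  }
}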