comparison src/share/vm/gc_interface/collectedHeap.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children ba764ed4b6f2
/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_collectedHeap.cpp.incl"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif
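// The debug-only counter above is assumed to back the fire_out_of_memory
// test hook declared in collectedHeap.hpp, which deliberately provokes an
// OutOfMemoryError once the count crosses a chosen threshold.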

// Memory state functions.

CollectedHeap::CollectedHeap() :
  _reserved(), _barrier_set(NULL), _is_gc_active(false),
  _total_collections(0), _total_full_collections(0),
  _max_heap_capacity(0),
  _gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                       80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                       80, GCCause::to_string(_gc_lastcause), CHECK);
  }
}
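// Under the SUN_GC namespace the two string variables above surface to
// jvmstat monitoring clients (e.g. jstat) as sun.gc.cause and
// sun.gc.lastCause; the 80-character capacity bounds the longest GCCause
// string that can be recorded.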

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non-badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT
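// Context for the two checks above: with ZapUnusedHeapArea set, the
// collector fills unused heap with the badHeapWordVal bit pattern, so an
// allocation path can be verified to hand out memory that was zapped
// beforehand (pre-allocation check) and properly initialized afterwards
// (post-allocation check).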

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread* thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence the true argument.
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and clear just the allocated object.
    Copy::zero_to_words(obj, size);
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
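// A NULL return above tells the caller to allocate the object directly in
// the shared space instead. The first test is a waste heuristic: a TLAB
// with plenty of room left is kept and only this one request goes
// elsewhere; record_slow_allocation is assumed to grow the thread's
// refill_waste_limit, so a TLAB that keeps taking this path is eventually
// retired anyway.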

oop CollectedHeap::new_store_barrier(oop new_obj) {
  // %%% This needs refactoring. (It was imported from the server compiler.)
  guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
  BarrierSet* bs = this->barrier_set();
  assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
  int new_size = new_obj->size();
  bs->write_region(MemRegion((HeapWord*)new_obj, new_size));
  return new_obj;
}
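// If the compiler elides the per-store card marks for the initializing
// stores into a freshly allocated object, the cards covering that object
// still have to be dirtied somehow; write_region above does it in a
// single pass over the whole object rather than once per field store.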

bool CollectedHeap::can_elide_permanent_oop_store_barriers() const {
  // %%% This needs refactoring. (It was gating logic from the server compiler.)
  guarantee(kind() < CollectedHeap::G1CollectedHeap, "");
  return !UseConcMarkSweepGC;
}
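// The CMS exception above is assumed to stem from its concurrent phases:
// CMS depends on card marks to notice references mutated while marking is
// in progress, so stores into permanent-generation objects cannot skip
// the barrier under that collector.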


HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::fill_all_tlabs(bool retire) {
  assert(UseTLAB, "should not reach here");
  // See note in ensure_parsability() below.
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only fill tlabs at safepoint");
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at VM boot-up.
  assert(Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread* thread = Threads::first(); thread; thread = thread->next()) {
    thread->tlab().make_parsable(retire);
  }
}
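// make_parsable() is assumed to plug the unused tail of each TLAB with a
// filler object (and to retire the buffer entirely when 'retire' is set),
// so that code walking the heap object-by-object never falls into an
// unformatted gap.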

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be certain that mutators aren't going to
  // interfere -- for instance, this is permissible if we are still
  // single-threaded and have either not yet started allocating
  // (nothing much to verify) or have started allocating but are now
  // a full-fledged JavaThread (and have thus made our TLABs
  // available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  if (UseTLAB) {
    fill_all_tlabs(retire_tlabs);
  }
}
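// "Parsable" means the heap can be scanned linearly, using each object's
// size to locate the next one; the gap inside a live TLAB would break
// that walk, which is why the TLABs are filled first.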

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}