/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Inline functions for G1CollectedHeap

27 inline HeapRegion*
|
|
28 G1CollectedHeap::heap_region_containing(const void* addr) const {
|
|
29 HeapRegion* hr = _hrs->addr_to_region(addr);
|
|
30 // hr can be null if addr in perm_gen
|
|
31 if (hr != NULL && hr->continuesHumongous()) {
|
|
32 hr = hr->humongous_start_region();
|
|
33 }
|
|
34 return hr;
|
|
35 }
37 inline HeapRegion*
|
|
38 G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
|
|
39 HeapRegion* res = _hrs->addr_to_region(addr);
|
|
40 assert(res != NULL, "addr outside of heap?");
|
|
41 return res;
|
|
42 }
44 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
|
|
45 HeapRegion* r = _hrs->addr_to_region(obj);
|
|
46 return r != NULL && r->in_collection_set();
|
|
47 }
49 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
|
|
50 bool permit_collection_pause) {
|
|
51 HeapWord* res = NULL;
|
|
52
|
|
53 assert( SafepointSynchronize::is_at_safepoint() ||
|
|
54 Heap_lock->owned_by_self(), "pre-condition of the call" );
|
|
55
|
|
56 if (_cur_alloc_region != NULL) {
|
|
57
|
|
58 // If this allocation causes a region to become non empty,
|
|
59 // then we need to update our free_regions count.
|
|
60
|
|
61 if (_cur_alloc_region->is_empty()) {
|
|
62 res = _cur_alloc_region->allocate(word_size);
|
|
63 if (res != NULL)
|
|
64 _free_regions--;
|
|
65 } else {
|
|
66 res = _cur_alloc_region->allocate(word_size);
|
|
67 }
|
|
68 }
|
|
69 if (res != NULL) {
|
|
70 if (!SafepointSynchronize::is_at_safepoint()) {
|
|
71 assert( Heap_lock->owned_by_self(), "invariant" );
|
|
72 Heap_lock->unlock();
|
|
73 }
|
|
74 return res;
|
|
75 }
|
|
76 // attempt_allocation_slow will also unlock the heap lock when appropriate.
|
|
77 return attempt_allocation_slow(word_size, permit_collection_pause);
|
|
78 }
80 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) {
|
|
81 return _task_queues->queue(i);
|
|
82 }
85 inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
|
|
86 return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
|
|
87 }
89 inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
|
|
90 return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
|
|
91 }