comparison src/share/vm/gc_implementation/g1/heapRegion.inline.hpp @ 342:37f87013dfd8

6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr
author ysr
date Thu, 05 Jun 2008 15:57:56 -0700
parents
children c18cbe5936b8
comparing 189:0b27f3512f9e with 342:37f87013dfd8
/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
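
// Illustrative sketch (not part of the original changeset): the
// single-threaded allocation path. ContiguousSpace::allocate() bumps "top",
// and the alloc_block() call above records the new block so that later
// block_start() queries inside [res, res + size) resolve to res. The helper
// name example_serial_allocate and its parameters are hypothetical, shown
// only for illustration; the caller is assumed to satisfy the locking
// assertions in ContiguousSpace::allocate() (heap lock held, or VM thread
// at a safepoint).
static HeapWord* example_serial_allocate(G1OffsetTableContigSpace* sp,
                                         size_t word_size) {
  HeapWord* obj = sp->allocate(word_size);   // also updates the offset table
  // On success, once the caller installs a valid object header at obj,
  // interior addresses of the new block map back to obj via block_start().
  return obj;
}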

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, it is best
// if this is used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here. But this will do for now, especially in light of the comment
  // above. Perhaps in the future some lock-free manner of keeping the
  // coordination will be adopted.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
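
// Illustrative sketch (not part of the original changeset): a parallel GC
// worker carving a local allocation buffer (LAB) out of this space. Since
// par_allocate() takes _par_alloc_lock, concurrent callers are serialized,
// which is why the comment above recommends making fewer, larger LAB
// requests rather than one call per object. The helper name
// example_allocate_lab and the fallback policy are hypothetical, shown only
// to illustrate the intended usage pattern.
static HeapWord* example_allocate_lab(G1OffsetTableContigSpace* sp,
                                      size_t desired_words,
                                      size_t min_words) {
  // Try the preferred LAB size first; fall back to the minimum useful size
  // before giving up. Each successful call updates the block offset table
  // under the per-space allocation lock.
  HeapWord* lab = sp->par_allocate(desired_words);
  if (lab == NULL && min_words < desired_words) {
    lab = sp->par_allocate(min_words);
  }
  return lab;
}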

inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}
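
// Illustrative sketch (not part of the original changeset): how a card or
// remembered-set scanner typically uses block_start(). Given an arbitrary
// address inside a dirty card, the block offset table yields the start of
// the block that covers it, so iteration can begin on a real block boundary
// and walk forward from there using block sizes. The helper name
// example_first_block_covering is hypothetical, shown only for illustration.
static HeapWord* example_first_block_covering(G1OffsetTableContigSpace* sp,
                                              HeapWord* addr) {
  // For an address below "top", the returned block start is at or before
  // the queried address; the block merely has to cover it.
  HeapWord* block = sp->block_start(addr);
  return block;
}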