comparison src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp @ 187:790e66e5fbac

6687581: Make CMS work with compressed oops
Summary: Make FreeChunk read markword instead of LSB in _klass pointer to indicate that it's a FreeChunk for compressed oops.
Reviewed-by: ysr, jmasa
author coleenp
date Mon, 09 Jun 2008 11:51:19 -0400
parents
children d1605aabd0a1 12eea04c8b06
/*
 * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

//
// Free block maintenance for Concurrent Mark Sweep Generation
//
// The main data structures for free blocks are
// . an indexed array of small free blocks, and
// . a dictionary of large free blocks
//

// No virtuals in FreeChunk (don't want any vtables).

// A FreeChunk is merely a chunk that can be in a doubly linked list
// and has a size field. NOTE: FreeChunks are distinguished from allocated
// objects in one of two ways (by the sweeper), depending on whether the VM
// uses compressed oops.
// In 32 bits, or in 64 bits without CompressedOops, the second word (prev)
// has the LSB set to indicate a free chunk; allocated objects' klass()
// pointers don't have their LSB set. The corresponding bit in the CMSBitMap
// is set when the chunk is allocated. There are also blocks that "look free"
// but are not part of the free list and should not be coalesced into larger
// free blocks. These free blocks have their two LSBs set.
// In 64 bits with CompressedOops, the markOop in the first word encodes both
// the chunk's size and the fact that the block is a FreeChunk rather than an
// object (see the comment inside the class below).

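A minimal standalone sketch of the tagging scheme just described (an editorial illustration, not part of freeChunk.hpp; the type and member names are hypothetical): bit 0 of the second word marks a free chunk, bit 1 marks a chunk that looks free but must not be coalesced, and masking both bits off recovers the prev pointer.

#include <cassert>
#include <cstdint>

// Hypothetical type, for illustration only: the second word of a chunk
// doubles as a tagged prev pointer -- bit 0 means "free chunk", bit 1 means
// "don't coalesce".
struct TaggedPrevSketch {
  std::uintptr_t second_word;

  bool  is_free()       const { return (second_word & 0x1) == 0x1; }
  bool  cant_coalesce() const { return (second_word & 0x2) == 0x2; }
  void* prev()          const { return (void*)(second_word & ~(std::uintptr_t)0x3); }

  void link_prev(void* p) { second_word = (std::uintptr_t)p | 0x1; }  // mark free
  void dont_coalesce()    { second_word |= 0x2; }  // looks free, but don't merge
};

int main() {
  alignas(8) static char prev_chunk[8];   // stands in for a word-aligned chunk
  TaggedPrevSketch c = { 0 };
  c.link_prev(prev_chunk);
  assert(c.is_free() && !c.cant_coalesce());
  c.dont_coalesce();
  assert(c.is_free() && c.cant_coalesce());
  assert(c.prev() == (void*)prev_chunk);
  return 0;
}
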
class FreeChunk VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  // For 64 bit compressed oops, the markOop encodes both the size and the
  // indication that this is a FreeChunk and not an object.
  volatile size_t _size;
  FreeChunk* _prev;
  FreeChunk* _next;

  markOop mark() const volatile { return (markOop)_size; }
  void set_mark(markOop m)      { _size = (size_t)m; }

 public:
  NOT_PRODUCT(static const size_t header_size();)

  // Returns "true" if the address indicates that the block represents
  // a free chunk.
  static bool indicatesFreeChunk(const HeapWord* addr) {
    // Force volatile read from addr because value might change between
    // calls. We really want the read of _mark and _prev from this pointer
    // to be volatile but making the fields volatile causes all sorts of
    // compilation errors.
    return ((volatile FreeChunk*)addr)->isFree();
  }

  bool isFree() const volatile {
    LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
    return (((intptr_t)_prev) & 0x1) == 0x1;
  }
  bool cantCoalesce() const {
    assert(isFree(), "can't get coalesce bit on not free");
    return (((intptr_t)_prev) & 0x2) == 0x2;
  }
  void dontCoalesce() {
    // the block should be free
    assert(isFree(), "Should look like a free block");
    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
  }
  FreeChunk* prev() const {
    return (FreeChunk*)(((intptr_t)_prev) & ~(0x3));
  }

  debug_only(void* prev_addr() const { return (void*)&_prev; })

  size_t size() const volatile {
    LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
    return _size;
  }
  void setSize(size_t sz) {
    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
    _size = sz;
  }

  FreeChunk* next() const { return _next; }

  void linkAfter(FreeChunk* ptr) {
    linkNext(ptr);
    if (ptr != NULL) ptr->linkPrev(this);
  }
  void linkAfterNonNull(FreeChunk* ptr) {
    assert(ptr != NULL, "precondition violation");
    linkNext(ptr);
    ptr->linkPrev(this);
  }
  void linkNext(FreeChunk* ptr) { _next = ptr; }
  void linkPrev(FreeChunk* ptr) {
    LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
    _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
  }
  void clearPrev() { _prev = NULL; }
  void clearNext() { _next = NULL; }
  void markNotFree() {
    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::prototype());)
    // Also set _prev to null
    _prev = NULL;
  }

  // Return the address past the end of this chunk
  HeapWord* end() const { return ((HeapWord*) this) + size(); }

  // debugging
  void verify() const PRODUCT_RETURN;
  void verifyList() const PRODUCT_RETURN;
  void mangleAllocated(size_t size) PRODUCT_RETURN;
  void mangleFreed(size_t size) PRODUCT_RETURN;
};
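
A usage sketch of the interface above (an editorial illustration, not code from this changeset): a block-walking loop in the style of the CMS sweeper can ask indicatesFreeChunk() whether a block is a free chunk -- answered from the markword under 64-bit CompressedOops and from the tagged prev word otherwise -- and then take the block's size from the chunk itself. The helper obj_size_in_words() is hypothetical.

// Editorial sketch only. Assumes the VM environment in which this header is
// compiled; obj_size_in_words() is a hypothetical stand-in for however the
// caller sizes a live object.
size_t obj_size_in_words(const HeapWord* p);   // hypothetical helper

void walk_region_sketch(HeapWord* bottom, HeapWord* limit) {
  HeapWord* cur = bottom;
  while (cur < limit) {
    size_t word_size;
    if (FreeChunk::indicatesFreeChunk(cur)) {
      // Free chunk: size() reads the markword under 64-bit CompressedOops,
      // and _size otherwise.
      FreeChunk* fc = (FreeChunk*)cur;
      word_size = fc->size();
      // fc->cantCoalesce() would additionally tell the sweeper whether this
      // chunk may be merged with an adjacent free block.
    } else {
      // Allocated (or apparently allocated) object.
      word_size = obj_size_in_words(cur);
    }
    cur += word_size;   // both sizes are in heap words
  }
}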

// Alignment helpers etc.
#define numQuanta(x,y) ((x+y-1)/y)
enum AlignmentConstants {
  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
};
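
To make the constant concrete, here is a small standalone arithmetic sketch (editorial; the build parameters are assumptions about a typical LP64 configuration, not values taken from this changeset): with 8-byte heap words, MinObjAlignmentInBytes of 8, MinObjAlignment of one heap word, and a 24-byte FreeChunk (one size_t plus two pointers), MinChunkSize works out to 3 heap words.

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed LP64 build parameters (not taken from the sources above).
  const std::size_t heap_word_size         = 8;   // bytes per HeapWord
  const std::size_t min_obj_align_in_bytes = 8;   // assumed MinObjAlignmentInBytes
  const std::size_t min_obj_align_in_words = min_obj_align_in_bytes / heap_word_size;
  const std::size_t free_chunk_bytes       = 24;  // size_t + two pointers

  // numQuanta(x, y) == (x + y - 1) / y, as defined above.
  const std::size_t quanta = (free_chunk_bytes + min_obj_align_in_bytes - 1)
                             / min_obj_align_in_bytes;                  // == 3
  const std::size_t min_chunk_words = quanta * min_obj_align_in_words;  // == 3

  std::printf("MinChunkSize = %zu heap words (%zu bytes)\n",
              min_chunk_words, min_chunk_words * heap_word_size);
  return 0;
}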