comparison src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp @ 4909:95f6641e38e0

7144296: PS: Optimize nmethods processing
Summary: Prunes scavenge roots in code list every young GC; promotes objects directly pointed to by the code immediately
Reviewed-by: johnc, jcoomes
author iveresov
date Fri, 10 Feb 2012 17:40:20 -0800
parents f95d63e2154a
children 8a729074feae
comparison
equal deleted inserted replaced
4888:3c4621be5149 4909:95f6641e38e0
1 /* 1 /*
2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
59 assert(Universe::heap()->is_in(p), "pointer outside heap"); 59 assert(Universe::heap()->is_in(p), "pointer outside heap");
60 60
61 claim_or_forward_internal_depth(p); 61 claim_or_forward_internal_depth(p);
62 } 62 }
63 63
64 //
65 // This method is pretty bulky. It would be nice to split it up
66 // into smaller submethods, but we need to be careful not to hurt
67 // performance.
68 //
69 template<bool promote_immediately>
70 oop PSPromotionManager::copy_to_survivor_space(oop o) {
71 assert(PSScavenge::should_scavenge(&o), "Sanity");
72
73 oop new_obj = NULL;
74
75 // NOTE! We must be very careful with any methods that access the mark
76 // in o. There may be multiple threads racing on it, and it may be forwarded
77 // at any time. Do not use oop methods for accessing the mark!
78 markOop test_mark = o->mark();
79
80 // The same test as "o->is_forwarded()"
81 if (!test_mark->is_marked()) {
82 bool new_obj_is_tenured = false;
83 size_t new_obj_size = o->size();
84
85 if (!promote_immediately) {
86 // Find the objects age, MT safe.
87 int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
88 test_mark->displaced_mark_helper()->age() : test_mark->age();
89
90 // Try allocating obj in to-space (unless too old)
91 if (age < PSScavenge::tenuring_threshold()) {
92 new_obj = (oop) _young_lab.allocate(new_obj_size);
93 if (new_obj == NULL && !_young_gen_is_full) {
94 // Do we allocate directly, or flush and refill?
95 if (new_obj_size > (YoungPLABSize / 2)) {
96 // Allocate this object directly
97 new_obj = (oop)young_space()->cas_allocate(new_obj_size);
98 } else {
99 // Flush and fill
100 _young_lab.flush();
101
102 HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
103 if (lab_base != NULL) {
104 _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
105 // Try the young lab allocation again.
106 new_obj = (oop) _young_lab.allocate(new_obj_size);
107 } else {
108 _young_gen_is_full = true;
109 }
110 }
111 }
112 }
113 }
114
115 // Otherwise try allocating obj tenured
116 if (new_obj == NULL) {
117 #ifndef PRODUCT
118 if (Universe::heap()->promotion_should_fail()) {
119 return oop_promotion_failed(o, test_mark);
120 }
121 #endif // #ifndef PRODUCT
122
123 new_obj = (oop) _old_lab.allocate(new_obj_size);
124 new_obj_is_tenured = true;
125
126 if (new_obj == NULL) {
127 if (!_old_gen_is_full) {
128 // Do we allocate directly, or flush and refill?
129 if (new_obj_size > (OldPLABSize / 2)) {
130 // Allocate this object directly
131 new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
132 } else {
133 // Flush and fill
134 _old_lab.flush();
135
136 HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
137 if(lab_base != NULL) {
138 _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
139 // Try the old lab allocation again.
140 new_obj = (oop) _old_lab.allocate(new_obj_size);
141 }
142 }
143 }
144
145 // This is the promotion failed test, and code handling.
146 // The code belongs here for two reasons. It is slightly
147 // different thatn the code below, and cannot share the
148 // CAS testing code. Keeping the code here also minimizes
149 // the impact on the common case fast path code.
150
151 if (new_obj == NULL) {
152 _old_gen_is_full = true;
153 return oop_promotion_failed(o, test_mark);
154 }
155 }
156 }
157
158 assert(new_obj != NULL, "allocation should have succeeded");
159
160 // Copy obj
161 Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
162
163 // Now we have to CAS in the header.
164 if (o->cas_forward_to(new_obj, test_mark)) {
165 // We won any races, we "own" this object.
166 assert(new_obj == o->forwardee(), "Sanity");
167
168 // Increment age if obj still in new generation. Now that
169 // we're dealing with a markOop that cannot change, it is
170 // okay to use the non mt safe oop methods.
171 if (!new_obj_is_tenured) {
172 new_obj->incr_age();
173 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
174 }
175
176 // Do the size comparison first with new_obj_size, which we
177 // already have. Hopefully, only a few objects are larger than
178 // _min_array_size_for_chunking, and most of them will be arrays.
179 // So, the is->objArray() test would be very infrequent.
180 if (new_obj_size > _min_array_size_for_chunking &&
181 new_obj->is_objArray() &&
182 PSChunkLargeArrays) {
183 // we'll chunk it
184 oop* const masked_o = mask_chunked_array_oop(o);
185 push_depth(masked_o);
186 TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
187 } else {
188 // we'll just push its contents
189 new_obj->push_contents(this);
190 }
191 } else {
192 // We lost, someone else "owns" this object
193 guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
194
195 // Try to deallocate the space. If it was directly allocated we cannot
196 // deallocate it, so we have to test. If the deallocation fails,
197 // overwrite with a filler object.
198 if (new_obj_is_tenured) {
199 if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
200 CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
201 }
202 } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
203 CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
204 }
205
206 // don't update this before the unallocation!
207 new_obj = o->forwardee();
208 }
209 } else {
210 assert(o->is_forwarded(), "Sanity");
211 new_obj = o->forwardee();
212 }
213
214 #ifdef DEBUG
215 // This code must come after the CAS test, or it will print incorrect
216 // information.
217 if (TraceScavenge) {
218 gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
219 PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
220 new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
221 }
222 #endif
223
224 return new_obj;
225 }
226
227
64 inline void PSPromotionManager::process_popped_location_depth(StarTask p) { 228 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
65 if (is_oop_masked(p)) { 229 if (is_oop_masked(p)) {
66 assert(PSChunkLargeArrays, "invariant"); 230 assert(PSChunkLargeArrays, "invariant");
67 oop const old = unmask_chunked_array_oop(p); 231 oop const old = unmask_chunked_array_oop(p);
68 process_array_chunk(old); 232 process_array_chunk(old);
69 } else { 233 } else {
70 if (p.is_narrow()) { 234 if (p.is_narrow()) {
71 assert(UseCompressedOops, "Error"); 235 assert(UseCompressedOops, "Error");
72 PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p); 236 PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
73 } else { 237 } else {
74 PSScavenge::copy_and_push_safe_barrier(this, (oop*)p); 238 PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
75 } 239 }
76 } 240 }
77 } 241 }
78 242
79 #if TASKQUEUE_STATS 243 #if TASKQUEUE_STATS