src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
children ba764ed4b6f2
/*
 * Copyright 2002-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psPromotionManager.cpp.incl"

PSPromotionManager** PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
OopTaskQueueSet*     PSPromotionManager::_stack_array_breadth = NULL;
PSOldGen*            PSPromotionManager::_old_gen = NULL;
MutableSpace*        PSPromotionManager::_young_space = NULL;

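// Set up the static state shared by all promotion managers: one manager per
// GC worker thread plus one extra for the VM thread (see below), and the
// task-queue set through which the worker threads steal work from each
// other's claimed stacks.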
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  if (UseDepthFirstScavengeOrder) {
    _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
    guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
  } else {
    _stack_array_breadth = new OopTaskQueueSet(ParallelGCThreads);
    guarantee(_stack_array_breadth != NULL, "Could not initialize promotion manager");
  }

  // Create and register the PSPromotionManager(s) for the worker threads.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _manager_array[i] = new PSPromotionManager();
    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
    if (UseDepthFirstScavengeOrder) {
      stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
    } else {
      stack_array_breadth()->register_queue(i, _manager_array[i]->claimed_stack_breadth());
    }
  }

  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
  _manager_array[ParallelGCThreads] = new PSPromotionManager();
  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[ParallelGCThreads];
}

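// Called once before each scavenge: refresh the cached to-space pointer
// (the young generation's spaces may have been resized since the last GC)
// and reset every manager, including the VM thread's, to a clean state.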
void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_space = heap->young_gen()->to_space();

  for (uint i = 0; i < ParallelGCThreads+1; i++) {
    manager_array(i)->reset();
  }
}

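// Called once after each scavenge: verify that every manager's claimed and
// overflow stacks were completely drained, then flush the promotion LABs
// (which also reports survivor-space overflow to PSScavenge; see flush_labs()).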
void PSPromotionManager::post_scavenge() {
#if PS_PM_STATS
  print_stats();
#endif // PS_PM_STATS

  for (uint i = 0; i < ParallelGCThreads+1; i++) {
    PSPromotionManager* manager = manager_array(i);

    // the guarantees are a bit gratuitous but, if one fires, we'll
    // have a better idea of what went wrong
    if (i < ParallelGCThreads) {
      guarantee((!UseDepthFirstScavengeOrder ||
                 manager->overflow_stack_depth()->length() <= 0),
                "promotion manager overflow stack must be empty");
      guarantee((UseDepthFirstScavengeOrder ||
                 manager->overflow_stack_breadth()->length() <= 0),
                "promotion manager overflow stack must be empty");

      guarantee((!UseDepthFirstScavengeOrder ||
                 manager->claimed_stack_depth()->size() <= 0),
                "promotion manager claimed stack must be empty");
      guarantee((UseDepthFirstScavengeOrder ||
                 manager->claimed_stack_breadth()->size() <= 0),
                "promotion manager claimed stack must be empty");
    } else {
      guarantee((!UseDepthFirstScavengeOrder ||
                 manager->overflow_stack_depth()->length() <= 0),
                "VM Thread promotion manager overflow stack "
                "must be empty");
      guarantee((UseDepthFirstScavengeOrder ||
                 manager->overflow_stack_breadth()->length() <= 0),
                "VM Thread promotion manager overflow stack "
                "must be empty");

      guarantee((!UseDepthFirstScavengeOrder ||
                 manager->claimed_stack_depth()->size() <= 0),
                "VM Thread promotion manager claimed stack "
                "must be empty");
      guarantee((UseDepthFirstScavengeOrder ||
                 manager->claimed_stack_breadth()->size() <= 0),
                "VM Thread promotion manager claimed stack "
                "must be empty");
    }

    manager->flush_labs();
  }
}

#if PS_PM_STATS

void
PSPromotionManager::print_stats(uint i) {
  tty->print_cr("---- GC Worker %2d Stats", i);
  tty->print_cr("    total pushes            %8d", _total_pushes);
  tty->print_cr("    masked pushes           %8d", _masked_pushes);
  tty->print_cr("    overflow pushes         %8d", _overflow_pushes);
  tty->print_cr("    max overflow length     %8d", _max_overflow_length);
  tty->print_cr("");
  tty->print_cr("    arrays chunked          %8d", _arrays_chunked);
  tty->print_cr("    array chunks processed  %8d", _array_chunks_processed);
  tty->print_cr("");
  tty->print_cr("    total steals            %8d", _total_steals);
  tty->print_cr("    masked steals           %8d", _masked_steals);
  tty->print_cr("");
}

void
PSPromotionManager::print_stats() {
  tty->print_cr("== GC Tasks Stats (%s), GC %3d",
                (UseDepthFirstScavengeOrder) ? "Depth-First" : "Breadth-First",
                Universe::heap()->total_collections());

  for (uint i = 0; i < ParallelGCThreads+1; ++i) {
    PSPromotionManager* manager = manager_array(i);
    manager->print_stats(i);
  }
}

#endif // PS_PM_STATS

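// The constructor fixes the traversal mode from UseDepthFirstScavengeOrder:
// depth-first managers push oop* locations (slots still to be scanned),
// while breadth-first managers push the promoted objects themselves. Only
// the chosen mode's overflow stack is allocated; the other stays NULL.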
PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  _depth_first = UseDepthFirstScavengeOrder;

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  if (depth_first()) {
    claimed_stack_depth()->initialize();
    queue_size = claimed_stack_depth()->max_elems();
    // We want the overflow stack to be permanent.
    _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<oop*>(10, true);
    _overflow_stack_breadth = NULL;
  } else {
    claimed_stack_breadth()->initialize();
    queue_size = claimed_stack_breadth()->max_elems();
    // We want the overflow stack to be permanent.
    _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
    _overflow_stack_depth = NULL;
  }

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}

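// Reset the per-scavenge state. The LABs are initialized to zero-length
// regions at the current top of their spaces, so no space is claimed until
// a promotion actually needs it.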
void PSPromotionManager::reset() {
  assert(claimed_stack_empty(), "reset of non-empty claimed stack");
  assert(overflow_stack_empty(), "reset of non-empty overflow stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Do not prefill the LABs; that would waste heap space.
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _prefetch_queue.clear();

#if PS_PM_STATS
  _total_pushes = 0;
  _masked_pushes = 0;
  _overflow_pushes = 0;
  _max_overflow_length = 0;
  _arrays_chunked = 0;
  _array_chunks_processed = 0;
  _total_steals = 0;
  _masked_steals = 0;
#endif // PS_PM_STATS
}

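// Drain loop for the depth-first mode. The overflow stack is drained first
// so that the lock-free claimed stack stays populated and other workers can
// keep stealing from it. Unless a total drain is requested (or configured),
// the claimed stack is only drained down to _target_stack_size, again to
// leave work available for stealing.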
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  assert(depth_first(), "invariant");
  assert(overflow_stack_depth() != NULL, "invariant");
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  do {
    oop* p;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (!overflow_stack_depth()->is_empty()) {
      p = overflow_stack_depth()->pop();
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (claimed_stack_depth()->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (claimed_stack_depth()->size() > _target_stack_size &&
             claimed_stack_depth()->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
  } while ((totally_drain && claimed_stack_depth()->size() > 0) ||
           (overflow_stack_depth()->length() > 0));

  assert(!totally_drain || claimed_stack_empty(), "Sanity");
  assert(totally_drain ||
         claimed_stack_depth()->size() <= _target_stack_size,
         "Sanity");
  assert(overflow_stack_empty(), "Sanity");
}

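// Breadth-first counterpart of the drain loop above: the stacks hold the
// promoted objects themselves rather than slot addresses, so each popped
// entry has its contents copied (copy_contents) instead of being processed
// as a location.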
void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
  assert(!depth_first(), "invariant");
  assert(overflow_stack_breadth() != NULL, "invariant");
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  do {
    oop obj;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (!overflow_stack_breadth()->is_empty()) {
      obj = overflow_stack_breadth()->pop();
      obj->copy_contents(this);
    }

    if (totally_drain) {
      // obj is a reference!!!
      while (claimed_stack_breadth()->pop_local(obj)) {
        // It would be nice to assert about the type of objects we might
        // pop, but they can come from anywhere, unfortunately.
        obj->copy_contents(this);
      }
    } else {
      // obj is a reference!!!
      while (claimed_stack_breadth()->size() > _target_stack_size &&
             claimed_stack_breadth()->pop_local(obj)) {
        // It would be nice to assert about the type of objects we might
        // pop, but they can come from anywhere, unfortunately.
        obj->copy_contents(this);
      }
    }

    // If we could not find any other work, flush the prefetch queue
    if (claimed_stack_breadth()->size() == 0 &&
        (overflow_stack_breadth()->length() == 0)) {
      flush_prefetch_queue();
    }
  } while ((totally_drain && claimed_stack_breadth()->size() > 0) ||
           (overflow_stack_breadth()->length() > 0));

  assert(!totally_drain || claimed_stack_empty(), "Sanity");
  assert(totally_drain ||
         claimed_stack_breadth()->size() <= _target_stack_size,
         "Sanity");
  assert(overflow_stack_empty(), "Sanity");
}

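// Retire both promotion LABs at the end of a scavenge. A LAB that is already
// flushed is only legal if its generation filled up, which the asserts check.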
void PSPromotionManager::flush_labs() {
  assert(claimed_stack_empty(), "Attempt to flush lab with live stack");
  assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed())
    _young_lab.flush();

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed())
    _old_lab.flush();

  // Let PSScavenge know if we overflowed
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//

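// Outline of the copy logic: read the mark word once; if the object is not
// yet forwarded, copy it into the young LAB (or, past the tenuring threshold
// or on allocation failure, into the old LAB), then try to CAS a forwarding
// pointer into the original object's header. The winner of the race keeps
// its copy and pushes the object's contents for scanning; a loser unallocates
// (or fills) its speculative copy and uses the winner's forwardee instead.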
oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
  assert(PSScavenge::should_scavenge(o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    // Try allocating obj in to-space (unless too old)
    if (age < PSScavenge::tenuring_threshold()) {
      new_obj = (oop) _young_lab.allocate(new_obj_size);
      if (new_obj == NULL && !_young_gen_is_full) {
        // Do we allocate directly, or flush and refill?
        if (new_obj_size > (YoungPLABSize / 2)) {
          // Allocate this object directly
          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
        } else {
          // Flush and fill
          _young_lab.flush();

          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
          if (lab_base != NULL) {
            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
            // Try the young lab allocation again.
            new_obj = (oop) _young_lab.allocate(new_obj_size);
          } else {
            _young_gen_is_full = true;
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion-failure test and its handling code. The
        // code belongs here for two reasons: it is slightly different
        // than the code below and cannot share the CAS testing code, and
        // keeping it here minimizes the impact on the common-case fast
        // path.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non-MT-safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      if (depth_first) {
        // Do the size comparison first with new_obj_size, which we
        // already have. Hopefully, only a few objects are larger than
        // _min_array_size_for_chunking, and most of them will be arrays.
        // So, the is_objArray() test would be very infrequent.
        if (new_obj_size > _min_array_size_for_chunking &&
            new_obj->is_objArray() &&
            PSChunkLargeArrays) {
          // we'll chunk it
#if PS_PM_STATS
          ++_arrays_chunked;
#endif // PS_PM_STATS
          oop* const masked_o = mask_chunked_array_oop(o);
          push_depth(masked_o);
#if PS_PM_STATS
          ++_masked_pushes;
#endif // PS_PM_STATS
        } else {
          // we'll just push its contents
          new_obj->push_contents(this);
        }
      } else {
        push_breadth(new_obj);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the CAS failed.");

      // Unallocate the space used. NOTE! We may have directly allocated
      // the object. If so, we cannot deallocate it, so we have to test!
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object(new_obj)) {
          // The promotion lab failed to unallocate the object.
          // We need to overwrite the object with a filler that
          // contains no interior pointers.
          MemRegion mr((HeapWord*)new_obj, new_obj_size);
          // Clean this up and move to oopFactory (see bug 4718422)
          SharedHeap::fill_region_with_object(mr);
        }
      } else {
        if (!_young_lab.unallocate_object(new_obj)) {
          // The promotion lab failed to unallocate the object.
          // We need to overwrite the object with a filler that
          // contains no interior pointers.
          MemRegion mr((HeapWord*)new_obj, new_obj_size);
          // Clean this up and move to oopFactory (see bug 4718422)
          SharedHeap::fill_region_with_object(mr);
        }
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifdef DEBUG
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}",
                           PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring",
                           new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
  }
#endif

  return new_obj;
}

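// Process one chunk of a large, chunked object array. "old" is the from-space
// copy; its length field doubles as a cursor recording how many elements are
// still unprocessed. Each call scans the trailing _array_chunk_size elements
// and, if more remain, re-pushes the masked oop; the final chunk restores the
// array's real length.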
void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

#if PS_PM_STATS
  ++_array_chunks_processed;
#endif // PS_PM_STATS

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
#if PS_PM_STATS
    ++_masked_pushes;
#endif // PS_PM_STATS
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  assert(start < end, "invariant");
  oop* const base = objArrayOop(obj)->base();
  oop* p = base + start;
  oop* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(*p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}

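// Handle an object that could not be promoted. The object is forwarded to
// itself, so it can presumably be recognized later as a promotion failure,
// and its original mark is saved (if needed) via
// PSScavenge::oop_promotion_failed().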
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started. If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    if (depth_first()) {
      obj->push_contents(this);
    } else {
      // Don't bother incrementing the age, just push
      // onto the claimed stack.
      push_breadth(obj);
    }

    // Save the mark if needed
    PSScavenge::oop_promotion_failed(obj, obj_mark);
  } else {
    // We lost, someone else "owns" this object
    guarantee(obj->is_forwarded(), "Object must be forwarded if the CAS failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

#ifdef DEBUG
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
                           "promotion-failure",
                           obj->blueprint()->internal_name(),
                           obj, obj->size());
  }
#endif

  return obj;
}