comparison src/share/vm/memory/space.hpp @ 20198:c49dcaf78a65
8042737: Introduce umbrella header prefetch.inline.hpp
Reviewed-by: twisti, stefank
author | goetz
date | Thu, 08 May 2014 15:37:17 +0200
parents | a9becfeecd1b
children | 30c99d8e0f02
20197:ce8f6bb717c9 | 20198:c49dcaf78a65 |
---|---|
31 #include "memory/iterator.hpp" | 31 #include "memory/iterator.hpp" |
32 #include "memory/memRegion.hpp" | 32 #include "memory/memRegion.hpp" |
33 #include "memory/watermark.hpp" | 33 #include "memory/watermark.hpp" |
34 #include "oops/markOop.hpp" | 34 #include "oops/markOop.hpp" |
35 #include "runtime/mutexLocker.hpp" | 35 #include "runtime/mutexLocker.hpp" |
36 #include "runtime/prefetch.hpp" | |
37 #include "utilities/macros.hpp" | 36 #include "utilities/macros.hpp" |
38 #include "utilities/workgroup.hpp" | 37 #include "utilities/workgroup.hpp" |
39 #ifdef TARGET_OS_FAMILY_linux | |
40 # include "os_linux.inline.hpp" | |
41 #endif | |
42 #ifdef TARGET_OS_FAMILY_solaris | |
43 # include "os_solaris.inline.hpp" | |
44 #endif | |
45 #ifdef TARGET_OS_FAMILY_windows | |
46 # include "os_windows.inline.hpp" | |
47 #endif | |
48 #ifdef TARGET_OS_FAMILY_aix | |
49 # include "os_aix.inline.hpp" | |
50 #endif | |
51 #ifdef TARGET_OS_FAMILY_bsd | |
52 # include "os_bsd.inline.hpp" | |
53 #endif | |
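The block removed above is exactly the pattern this changeset retires: space.hpp included runtime/prefetch.hpp directly and then pulled in an os_*.inline.hpp per OS family to reach the inline prefetch implementations. Those inclusions move behind the umbrella header named in the summary, and the SCAN_AND_* macros further down (which call Prefetch::read/write) presumably move to space.inline.hpp, where the umbrella can be included. The new file itself is not part of this hunk; the sketch below is an assumption based on the changeset title and HotSpot's usual umbrella-header convention, with an illustrative (not exhaustive) platform list.

```cpp
// runtime/prefetch.inline.hpp -- assumed shape; not shown in this hunk.
#ifndef SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
#define SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP

#include "runtime/prefetch.hpp"

// Dispatch once, centrally, to the os/cpu-specific inline definitions,
// so clients include only this umbrella instead of per-OS headers.
#ifdef TARGET_OS_ARCH_linux_x86
# include "prefetch_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "prefetch_windows_x86.inline.hpp"
#endif
// ... remaining os/cpu combinations elided ...

#endif // SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
```

With prefetch.hpp left holding only the Prefetch class declaration, space.hpp no longer needs any OS-family conditionals, which is why the whole #ifdef ladder disappears on the right-hand side.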
54 | 38 |
55 // A space is an abstraction for the "storage units" backing | 39 // A space is an abstraction for the "storage units" backing |
56 // up the generation abstraction. It includes specific | 40 // up the generation abstraction. It includes specific |
57 // implementations for keeping track of free and used space, | 41 // implementations for keeping track of free and used space, |
58 // for iterating over objects and free blocks, etc. | 42 // for iterating over objects and free blocks, etc. |
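The comparison view elides the unchanged class bodies that follow. For orientation only, the interface that comment describes has roughly this shape; this is an abridged paraphrase, not the verbatim declaration from space.hpp:

```cpp
// Abridged paraphrase of the Space interface sketched by the comment
// above; the real class carries many more members and virtuals.
class Space : public CHeapObj<mtGC> {
 protected:
  HeapWord* _bottom;  // first word of the storage unit
  HeapWord* _end;     // one word past the last usable word
 public:
  HeapWord* bottom() const { return _bottom; }
  HeapWord* end()    const { return _end; }
  virtual size_t used() const = 0;  // bytes occupied by objects
  virtual size_t free() const = 0;  // bytes still available
  // Walk every object in the space, handing each to blk->do_object().
  virtual void object_iterate(ObjectClosure* blk) = 0;
};
```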
510 // words remaining after this operation. | 494 // words remaining after this operation. |
511 bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q, | 495 bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q, |
512 size_t word_len); | 496 size_t word_len); |
513 }; | 497 }; |
514 | 498 |
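Only the tail of CompactibleSpace survives the elision, but insert_deadspace is central to the SCAN_AND_FORWARD macro below: it decides whether a dead run may be left in place, charged against the space's dead-space allowance. A sketch of that contract, assuming the space.cpp implementation follows HotSpot's usual filler-object pattern:

```cpp
// Assumed sketch of the insert_deadspace contract (the body lives in
// space.cpp, which this hunk does not show).
bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t word_len) {
  if (allowed_deadspace_words >= word_len) {
    allowed_deadspace_words -= word_len;          // charge the allowance
    CollectedHeap::fill_with_object(q, word_len); // keep heap parseable
    oop(q)->set_mark(oop(q)->mark()->set_marked()); // pass as "live" below
    return true;  // caller forwards q to itself instead of sliding it
  }
  allowed_deadspace_words = 0;  // allowance spent: compact everything after
  return false;
}
```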
515 #define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \ | |
516 /* Compute the new addresses for the live objects and store them in \ | |
517 * the mark word. Used by MarkSweep::mark_sweep_phase2(). \ | |
518 */ \ | |
519 HeapWord* compact_top; /* This is where we are currently compacting to. */ \ | |
520 \ | |
521 /* We're sure to be here before any objects are compacted into this \ | |
522 * space, so this is a good time to initialize this: \ | |
523 */ \ | |
524 set_compaction_top(bottom()); \ | |
525 \ | |
526 if (cp->space == NULL) { \ | |
527 assert(cp->gen != NULL, "need a generation"); \ | |
528 assert(cp->threshold == NULL, "just checking"); \ | |
529 assert(cp->gen->first_compaction_space() == this, "just checking"); \ | |
530 cp->space = cp->gen->first_compaction_space(); \ | |
531 compact_top = cp->space->bottom(); \ | |
532 cp->space->set_compaction_top(compact_top); \ | |
533 cp->threshold = cp->space->initialize_threshold(); \ | |
534 } else { \ | |
535 compact_top = cp->space->compaction_top(); \ | |
536 } \ | |
537 \ | |
538 /* We allow some amount of garbage towards the bottom of the space, so \ | |
539 * we don't start compacting before there is a significant gain to be made.\ | |
540 * Occasionally, we want to ensure a full compaction, which is determined \ | |
541 * by the MarkSweepAlwaysCompactCount parameter. \ | |
542 */ \ | |
543 uint invocations = MarkSweep::total_invocations(); \ | |
544 bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \ | |
545 \ | |
546 size_t allowed_deadspace = 0; \ | |
547 if (skip_dead) { \ | |
548 const size_t ratio = allowed_dead_ratio(); \ | |
549 allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \ | |
550 } \ | |
551 \ | |
552 HeapWord* q = bottom(); \ | |
553 HeapWord* t = scan_limit(); \ | |
554 \ | |
555 HeapWord* end_of_live = q; /* One byte beyond the last byte of the last \ | |
556 live object. */ \ | |
557 HeapWord* first_dead = end(); /* The first dead object. */ \ | |
558 LiveRange* liveRange = NULL; /* The current live range, recorded in the \ | |
559 first header of preceding free area. */ \ | |
560 _first_dead = first_dead; \ | |
561 \ | |
562 const intx interval = PrefetchScanIntervalInBytes; \ | |
563 \ | |
564 while (q < t) { \ | |
565 assert(!block_is_obj(q) || \ | |
566 oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \ | |
567 oop(q)->mark()->has_bias_pattern(), \ | |
568 "these are the only valid states during a mark sweep"); \ | |
569 if (block_is_obj(q) && oop(q)->is_gc_marked()) { \ | |
570 /* prefetch beyond q */ \ | |
571 Prefetch::write(q, interval); \ | |
572 size_t size = block_size(q); \ | |
573 compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ | |
574 q += size; \ | |
575 end_of_live = q; \ | |
576 } else { \ | |
577 /* run over all the contiguous dead objects */ \ | |
578 HeapWord* end = q; \ | |
579 do { \ | |
580 /* prefetch beyond end */ \ | |
581 Prefetch::write(end, interval); \ | |
582 end += block_size(end); \ | |
583 } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\ | |
584 \ | |
585 /* see if we might want to pretend this object is alive so that \ | |
586 * we don't have to compact quite as often. \ | |
587 */ \ | |
588 if (allowed_deadspace > 0 && q == compact_top) { \ | |
589 size_t sz = pointer_delta(end, q); \ | |
590 if (insert_deadspace(allowed_deadspace, q, sz)) { \ | |
591 compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \ | |
592 q = end; \ | |
593 end_of_live = end; \ | |
594 continue; \ | |
595 } \ | |
596 } \ | |
597 \ | |
598 /* otherwise, it really is a free region. */ \ | |
599 \ | |
600 /* for the previous LiveRange, record the end of the live objects. */ \ | |
601 if (liveRange) { \ | |
602 liveRange->set_end(q); \ | |
603 } \ | |
604 \ | |
605 /* record the current LiveRange object. \ | |
606 * liveRange->start() is overlaid on the mark word. \ | |
607 */ \ | |
608 liveRange = (LiveRange*)q; \ | |
609 liveRange->set_start(end); \ | |
610 liveRange->set_end(end); \ | |
611 \ | |
612 /* see if this is the first dead region. */ \ | |
613 if (q < first_dead) { \ | |
614 first_dead = q; \ | |
615 } \ | |
616 \ | |
617 /* move on to the next object */ \ | |
618 q = end; \ | |
619 } \ | |
620 } \ | |
621 \ | |
622 assert(q == t, "just checking"); \ | |
623 if (liveRange != NULL) { \ | |
624 liveRange->set_end(q); \ | |
625 } \ | |
626 _end_of_live = end_of_live; \ | |
627 if (end_of_live < first_dead) { \ | |
628 first_dead = end_of_live; \ | |
629 } \ | |
630 _first_dead = first_dead; \ | |
631 \ | |
632 /* save the compaction_top of the compaction space. */ \ | |
633 cp->space->set_compaction_top(compact_top); \ | |
634 } | |
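Stripped of the backslash plumbing, prefetch hints, and dead-space bookkeeping, the forwarding pass above reduces to the following loop. This is an illustrative distillation of the macro, not code from the file:

```cpp
// Distilled SCAN_AND_FORWARD: give every live object its destination.
HeapWord* q = bottom();
HeapWord* t = scan_limit();
while (q < t) {
  if (block_is_obj(q) && oop(q)->is_gc_marked()) {
    size_t size = block_size(q);
    // forward() stores the destination in the mark word and bumps
    // compact_top, rolling over to the next compaction space if full.
    compact_top = cp->space->forward(oop(q), size, cp, compact_top);
    q += size;
  } else {
    // Coalesce the whole dead run, then overlay a LiveRange on its
    // first words so phases 3 and 4 can skip straight over it.
    HeapWord* end = q;
    do {
      end += block_size(end);
    } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));
    ((LiveRange*)q)->set_start(end);
    ((LiveRange*)q)->set_end(end);
    q = end;
  }
}
```

The LiveRange trick works because a dead object's mark word is no longer needed: as the macro's own comment notes, liveRange->start() is overlaid on the mark word, recording where the next live object begins, and decode_pointer() reads that back in the later passes.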
635 | |
636 #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \ | |
637 /* adjust all the interior pointers to point at the new locations of objects \ | |
638 * Used by MarkSweep::mark_sweep_phase3() */ \ | |
639 \ | |
640 HeapWord* q = bottom(); \ | |
641 HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \ | |
642 \ | |
643 assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \ | |
644 \ | |
645 if (q < t && _first_dead > q && \ | |
646 !oop(q)->is_gc_marked()) { \ | |
647 /* we have a chunk of the space which hasn't moved and we've \ | |
648 * reinitialized the mark word during the previous pass, so we can't \ | |
649 * use is_gc_marked for the traversal. */ \ | |
650 HeapWord* end = _first_dead; \ | |
651 \ | |
652 while (q < end) { \ | |
653 /* I originally tried to conjoin "block_start(q) == q" to the \ | |
654 * assertion below, but that doesn't work, because you can't \ | |
655 * accurately traverse previous objects to get to the current one \ | |
656 * after their pointers have been \ | |
657 * updated, until the actual compaction is done. dld, 4/00 */ \ | |
658 assert(block_is_obj(q), \ | |
659 "should be at block boundaries, and should be looking at objs"); \ | |
660 \ | |
661 /* point all the oops to the new location */ \ | |
662 size_t size = oop(q)->adjust_pointers(); \ | |
663 size = adjust_obj_size(size); \ | |
664 \ | |
665 q += size; \ | |
666 } \ | |
667 \ | |
668 if (_first_dead == t) { \ | |
669 q = t; \ | |
670 } else { \ | |
671 /* $$$ This is funky. Using this to read the previously written \ | |
672 * LiveRange. See also use below. */ \ | |
673 q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ | |
674 } \ | |
675 } \ | |
676 \ | |
677 const intx interval = PrefetchScanIntervalInBytes; \ | |
678 \ | |
679 debug_only(HeapWord* prev_q = NULL); \ | |
680 while (q < t) { \ | |
681 /* prefetch beyond q */ \ | |
682 Prefetch::write(q, interval); \ | |
683 if (oop(q)->is_gc_marked()) { \ | |
684 /* q is alive */ \ | |
685 /* point all the oops to the new location */ \ | |
686 size_t size = oop(q)->adjust_pointers(); \ | |
687 size = adjust_obj_size(size); \ | |
688 debug_only(prev_q = q); \ | |
689 q += size; \ | |
690 } else { \ | |
691 /* q is not a live object, so its mark should point at the next \ | |
692 * live object */ \ | |
693 debug_only(prev_q = q); \ | |
694 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ | |
695 assert(q > prev_q, "we should be moving forward through memory"); \ | |
696 } \ | |
697 } \ | |
698 \ | |
699 assert(q == t, "just checking"); \ | |
700 } | |
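The adjust pass has the same skeleton once the dense-prefix special case and prefetch hints are set aside; again an illustrative distillation rather than code from the file:

```cpp
// Distilled SCAN_AND_ADJUST_POINTERS: rewrite interior oops to the
// forwarding addresses computed by the forwarding pass.
HeapWord* q = bottom();
HeapWord* t = _end_of_live;
while (q < t) {
  if (oop(q)->is_gc_marked()) {
    // Live: fix every oop field, then step over the object.
    q += adjust_obj_size(oop(q)->adjust_pointers());
  } else {
    // Dead: the mark word holds the address of the next live object
    // (written as a LiveRange by the forwarding pass).
    q = (HeapWord*)oop(q)->mark()->decode_pointer();
  }
}
```

The special case at the top of the macro exists because objects below _first_dead do not move and had their mark words reinitialized in an earlier pass, so is_gc_marked() would misclassify them; that prefix is walked by object size instead.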
701 | |
702 #define SCAN_AND_COMPACT(obj_size) { \ | |
703 /* Copy all live objects to their new location \ | |
704 * Used by MarkSweep::mark_sweep_phase4() */ \ | |
705 \ | |
706 HeapWord* q = bottom(); \ | |
707 HeapWord* const t = _end_of_live; \ | |
708 debug_only(HeapWord* prev_q = NULL); \ | |
709 \ | |
710 if (q < t && _first_dead > q && \ | |
711 !oop(q)->is_gc_marked()) { \ | |
712 debug_only( \ | |
713 /* we have a chunk of the space which hasn't moved and we've reinitialized \ | |
714 * the mark word during the previous pass, so we can't use is_gc_marked for \ | |
715 * the traversal. */ \ | |
716 HeapWord* const end = _first_dead; \ | |
717 \ | |
718 while (q < end) { \ | |
719 size_t size = obj_size(q); \ | |
720 assert(!oop(q)->is_gc_marked(), \ | |
721 "should be unmarked (special dense prefix handling)"); \ | |
722 debug_only(prev_q = q); \ | |
723 q += size; \ | |
724 } \ | |
725 ) /* debug_only */ \ | |
726 \ | |
727 if (_first_dead == t) { \ | |
728 q = t; \ | |
729 } else { \ | |
730 /* $$$ Funky */ \ | |
731 q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ | |
732 } \ | |
733 } \ | |
734 \ | |
735 const intx scan_interval = PrefetchScanIntervalInBytes; \ | |
736 const intx copy_interval = PrefetchCopyIntervalInBytes; \ | |
737 while (q < t) { \ | |
738 if (!oop(q)->is_gc_marked()) { \ | |
739 /* mark is pointer to next marked oop */ \ | |
740 debug_only(prev_q = q); \ | |
741 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ | |
742 assert(q > prev_q, "we should be moving forward through memory"); \ | |
743 } else { \ | |
744 /* prefetch beyond q */ \ | |
745 Prefetch::read(q, scan_interval); \ | |
746 \ | |
747 /* size and destination */ \ | |
748 size_t size = obj_size(q); \ | |
749 HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ | |
750 \ | |
751 /* prefetch beyond compaction_top */ \ | |
752 Prefetch::write(compaction_top, copy_interval); \ | |
753 \ | |
754 /* copy object and reinit its mark */ \ | |
755 assert(q != compaction_top, "everything in this pass should be moving"); \ | |
756 Copy::aligned_conjoint_words(q, compaction_top, size); \ | |
757 oop(compaction_top)->init_mark(); \ | |
758 assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ | |
759 \ | |
760 debug_only(prev_q = q); \ | |
761 q += size; \ | |
762 } \ | |
763 } \ | |
764 \ | |
765 /* Let's remember if we were empty before we did the compaction. */ \ | |
766 bool was_empty = used_region().is_empty(); \ | |
767 /* Reset space after compaction is complete */ \ | |
768 reset_after_compaction(); \ | |
769 /* We do this clear, below, since it has overloaded meanings for some */ \ | |
770 /* space subtypes. For example, OffsetTableContigSpace's that were */ \ | |
771 /* compacted into will have had their offset table thresholds updated */ \ | |
772 /* continuously, but those that weren't need to have their thresholds */ \ | |
773 /* re-initialized. Also mangles unused area for debugging. */ \ | |
774 if (used_region().is_empty()) { \ | |
775 if (!was_empty) clear(SpaceDecorator::Mangle); \ | |
776 } else { \ | |
777 if (ZapUnusedHeapArea) mangle_unused_area(); \ | |
778 } \ | |
779 } | |
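And the copy pass, distilled the same way (prefetching and debug checks removed; illustrative only):

```cpp
// Distilled SCAN_AND_COMPACT: slide each live object to the forwarding
// address assigned in the forwarding pass.
HeapWord* q = bottom();
HeapWord* t = _end_of_live;
while (q < t) {
  if (!oop(q)->is_gc_marked()) {
    // Dead run: jump to the next live object via the stored pointer.
    q = (HeapWord*)oop(q)->mark()->decode_pointer();
  } else {
    size_t size = obj_size(q);
    HeapWord* dest = (HeapWord*)oop(q)->forwardee();
    Copy::aligned_conjoint_words(q, dest, size); // move the object down
    oop(dest)->init_mark();                      // restore a clean mark
    q += size;
  }
}
```

Note the ordering guarantee that makes the in-place slide safe: destinations are assigned in ascending address order, so a conjoint (memmove-style) word copy never overwrites a live object that has not yet been moved.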
780 | |
781 class GenSpaceMangler; | 499 class GenSpaceMangler; |
782 | 500 |
783 // A space in which the free area is contiguous. It therefore supports | 501 // A space in which the free area is contiguous. It therefore supports |
784 // faster allocation, and compaction. | 502 // faster allocation, and compaction. |
785 class ContiguousSpace: public CompactibleSpace { | 503 class ContiguousSpace: public CompactibleSpace { |
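The "faster allocation" claim comes from the fact that a contiguous free area turns allocation into a pointer bump. A simplified sketch of the idea (the real ContiguousSpace code adds alignment, mangling, and a CAS-based parallel variant):

```cpp
// Bump-pointer allocation over the single free run [_top, _end).
HeapWord* allocate_impl(size_t word_size) {
  HeapWord* obj = _top;
  if (pointer_delta(_end, obj) >= word_size) {
    _top = obj + word_size;  // bump: no free-list search required
    return obj;
  }
  return NULL;  // exhausted; caller must expand or collect
}
```

Compaction is likewise simpler: live objects can always slide toward bottom(), leaving a single free run behind with no free lists to rebuild.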