comparison src/share/vm/gc_implementation/g1/g1OopClosures.hpp @ 14909:4ca6dc0799b6

Backout jdk9 merge
author Gilles Duboscq <duboscq@ssw.jku.at>
date Tue, 01 Apr 2014 13:57:07 +0200
parents 97300b6165f8
children 52b4284cb496
--- src/share/vm/gc_implementation/g1/g1OopClosures.hpp	14908:8db6e76cb658
+++ src/share/vm/gc_implementation/g1/g1OopClosures.hpp	14909:4ca6dc0799b6
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -36,22 +36,26 @@
 class CMTask;
 class ReferenceProcessor;
 
 // A class that scans oops in a given heap region (much as OopsInGenClosure
 // scans oops in a generation.)
-class OopsInHeapRegionClosure: public ExtendedOopClosure {
+class OopsInHeapRegionClosure: public OopsInGenClosure {
 protected:
   HeapRegion* _from;
 public:
   void set_region(HeapRegion* from) { _from = from; }
 };
 
 class G1ParClosureSuper : public OopsInHeapRegionClosure {
 protected:
   G1CollectedHeap* _g1;
+  G1RemSet* _g1_rem;
+  ConcurrentMark* _cm;
   G1ParScanThreadState* _par_scan_state;
   uint _worker_id;
+  bool _during_initial_mark;
+  bool _mark_in_progress;
 public:
   G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
   bool apply_to_weak_ref_discovered_field() { return true; }
 };
 
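Both sides of this hunk keep the same shape: an oop closure is a visitor with a do_oop callback for full-width (oop*) and compressed (narrowOop*) reference slots, and the heap-region variant additionally remembers which region the visited references come from. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the HotSpot ones:

#include <cstddef>
#include <cstdint>

// Simplified stand-ins for HotSpot's reference-slot types.
typedef void*    oop;        // full-width object pointer
typedef uint32_t narrowOop;  // compressed object pointer
struct HeapRegion;           // opaque region descriptor

// The closure (visitor) pattern: one virtual hook per slot width.
class OopClosureSketch {
public:
  virtual ~OopClosureSketch() {}
  virtual void do_oop(oop* p) = 0;
  virtual void do_oop(narrowOop* p) = 0;
};

// Region-aware variant: the caller records which region the slots it is
// about to visit belong to, mirroring set_region() in the hunk above.
class OopsInHeapRegionClosureSketch : public OopClosureSketch {
protected:
  HeapRegion* _from;
public:
  OopsInHeapRegionClosureSketch() : _from(nullptr) {}
  void set_region(HeapRegion* from) { _from = from; }
};

// Example subclass: count how many reference slots were visited.
class CountingClosure : public OopsInHeapRegionClosureSketch {
public:
  size_t count = 0;
  void do_oop(oop* /*p*/)       override { ++count; }
  void do_oop(narrowOop* /*p*/) override { ++count; }
};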
@@ -80,30 +84,17 @@
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
 #define G1_PARTIAL_ARRAY_MASK 0x2
 
-inline bool has_partial_array_mask(oop* ref) {
+template <class T> inline bool has_partial_array_mask(T* ref) {
   return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
 }
 
-// We never encode partial array oops as narrowOop*, so return false immediately.
-// This allows the compiler to create optimized code when popping references from
-// the work queue.
-inline bool has_partial_array_mask(narrowOop* ref) {
-  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
-  return false;
-}
-
-// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
-// We always encode partial arrays as regular oop, to allow the
-// specialization for has_partial_array_mask() for narrowOops above.
-// This means that unintentional use of this method with narrowOops are caught
-// by the compiler.
-inline oop* set_partial_array_mask(oop obj) {
+template <class T> inline T* set_partial_array_mask(T obj) {
   assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+  return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
 }
 
 template <class T> inline oop clear_partial_array_mask(T* ref) {
   return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
 }
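Both versions of these helpers rely on the same low-bit tagging trick: a task-queue entry that refers to a partially scanned object array is distinguished from an ordinary reference by setting bit 0x2, which is always clear in a sufficiently aligned pointer; the has/set/clear functions test, install, and strip that bit. The 14908 side additionally rejects tagged narrowOop* entries at compile time, while the backed-out version uses one template for both slot widths. A small self-contained sketch of the tagging idea (the names below are illustrative, not the HotSpot functions):

#include <cassert>
#include <cstdint>

static const uintptr_t PARTIAL_ARRAY_MASK = 0x2;  // a bit that is always 0 in an aligned address

alignas(8) static int g_dummy;  // stand-in for a suitably aligned heap object

// Tag a pointer; only legal if the bit is not already in use.
inline void* tag_partial_array(void* obj) {
  assert(((uintptr_t)obj & PARTIAL_ARRAY_MASK) == 0 && "information loss");
  return (void*)((uintptr_t)obj | PARTIAL_ARRAY_MASK);
}

// Test whether a queue entry carries the partial-array tag.
inline bool is_partial_array(void* ref) {
  return ((uintptr_t)ref & PARTIAL_ARRAY_MASK) != 0;
}

// Strip the tag to recover the original, properly aligned pointer.
inline void* untag_partial_array(void* ref) {
  return (void*)((uintptr_t)ref & ~PARTIAL_ARRAY_MASK);
}

int main() {
  void* tagged = tag_partial_array(&g_dummy);
  assert(is_partial_array(tagged));
  assert(!is_partial_array(&g_dummy));
  assert(untag_partial_array(tagged) == &g_dummy);
  return 0;
}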
@@ -127,65 +118,80 @@
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
 // Add back base class for metadata
 class G1ParCopyHelper : public G1ParClosureSuper {
+  Klass* _scanned_klass;
+
+ public:
+  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+    _scanned_klass(NULL),
+    G1ParClosureSuper(g1, par_scan_state) {}
+
+  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
+  template <class T> void do_klass_barrier(T* p, oop new_obj);
+};
+
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+class G1ParCopyClosure : public G1ParCopyHelper {
+  G1ParScanClosure _scanner;
+  template <class T> void do_oop_work(T* p);
+
 protected:
-  Klass* _scanned_klass;
-  ConcurrentMark* _cm;
-
   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that are guaranteed not to move
   // during the GC (i.e., non-CSet objects). It is MT-safe.
   void mark_object(oop obj);
 
   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that have been forwarded during a
   // GC. It is MT-safe.
   void mark_forwarded_object(oop from_obj, oop to_obj);
-public:
-  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
-
-  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
-  template <class T> void do_klass_barrier(T* p, oop new_obj);
-};
-
-template <G1Barrier barrier, bool do_mark_object>
-class G1ParCopyClosure : public G1ParCopyHelper {
-private:
-  template <class T> void do_oop_work(T* p);
+
+  oop copy_to_survivor_space(oop obj);
 
 public:
   G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                    ReferenceProcessor* rp) :
+      _scanner(g1, par_scan_state, rp),
       G1ParCopyHelper(g1, par_scan_state) {
     assert(_ref_processor == NULL, "sanity");
   }
 
-  template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
+  G1ParScanClosure* scanner() { return &_scanner; }
+
+  template <class T> void do_oop_nv(T* p) {
+    do_oop_work(p);
+  }
   virtual void do_oop(oop* p) { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
+typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<false, G1BarrierKlass, false> G1ParScanMetadataClosure;
 
 
-typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
+typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<true, G1BarrierNone, true> G1ParScanAndMarkClosure;
+typedef G1ParCopyClosure<false, G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
+
+// The following closure types are no longer used but are retained
+// for historical reasons:
+// typedef G1ParCopyClosure<false, G1BarrierRS, false> G1ParScanHeapRSClosure;
+// typedef G1ParCopyClosure<false, G1BarrierRS, true> G1ParScanAndMarkHeapRSClosure;
 
 // The following closure type is defined in g1_specialized_oop_closures.hpp:
 //
-// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
 
 // We use a separate closure to handle references during evacuation
 // failure processing.
 // We could have used another instance of G1ParScanHeapEvacClosure
 // (since that closure no longer assumes that the references it
 // handles point into the collection set).
 
-typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
+typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public ExtendedOopClosure {
   G1CollectedHeap* _g1;
   OopClosure* _oc;
   DirtyCardToOopClosure* _dcto_cl;
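On both sides, the concrete closure types above are typedefs that bind G1ParCopyClosure's template parameters, so the barrier kind and the mark-object behaviour are fixed per instantiation at compile time rather than tested per visited reference; the backout only changes which parameter list those typedefs use. A minimal sketch of that compile-time policy selection, with illustrative names rather than the HotSpot template:

#include <iostream>

// Stand-in for the G1Barrier enum used as a non-type template parameter.
enum BarrierKind { BarrierNone, BarrierKlass, BarrierEvac };

// Behaviour is chosen by template arguments, so the branches below are
// compile-time constants and unused ones can be optimized away entirely.
template <BarrierKind barrier, bool do_mark_object>
struct CopyClosureSketch {
  void do_oop(void** p) {
    if (do_mark_object) {
      std::cout << "mark object referenced from slot " << p << "\n";
    }
    if (barrier != BarrierNone) {
      std::cout << "apply barrier kind " << (int)barrier << "\n";
    }
    // ... copying/forwarding of the referenced object would go here ...
  }
};

// Each typedef fixes one combination, mirroring the typedef list above.
typedef CopyClosureSketch<BarrierNone,  false> ScanRootSketch;
typedef CopyClosureSketch<BarrierNone,  true>  ScanAndMarkRootSketch;
typedef CopyClosureSketch<BarrierKlass, true>  ScanAndMarkMetadataSketch;

int main() {
  void* slot = nullptr;
  ScanAndMarkRootSketch closure;
  closure.do_oop(&slot);  // marking step runs, barrier step compiles to nothing
  return 0;
}

Because the flags are template arguments, an instantiation with do_mark_object set to false contains no marking code at all, which is the point of generating one closure type per combination.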