comparison: src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp @ 17844:8847586c9037

8016302: Change type of the number of GC workers to unsigned int (2)
Reviewed-by: tschatzl, jwilhelm

author:   vkempik
date:     Thu, 03 Apr 2014 17:49:31 +0400
parents:  bfdf528be8e8
children: 570cb6369f17
comparing 17843:81d7a4b28dc5 (old) with 17844:8847586c9037 (new):
@@ -32,26 +32,26 @@
 #include "runtime/thread.inline.hpp"
 #include "utilities/workgroup.hpp"
 
 bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
                                    bool consume,
-                                   size_t worker_i) {
+                                   uint worker_i) {
   bool res = true;
   if (_buf != NULL) {
     res = apply_closure_to_buffer(cl, _buf, _index, _sz,
                                   consume,
-                                  (int) worker_i);
+                                  worker_i);
     if (res && consume) _index = _sz;
   }
   return res;
 }
 
 bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                              void** buf,
                                              size_t index, size_t sz,
                                              bool consume,
-                                             int worker_i) {
+                                             uint worker_i) {
   if (cl == NULL) return true;
   for (size_t i = index; i < sz; i += oopSize) {
     int ind = byte_index_to_index((int)i);
     jbyte* card_ptr = (jbyte*)buf[ind];
     if (card_ptr != NULL) {
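The hunk above does more than rename a type: the worker id used to arrive as a size_t while apply_closure_to_buffer took an int, so the call site had to narrow it with an explicit (int) cast. Unifying both signatures on uint removes that lossy conversion. A minimal sketch of the before/after shape, with illustrative stand-in names (not HotSpot's):

    #include <cstddef>

    // Before: size_t at one layer, int at the next; the call narrows
    // explicitly, which is lossy for values >= 2^31 on LP64 targets.
    static bool process_old(int worker_i)       { return worker_i >= 0; }
    static bool apply_old(std::size_t worker_i) { return process_old((int)worker_i); }

    // After: one unsigned type at both layers; the cast disappears.
    static bool process_new(unsigned worker_i)  { return worker_i != ~0u; }
    static bool apply_new(unsigned worker_i)    { return process_new(worker_i); }

    int main() { return (apply_old(3) && apply_new(3)) ? 0 : 1; }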
@@ -77,12 +77,12 @@
 {
   _all_active = true;
 }
 
 // Determines how many mutator threads can process the buffers in parallel.
-size_t DirtyCardQueueSet::num_par_ids() {
-  return os::processor_count();
+uint DirtyCardQueueSet::num_par_ids() {
+  return (uint)os::processor_count();
 }
 
 void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                    int process_completed_threshold,
                                    int max_completed_queue,
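os::processor_count() returns a signed int, so changing num_par_ids() to return uint requires the one explicit cast shown above. The processor count is always positive, so the conversion preserves the value, and doing it once at this boundary keeps the rest of the code free of signed/unsigned mixing. A sketch of that boundary-cast idiom (the processor_count() stand-in below is illustrative):

    // Stand-in for os::processor_count(), which returns a signed int.
    static int processor_count() { return 8; }

    // Convert once, at the boundary, to the unsigned type used everywhere else.
    static unsigned num_par_ids() {
      return (unsigned)processor_count();
    }

    int main() { return (num_par_ids() > 0) ? 0 : 1; }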
@@ -101,11 +101,11 @@
 void DirtyCardQueueSet::set_closure(CardTableEntryClosure* closure) {
   _closure = closure;
 }
 
 void DirtyCardQueueSet::iterate_closure_all_threads(bool consume,
-                                                    size_t worker_i) {
+                                                    uint worker_i) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
   for(JavaThread* t = Threads::first(); t; t = t->next()) {
     bool b = t->dirty_card_queue().apply_closure(_closure, consume);
     guarantee(b, "Should not be interrupted.");
   }
@@ -124,15 +124,15 @@
   // We grab the current JavaThread.
   JavaThread* thread = JavaThread::current();
 
   // We get the number of any par_id that this thread
   // might have already claimed.
-  int worker_i = thread->get_claimed_par_id();
+  uint worker_i = thread->get_claimed_par_id();
 
-  // If worker_i is not -1 then the thread has already claimed
+  // If worker_i is not UINT_MAX then the thread has already claimed
   // a par_id. We make note of it using the already_claimed value
-  if (worker_i != -1) {
+  if (worker_i != UINT_MAX) {
     already_claimed = true;
   } else {
 
     // Otherwise we need to claim a par id
     worker_i = _free_ids->claim_par_id();
@@ -140,11 +140,11 @@
     // And store the par_id value in the thread
     thread->set_claimed_par_id(worker_i);
   }
 
   bool b = false;
-  if (worker_i != -1) {
+  if (worker_i != UINT_MAX) {
     b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
                                                 _sz, true, worker_i);
     if (b) Atomic::inc(&_processed_buffers_mut);
 
     // If we had not claimed an id before entering the method
@@ -152,12 +152,12 @@
     if (!already_claimed) {
 
       // we release the id
      _free_ids->release_par_id(worker_i);
 
-      // and set the claimed_id in the thread to -1
-      thread->set_claimed_par_id(-1);
+      // and set the claimed_id in the thread to UINT_MAX
+      thread->set_claimed_par_id(UINT_MAX);
     }
   }
   return b;
 }
 
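The substantive change in this block is the sentinel for "no par_id claimed": -1 in the old signed code becomes UINT_MAX in the unsigned version. Both have the same bit pattern, since (uint)-1 == UINT_MAX, so stored ids round-trip unchanged; only the spelling of the comparison changes. A self-contained sketch of the claim/use/release pattern, with hypothetical stand-ins for FreeIdSet and the thread-local par_id slot:

    #include <cassert>
    #include <climits>

    // Hypothetical stand-in for the free-id pool; illustration only.
    struct FreeIds {
      unsigned _next = 0;
      unsigned claim_par_id()          { return _next++; }
      void release_par_id(unsigned id) { (void)id; }
    };

    int main() {
      FreeIds free_ids;
      unsigned claimed_par_id = UINT_MAX;      // stand-in for the thread's slot

      unsigned worker_i = claimed_par_id;
      bool already_claimed = (worker_i != UINT_MAX);
      if (!already_claimed) {
        worker_i = free_ids.claim_par_id();    // claim a fresh id
        claimed_par_id = worker_i;
      }

      // ... process buffers under worker_i ...

      if (!already_claimed) {
        free_ids.release_par_id(worker_i);     // give back what we claimed
        claimed_par_id = UINT_MAX;             // restore the sentinel
      }

      assert((unsigned)-1 == UINT_MAX);        // same bits as the old -1
      return 0;
    }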
@@ -184,11 +184,11 @@
   return nd;
 }
 
 bool DirtyCardQueueSet::
 apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
-                                         int worker_i,
+                                         uint worker_i,
                                          BufferNode* nd) {
   if (nd != NULL) {
     void **buf = BufferNode::make_buffer_from_node(nd);
     size_t index = nd->index();
     bool b =
@@ -206,21 +206,21 @@
     return false;
   }
 }
 
 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
-                                                          int worker_i,
+                                                          uint worker_i,
                                                           int stop_at,
                                                           bool during_pause) {
   assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
   BufferNode* nd = get_completed_buffer(stop_at);
   bool res = apply_closure_to_completed_buffer_helper(cl, worker_i, nd);
   if (res) Atomic::inc(&_processed_buffers_rs_thread);
   return res;
 }
 
-bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
+bool DirtyCardQueueSet::apply_closure_to_completed_buffer(uint worker_i,
                                                           int stop_at,
                                                           bool during_pause) {
   return apply_closure_to_completed_buffer(_closure, worker_i,
                                            stop_at, during_pause);
 }
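After this change worker_i is unsigned while stop_at stays a signed int, so the completed-buffer entry points now mix signedness. One migration hazard worth noting: a caller that still passes a literal -1 for the worker id compiles and silently wraps to UINT_MAX. A small illustration (the hypothetical take() below mirrors the parameter types above); building with gcc/clang's -Wsign-conversion flags the first call:

    #include <climits>
    #include <cstdio>

    // Mirrors the new parameter types: unsigned worker id, signed stop_at.
    static void take(unsigned worker_i, int stop_at) {
      std::printf("worker_i=%u stop_at=%d\n", worker_i, stop_at);
    }

    int main() {
      take(-1, 0);        // -1 wraps implicitly to UINT_MAX (flagged by -Wsign-conversion)
      take(UINT_MAX, 0);  // the intended spelling after this changeset
      return 0;
    }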