/*
 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_codeCache.cpp.incl"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()      { return total_size; }
  bool is_empty()  { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size      * 100 / total_size,
                  relocation_size  * 100 / total_size,
                  code_size        * 100 / total_size,
                  stub_size        * 100 / total_size,
                  scopes_oop_size  * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size  * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    scopes_oop_size  += cb->oops_size();
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      code_size        += nm->code_size();
      stub_size        += nm->stub_size();

      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size        += cb->instructions_size();
    }
  }
};


// CodeCache implementation

CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;


CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob *cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

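// Allocate a chunk of the code cache for a new CodeBlob. The caller must
// already hold the CodeCache_lock; the heap is grown in CodeCacheExpansionSize
// steps and NULL is returned if the reserved code cache cannot be expanded.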
CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (address)_heap->end() - (address)_heap->begin());
    }
  }
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

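// Return a CodeBlob's storage to the underlying CodeHeap and update the
// blob and dependency counters.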
void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
    _number_of_nmethods_with_dependencies--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
    _number_of_nmethods_with_dependencies++;
  }
  // flush the hardware I-cache
  ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))


bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark code blobs for unloading if they contain otherwise
// unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    cb->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live)
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

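// Prepend an nmethod to the singly-linked list of methods that may hold
// scavengable (non-perm) oops.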
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

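// Unlink entries that are zombie, unloaded, or no longer contain scavengable
// oops, so that future scavenges do not have to walk them.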
void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods is gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

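// Look for a speculatively disconnected nmethod compiled for m on the saved
// list; if found, unlink it, mark it reconnected, and return it (else NULL).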
nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(methodOop(m));
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

void CodeCache::remove_saved_code(nmethod* nm) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

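// Detach a live nmethod from its methodOop and stash it on the saved list so
// that find_and_remove_saved_code can later reconnect it instead of recompiling.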
void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(methodOop(nm->method()));
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}


void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}

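// Called at the end of a GC cycle: fix oop relocations in all live blobs,
// clean inline caches if a cache clean was requested, and prune the
// scavenge root list.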
void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      debug_only(nm->verify();)
    }
    cb->fix_oop_relocations();
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}


void icache_init();

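// Reserve the code cache (rounding the size flags up to the OS page size) and
// register the code heap with the memory service and the OS.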
void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, which caused a failure;
  // instead, round the code cache sizes to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT

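// Mark every live nmethod whose dependencies are invalidated by the given
// dependency change; returns the number of nmethods marked for deoptimization.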
int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      klassOop d = str.klass();
      number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  objArrayOop old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    methodOop old_method = (methodOop) old_methods->obj_at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined methodOop
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(methodOop dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

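// Print detailed statistics: per-kind blob counts, nmethod state counts, and a
// histogram of nmethod code sizes in 512-byte buckets.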
void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if(nm->is_alive()) { tty->print_cr(" alive"); }
        if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if(nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if(nm->is_alive()) { nmethodAlive++; }
      if(nm->is_not_entrant()) { nmethodNotEntrant++; }
      if(nm->is_zombie()) { nmethodZombie++; }
      if(nm->is_unloaded()) { nmethodUnloaded++; }
      if(nm->is_native_method()) { nmethodNative++; }

      if(nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if(nm->code_size() > maxCodeSize) {
          maxCodeSize = nm->code_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if(nm->is_java_method()) {
        buckets[nm->code_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
}

void CodeCache::print() {
  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  // Print both the total dependency checking time and the average per check.
  tty->print_cr("nmethod dependency checking time %f (%f per check)",
                dependentCheckTime.seconds(),
                dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }


  if (Verbose) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->instructions_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

}

#endif // PRODUCT