Mercurial > hg > graal-compiler
annotate src/share/vm/c1/c1_Runtime1.cpp @ 2607:008adfd6d850
Fixed the stateBefore of invokes and monitorenter instructions to include the arguments of the instruction.
This is necessary to ensure correct continuation in the interpreter when the stateBefore is used as a deoptimization point.
author | Thomas Wuerthinger <thomas@wuerthinger.net> |
---|---|
date | Fri, 06 May 2011 17:47:17 +0200 |
parents | 0654ee04b214 |
children | 75a99b4f1c98 |
rev | line source |
---|---|
0 | 1 /* |
2142 | 2 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1247
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1247
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1247
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "asm/codeBuffer.hpp" | |
27 #include "c1/c1_CodeStubs.hpp" | |
28 #include "c1/c1_Defs.hpp" | |
29 #include "c1/c1_FrameMap.hpp" | |
30 #include "c1/c1_LIRAssembler.hpp" | |
31 #include "c1/c1_MacroAssembler.hpp" | |
32 #include "c1/c1_Runtime1.hpp" | |
33 #include "classfile/systemDictionary.hpp" | |
34 #include "classfile/vmSymbols.hpp" | |
35 #include "code/codeBlob.hpp" | |
36 #include "code/compiledIC.hpp" | |
37 #include "code/pcDesc.hpp" | |
38 #include "code/scopeDesc.hpp" | |
39 #include "code/vtableStubs.hpp" | |
40 #include "compiler/disassembler.hpp" | |
41 #include "gc_interface/collectedHeap.hpp" | |
42 #include "interpreter/bytecode.hpp" | |
43 #include "interpreter/interpreter.hpp" | |
44 #include "memory/allocation.inline.hpp" | |
45 #include "memory/barrierSet.hpp" | |
46 #include "memory/oopFactory.hpp" | |
47 #include "memory/resourceArea.hpp" | |
48 #include "oops/objArrayKlass.hpp" | |
49 #include "oops/oop.inline.hpp" | |
50 #include "runtime/biasedLocking.hpp" | |
51 #include "runtime/compilationPolicy.hpp" | |
52 #include "runtime/interfaceSupport.hpp" | |
53 #include "runtime/javaCalls.hpp" | |
54 #include "runtime/sharedRuntime.hpp" | |
55 #include "runtime/threadCritical.hpp" | |
56 #include "runtime/vframe.hpp" | |
57 #include "runtime/vframeArray.hpp" | |
58 #include "utilities/copy.hpp" | |
59 #include "utilities/events.hpp" | |
0 | 60 |
61 | |
62 // Implementation of StubAssembler | |
63 | |
64 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) { | |
65 _name = name; | |
66 _must_gc_arguments = false; | |
67 _frame_size = no_frame_size; | |
68 _num_rt_args = 0; | |
69 _stub_id = stub_id; | |
70 } | |
71 | |
72 | |
73 void StubAssembler::set_info(const char* name, bool must_gc_arguments) { | |
74 _name = name; | |
75 _must_gc_arguments = must_gc_arguments; | |
76 } | |
77 | |
78 | |
79 void StubAssembler::set_frame_size(int size) { | |
80 if (_frame_size == no_frame_size) { | |
81 _frame_size = size; | |
82 } | |
83 assert(_frame_size == size, "can't change the frame size"); | |
84 } | |
85 | |
86 | |
87 void StubAssembler::set_num_rt_args(int args) { | |
88 if (_num_rt_args == 0) { | |
89 _num_rt_args = args; | |
90 } | |
91 assert(_num_rt_args == args, "can't change the number of args"); | |
92 } | |
93 | |
94 // Implementation of Runtime1 | |
95 | |
96 CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids]; | |
97 const char *Runtime1::_blob_names[] = { | |
98 RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME) | |
99 }; | |
100 | |
101 #ifndef PRODUCT | |
102 // statistics | |
103 int Runtime1::_generic_arraycopy_cnt = 0; | |
104 int Runtime1::_primitive_arraycopy_cnt = 0; | |
105 int Runtime1::_oop_arraycopy_cnt = 0; | |
2446 | 106 int Runtime1::_generic_arraycopystub_cnt = 0; |
0 | 107 int Runtime1::_arraycopy_slowcase_cnt = 0; |
2446 | 108 int Runtime1::_arraycopy_checkcast_cnt = 0; |
109 int Runtime1::_arraycopy_checkcast_attempt_cnt = 0; | |
0 | 110 int Runtime1::_new_type_array_slowcase_cnt = 0; |
111 int Runtime1::_new_object_array_slowcase_cnt = 0; | |
112 int Runtime1::_new_instance_slowcase_cnt = 0; | |
113 int Runtime1::_new_multi_array_slowcase_cnt = 0; | |
114 int Runtime1::_monitorenter_slowcase_cnt = 0; | |
115 int Runtime1::_monitorexit_slowcase_cnt = 0; | |
116 int Runtime1::_patch_code_slowcase_cnt = 0; | |
117 int Runtime1::_throw_range_check_exception_count = 0; | |
118 int Runtime1::_throw_index_exception_count = 0; | |
119 int Runtime1::_throw_div0_exception_count = 0; | |
120 int Runtime1::_throw_null_pointer_exception_count = 0; | |
121 int Runtime1::_throw_class_cast_exception_count = 0; | |
122 int Runtime1::_throw_incompatible_class_change_error_count = 0; | |
123 int Runtime1::_throw_array_store_exception_count = 0; | |
124 int Runtime1::_throw_count = 0; | |
2446 | 125 |
126 static int _byte_arraycopy_cnt = 0; | |
127 static int _short_arraycopy_cnt = 0; | |
128 static int _int_arraycopy_cnt = 0; | |
129 static int _long_arraycopy_cnt = 0; | |
130 static int _oop_arraycopy_cnt = 0; | |
131 | |
132 address Runtime1::arraycopy_count_address(BasicType type) { | |
133 switch (type) { | |
134 case T_BOOLEAN: | |
135 case T_BYTE: return (address)&_byte_arraycopy_cnt; | |
136 case T_CHAR: | |
137 case T_SHORT: return (address)&_short_arraycopy_cnt; | |
138 case T_FLOAT: | |
139 case T_INT: return (address)&_int_arraycopy_cnt; | |
140 case T_DOUBLE: | |
141 case T_LONG: return (address)&_long_arraycopy_cnt; | |
142 case T_ARRAY: | |
143 case T_OBJECT: return (address)&_oop_arraycopy_cnt; | |
144 default: | |
145 ShouldNotReachHere(); | |
146 return NULL; | |
147 } | |
148 } | |
149 | |
150 | |
0 | 151 #endif |
152 | |
153 // Simple helper to see if the caller of a runtime stub which | |
154 // entered the VM has been deoptimized | |
155 | |
156 static bool caller_is_deopted() { | |
157 JavaThread* thread = JavaThread::current(); | |
158 RegisterMap reg_map(thread, false); | |
159 frame runtime_frame = thread->last_frame(); | |
160 frame caller_frame = runtime_frame.sender(®_map); | |
161 assert(caller_frame.is_compiled_frame(), "must be compiled"); | |
162 return caller_frame.is_deoptimized_frame(); | |
163 } | |
164 | |
165 // Stress deoptimization | |
166 static void deopt_caller() { | |
167 if ( !caller_is_deopted()) { | |
168 JavaThread* thread = JavaThread::current(); | |
169 RegisterMap reg_map(thread, false); | |
170 frame runtime_frame = thread->last_frame(); | |
171 frame caller_frame = runtime_frame.sender(®_map); | |
1213
6deeaebad47a
6902182: 4/4 Starting with jdwp agent should not incur performance penalty
dcubed
parents:
1142
diff
changeset
|
172 Deoptimization::deoptimize_frame(thread, caller_frame.id()); |
0 | 173 assert(caller_is_deopted(), "Must be deoptimized"); |
174 } | |
175 } | |
176 | |
177 | |
1584 | 178 void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) { |
0 | 179 assert(0 <= id && id < number_of_ids, "illegal stub id"); |
180 ResourceMark rm; | |
181 // create code buffer for code storage | |
1748 | 182 CodeBuffer code(buffer_blob); |
0 | 183 |
1584 | 184 Compilation::setup_code_buffer(&code, 0); |
0 | 185 |
186 // create assembler for code generation | |
187 StubAssembler* sasm = new StubAssembler(&code, name_for(id), id); | |
188 // generate code for runtime stub | |
189 OopMapSet* oop_maps; | |
190 oop_maps = generate_code_for(id, sasm); | |
191 assert(oop_maps == NULL || sasm->frame_size() != no_frame_size, | |
192 "if stub has an oop map it must have a valid frame size"); | |
193 | |
194 #ifdef ASSERT | |
195 // Make sure that stubs that need oopmaps have them | |
196 switch (id) { | |
197 // These stubs don't need to have an oopmap | |
198 case dtrace_object_alloc_id: | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
199 case g1_pre_barrier_slow_id: |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
200 case g1_post_barrier_slow_id: |
0 | 201 case slow_subtype_check_id: |
202 case fpu2long_stub_id: | |
203 case unwind_exception_id: | |
1449
8cfe3537a0d3
Pointer verification stub. Two loose oop fixes in C1X C++ part. Logging which methods have been compiled.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1434
diff
changeset
|
204 case c1x_verify_pointer_id: |
1429
abc670a709dc
* -XX:TraceC1X=0...5 controls the native c1x tracing
Lukas Stadler <lukas.stadler@oracle.com>
parents:
1247
diff
changeset
|
205 case c1x_unwind_exception_call_id: |
abc670a709dc
* -XX:TraceC1X=0...5 controls the native c1x tracing
Lukas Stadler <lukas.stadler@oracle.com>
parents:
1247
diff
changeset
|
206 case c1x_slow_subtype_check_id: |
1434
72cfb36c6bb2
* enabled all jtt tests
Lukas Stadler <lukas.stadler@oracle.com>
parents:
1429
diff
changeset
|
207 case c1x_arithmetic_frem_id: |
72cfb36c6bb2
* enabled all jtt tests
Lukas Stadler <lukas.stadler@oracle.com>
parents:
1429
diff
changeset
|
208 case c1x_arithmetic_drem_id: |
0 | 209 #ifndef TIERED |
210 case counter_overflow_id: // Not generated outside the tiered world | |
211 #endif | |
212 #ifdef SPARC | |
213 case handle_exception_nofpu_id: // Unused on sparc | |
214 #endif | |
215 break; | |
216 | |
217 // All other stubs should have oopmaps | |
218 default: | |
2607
008adfd6d850
Fixed the stateBefore of invokes and monitorenter instructions to include the arguments of the instruction.
Thomas Wuerthinger <thomas@wuerthinger.net>
parents:
2491
diff
changeset
|
219 tty->print_cr("No oopmap found for %d", id); |
0 | 220 assert(oop_maps != NULL, "must have an oopmap"); |
221 } | |
222 #endif | |
223 | |
224 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned) | |
225 sasm->align(BytesPerWord); | |
226 // make sure all code is in code buffer | |
227 sasm->flush(); | |
228 // create blob - distinguish a few special cases | |
229 CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id), | |
230 &code, | |
231 CodeOffsets::frame_never_safe, | |
232 sasm->frame_size(), | |
233 oop_maps, | |
234 sasm->must_gc_arguments()); | |
235 // install blob | |
236 assert(blob != NULL, "blob must exist"); | |
237 _blobs[id] = blob; | |
238 } | |
239 | |
240 | |
1584 | 241 void Runtime1::initialize(BufferBlob* blob) { |
242 // platform-dependent initialization | |
243 initialize_pd(); | |
244 // generate stubs | |
245 for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id); | |
246 // printing | |
0 | 247 #ifndef PRODUCT |
1584 | 248 if (PrintSimpleStubs) { |
249 ResourceMark rm; | |
250 for (int id = 0; id < number_of_ids; id++) { | |
251 _blobs[id]->print(); | |
252 if (_blobs[id]->oop_maps() != NULL) { | |
253 _blobs[id]->oop_maps()->print(); | |
0 | 254 } |
255 } | |
1584 | 256 } |
0 | 257 #endif |
258 } | |
259 | |
260 | |
261 CodeBlob* Runtime1::blob_for(StubID id) { | |
262 assert(0 <= id && id < number_of_ids, "illegal stub id"); | |
263 return _blobs[id]; | |
264 } | |
265 | |
266 | |
267 const char* Runtime1::name_for(StubID id) { | |
268 assert(0 <= id && id < number_of_ids, "illegal stub id"); | |
269 return _blob_names[id]; | |
270 } | |
271 | |
272 const char* Runtime1::name_for_address(address entry) { | |
273 for (int id = 0; id < number_of_ids; id++) { | |
274 if (entry == entry_for((StubID)id)) return name_for((StubID)id); | |
275 } | |
276 | |
277 #define FUNCTION_CASE(a, f) \ | |
278 if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f | |
279 | |
280 FUNCTION_CASE(entry, os::javaTimeMillis); | |
281 FUNCTION_CASE(entry, os::javaTimeNanos); | |
282 FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end); | |
283 FUNCTION_CASE(entry, SharedRuntime::d2f); | |
284 FUNCTION_CASE(entry, SharedRuntime::d2i); | |
285 FUNCTION_CASE(entry, SharedRuntime::d2l); | |
286 FUNCTION_CASE(entry, SharedRuntime::dcos); | |
287 FUNCTION_CASE(entry, SharedRuntime::dexp); | |
288 FUNCTION_CASE(entry, SharedRuntime::dlog); | |
289 FUNCTION_CASE(entry, SharedRuntime::dlog10); | |
290 FUNCTION_CASE(entry, SharedRuntime::dpow); | |
291 FUNCTION_CASE(entry, SharedRuntime::drem); | |
292 FUNCTION_CASE(entry, SharedRuntime::dsin); | |
293 FUNCTION_CASE(entry, SharedRuntime::dtan); | |
294 FUNCTION_CASE(entry, SharedRuntime::f2i); | |
295 FUNCTION_CASE(entry, SharedRuntime::f2l); | |
296 FUNCTION_CASE(entry, SharedRuntime::frem); | |
297 FUNCTION_CASE(entry, SharedRuntime::l2d); | |
298 FUNCTION_CASE(entry, SharedRuntime::l2f); | |
299 FUNCTION_CASE(entry, SharedRuntime::ldiv); | |
300 FUNCTION_CASE(entry, SharedRuntime::lmul); | |
301 FUNCTION_CASE(entry, SharedRuntime::lrem); | |
302 FUNCTION_CASE(entry, SharedRuntime::lrem); | |
303 FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry); | |
304 FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit); | |
305 FUNCTION_CASE(entry, trace_block_entry); | |
306 | |
307 #undef FUNCTION_CASE | |
308 | |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
309 // Soft float adds more runtime names. |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
310 return pd_name_for_address(entry); |
0 | 311 } |
312 | |
313 | |
314 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, klassOopDesc* klass)) | |
315 NOT_PRODUCT(_new_instance_slowcase_cnt++;) | |
316 | |
317 assert(oop(klass)->is_klass(), "not a class"); | |
318 instanceKlassHandle h(thread, klass); | |
319 h->check_valid_for_instantiation(true, CHECK); | |
320 // make sure klass is initialized | |
321 h->initialize(CHECK); | |
322 // allocate instance and return via TLS | |
323 oop obj = h->allocate_instance(CHECK); | |
324 thread->set_vm_result(obj); | |
325 JRT_END | |
326 | |
327 | |
328 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, klassOopDesc* klass, jint length)) | |
329 NOT_PRODUCT(_new_type_array_slowcase_cnt++;) | |
330 // Note: no handle for klass needed since they are not used | |
331 // anymore after new_typeArray() and no GC can happen before. | |
332 // (This may have to change if this code changes!) | |
333 assert(oop(klass)->is_klass(), "not a class"); | |
334 BasicType elt_type = typeArrayKlass::cast(klass)->element_type(); | |
335 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK); | |
336 thread->set_vm_result(obj); | |
337 // This is pretty rare but this runtime patch is stressful to deoptimization | |
338 // if we deoptimize here so force a deopt to stress the path. | |
339 if (DeoptimizeALot) { | |
340 deopt_caller(); | |
341 } | |
342 | |
343 JRT_END | |
344 | |
345 | |
346 JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, klassOopDesc* array_klass, jint length)) | |
347 NOT_PRODUCT(_new_object_array_slowcase_cnt++;) | |
348 | |
349 // Note: no handle for klass needed since they are not used | |
350 // anymore after new_objArray() and no GC can happen before. | |
351 // (This may have to change if this code changes!) | |
352 assert(oop(array_klass)->is_klass(), "not a class"); | |
353 klassOop elem_klass = objArrayKlass::cast(array_klass)->element_klass(); | |
354 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK); | |
355 thread->set_vm_result(obj); | |
356 // This is pretty rare but this runtime patch is stressful to deoptimization | |
357 // if we deoptimize here so force a deopt to stress the path. | |
358 if (DeoptimizeALot) { | |
359 deopt_caller(); | |
360 } | |
361 JRT_END | |
362 | |
363 | |
364 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, klassOopDesc* klass, int rank, jint* dims)) | |
365 NOT_PRODUCT(_new_multi_array_slowcase_cnt++;) | |
366 | |
367 assert(oop(klass)->is_klass(), "not a class"); | |
368 assert(rank >= 1, "rank must be nonzero"); | |
369 oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK); | |
370 thread->set_vm_result(obj); | |
371 JRT_END | |
372 | |
373 | |
374 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id)) | |
375 tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id); | |
376 JRT_END | |
377 | |
378 | |
2168
e4fee0bdaa85
7008809: should report the class in ArrayStoreExceptions from compiled code
never
parents:
2142
diff
changeset
|
379 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDesc* obj)) |
e4fee0bdaa85
7008809: should report the class in ArrayStoreExceptions from compiled code
never
parents:
2142
diff
changeset
|
380 ResourceMark rm(thread); |
e4fee0bdaa85
7008809: should report the class in ArrayStoreExceptions from compiled code
never
parents:
2142
diff
changeset
|
381 const char* klass_name = Klass::cast(obj->klass())->external_name(); |
e4fee0bdaa85
7008809: should report the class in ArrayStoreExceptions from compiled code
never
parents:
2142
diff
changeset
|
382 SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayStoreException(), klass_name); |
0 | 383 JRT_END |
384 | |
385 | |
386 JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread)) | |
1213
6deeaebad47a
6902182: 4/4 Starting with jdwp agent should not incur performance penalty
dcubed
parents:
1142
diff
changeset
|
387 if (JvmtiExport::can_post_on_exceptions()) { |
0 | 388 vframeStream vfst(thread, true); |
389 address bcp = vfst.method()->bcp_from(vfst.bci()); | |
390 JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop()); | |
391 } | |
392 JRT_END | |
393 | |
1783 | 394 // This is a helper to allow us to safepoint but allow the outer entry |
395 // to be safepoint free if we need to do an osr | |
396 static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) { | |
397 nmethod* osr_nm = NULL; | |
398 methodHandle method(THREAD, m); | |
399 | |
400 RegisterMap map(THREAD, false); | |
401 frame fr = THREAD->last_frame().sender(&map); | |
0 | 402 nmethod* nm = (nmethod*) fr.cb(); |
1783 | 403 assert(nm!= NULL && nm->is_nmethod(), "Sanity check"); |
404 methodHandle enclosing_method(THREAD, nm->method()); | |
405 | |
406 CompLevel level = (CompLevel)nm->comp_level(); | |
407 int bci = InvocationEntryBci; | |
408 if (branch_bci != InvocationEntryBci) { | |
409 // Compute desination bci | |
410 address pc = method()->code_base() + branch_bci; | |
2142 | 411 Bytecodes::Code branch = Bytecodes::code_at(method(), pc); |
1783 | 412 int offset = 0; |
413 switch (branch) { | |
414 case Bytecodes::_if_icmplt: case Bytecodes::_iflt: | |
415 case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt: | |
416 case Bytecodes::_if_icmple: case Bytecodes::_ifle: | |
417 case Bytecodes::_if_icmpge: case Bytecodes::_ifge: | |
418 case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq: | |
419 case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne: | |
420 case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto: | |
421 offset = (int16_t)Bytes::get_Java_u2(pc + 1); | |
422 break; | |
423 case Bytecodes::_goto_w: | |
424 offset = Bytes::get_Java_u4(pc + 1); | |
425 break; | |
426 default: ; | |
0 | 427 } |
1783 | 428 bci = branch_bci + offset; |
429 } | |
430 | |
431 osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD); | |
432 return osr_nm; | |
433 } | |
434 | |
435 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method)) | |
436 nmethod* osr_nm; | |
437 JRT_BLOCK | |
438 osr_nm = counter_overflow_helper(thread, bci, method); | |
439 if (osr_nm != NULL) { | |
440 RegisterMap map(thread, false); | |
441 frame fr = thread->last_frame().sender(&map); | |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1783
diff
changeset
|
442 Deoptimization::deoptimize_frame(thread, fr.id()); |
0 | 443 } |
1783 | 444 JRT_BLOCK_END |
445 return NULL; | |
0 | 446 JRT_END |
447 | |
448 extern void vm_exit(int code); | |
449 | |
450 // Enter this method from compiled code handler below. This is where we transition | |
451 // to VM mode. This is done as a helper routine so that the method called directly | |
452 // from compiled code does not have to transition to VM. This allows the entry | |
453 // method to see if the nmethod that we have just looked up a handler for has | |
454 // been deoptimized while we were in the vm. This simplifies the assembly code | |
455 // cpu directories. | |
456 // | |
457 // We are entering here from exception stub (via the entry method below) | |
458 // If there is a compiled exception handler in this method, we will continue there; | |
459 // otherwise we will unwind the stack and continue at the caller of top frame method | |
460 // Note: we enter in Java using a special JRT wrapper. This wrapper allows us to | |
461 // control the area where we can allow a safepoint. After we exit the safepoint area we can | |
462 // check to see if the handler we are going to return is now in a nmethod that has | |
463 // been deoptimized. If that is the case we return the deopt blob | |
464 // unpack_with_exception entry instead. This makes life for the exception blob easier | |
465 // because making that same check and diverting is painful from assembly language. | |
466 JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm)) | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
467 // Reset method handle flag. |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
468 thread->set_is_method_handle_return(false); |
0 | 469 |
470 Handle exception(thread, ex); | |
1429
abc670a709dc
* -XX:TraceC1X=0...5 controls the native c1x tracing
Lukas Stadler <lukas.stadler@oracle.com>
parents:
1247
diff
changeset
|
471 if (UseC1X && exception.is_null()) { |
abc670a709dc
* -XX:TraceC1X=0...5 controls the native c1x tracing
Lukas Stadler <lukas.stadler@oracle.com>
parents:
1247
diff
changeset
|
472 exception = Exceptions::new_exception(thread, vmSymbols::java_lang_NullPointerException(), NULL); |
abc670a709dc
* -XX:TraceC1X=0...5 controls the native c1x tracing
Lukas Stadler <lukas.stadler@oracle.com>
parents:
1247
diff
changeset
|
473 } |
0 | 474 nm = CodeCache::find_nmethod(pc); |
475 assert(nm != NULL, "this is not an nmethod"); | |
476 // Adjust the pc as needed/ | |
477 if (nm->is_deopt_pc(pc)) { | |
478 RegisterMap map(thread, false); | |
479 frame exception_frame = thread->last_frame().sender(&map); | |
480 // if the frame isn't deopted then pc must not correspond to the caller of last_frame | |
481 assert(exception_frame.is_deoptimized_frame(), "must be deopted"); | |
482 pc = exception_frame.pc(); | |
483 } | |
484 #ifdef ASSERT | |
485 assert(exception.not_null(), "NULL exceptions should be handled by throw_exception"); | |
486 assert(exception->is_oop(), "just checking"); | |
487 // Check that exception is a subclass of Throwable, otherwise we have a VerifyError | |
1142 | 488 if (!(exception->is_a(SystemDictionary::Throwable_klass()))) { |
0 | 489 if (ExitVMOnVerifyError) vm_exit(-1); |
490 ShouldNotReachHere(); | |
491 } | |
492 #endif | |
493 | |
494 // Check the stack guard pages and reenable them if necessary and there is | |
495 // enough space on the stack to do so. Use fast exceptions only if the guard | |
496 // pages are enabled. | |
497 bool guard_pages_enabled = thread->stack_yellow_zone_enabled(); | |
498 if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack(); | |
499 | |
1213
6deeaebad47a
6902182: 4/4 Starting with jdwp agent should not incur performance penalty
dcubed
parents:
1142
diff
changeset
|
500 if (JvmtiExport::can_post_on_exceptions()) { |
0 | 501 // To ensure correct notification of exception catches and throws |
502 // we have to deoptimize here. If we attempted to notify the | |
503 // catches and throws during this exception lookup it's possible | |
504 // we could deoptimize on the way out of the VM and end back in | |
505 // the interpreter at the throw site. This would result in double | |
506 // notifications since the interpreter would also notify about | |
507 // these same catches and throws as it unwound the frame. | |
508 | |
509 RegisterMap reg_map(thread); | |
510 frame stub_frame = thread->last_frame(); | |
511 frame caller_frame = stub_frame.sender(®_map); | |
512 | |
513 // We don't really want to deoptimize the nmethod itself since we | |
514 // can actually continue in the exception handler ourselves but I | |
515 // don't see an easy way to have the desired effect. | |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1783
diff
changeset
|
516 Deoptimization::deoptimize_frame(thread, caller_frame.id()); |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1783
diff
changeset
|
517 assert(caller_is_deopted(), "Must be deoptimized"); |
0 | 518 |
519 return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); | |
520 } | |
521 | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
522 // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions |
0 | 523 if (guard_pages_enabled) { |
524 address fast_continuation = nm->handler_for_exception_and_pc(exception, pc); | |
525 if (fast_continuation != NULL) { | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
526 // Set flag if return address is a method handle call site. |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
527 thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); |
0 | 528 return fast_continuation; |
529 } | |
530 } | |
531 | |
532 // If the stack guard pages are enabled, check whether there is a handler in | |
533 // the current method. Otherwise (guard pages disabled), force an unwind and | |
534 // skip the exception cache update (i.e., just leave continuation==NULL). | |
535 address continuation = NULL; | |
536 if (guard_pages_enabled) { | |
537 | |
538 // New exception handling mechanism can support inlined methods | |
539 // with exception handlers since the mappings are from PC to PC | |
540 | |
541 // debugging support | |
542 // tracing | |
543 if (TraceExceptions) { | |
544 ttyLocker ttyl; | |
545 ResourceMark rm; | |
546 tty->print_cr("Exception <%s> (0x%x) thrown in compiled method <%s> at PC " PTR_FORMAT " for thread 0x%x", | |
547 exception->print_value_string(), (address)exception(), nm->method()->print_value_string(), pc, thread); | |
548 } | |
549 // for AbortVMOnException flag | |
550 NOT_PRODUCT(Exceptions::debug_check_abort(exception)); | |
551 | |
552 // Clear out the exception oop and pc since looking up an | |
553 // exception handler can cause class loading, which might throw an | |
554 // exception and those fields are expected to be clear during | |
555 // normal bytecode execution. | |
556 thread->set_exception_oop(NULL); | |
557 thread->set_exception_pc(NULL); | |
558 | |
559 continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false); | |
560 // If an exception was thrown during exception dispatch, the exception oop may have changed | |
561 thread->set_exception_oop(exception()); | |
562 thread->set_exception_pc(pc); | |
563 | |
564 // the exception cache is used only by non-implicit exceptions | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
565 if (continuation != NULL) { |
0 | 566 nm->add_handler_for_exception_and_pc(exception, pc, continuation); |
567 } | |
568 } | |
569 | |
570 thread->set_vm_result(exception()); | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
571 // Set flag if return address is a method handle call site. |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
572 thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); |
0 | 573 |
574 if (TraceExceptions) { | |
575 ttyLocker ttyl; | |
576 ResourceMark rm; | |
577 tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT, | |
578 thread, continuation, pc); | |
579 } | |
580 | |
581 return continuation; | |
582 JRT_END | |
583 | |
584 // Enter this method from compiled code only if there is a Java exception handler | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
585 // in the method handling the exception. |
0 | 586 // We are entering here from exception stub. We don't do a normal VM transition here. |
587 // We do it in a helper. This is so we can check to see if the nmethod we have just | |
588 // searched for an exception handler has been deoptimized in the meantime. | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
589 address Runtime1::exception_handler_for_pc(JavaThread* thread) { |
0 | 590 oop exception = thread->exception_oop(); |
591 address pc = thread->exception_pc(); | |
592 // Still in Java mode | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
593 DEBUG_ONLY(ResetNoHandleMark rnhm); |
0 | 594 nmethod* nm = NULL; |
595 address continuation = NULL; | |
596 { | |
597 // Enter VM mode by calling the helper | |
598 ResetNoHandleMark rnhm; | |
599 continuation = exception_handler_for_pc_helper(thread, exception, pc, nm); | |
600 } | |
601 // Back in JAVA, use no oops DON'T safepoint | |
602 | |
603 // Now check to see if the nmethod we were called from is now deoptimized. | |
604 // If so we must return to the deopt blob and deoptimize the nmethod | |
605 if (nm != NULL && caller_is_deopted()) { | |
606 continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); | |
607 } | |
608 | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
609 assert(continuation != NULL, "no handler found"); |
0 | 610 return continuation; |
611 } | |
612 | |
613 | |
614 JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index)) | |
615 NOT_PRODUCT(_throw_range_check_exception_count++;) | |
616 Events::log("throw_range_check"); | |
617 char message[jintAsStringSize]; | |
618 sprintf(message, "%d", index); | |
619 SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message); | |
620 JRT_END | |
621 | |
622 | |
623 JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index)) | |
624 NOT_PRODUCT(_throw_index_exception_count++;) | |
625 Events::log("throw_index"); | |
626 char message[16]; | |
627 sprintf(message, "%d", index); | |
628 SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message); | |
629 JRT_END | |
630 | |
631 | |
632 JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* thread)) | |
633 NOT_PRODUCT(_throw_div0_exception_count++;) | |
634 SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero"); | |
635 JRT_END | |
636 | |
637 | |
638 JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* thread)) | |
639 NOT_PRODUCT(_throw_null_pointer_exception_count++;) | |
640 SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException()); | |
641 JRT_END | |
642 | |
643 | |
644 JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* thread, oopDesc* object)) | |
645 NOT_PRODUCT(_throw_class_cast_exception_count++;) | |
646 ResourceMark rm(thread); | |
647 char* message = SharedRuntime::generate_class_cast_message( | |
648 thread, Klass::cast(object->klass())->external_name()); | |
649 SharedRuntime::throw_and_post_jvmti_exception( | |
650 thread, vmSymbols::java_lang_ClassCastException(), message); | |
651 JRT_END | |
652 | |
653 | |
654 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread)) | |
655 NOT_PRODUCT(_throw_incompatible_class_change_error_count++;) | |
656 ResourceMark rm(thread); | |
657 SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError()); | |
658 JRT_END | |
659 | |
660 | |
661 JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock)) | |
662 NOT_PRODUCT(_monitorenter_slowcase_cnt++;) | |
1942
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
663 #ifdef ASSERT |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
664 if (TraceC1X >= 3) { |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
665 tty->print_cr("entered locking slow case with obj=" INTPTR_FORMAT " and lock= " INTPTR_FORMAT, obj, lock); |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
666 } |
0 | 667 if (PrintBiasedLockingStatistics) { |
668 Atomic::inc(BiasedLocking::slow_path_entry_count_addr()); | |
669 } | |
1942
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
670 #endif |
0 | 671 Handle h_obj(thread, obj); |
672 assert(h_obj()->is_oop(), "must be NULL or an object"); | |
673 if (UseBiasedLocking) { | |
1942
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
674 if (UseFastLocking) { |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
675 assert(obj == lock->obj(), "must match"); |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
676 } else { |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
677 lock->set_obj(obj); |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
678 } |
0 | 679 // Retry fast entry if bias is revoked to avoid unnecessary inflation |
680 ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK); | |
681 } else { | |
682 if (UseFastLocking) { | |
683 // When using fast locking, the compiled code has already tried the fast case | |
684 assert(obj == lock->obj(), "must match"); | |
685 ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD); | |
686 } else { | |
687 lock->set_obj(obj); | |
688 ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD); | |
689 } | |
690 } | |
1942
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
691 #ifdef ASSERT |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
692 if (TraceC1X >= 3) { |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
693 tty->print_cr("exiting locking lock state: obj=" INTPTR_FORMAT, lock->obj()); |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
694 lock->lock()->print_on(tty); |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
695 tty->print_cr(""); |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
696 tty->print_cr("done"); |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
697 } |
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
698 #endif |
0 | 699 JRT_END |
700 | |
701 | |
702 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock)) | |
703 NOT_PRODUCT(_monitorexit_slowcase_cnt++;) | |
704 assert(thread == JavaThread::current(), "threads must correspond"); | |
705 assert(thread->last_Java_sp(), "last_Java_sp must be set"); | |
706 // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown | |
707 EXCEPTION_MARK; | |
708 | |
709 oop obj = lock->obj(); | |
1480
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
710 |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
711 #ifdef DEBUG |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
712 if (!obj->is_oop()) { |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
713 ResetNoHandleMark rhm; |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
714 nmethod* method = thread->last_frame().cb()->as_nmethod_or_null(); |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
715 if (method != NULL) { |
1942
00bc9eaf0e24
Support for -XX:+UseFastLocking flag. Fixed monitor enter XIR template for correct debug info at the runtime call.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1931
diff
changeset
|
716 tty->print_cr("ERROR in monitorexit in method %s wrong obj " INTPTR_FORMAT, method->name(), obj); |
1480
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
717 } |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
718 thread->print_stack_on(tty); |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
719 assert(false, "invalid lock object pointer dected"); |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
720 } |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
721 #endif |
2fe369533fed
Additional debug output.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1472
diff
changeset
|
722 |
0 | 723 if (UseFastLocking) { |
724 // When using fast locking, the compiled code has already tried the fast case | |
725 ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD); | |
726 } else { | |
727 ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD); | |
728 } | |
729 JRT_END | |
730 | |
731 | |
732 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { | |
2142 | 733 Bytecode_field field_access(caller, bci); |
0 | 734 // This can be static or non-static field access |
2142 | 735 Bytecodes::Code code = field_access.code(); |
0 | 736 |
737 // We must load class, initialize class and resolvethe field | |
738 FieldAccessInfo result; // initialize class if needed | |
739 constantPoolHandle constants(THREAD, caller->constants()); | |
2142 | 740 LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL); |
0 | 741 return result.klass()(); |
742 } | |
743 | |
744 | |
745 // | |
746 // This routine patches sites where a class wasn't loaded or | |
747 // initialized at the time the code was generated. It handles | |
748 // references to classes, fields and forcing of initialization. Most | |
749 // of the cases are straightforward and involving simply forcing | |
750 // resolution of a class, rewriting the instruction stream with the | |
751 // needed constant and replacing the call in this function with the | |
752 // patched code. The case for static field is more complicated since | |
753 // the thread which is in the process of initializing a class can | |
754 // access its static fields but other threads can't so the code |
755 // either has to deoptimize when this case is detected or execute a | |
756 // check that the current thread is the initializing thread. The | |
757 // current | |
758 // | |
759 // Patches basically look like this: | |
760 // | |
761 // | |
762 // patch_site: jmp patch stub ;; will be patched | |
763 // continue: ... | |
764 // ... | |
765 // ... | |
766 // ... | |
767 // | |
768 // They have a stub which looks like this: | |
769 // | |
770 // ;; patch body | |
771 // movl <const>, reg (for class constants) | |
772 // <or> movl [reg1 + <const>], reg (for field offsets) | |
773 // <or> movl reg, [reg1 + <const>] (for field offsets) | |
774 // <being_init offset> <bytes to copy> <bytes to skip> | |
775 // patch_stub: call Runtime1::patch_code (through a runtime stub) | |
776 // jmp patch_site | |
777 // | |
778 // | |
779 // A normal patch is done by rewriting the patch body, usually a move, | |
780 // and then copying it into place over top of the jmp instruction | |
781 // being careful to flush caches and doing it in an MP-safe way. The | |
782 // constants following the patch body are used to find various pieces | |
783 // of the patch relative to the call site for Runtime1::patch_code. | |
784 // The case for getstatic and putstatic is more complicated because | |
785 // getstatic and putstatic have special semantics when executing while | |
786 // the class is being initialized. getstatic/putstatic on a class | |
787 // which is being_initialized may be executed by the initializing | |
788 // thread but other threads have to block when they execute it. This | |
789 // is accomplished in compiled code by executing a test of the current | |
790 // thread against the initializing thread of the class. It's emitted | |
791 // as boilerplate in their stub which allows the patched code to be | |
792 // executed before it's copied back into the main body of the nmethod. | |
793 // | |
794 // being_init: get_thread(<tmp reg>) |
795 // cmpl [reg1 + <init_thread_offset>], <tmp reg> | |
796 // jne patch_stub | |
797 // movl [reg1 + <const>], reg (for field offsets) <or> | |
798 // movl reg, [reg1 + <const>] (for field offsets) | |
799 // jmp continue | |
800 // <being_init offset> <bytes to copy> <bytes to skip> | |
801 // patch_stub: jmp Runtime1::patch_code (through a runtime stub) |
802 // jmp patch_site | |
803 // | |
804 // If the class is being initialized the patch body is rewritten and | |
805 // the patch site is rewritten to jump to being_init, instead of | |
806 // patch_stub. Whenever this code is executed it checks the current | |
807 // thread against the initializing thread so other threads will enter |
808 // the runtime and end up blocked waiting for the class to finish |
809 // initializing inside the calls to resolve_field below. The | |
810 // initializing class will continue on it's way. Once the class is | |
811 // fully_initialized, the initializing_thread of the class becomes |
812 // NULL, so the next thread to execute this code will fail the test, | |
813 // call into patch_code and complete the patching process by copying | |
814 // the patch body back into the main part of the nmethod and resume | |
815 // executing. | |
816 // | |
817 // | |
818 | |
819 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id )) | |
820 NOT_PRODUCT(_patch_code_slowcase_cnt++;) | |
821 | |
822 ResourceMark rm(thread); | |
823 RegisterMap reg_map(thread, false); | |
824 frame runtime_frame = thread->last_frame(); | |
825 frame caller_frame = runtime_frame.sender(®_map); | |
826 | |
827 // last java frame on stack | |
828 vframeStream vfst(thread, true); | |
829 assert(!vfst.at_end(), "Java frame must exist"); | |
830 | |
831 methodHandle caller_method(THREAD, vfst.method()); | |
832 // Note that caller_method->code() may not be same as caller_code because of OSR's | |
833 // Note also that in the presence of inlining it is not guaranteed | |
834 // that caller_method() == caller_code->method() | |
835 | |
836 | |
837 int bci = vfst.bci(); | |
838 | |
839 Events::log("patch_code @ " INTPTR_FORMAT , caller_frame.pc()); | |
840 | |
2142 | 841 Bytecodes::Code code = caller_method()->java_code_at(bci); |
0 | 842 |
843 #ifndef PRODUCT | |
844 // this is used by assertions in the access_field_patching_id | |
845 BasicType patch_field_type = T_ILLEGAL; | |
846 #endif // PRODUCT | |
847 bool deoptimize_for_volatile = false; | |
848 int patch_field_offset = -1; | |
849 KlassHandle init_klass(THREAD, klassOop(NULL)); // klass needed by access_field_patching code | |
850 Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code | |
851 if (stub_id == Runtime1::access_field_patching_id) { | |
852 | |
2142 | 853 Bytecode_field field_access(caller_method, bci); |
0 | 854 FieldAccessInfo result; // initialize class if needed |
2142 | 855 Bytecodes::Code code = field_access.code(); |
0 | 856 constantPoolHandle constants(THREAD, caller_method->constants()); |
2142 | 857 LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK); |
0 | 858 patch_field_offset = result.field_offset(); |
859 | |
860 // If we're patching a field which is volatile then at compile it | |
861 // must not have been know to be volatile, so the generated code | |
862 // isn't correct for a volatile reference. The nmethod has to be | |
863 // deoptimized so that the code can be regenerated correctly. | |
864 // This check is only needed for access_field_patching since this | |
865 // is the path for patching field offsets. load_klass is only | |
866 // used for patching references to oops which don't need special | |
867 // handling in the volatile case. | |
868 deoptimize_for_volatile = result.access_flags().is_volatile(); | |
869 | |
870 #ifndef PRODUCT | |
871 patch_field_type = result.field_type(); | |
872 #endif | |
873 } else if (stub_id == Runtime1::load_klass_patching_id) { | |
874 oop k; | |
875 switch (code) { | |
876 case Bytecodes::_putstatic: | |
877 case Bytecodes::_getstatic: | |
878 { klassOop klass = resolve_field_return_klass(caller_method, bci, CHECK); | |
879 // Save a reference to the class that has to be checked for initialization | |
880 init_klass = KlassHandle(THREAD, klass); | |
2376
c7f3d0b4570f
7017732: move static fields into Class to prepare for perm gen removal
never
parents:
2321
diff
changeset
|
881 k = klass->java_mirror(); |
0 | 882 } |
883 break; | |
884 case Bytecodes::_new: | |
2142 | 885 { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci)); |
886 k = caller_method->constants()->klass_at(bnew.index(), CHECK); | |
0 | 887 } |
888 break; | |
889 case Bytecodes::_multianewarray: | |
2142 | 890 { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci)); |
891 k = caller_method->constants()->klass_at(mna.index(), CHECK); | |
0 | 892 } |
893 break; | |
894 case Bytecodes::_instanceof: | |
2142 | 895 { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci)); |
896 k = caller_method->constants()->klass_at(io.index(), CHECK); | |
0 | 897 } |
898 break; | |
899 case Bytecodes::_checkcast: | |
2142 | 900 { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci)); |
901 k = caller_method->constants()->klass_at(cc.index(), CHECK); | |
0 | 902 } |
903 break; | |
904 case Bytecodes::_anewarray: | |
2142 | 905 { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci)); |
906 klassOop ek = caller_method->constants()->klass_at(anew.index(), CHECK); | |
0 | 907 k = Klass::cast(ek)->array_klass(CHECK); |
908 } | |
909 break; | |
910 case Bytecodes::_ldc: | |
911 case Bytecodes::_ldc_w: | |
912 { | |
2142 | 913 Bytecode_loadconstant cc(caller_method, bci); |
914 k = cc.resolve_constant(CHECK); | |
1602 | 915 assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant"); |
0 | 916 } |
917 break; | |
1472
7641338cfc92
Small NPE fix. More detailed error in case of wrong bytecode in patching stub.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1449
diff
changeset
|
918 default: |
7641338cfc92
Small NPE fix. More detailed error in case of wrong bytecode in patching stub.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1449
diff
changeset
|
919 tty->print_cr("Unhandled bytecode: %d stub_id=%d caller=%s bci=%d pc=%d", code, stub_id, caller_method->name()->as_C_string(), bci, caller_frame.pc()); |
7641338cfc92
Small NPE fix. More detailed error in case of wrong bytecode in patching stub.
Thomas Wuerthinger <wuerthinger@ssw.jku.at>
parents:
1449
diff
changeset
|
920 Unimplemented(); |
0 | 921 } |
922 // convert to handle | |
923 load_klass = Handle(THREAD, k); | |
924 } else { | |
925 ShouldNotReachHere(); | |
926 } | |
927 | |
928 if (deoptimize_for_volatile) { | |
929 // At compile time we assumed the field wasn't volatile but after | |
930 // loading it turns out it was volatile so we have to throw the | |
931 // compiled code out and let it be regenerated. | |
932 if (TracePatching) { | |
933 tty->print_cr("Deoptimizing for patching volatile field reference"); | |
934 } | |
485
ac8fe14c93e4
6767587: missing call to make_not_entrant after deoptimizing for patching volatiles
never
parents:
362
diff
changeset
|
935 // It's possible the nmethod was invalidated in the last |
ac8fe14c93e4
6767587: missing call to make_not_entrant after deoptimizing for patching volatiles
never
parents:
362
diff
changeset
|
936 // safepoint, but if it's still alive then make it not_entrant. |
ac8fe14c93e4
6767587: missing call to make_not_entrant after deoptimizing for patching volatiles
never
parents:
362
diff
changeset
|
937 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); |
ac8fe14c93e4
6767587: missing call to make_not_entrant after deoptimizing for patching volatiles
never
parents:
362
diff
changeset
|
938 if (nm != NULL) { |
ac8fe14c93e4
6767587: missing call to make_not_entrant after deoptimizing for patching volatiles
never
parents:
362
diff
changeset
|
939 nm->make_not_entrant(); |
ac8fe14c93e4
6767587: missing call to make_not_entrant after deoptimizing for patching volatiles
never
parents:
362
diff
changeset
|
940 } |
ac8fe14c93e4
6767587: missing call to make_not_entrant after deoptimizing for patching volatiles
never
parents:
362
diff
changeset
|
941 |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1783
diff
changeset
|
942 Deoptimization::deoptimize_frame(thread, caller_frame.id()); |
0 | 943 |
944 // Return to the now deoptimized frame. | |
945 } | |
946 | |
1602 | 947 // If we are patching in a non-perm oop, make sure the nmethod |
948 // is on the right list. | |
949 if (ScavengeRootsInCode && load_klass.not_null() && load_klass->is_scavengable()) { | |
950 MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag); | |
951 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); | |
952 guarantee(nm != NULL, "only nmethods can contain non-perm oops"); | |
953 if (!nm->on_scavenge_root_list()) | |
954 CodeCache::add_scavenge_root_nmethod(nm); | |
955 } | |
0 | 956 |
957 // Now copy code back | |
958 | |
959 { | |
960 MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag); | |
961 // | |
962 // Deoptimization may have happened while we waited for the lock. | |
963 // In that case we don't bother to do any patching we just return | |
964 // and let the deopt happen | |
965 if (!caller_is_deopted()) { | |
966 NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc()); | |
967 address instr_pc = jump->jump_destination(); | |
968 NativeInstruction* ni = nativeInstruction_at(instr_pc); | |
969 if (ni->is_jump() ) { | |
970 // the jump has not been patched yet | |
971 // The jump destination is slow case and therefore not part of the stubs | |
972 // (stubs are only for StaticCalls) | |
973 | |
974 // format of buffer | |
975 // .... | |
976 // instr byte 0 <-- copy_buff | |
977 // instr byte 1 | |
978 // .. | |
979 // instr byte n-1 | |
980 // n | |
981 // .... <-- call destination | |
982 | |
983 address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset(); | |
984 unsigned char* byte_count = (unsigned char*) (stub_location - 1); | |
985 unsigned char* byte_skip = (unsigned char*) (stub_location - 2); | |
986 unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3); | |
987 address copy_buff = stub_location - *byte_skip - *byte_count; | |
988 address being_initialized_entry = stub_location - *being_initialized_entry_offset; | |
989 if (TracePatching) { | |
990 tty->print_cr(" Patching %s at bci %d at address 0x%x (%s)", Bytecodes::name(code), bci, | |
991 instr_pc, (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass"); | |
992 nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc()); | |
993 assert(caller_code != NULL, "nmethod not found"); | |
994 | |
995 // NOTE we use pc() not original_pc() because we already know they are | |
996 // identical otherwise we'd have never entered this block of code | |
997 | |
998 OopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc()); | |
999 assert(map != NULL, "null check"); | |
1000 map->print(); | |
1001 tty->cr(); | |
1002 | |
1003 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); | |
1004 } | |
1005 // depending on the code below, do_patch says whether to copy the patch body back into the nmethod | |
1006 bool do_patch = true; | |
1007 if (stub_id == Runtime1::access_field_patching_id) { | |
1008 // The offset may not be correct if the class was not loaded at code generation time. | |
1009 // Set it now. | |
1010 NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff); | |
1011 assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type"); | |
1012 assert(patch_field_offset >= 0, "illegal offset"); | |
1013 n_move->add_offset_in_bytes(patch_field_offset); | |
1014 } else if (stub_id == Runtime1::load_klass_patching_id) { | |
1015 // If a getstatic or putstatic is referencing a klass which | |
1016 // isn't fully initialized, the patch body isn't copied into | |
1017 // place until initialization is complete. In this case the | |
1018 // patch site is setup so that any threads besides the | |
1019 // initializing thread are forced to come into the VM and | |
1020 // block. | |
1021 do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) || | |
1022 instanceKlass::cast(init_klass())->is_initialized(); | |
1023 NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc); | |
1024 if (jump->jump_destination() == being_initialized_entry) { | |
1025 assert(do_patch == true, "initialization must be complete at this point"); | |
1026 } else { | |
1027 // patch the instruction <move reg, klass> | |
1028 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); | |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1029 |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1030 assert(n_copy->data() == 0 || |
1783 | 1031 n_copy->data() == (intptr_t)Universe::non_oop_word(), |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1032 "illegal init value"); |
0 | 1033 assert(load_klass() != NULL, "klass not set"); |
1034 n_copy->set_data((intx) (load_klass())); | |
1035 | |
1036 if (TracePatching) { | |
1037 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); | |
1038 } | |
1039 | |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1040 #if defined(SPARC) || defined(PPC) |
0 | 1041 // Update the oop location in the nmethod with the proper |
1042 // oop. When the code was generated, a NULL was stuffed | |
1043 // in the oop table and that table needs to be update to | |
1044 // have the right value. On intel the value is kept | |
1045 // directly in the instruction instead of in the oop | |
1046 // table, so set_data above effectively updated the value. | |
1047 nmethod* nm = CodeCache::find_nmethod(instr_pc); | |
1048 assert(nm != NULL, "invalid nmethod_pc"); | |
1049 RelocIterator oops(nm, copy_buff, copy_buff + 1); | |
1050 bool found = false; | |
1051 while (oops.next() && !found) { | |
1052 if (oops.type() == relocInfo::oop_type) { | |
1053 oop_Relocation* r = oops.oop_reloc(); | |
1054 oop* oop_adr = r->oop_addr(); | |
1055 *oop_adr = load_klass(); | |
1056 r->fix_oop_relocation(); | |
1057 found = true; | |
1058 } | |
1059 } | |
1060 assert(found, "the oop must exist!"); | |
1061 #endif | |
1062 | |
1063 } | |
1064 } else { | |
1065 ShouldNotReachHere(); | |
1066 } | |
1067 if (do_patch) { | |
1068 // replace instructions | |
1069 // first replace the tail, then the call | |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1070 #ifdef ARM |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1071 if(stub_id == Runtime1::load_klass_patching_id && !VM_Version::supports_movw()) { |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1072 copy_buff -= *byte_count; |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1073 NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff); |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1074 n_copy2->set_data((intx) (load_klass()), instr_pc); |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1075 } |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1076 #endif |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1077 |
0 | 1078 for (int i = NativeCall::instruction_size; i < *byte_count; i++) { |
1079 address ptr = copy_buff + i; | |
1080 int a_byte = (*ptr) & 0xFF; | |
1081 address dst = instr_pc + i; | |
1082 *(unsigned char*)dst = (unsigned char) a_byte; | |
1083 } | |
1084 ICache::invalidate_range(instr_pc, *byte_count); | |
1085 NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff); | |
1086 | |
1087 if (stub_id == Runtime1::load_klass_patching_id) { | |
1088 // update relocInfo to oop | |
1089 nmethod* nm = CodeCache::find_nmethod(instr_pc); | |
1090 assert(nm != NULL, "invalid nmethod_pc"); | |
1091 | |
1092 // The old patch site is now a move instruction so update | |
1093 // the reloc info so that it will get updated during | |
1094 // future GCs. | |
1095 RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1)); | |
1096 relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc, | |
1097 relocInfo::none, relocInfo::oop_type); | |
1098 #ifdef SPARC | |
1099 // Sparc takes two relocations for an oop so update the second one. | |
1100 address instr_pc2 = instr_pc + NativeMovConstReg::add_offset; | |
1101 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1); | |
1102 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, | |
1103 relocInfo::none, relocInfo::oop_type); | |
1104 #endif | |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1105 #ifdef PPC |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1106 { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset; |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1107 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1); |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1108 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, relocInfo::none, relocInfo::oop_type); |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1109 } |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1603
diff
changeset
|
1110 #endif |
0 | 1111 } |
1112 | |
1113 } else { | |
1114 ICache::invalidate_range(copy_buff, *byte_count); | |
1115 NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry); | |
1116 } | |
1117 } | |
1118 } | |
1119 } | |
1120 JRT_END | |
1121 | |
//
// Entry point for compiled code. We want to patch a nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition and when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
int Runtime1::move_klass_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  // NOTE(review): the local must be named THREAD — presumably required by the
  // VM exception-check macros; verify against utilities/exceptions.hpp.
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    // ResetNoHandleMark lifts the NoHandleMark above for the duration of this
    // scope only; patch_code performs the real VM-transition work.
    ResetNoHandleMark rnhm;
    patch_code(thread, load_klass_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}
1149 | |
1150 // | |
1151 // Entry point for compiled code. We want to patch a nmethod. | |
1152 // We don't do a normal VM transition here because we want to | |
1153 // know after the patching is complete and any safepoint(s) are taken | |
1154 // if the calling nmethod was deoptimized. We do this by calling a | |
1155 // helper method which does the normal VM transition and when it | |
1156 // completes we can check for deoptimization. This simplifies the | |
1157 // assembly code in the cpu directories. | |
1158 // | |
1159 | |
1160 int Runtime1::access_field_patching(JavaThread* thread) { | |
1161 // | |
1162 // NOTE: we are still in Java | |
1163 // | |
1164 Thread* THREAD = thread; | |
1165 debug_only(NoHandleMark nhm;) | |
1166 { | |
1167 // Enter VM mode | |
1168 | |
1169 ResetNoHandleMark rnhm; | |
1170 patch_code(thread, access_field_patching_id); | |
1171 } | |
1172 // Back in JAVA, use no oops DON'T safepoint | |
1173 | |
1174 // Return true if calling code is deoptimized | |
1175 | |
1176 return caller_is_deopted(); | |
1177 JRT_END | |
1178 | |
1179 | |
// Debug helper: prints the id of the basic block being entered.
JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
  // for now we just print out the block id
  tty->print("%d ", block_id);
JRT_END
1184 | |
1185 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
// Array copy return codes.
// Returned by obj_arraycopy_work and Runtime1::arraycopy below.  A failure
// tells the caller to fall back to the slow path (per the comment on
// Runtime1::arraycopy, an exception may then be thrown; nothing was copied).
enum {
  ac_failed = -1, // arraycopy failed
  ac_ok = 0       // arraycopy succeeded
};
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1191 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1192 |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1142
diff
changeset
|
// Below length is the # elements copied.
//
// Copies 'length' elements between two object arrays, issuing the GC
// pre-barrier on the destination slots before they are overwritten and the
// post-barrier after the copy.  T is the element cell type (oop or
// narrowOop — see the two call sites in Runtime1::arraycopy).  Returns
// ac_ok on success, or ac_failed when the source element type is not a
// subtype of the destination element type (caller then takes the checked
// slow path).
template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                          oopDesc* dst, T* dst_addr,
                                          int length) {

  // For performance reasons, we assume we are using a card marking write
  // barrier. The assert will fail if this is not the case.
  // Note that we use the non-virtual inlineable variant of write_ref_array.
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (src == dst) {
    // same object, no check needed: element types trivially match
    bs->write_ref_array_pre(dst_addr, length);
    Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
    bs->write_ref_array((HeapWord*)dst_addr, length);
    return ac_ok;
  } else {
    klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
    klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
    if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
      // Elements are guaranteed to be subtypes, so no check necessary
      bs->write_ref_array_pre(dst_addr, length);
      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
      bs->write_ref_array((HeapWord*)dst_addr, length);
      return ac_ok;
    }
  }
  return ac_failed;
}
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1223 |
// fast and direct copy of arrays; returning -1, means that an exception may be thrown
// and we did not copy anything
//
// Fast path invoked from compiled code for System.arraycopy-style copies.
// Handles two cases inline: (a) primitive arrays of identical klass, via a
// raw memmove; (b) oop arrays, via obj_arraycopy_work (which does the GC
// barriers and the cheap subtype check).  Everything else returns ac_failed
// so the caller falls back to the slow path.
JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
#ifndef PRODUCT
  _generic_arraycopy_cnt++;        // Slow-path oop array copy
#endif

  if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
  if (!dst->is_array() || !src->is_array()) return ac_failed;
  // Bounds checks in unsigned arithmetic: negative values were rejected
  // above, and the unsigned sum src_pos + length cannot wrap (each operand
  // is < 2^31), so this also catches sums that exceed INT_MAX.
  if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
  if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;

  if (length == 0) return ac_ok;
  if (src->is_typeArray()) {
    // Primitive arrays must have the exact same klass (same element type).
    const klassOop klass_oop = src->klass();
    if (klass_oop != dst->klass()) return ac_failed;
    typeArrayKlass* klass = typeArrayKlass::cast(klass_oop);
    const int l2es = klass->log2_element_size();
    const int ihs = klass->array_header_in_bytes() / wordSize;
    // Compute raw element addresses: header words past the oop, then the
    // element offset scaled by log2(element size).
    char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
    char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
    // Potential problem: memmove is not guaranteed to be word atomic
    // Revisit in Merlin
    memmove(dst_addr, src_addr, length << l2es);
    return ac_ok;
  } else if (src->is_objArray() && dst->is_objArray()) {
    if (UseCompressedOops) {
      narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
      narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    } else {
      oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
      oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    }
  }
  return ac_failed;
JRT_END
1262 | |
1263 | |
// Copy a span of a primitive array.  'length' is a byte count (it is passed
// straight to Copy::conjoint_jbytes); no GC barriers are needed since no
// oops are involved.
JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
#ifndef PRODUCT
  _primitive_arraycopy_cnt++;
#endif

  if (length == 0) return;
  // Not guaranteed to be word atomic, but that doesn't matter
  // for anything but an oop array, which is covered by oop_arraycopy.
  Copy::conjoint_jbytes(src, dst, length);
JRT_END
1274 | |
// Copy 'num' oop elements between arrays whose element types are already
// known to be compatible (no subtype check here).  Issues the GC
// pre-barrier on the destination before the copy and the post-barrier
// after, choosing narrowOop vs. oop cell width by UseCompressedOops.
JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
#ifndef PRODUCT
  _oop_arraycopy_cnt++;
#endif

  if (num == 0) return;
  // As in obj_arraycopy_work: assumes a card-marking barrier set with the
  // inlineable ref-array variants available.
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (UseCompressedOops) {
    bs->write_ref_array_pre((narrowOop*)dst, num);
    Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
  } else {
    bs->write_ref_array_pre((oop*)dst, num);
    Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
  }
  bs->write_ref_array(dst, num);
JRT_END
1293 | |
1294 | |
#ifndef PRODUCT
// Dump the C1 runtime counters (plus a few SharedRuntime resolution/IC-miss
// counters) to tty.  Non-PRODUCT builds only.
void Runtime1::print_statistics() {
  tty->print_cr("C1 Runtime statistics:");
  tty->print_cr(" _resolve_invoke_virtual_cnt: %d", SharedRuntime::_resolve_virtual_ctr);
  tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
  tty->print_cr(" _resolve_invoke_static_cnt: %d", SharedRuntime::_resolve_static_ctr);
  tty->print_cr(" _handle_wrong_method_cnt: %d", SharedRuntime::_wrong_method_ctr);
  tty->print_cr(" _ic_miss_cnt: %d", SharedRuntime::_ic_miss_ctr);
  tty->print_cr(" _generic_arraycopy_cnt: %d", _generic_arraycopy_cnt);
  tty->print_cr(" _generic_arraycopystub_cnt: %d", _generic_arraycopystub_cnt);
  tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_cnt);
  tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_cnt);
  tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_cnt);
  tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_cnt);
  tty->print_cr(" _primitive_arraycopy_cnt: %d", _primitive_arraycopy_cnt);
  // NOTE(review): the "(C)" and "(stub)" lines below both print the same
  // variable (Runtime1::_oop_arraycopy_cnt); the stub line presumably should
  // reference a separate stub counter — verify against c1_Runtime1.hpp.
  tty->print_cr(" _oop_arraycopy_cnt (C): %d", Runtime1::_oop_arraycopy_cnt);
  tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt: %d", _arraycopy_slowcase_cnt);
  tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);

  tty->print_cr(" _new_type_array_slowcase_cnt: %d", _new_type_array_slowcase_cnt);
  tty->print_cr(" _new_object_array_slowcase_cnt: %d", _new_object_array_slowcase_cnt);
  tty->print_cr(" _new_instance_slowcase_cnt: %d", _new_instance_slowcase_cnt);
  tty->print_cr(" _new_multi_array_slowcase_cnt: %d", _new_multi_array_slowcase_cnt);
  tty->print_cr(" _monitorenter_slowcase_cnt: %d", _monitorenter_slowcase_cnt);
  tty->print_cr(" _monitorexit_slowcase_cnt: %d", _monitorexit_slowcase_cnt);
  tty->print_cr(" _patch_code_slowcase_cnt: %d", _patch_code_slowcase_cnt);

  tty->print_cr(" _throw_range_check_exception_count: %d:", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count: %d:", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count: %d:", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count: %d:", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count: %d:", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count: %d:", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_array_store_exception_count: %d:", _throw_array_store_exception_count);
  tty->print_cr(" _throw_count: %d:", _throw_count);

  SharedRuntime::print_ic_miss_histogram();
  tty->cr();
}
#endif // PRODUCT