comparison src/cpu/x86/vm/methodHandles_x86.cpp @ 6266:1d7922586cf6

7023639: JSR 292 method handle invocation needs a fast path for compiled code
6984705: JSR 292 method handle creation should not go through JNI
Summary: remove assembly code for JDK 7 chained method handles
Reviewed-by: jrose, twisti, kvn, mhaupt
Contributed-by: John Rose <john.r.rose@oracle.com>, Christian Thalinger <christian.thalinger@oracle.com>, Michael Haupt <michael.haupt@oracle.com>
author twisti
date Tue, 24 Jul 2012 10:51:00 -0700
parents 45a1bf98f1bb
children da91efe96a93
6241:aba91a731143 6266:1d7922586cf6
30 30
31 #define __ _masm-> 31 #define __ _masm->
32 32
33 #ifdef PRODUCT 33 #ifdef PRODUCT
34 #define BLOCK_COMMENT(str) /* nothing */ 34 #define BLOCK_COMMENT(str) /* nothing */
35 #define STOP(error) stop(error)
35 #else 36 #else
36 #define BLOCK_COMMENT(str) __ block_comment(str) 37 #define BLOCK_COMMENT(str) __ block_comment(str)
38 #define STOP(error) block_comment(error); __ stop(error)
37 #endif 39 #endif
38 40
39 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 41 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
40 42
41 // Workaround for C++ overloading nastiness on '0' for RegisterOrConstant. 43 // Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
42 static RegisterOrConstant constant(int value) { 44 static RegisterOrConstant constant(int value) {
43 return RegisterOrConstant(value); 45 return RegisterOrConstant(value);
44 } 46 }
45 47
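The wrapper above exists because a bare literal 0 is ambiguous during overload resolution: it converts equally well to an integer and to a null pointer. A standalone sketch of the problem, using simplified stand-ins for HotSpot's types:

    #include <cstdio>

    struct Register { int encoding; };

    struct RegisterOrConstant {
      long _value;
      bool _is_register;
      RegisterOrConstant(long value) : _value(value), _is_register(false) {}
      RegisterOrConstant(Register*)  : _value(0),     _is_register(true)  {}
    };

    static RegisterOrConstant constant(int value) {
      return RegisterOrConstant(value);   // force the integer overload
    }

    int main() {
      // RegisterOrConstant roc = 0;      // error: ambiguous (long vs Register*)
      RegisterOrConstant roc = constant(0);
      std::printf("is_register=%d value=%ld\n", roc._is_register, roc._value);
      return 0;
    }
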
46 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
47 address interpreted_entry) {
48 // Just before the actual machine code entry point, allocate space
49 // for a MethodHandleEntry::Data record, so that we can manage everything
50 // from one base pointer.
51 __ align(wordSize);
52 address target = __ pc() + sizeof(Data);
53 while (__ pc() < target) {
54 __ nop();
55 __ align(wordSize);
56 }
57
58 MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
59 me->set_end_address(__ pc()); // set a temporary end_address
60 me->set_from_interpreted_entry(interpreted_entry);
61 me->set_type_checking_entry(NULL);
62
63 return (address) me;
64 }
65
66 MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
67 address start_addr) {
68 MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
69 assert(me->end_address() == start_addr, "valid ME");
70
71 // Fill in the real end_address:
72 __ align(wordSize);
73 me->set_end_address(__ pc());
74
75 return me;
76 }
77
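start_compiled_entry/finish_compiled_entry reserve a data record directly in front of the code entry point, so a single base pointer reaches both the record and the code behind it. A hedged standalone sketch of that layout trick (EntryData is a stand-in for MethodHandleEntry::Data, not the real type):

    #include <cstdint>
    #include <cstdio>

    // Metadata that lives immediately before the code it describes.
    struct EntryData {
      void* end_address;
      void* from_interpreted_entry;
      void* type_checking_entry;
    };

    int main() {
      alignas(alignof(void*)) uint8_t blob[128];   // pretend code buffer
      uint8_t* pc = blob;

      EntryData* me = reinterpret_cast<EntryData*>(pc);
      pc += sizeof(EntryData);              // machine code would start here
      me->end_address            = pc;      // temporary end, patched later
      me->from_interpreted_entry = nullptr;
      me->type_checking_entry    = nullptr;

      pc += 16;                             // pretend 16 bytes were emitted
      me->end_address = pc;                 // the "finish" step

      std::printf("record=%p entry=%p end=%p\n",
                  (void*)me, (void*)(me + 1), me->end_address);
      return 0;
    }
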
78 // stack walking support
79
80 frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
81 RicochetFrame* f = RicochetFrame::from_frame(fr);
82 if (map->update_map())
83 frame::update_map_with_saved_link(map, &f->_sender_link);
84 return frame(f->extended_sender_sp(), f->exact_sender_sp(), f->sender_link(), f->sender_pc());
85 }
86
87 void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
88 RicochetFrame* f = RicochetFrame::from_frame(fr);
89
90 // pick up the argument type descriptor:
91 Thread* thread = Thread::current();
92 Handle cookie(thread, f->compute_saved_args_layout(true, true));
93
94 // process fixed part
95 blk->do_oop((oop*)f->saved_target_addr());
96 blk->do_oop((oop*)f->saved_args_layout_addr());
97
98 // process variable arguments:
99 if (cookie.is_null()) return; // no arguments to describe
100
101 // the cookie is actually the invokeExact method for my target
102 // its argument signature is what I'm interested in
103 assert(cookie->is_method(), "");
104 methodHandle invoker(thread, methodOop(cookie()));
105 assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
106 assert(!invoker->is_static(), "must have MH argument");
107 int slot_count = invoker->size_of_parameters();
108 assert(slot_count >= 1, "must include 'this'");
109 intptr_t* base = f->saved_args_base();
110 intptr_t* retval = NULL;
111 if (f->has_return_value_slot())
112 retval = f->return_value_slot_addr();
113 int slot_num = slot_count;
114 intptr_t* loc = &base[slot_num -= 1];
115 //blk->do_oop((oop*) loc); // original target, which is irrelevant
116 int arg_num = 0;
117 for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
118 if (ss.at_return_type()) continue;
119 BasicType ptype = ss.type();
120 if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT
121 assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
122 loc = &base[slot_num -= type2size[ptype]];
123 bool is_oop = (ptype == T_OBJECT && loc != retval);
124 if (is_oop) blk->do_oop((oop*)loc);
125 arg_num += 1;
126 }
127 assert(slot_num == 0, "must have processed all the arguments");
128 }
129
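The slot arithmetic in the oops_do loop above, restated as a runnable sketch; a made-up one-slot/two-slot type model replaces the real SignatureStream:

    #include <cstdio>

    enum BasicType { T_INT, T_LONG, T_OBJECT };
    static int type2size(BasicType t) { return (t == T_LONG) ? 2 : 1; }

    int main() {
      // Declared parameter types; the target MH itself takes one extra slot.
      BasicType params[] = { T_INT, T_LONG, T_OBJECT };
      int slot_count = 1;                          // 'this' (the target)
      for (BasicType t : params) slot_count += type2size(t);

      int slot_num = slot_count;
      slot_num -= 1;                               // skip the target slot
      for (BasicType t : params) {
        slot_num -= type2size(t);                  // walk down the stack
        std::printf("slot %d: %s\n", slot_num,
                    (t == T_OBJECT) ? "oop (visited)" : "primitive (skipped)");
      }
      return slot_num == 0 ? 0 : 1;                // all slots accounted for
    }
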
130 oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
131 oop cookie = NULL;
132 if (read_cache) {
133 cookie = saved_args_layout();
134 if (cookie != NULL) return cookie;
135 }
136 oop target = saved_target();
137 oop mtype = java_lang_invoke_MethodHandle::type(target);
138 oop mtform = java_lang_invoke_MethodType::form(mtype);
139 cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
140 if (write_cache) {
141 (*saved_args_layout_addr()) = cookie;
142 }
143 return cookie;
144 }
145
146 void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
147 // output params:
148 int* bounce_offset,
149 int* exception_offset,
150 int* frame_size_in_words) {
151 (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
152
153 address start = __ pc();
154
155 #ifdef ASSERT
156 __ hlt(); __ hlt(); __ hlt();
157 // here's a hint of something special:
158 __ push(MAGIC_NUMBER_1);
159 __ push(MAGIC_NUMBER_2);
160 #endif //ASSERT
161 __ hlt(); // not reached
162
163 // A return PC has just been popped from the stack.
164 // Return values are in registers.
165 // The ebp points into the RicochetFrame, which contains
166 // a cleanup continuation we must return to.
167
168 (*bounce_offset) = __ pc() - start;
169 BLOCK_COMMENT("ricochet_blob.bounce");
170
171 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
172 trace_method_handle(_masm, "return/ricochet_blob.bounce");
173
174 __ jmp(frame_address(continuation_offset_in_bytes()));
175 __ hlt();
176 DEBUG_ONLY(__ push(MAGIC_NUMBER_2));
177
178 (*exception_offset) = __ pc() - start;
179 BLOCK_COMMENT("ricochet_blob.exception");
180
181 // compare this to Interpreter::rethrow_exception_entry, which is parallel code
182 // for example, see TemplateInterpreterGenerator::generate_throw_exception
183 // Live registers in:
184 // rax: exception
185 // rdx: return address/pc that threw exception (ignored, always equal to bounce addr)
186 __ verify_oop(rax);
187
188 // no need to empty_FPU_stack or reinit_heapbase, since caller frame will do the same if needed
189
190 // Take down the frame.
191
192 // Cf. InterpreterMacroAssembler::remove_activation.
193 leave_ricochet_frame(_masm, /*rcx_recv=*/ noreg,
194 saved_last_sp_register(),
195 /*sender_pc_reg=*/ rdx);
196
197 // In between activations - previous activation type unknown yet
198 // compute continuation point - the continuation point expects the
199 // following registers set up:
200 //
201 // rax: exception
202 // rdx: return address/pc that threw exception
203 // rsp: expression stack of caller
204 // rbp: ebp of caller
205 __ push(rax); // save exception
206 __ push(rdx); // save return address
207 Register thread_reg = LP64_ONLY(r15_thread) NOT_LP64(rdi);
208 NOT_LP64(__ get_thread(thread_reg));
209 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
210 SharedRuntime::exception_handler_for_return_address),
211 thread_reg, rdx);
212 __ mov(rbx, rax); // save exception handler
213 __ pop(rdx); // restore return address
214 __ pop(rax); // restore exception
215 __ jmp(rbx); // jump to exception
216 // handler of caller
217 }
218
219 void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
220 Register rcx_recv,
221 Register rax_argv,
222 address return_handler,
223 Register rbx_temp) {
224 const Register saved_last_sp = saved_last_sp_register();
225 Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
226 Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
227
228 // Push the RicochetFrame a word at a time.
229 // This creates something similar to an interpreter frame.
230 // Cf. TemplateInterpreterGenerator::generate_fixed_frame.
231 BLOCK_COMMENT("push RicochetFrame {");
232 DEBUG_ONLY(int rfo = (int) sizeof(RicochetFrame));
233 assert((rfo -= wordSize) == RicochetFrame::sender_pc_offset_in_bytes(), "");
234 #define RF_FIELD(push_value, name) \
235 { push_value; \
236 assert((rfo -= wordSize) == RicochetFrame::name##_offset_in_bytes(), ""); }
237 RF_FIELD(__ push(rbp), sender_link);
238 RF_FIELD(__ push(saved_last_sp), exact_sender_sp); // rsi/r13
239 RF_FIELD(__ pushptr(rcx_amh_conversion), conversion);
240 RF_FIELD(__ push(rax_argv), saved_args_base); // can be updated if args are shifted
241 RF_FIELD(__ push((int32_t) NULL_WORD), saved_args_layout); // cache for GC layout cookie
242 if (UseCompressedOops) {
243 __ load_heap_oop(rbx_temp, rcx_mh_vmtarget);
244 RF_FIELD(__ push(rbx_temp), saved_target);
245 } else {
246 RF_FIELD(__ pushptr(rcx_mh_vmtarget), saved_target);
247 }
248 __ lea(rbx_temp, ExternalAddress(return_handler));
249 RF_FIELD(__ push(rbx_temp), continuation);
250 #undef RF_FIELD
251 assert(rfo == 0, "fully initialized the RicochetFrame");
252 // compute new frame pointer:
253 __ lea(rbp, Address(rsp, RicochetFrame::sender_link_offset_in_bytes()));
254 // Push guard word #1 in debug mode.
255 DEBUG_ONLY(__ push((int32_t) RicochetFrame::MAGIC_NUMBER_1));
256 // For debugging, leave behind an indication of which stub built this frame.
257 DEBUG_ONLY({ Label L; __ call(L, relocInfo::none); __ bind(L); });
258 BLOCK_COMMENT("} RicochetFrame");
259 }
260
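What the RF_FIELD bookkeeping verifies, in runnable form: pushing fields from the highest struct offset down to the lowest reproduces the struct's in-memory layout, because the stack grows downward. The struct below is a simplified mirror, not the real RicochetFrame:

    #include <cstddef>
    #include <cstdio>

    struct FrameSketch {           // lowest address first, like RicochetFrame
      void* _continuation;         // pushed last
      void* _saved_target;
      void* _saved_args_layout;
      void* _saved_args_base;
      void* _conversion;
      void* _exact_sender_sp;
      void* _sender_link;
      void* _sender_pc;            // highest address: return PC, on stack first
    };

    int main() {
      const char* push_order[] = {
        "sender_pc", "sender_link", "exact_sender_sp", "conversion",
        "saved_args_base", "saved_args_layout", "saved_target", "continuation"
      };
      size_t rfo = sizeof(FrameSketch);
      for (const char* name : push_order) {
        rfo -= sizeof(void*);      // the running "rfo -= wordSize" check
        std::printf("push %-18s -> offset %2zu\n", name, rfo);
      }
      return rfo == 0 ? 0 : 1;     // "fully initialized the RicochetFrame"
    }
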
261 void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
262 Register rcx_recv,
263 Register new_sp_reg,
264 Register sender_pc_reg) {
265 assert_different_registers(rcx_recv, new_sp_reg, sender_pc_reg);
266 const Register saved_last_sp = saved_last_sp_register();
267 // Take down the frame.
268 // Cf. InterpreterMacroAssembler::remove_activation.
269 BLOCK_COMMENT("end_ricochet_frame {");
270 // TO DO: If (exact_sender_sp - extended_sender_sp) > THRESH, compact the frame down.
271 // This will keep stack in bounds even with unlimited tailcalls, each with an adapter.
272 if (rcx_recv->is_valid())
273 __ movptr(rcx_recv, RicochetFrame::frame_address(RicochetFrame::saved_target_offset_in_bytes()));
274 __ movptr(sender_pc_reg, RicochetFrame::frame_address(RicochetFrame::sender_pc_offset_in_bytes()));
275 __ movptr(saved_last_sp, RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes()));
276 __ movptr(rbp, RicochetFrame::frame_address(RicochetFrame::sender_link_offset_in_bytes()));
277 __ mov(rsp, new_sp_reg);
278 BLOCK_COMMENT("} end_ricochet_frame");
279 }
280
281 // Emit code to verify that RBP is pointing at a valid ricochet frame.
282 #ifndef PRODUCT
283 enum {
284 ARG_LIMIT = 255, SLOP = 4,
285 // use this parameter for checking for garbage stack movements:
286 UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
287 // the slop defends against false alarms due to fencepost errors
288 };
289 #endif
290
291 #ifdef ASSERT
292 void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
293 // The stack should look like this:
294 // ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
295 // Check various invariants.
296 verify_offsets();
297
298 Register rdi_temp = rdi;
299 Register rcx_temp = rcx;
300 { __ push(rdi_temp); __ push(rcx_temp); }
301 #define UNPUSH_TEMPS \
302 { __ pop(rcx_temp); __ pop(rdi_temp); }
303
304 Address magic_number_1_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_1_offset_in_bytes());
305 Address magic_number_2_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_2_offset_in_bytes());
306 Address continuation_addr = RicochetFrame::frame_address(RicochetFrame::continuation_offset_in_bytes());
307 Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
308 Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
309
310 Label L_bad, L_ok;
311 BLOCK_COMMENT("verify_clean {");
312 // Magic numbers must check out:
313 __ cmpptr(magic_number_1_addr, (int32_t) MAGIC_NUMBER_1);
314 __ jcc(Assembler::notEqual, L_bad);
315 __ cmpptr(magic_number_2_addr, (int32_t) MAGIC_NUMBER_2);
316 __ jcc(Assembler::notEqual, L_bad);
317
318 // Arguments pointer must look reasonable:
319 __ movptr(rcx_temp, saved_args_base_addr);
320 __ cmpptr(rcx_temp, rbp);
321 __ jcc(Assembler::below, L_bad);
322 __ subptr(rcx_temp, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize);
323 __ cmpptr(rcx_temp, rbp);
324 __ jcc(Assembler::above, L_bad);
325
326 load_conversion_dest_type(_masm, rdi_temp, conversion_addr);
327 __ cmpl(rdi_temp, T_VOID);
328 __ jcc(Assembler::equal, L_ok);
329 __ movptr(rcx_temp, saved_args_base_addr);
330 load_conversion_vminfo(_masm, rdi_temp, conversion_addr);
331 __ cmpptr(Address(rcx_temp, rdi_temp, Interpreter::stackElementScale()),
332 (int32_t) RETURN_VALUE_PLACEHOLDER);
333 __ jcc(Assembler::equal, L_ok);
334 __ BIND(L_bad);
335 UNPUSH_TEMPS;
336 __ stop("damaged ricochet frame");
337 __ BIND(L_ok);
338 UNPUSH_TEMPS;
339 BLOCK_COMMENT("} verify_clean");
340
341 #undef UNPUSH_TEMPS
342
343 }
344 #endif //ASSERT
345
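The MAGIC_NUMBER tests in verify_clean amount to canary words bracketing the frame. A minimal sketch of the idea, with made-up magic values (not HotSpot's):

    #include <cstdint>
    #include <cstdio>

    static const uintptr_t MAGIC_1 = 0xfeed1234;   // assumed values
    static const uintptr_t MAGIC_2 = 0xbeef5678;

    struct GuardedFrame {
      uintptr_t magic1;
      long      payload[4];
      uintptr_t magic2;
    };

    static bool verify_clean(const GuardedFrame* f) {
      return f->magic1 == MAGIC_1 && f->magic2 == MAGIC_2;
    }

    int main() {
      GuardedFrame f = { MAGIC_1, {0, 0, 0, 0}, MAGIC_2 };
      std::printf("clean=%d\n", verify_clean(&f));   // 1
      f.magic2 ^= 1;                // simulate a smashed guard word
      std::printf("clean=%d\n", verify_clean(&f));   // 0: "damaged ricochet frame"
      return 0;
    }
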
346 void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) { 48 void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
347 if (VerifyMethodHandles) 49 if (VerifyMethodHandles)
348 verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), 50 verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(),
349 "AMH argument is a Class"); 51 "MH argument is a Class");
350 __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes())); 52 __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
351 } 53 }
352 54
353 void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
354 int bits = BitsPerByte;
355 int offset = (CONV_VMINFO_SHIFT / bits);
356 int shift = (CONV_VMINFO_SHIFT % bits);
357 __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
358 assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load");
359 assert(shift == 0, "no shift needed");
360 }
361
362 void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
363 int bits = BitsPerByte;
364 int offset = (CONV_DEST_TYPE_SHIFT / bits);
365 int shift = (CONV_DEST_TYPE_SHIFT % bits);
366 __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
367 assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load");
368 __ shrl(reg, shift);
369 DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1));
370 assert((shift + conv_type_bits) == bits, "left justified in byte");
371 }
372
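Both loaders use the same trick: a bit field can be fetched with a single unsigned byte load at the field's byte offset, followed by an in-register shift only when the field is not byte-aligned. A standalone decode with illustrative field positions (not the real CONV_* constants):

    #include <cstdint>
    #include <cstdio>

    static const int VMINFO_SHIFT    = 0;    // assumed byte-aligned field
    static const int DEST_TYPE_SHIFT = 12;   // assumed bits 12..15

    static uint8_t load_unsigned_byte(uint32_t word, int byte_offset) {
      return (uint8_t)(word >> (byte_offset * 8));
    }

    int main() {
      uint32_t conversion = 0x00A5B0C7;   // arbitrary packed conversion word

      // shift % 8 == 0: the byte load alone suffices (no shrl needed)
      uint8_t vminfo = load_unsigned_byte(conversion, VMINFO_SHIFT / 8);

      // otherwise: byte load plus an in-register shift, as with dest_type
      uint8_t dest = load_unsigned_byte(conversion, DEST_TYPE_SHIFT / 8)
                     >> (DEST_TYPE_SHIFT % 8);

      std::printf("vminfo=0x%02x dest=0x%x\n", vminfo, dest);
      return 0;
    }
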
373 void MethodHandles::load_stack_move(MacroAssembler* _masm,
374 Register rdi_stack_move,
375 Register rcx_amh,
376 bool might_be_negative) {
377 BLOCK_COMMENT("load_stack_move {");
378 Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
379 __ movl(rdi_stack_move, rcx_amh_conversion);
380 __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
381 #ifdef _LP64
382 if (might_be_negative) {
383 // clean high bits of stack motion register (was loaded as an int)
384 __ movslq(rdi_stack_move, rdi_stack_move);
385 }
386 #endif //_LP64
387 #ifdef ASSERT 55 #ifdef ASSERT
388 if (VerifyMethodHandles) { 56 static int check_nonzero(const char* xname, int x) {
389 Label L_ok, L_bad; 57 assert(x != 0, err_msg("%s should be nonzero", xname));
390 int32_t stack_move_limit = 0x4000; // extra-large 58 return x;
391 __ cmpptr(rdi_stack_move, stack_move_limit); 59 }
392 __ jcc(Assembler::greaterEqual, L_bad); 60 #define NONZERO(x) check_nonzero(#x, x)
393 __ cmpptr(rdi_stack_move, -stack_move_limit); 61 #else //ASSERT
394 __ jcc(Assembler::greater, L_ok); 62 #define NONZERO(x) (x)
395 __ bind(L_bad); 63 #endif //ASSERT
396 __ stop("load_stack_move of garbage value");
397 __ BIND(L_ok);
398 }
399 #endif
400 BLOCK_COMMENT("} load_stack_move");
401 }
402 64
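The NONZERO wrapper introduced on the right-hand side guards runtime-computed field offsets: in debug builds a zero offset (an unresolved field) aborts with a message, and in product builds the macro compiles away. A sketch of the same pattern; DEBUG_BUILD and the offset function are inventions for the example:

    #include <cstdio>
    #include <cstdlib>

    #ifdef DEBUG_BUILD            // stand-in for HotSpot's ASSERT define
    static int check_nonzero(const char* xname, int x) {
      if (x == 0) {
        std::fprintf(stderr, "%s should be nonzero\n", xname);
        std::abort();             // offset was never resolved
      }
      return x;
    }
    #define NONZERO(x) check_nonzero(#x, x)
    #else
    #define NONZERO(x) (x)        // product build: zero cost
    #endif

    static int form_offset_in_bytes() { return 16; }  // assumed example offset

    int main() {
      int off = NONZERO(form_offset_in_bytes());
      std::printf("offset=%d\n", off);
      return 0;
    }
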
403 #ifdef ASSERT 65 #ifdef ASSERT
404 void MethodHandles::RicochetFrame::verify_offsets() {
405 // Check compatibility of this struct with the more generally used offsets of class frame:
406 int ebp_off = sender_link_offset_in_bytes(); // offset from struct base to local rbp value
407 assert(ebp_off + wordSize*frame::interpreter_frame_method_offset == saved_args_base_offset_in_bytes(), "");
408 assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset == conversion_offset_in_bytes(), "");
409 assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset == exact_sender_sp_offset_in_bytes(), "");
410 // These last two have to be exact:
411 assert(ebp_off + wordSize*frame::link_offset == sender_link_offset_in_bytes(), "");
412 assert(ebp_off + wordSize*frame::return_addr_offset == sender_pc_offset_in_bytes(), "");
413 }
414
415 void MethodHandles::RicochetFrame::verify() const {
416 verify_offsets();
417 assert(magic_number_1() == MAGIC_NUMBER_1, err_msg(PTR_FORMAT " == " PTR_FORMAT, magic_number_1(), MAGIC_NUMBER_1));
418 assert(magic_number_2() == MAGIC_NUMBER_2, err_msg(PTR_FORMAT " == " PTR_FORMAT, magic_number_2(), MAGIC_NUMBER_2));
419 if (!Universe::heap()->is_gc_active()) {
420 if (saved_args_layout() != NULL) {
421 assert(saved_args_layout()->is_method(), "must be valid oop");
422 }
423 if (saved_target() != NULL) {
424 assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value");
425 }
426 }
427 int conv_op = adapter_conversion_op(conversion());
428 assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS ||
429 conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS ||
430 conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF,
431 "must be a sane conversion");
432 if (has_return_value_slot()) {
433 assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, "");
434 }
435 }
436 #endif //ASSERT
437
438 #ifdef ASSERT
439 void MethodHandles::verify_argslot(MacroAssembler* _masm,
440 Register argslot_reg,
441 const char* error_message) {
442 // Verify that argslot lies within (rsp, rbp].
443 Label L_ok, L_bad;
444 BLOCK_COMMENT("verify_argslot {");
445 __ cmpptr(argslot_reg, rbp);
446 __ jccb(Assembler::above, L_bad);
447 __ cmpptr(rsp, argslot_reg);
448 __ jccb(Assembler::below, L_ok);
449 __ bind(L_bad);
450 __ stop(error_message);
451 __ BIND(L_ok);
452 BLOCK_COMMENT("} verify_argslot");
453 }
454
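verify_argslot's interval test, restated in plain C++; array slots stand in for real stack addresses, and since the stack grows downward a valid in-frame slot lies within (rsp, rbp]:

    #include <cstdio>

    static bool argslot_in_frame(const long* p, const long* rsp, const long* rbp) {
      return rsp < p && p <= rbp;
    }

    int main() {
      long frame[16];
      long* rbp = &frame[15];   // frame base (highest address)
      long* rsp = &frame[4];    // top of stack (lowest address)
      std::printf("inside=%d below_rsp=%d\n",
                  argslot_in_frame(&frame[8], rsp, rbp),
                  argslot_in_frame(&frame[2], rsp, rbp));
      return 0;
    }
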
455 void MethodHandles::verify_argslots(MacroAssembler* _masm,
456 RegisterOrConstant arg_slots,
457 Register arg_slot_base_reg,
458 bool negate_argslots,
459 const char* error_message) {
460 // Verify that [argslot..argslot+size) lies within (rsp, rbp).
461 Label L_ok, L_bad;
462 Register rdi_temp = rdi;
463 BLOCK_COMMENT("verify_argslots {");
464 __ push(rdi_temp);
465 if (negate_argslots) {
466 if (arg_slots.is_constant()) {
467 arg_slots = -1 * arg_slots.as_constant();
468 } else {
469 __ movptr(rdi_temp, arg_slots);
470 __ negptr(rdi_temp);
471 arg_slots = rdi_temp;
472 }
473 }
474 __ lea(rdi_temp, Address(arg_slot_base_reg, arg_slots, Interpreter::stackElementScale()));
475 __ cmpptr(rdi_temp, rbp);
476 __ pop(rdi_temp);
477 __ jcc(Assembler::above, L_bad);
478 __ cmpptr(rsp, arg_slot_base_reg);
479 __ jcc(Assembler::below, L_ok);
480 __ bind(L_bad);
481 __ stop(error_message);
482 __ BIND(L_ok);
483 BLOCK_COMMENT("} verify_argslots");
484 }
485
486 // Make sure that arg_slots has the same sign as the given direction.
487 // If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
488 void MethodHandles::verify_stack_move(MacroAssembler* _masm,
489 RegisterOrConstant arg_slots, int direction) {
490 bool allow_zero = arg_slots.is_constant();
491 if (direction == 0) { direction = +1; allow_zero = true; }
492 assert(stack_move_unit() == -1, "else add extra checks here");
493 if (arg_slots.is_register()) {
494 Label L_ok, L_bad;
495 BLOCK_COMMENT("verify_stack_move {");
496 // testl(arg_slots.as_register(), -stack_move_unit() - 1); // no need
497 // jcc(Assembler::notZero, L_bad);
498 __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
499 if (direction > 0) {
500 __ jcc(allow_zero ? Assembler::less : Assembler::lessEqual, L_bad);
501 __ cmpptr(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
502 __ jcc(Assembler::less, L_ok);
503 } else {
504 __ jcc(allow_zero ? Assembler::greater : Assembler::greaterEqual, L_bad);
505 __ cmpptr(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
506 __ jcc(Assembler::greater, L_ok);
507 }
508 __ bind(L_bad);
509 if (direction > 0)
510 __ stop("assert arg_slots > 0");
511 else
512 __ stop("assert arg_slots < 0");
513 __ BIND(L_ok);
514 BLOCK_COMMENT("} verify_stack_move");
515 } else {
516 intptr_t size = arg_slots.as_constant();
517 if (direction < 0) size = -size;
518 assert(size >= 0, "correct direction of constant move");
519 assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
520 }
521 }
522
523 void MethodHandles::verify_klass(MacroAssembler* _masm, 66 void MethodHandles::verify_klass(MacroAssembler* _masm,
524 Register obj, KlassHandle klass, 67 Register obj, KlassHandle klass,
525 const char* error_message) { 68 const char* error_message) {
526 oop* klass_addr = klass.raw_value(); 69 oop* klass_addr = klass.raw_value();
527 assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() && 70 assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
528 klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(), 71 klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
529 "must be one of the SystemDictionaryHandles"); 72 "must be one of the SystemDictionaryHandles");
530 Register temp = rdi; 73 Register temp = rdi;
74 Register temp2 = noreg;
75 LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr
531 Label L_ok, L_bad; 76 Label L_ok, L_bad;
532 BLOCK_COMMENT("verify_klass {"); 77 BLOCK_COMMENT("verify_klass {");
533 __ verify_oop(obj); 78 __ verify_oop(obj);
534 __ testptr(obj, obj); 79 __ testptr(obj, obj);
535 __ jcc(Assembler::zero, L_bad); 80 __ jcc(Assembler::zero, L_bad);
536 __ push(temp); 81 __ push(temp); if (temp2 != noreg) __ push(temp2);
82 #define UNPUSH { if (temp2 != noreg) __ pop(temp2); __ pop(temp); }
537 __ load_klass(temp, obj); 83 __ load_klass(temp, obj);
538 __ cmpptr(temp, ExternalAddress((address) klass_addr)); 84 __ cmpptr(temp, ExternalAddress((address) klass_addr));
539 __ jcc(Assembler::equal, L_ok); 85 __ jcc(Assembler::equal, L_ok);
540 intptr_t super_check_offset = klass->super_check_offset(); 86 intptr_t super_check_offset = klass->super_check_offset();
541 __ movptr(temp, Address(temp, super_check_offset)); 87 __ movptr(temp, Address(temp, super_check_offset));
542 __ cmpptr(temp, ExternalAddress((address) klass_addr)); 88 __ cmpptr(temp, ExternalAddress((address) klass_addr));
543 __ jcc(Assembler::equal, L_ok); 89 __ jcc(Assembler::equal, L_ok);
544 __ pop(temp); 90 UNPUSH;
545 __ bind(L_bad); 91 __ bind(L_bad);
546 __ stop(error_message); 92 __ STOP(error_message);
547 __ BIND(L_ok); 93 __ BIND(L_ok);
548 __ pop(temp); 94 UNPUSH;
549 BLOCK_COMMENT("} verify_klass"); 95 BLOCK_COMMENT("} verify_klass");
550 } 96 }
97
98 void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
99 Label L;
100 BLOCK_COMMENT("verify_ref_kind {");
101 __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
102 __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
103 __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
104 __ cmpl(temp, ref_kind);
105 __ jcc(Assembler::equal, L);
106 { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
107 jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
108 if (ref_kind == JVM_REF_invokeVirtual ||
109 ref_kind == JVM_REF_invokeSpecial)
110 // could do this for all ref_kinds, but would explode assembly code size
111 trace_method_handle(_masm, buf);
112 __ STOP(buf);
113 }
114 BLOCK_COMMENT("} verify_ref_kind");
115 __ bind(L);
116 }
117
551 #endif //ASSERT 118 #endif //ASSERT
552 119
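verify_ref_kind's flags decode, written out in plain C++. The shift and mask mirror MN_REFERENCE_KIND_SHIFT/_MASK; treat the concrete values as assumptions of this sketch:

    #include <cstdio>

    static const int REFERENCE_KIND_SHIFT  = 24;
    static const int REFERENCE_KIND_MASK   = 0x0F;
    static const int JVM_REF_invokeVirtual = 5;

    static int ref_kind_of(int flags) {
      return (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK;
    }

    int main() {
      int flags = (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT) | 0x1234;
      std::printf("ref_kind=%d (expect %d)\n",
                  ref_kind_of(flags), JVM_REF_invokeVirtual);
      return 0;
    }
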
553 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp) { 120 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
554 if (JvmtiExport::can_post_interpreter_events()) { 121 bool for_compiler_entry) {
122 assert(method == rbx, "interpreter calling convention");
123 __ verify_oop(method);
124
125 if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
555 Label run_compiled_code; 126 Label run_compiled_code;
556 // JVMTI events, such as single-stepping, are implemented partly by avoiding running 127 // JVMTI events, such as single-stepping, are implemented partly by avoiding running
557 // compiled code in threads for which the event is enabled. Check here for 128 // compiled code in threads for which the event is enabled. Check here for
558 // interp_only_mode if these events CAN be enabled. 129 // interp_only_mode if these events CAN be enabled.
559 #ifdef _LP64 130 #ifdef _LP64
565 // interp_only is an int, on little endian it is sufficient to test the byte only 136 // interp_only is an int, on little endian it is sufficient to test the byte only
566 // Is a cmpl faster? 137 // Is a cmpl faster?
567 __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0); 138 __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
568 __ jccb(Assembler::zero, run_compiled_code); 139 __ jccb(Assembler::zero, run_compiled_code);
569 __ jmp(Address(method, methodOopDesc::interpreter_entry_offset())); 140 __ jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
570 __ bind(run_compiled_code); 141 __ BIND(run_compiled_code);
571 } 142 }
572 __ jmp(Address(method, methodOopDesc::from_interpreted_offset())); 143
573 } 144 const ByteSize entry_offset = for_compiler_entry ? methodOopDesc::from_compiled_offset() :
145 methodOopDesc::from_interpreted_offset();
146 __ jmp(Address(method, entry_offset));
147 }
148
149 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
150 Register recv, Register method_temp,
151 Register temp2,
152 bool for_compiler_entry) {
153 BLOCK_COMMENT("jump_to_lambda_form {");
154 // This is the initial entry point of a lazy method handle.
155 // After type checking, it picks up the invoker from the LambdaForm.
156 assert_different_registers(recv, method_temp, temp2);
157 assert(recv != noreg, "required register");
158 assert(method_temp == rbx, "required register for loading method");
159
160 //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });
161
162 // Load the invoker, as MH -> MH.form -> LF.vmentry
163 __ verify_oop(recv);
164 __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
165 __ verify_oop(method_temp);
166 __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
167 __ verify_oop(method_temp);
168 // the following assumes that a methodOop is normally compressed in the vmtarget field:
169 __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
170 __ verify_oop(method_temp);
171
172 if (VerifyMethodHandles && !for_compiler_entry) {
173 // make sure recv is already on stack
174 __ load_sized_value(temp2,
175 Address(method_temp, methodOopDesc::size_of_parameters_offset()),
176 sizeof(u2), /*is_signed*/ false);
177 // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), "");
178 Label L;
179 __ cmpptr(recv, __ argument_address(temp2, -1));
180 __ jcc(Assembler::equal, L);
181 __ movptr(rax, __ argument_address(temp2, -1));
182 __ STOP("receiver not on stack");
183 __ BIND(L);
184 }
185
186 jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
187 BLOCK_COMMENT("} jump_to_lambda_form");
188 }
189
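The load sequence in jump_to_lambda_form is a three-hop pointer chase. A sketch with a hypothetical plain-C++ object model in place of the real oop layout; each hop corresponds to one load_heap_oop above:

    #include <cstdio>

    struct Method       { const char* name; };
    struct MemberName   { Method* vmtarget; };
    struct LambdaForm   { MemberName* vmentry; };
    struct MethodHandle { LambdaForm* form; };

    static Method* resolve_invoker(MethodHandle* mh) {
      return mh->form->vmentry->vmtarget;   // MH -> form -> vmentry -> vmtarget
    }

    int main() {
      Method       m  = { "invokeBasic target" };
      MemberName   mn = { &m };
      LambdaForm   lf = { &mn };
      MethodHandle mh = { &lf };
      std::printf("dispatching to %s\n", resolve_invoker(&mh)->name);
      return 0;
    }
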
574 190
575 // Code generation 191 // Code generation
576 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { 192 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
193 vmIntrinsics::ID iid) {
194 const bool not_for_compiler_entry = false; // this is the interpreter entry
195 assert(is_signature_polymorphic(iid), "expected invoke iid");
196 if (iid == vmIntrinsics::_invokeGeneric ||
197 iid == vmIntrinsics::_compiledLambdaForm) {
198 // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
199 // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
200 // They all allow an appendix argument.
201 __ hlt(); // empty stubs make SG sick
202 return NULL;
203 }
204
205 // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
577 // rbx: methodOop 206 // rbx: methodOop
578 // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots]) 207 // rdx: argument locator (parameter slot count, added to rsp)
579 // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) 208 // rcx: used as temp to hold mh or receiver
580 // rdx, rdi: garbage temp, blown away 209 // rax, rdi: garbage temps, blown away
581 210 Register rdx_argp = rdx; // argument list ptr, live on error paths
582 Register rbx_method = rbx; 211 Register rax_temp = rax;
583 Register rcx_recv = rcx; 212 Register rcx_mh = rcx; // MH receiver; dies quickly and is recycled
584 Register rax_mtype = rax; 213 Register rbx_method = rbx; // eventual target of this invocation
585 Register rdx_temp = rdx; 214
586 Register rdi_temp = rdi; 215 address code_start = __ pc();
587
588 // emit WrongMethodType path first, to enable jccb back-branch from main path
589 Label wrong_method_type;
590 __ bind(wrong_method_type);
591 Label invoke_generic_slow_path, invoke_exact_error_path;
592 assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
593 __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
594 __ jcc(Assembler::notEqual, invoke_generic_slow_path);
595 __ jmp(invoke_exact_error_path);
596 216
597 // here's where control starts out: 217 // here's where control starts out:
598 __ align(CodeEntryAlignment); 218 __ align(CodeEntryAlignment);
599 address entry_point = __ pc(); 219 address entry_point = __ pc();
600 220
601 // fetch the MethodType from the method handle into rax (the 'check' register)
602 // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
603 // This would simplify several touchy bits of code.
604 // See 6984712: JSR 292 method handle calls need a clean argument base pointer
605 {
606 Register tem = rbx_method;
607 for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
608 __ movptr(rax_mtype, Address(tem, *pchase));
609 tem = rax_mtype; // in case there is another indirection
610 }
611 }
612
613 // given the MethodType, find out where the MH argument is buried
614 __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
615 Register rdx_vmslots = rdx_temp;
616 __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
617 Address mh_receiver_slot_addr = __ argument_address(rdx_vmslots);
618 __ movptr(rcx_recv, mh_receiver_slot_addr);
619
620 trace_method_handle(_masm, "invokeExact");
621
622 __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
623
624 // Nobody uses the MH receiver slot after this. Make sure.
625 DEBUG_ONLY(__ movptr(mh_receiver_slot_addr, (int32_t)0x999999));
626
627 __ jump_to_method_handle_entry(rcx_recv, rdi_temp);
628
629 // error path for invokeExact (only)
630 __ bind(invoke_exact_error_path);
631 // ensure that the top of stack is properly aligned.
632 __ mov(rdi, rsp);
633 __ andptr(rsp, -StackAlignmentInBytes); // Align the stack for the ABI
634 __ pushptr(Address(rdi, 0)); // Pick up the return address
635
636 // Stub wants expected type in rax and the actual type in rcx
637 __ jump(ExternalAddress(StubRoutines::throw_WrongMethodTypeException_entry()));
638
639 // for invokeGeneric (only), apply argument and result conversions on the fly
640 __ bind(invoke_generic_slow_path);
641 #ifdef ASSERT
642 if (VerifyMethodHandles) { 221 if (VerifyMethodHandles) {
643 Label L; 222 Label L;
644 __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric); 223 BLOCK_COMMENT("verify_intrinsic_id {");
224 __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) iid);
645 __ jcc(Assembler::equal, L); 225 __ jcc(Assembler::equal, L);
646 __ stop("bad methodOop::intrinsic_id"); 226 if (iid == vmIntrinsics::_linkToVirtual ||
227 iid == vmIntrinsics::_linkToSpecial) {
228 // could do this for all kinds, but would explode assembly code size
229 trace_method_handle(_masm, "bad methodOop::intrinsic_id");
230 }
231 __ STOP("bad methodOop::intrinsic_id");
647 __ bind(L); 232 __ bind(L);
648 } 233 BLOCK_COMMENT("} verify_intrinsic_id");
649 #endif //ASSERT 234 }
650 Register rbx_temp = rbx_method; // don't need it now 235
651 236 // First task: Find out how big the argument list is.
652 // make room on the stack for another pointer: 237 Address rdx_first_arg_addr;
653 Register rcx_argslot = rcx_recv; 238 int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
654 __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1)); 239 assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
655 insert_arg_slots(_masm, 2 * stack_move_unit(), 240 if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
656 rcx_argslot, rbx_temp, rdx_temp); 241 __ load_sized_value(rdx_argp,
657 242 Address(rbx_method, methodOopDesc::size_of_parameters_offset()),
658 // load up an adapter from the calling type (Java weaves this) 243 sizeof(u2), /*is_signed*/ false);
659 Register rdx_adapter = rdx_temp; 244 // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), "");
660 __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); 245 rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
661 __ load_heap_oop(rdx_adapter, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); 246 } else {
662 __ verify_oop(rdx_adapter); 247 DEBUG_ONLY(rdx_argp = noreg);
663 __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter); 248 }
664 // As a trusted first argument, pass the type being called, so the adapter knows 249
665 // the actual types of the arguments and return values. 250 if (!is_signature_polymorphic_static(iid)) {
666 // (Generic invokers are shared among form-families of method-type.) 251 __ movptr(rcx_mh, rdx_first_arg_addr);
667 __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype); 252 DEBUG_ONLY(rdx_argp = noreg);
668 // FIXME: assert that rdx_adapter is of the right method-type. 253 }
669 __ mov(rcx, rdx_adapter); 254
670 trace_method_handle(_masm, "invokeGeneric"); 255 // rdx_first_arg_addr is live!
671 __ jump_to_method_handle_entry(rcx, rdi_temp); 256
257 if (TraceMethodHandles) {
258 const char* name = vmIntrinsics::name_at(iid);
259 if (*name == '_') name += 1;
260 const size_t len = strlen(name) + 50;
261 char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal);
262 const char* suffix = "";
263 if (vmIntrinsics::method_for(iid) == NULL ||
264 !vmIntrinsics::method_for(iid)->access_flags().is_public()) {
265 if (is_signature_polymorphic_static(iid))
266 suffix = "/static";
267 else
268 suffix = "/private";
269 }
270 jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix);
271 // note: the stub looks for mh in rcx
272 trace_method_handle(_masm, qname);
273 }
274
275 if (iid == vmIntrinsics::_invokeBasic) {
276 generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);
277
278 } else {
279 // Adjust argument list by popping the trailing MemberName argument.
280 Register rcx_recv = noreg;
281 if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
282 // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
283 __ movptr(rcx_recv = rcx, rdx_first_arg_addr);
284 }
285 DEBUG_ONLY(rdx_argp = noreg);
286 Register rbx_member = rbx_method; // MemberName ptr; incoming method ptr is dead now
287 __ pop(rax_temp); // return address
288 __ pop(rbx_member); // extract last argument
289 __ push(rax_temp); // re-push return address
290 generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
291 }
292
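The pop/pop/push idiom used above to drop the trailing MemberName, simulated on an array; higher index means higher address, matching x86's downward-growing stack:

    #include <cstdio>

    int main() {
      long stack[5] = { 0, 111 /* return pc */, 222 /* MemberName */, 333, 444 };
      int sp = 1;                 // sp points at the top (lowest) slot

      long ret    = stack[sp++];  // pop rax_temp:   return address
      long member = stack[sp++];  // pop rbx_member: the trailing argument
      stack[--sp] = ret;          // re-push return address

      // Net effect: one argument slot removed, return PC back on top.
      std::printf("member=%ld top=%ld slots_removed=%d\n",
                  member, stack[sp], sp - 1);
      return 0;
    }
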
293 if (PrintMethodHandleStubs) {
294 address code_end = __ pc();
295 tty->print_cr("--------");
296 tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid));
297 Disassembler::decode(code_start, code_end);
298 tty->cr();
299 }
672 300
673 return entry_point; 301 return entry_point;
674 } 302 }
675 303
676 // Helper to insert argument slots into the stack. 304 void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
677 // arg_slots must be a multiple of stack_move_unit() and < 0 305 vmIntrinsics::ID iid,
678 // rax_argslot is decremented to point to the new (shifted) location of the argslot 306 Register receiver_reg,
679 // But, rdx_temp ends up holding the original value of rax_argslot. 307 Register member_reg,
680 void MethodHandles::insert_arg_slots(MacroAssembler* _masm, 308 bool for_compiler_entry) {
681 RegisterOrConstant arg_slots, 309 assert(is_signature_polymorphic(iid), "expected invoke iid");
682 Register rax_argslot, 310 Register rbx_method = rbx; // eventual target of this invocation
683 Register rbx_temp, Register rdx_temp) { 311 // temps used in this code are not used in *either* compiled or interpreted calling sequences
684 // allow constant zero 312 #ifdef _LP64
685 if (arg_slots.is_constant() && arg_slots.as_constant() == 0) 313 Register temp1 = rscratch1;
686 return; 314 Register temp2 = rscratch2;
687 assert_different_registers(rax_argslot, rbx_temp, rdx_temp, 315 Register temp3 = rax;
688 (!arg_slots.is_register() ? rsp : arg_slots.as_register())); 316 if (for_compiler_entry) {
689 if (VerifyMethodHandles) 317 assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
690 verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame"); 318 assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
691 if (VerifyMethodHandles) 319 assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
692 verify_stack_move(_masm, arg_slots, -1); 320 assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
693 321 }
694 // Make space on the stack for the inserted argument(s). 322 #else
695 // Then pull down everything shallower than rax_argslot. 323 Register temp1 = (for_compiler_entry ? rsi : rdx);
696 // The stacked return address gets pulled down with everything else. 324 Register temp2 = rdi;
697 // That is, copy [rsp, argslot) downward by -size words. In pseudo-code: 325 Register temp3 = rax;
698 // rsp -= size; 326 if (for_compiler_entry) {
699 // for (rdx = rsp + size; rdx < argslot; rdx++) 327 assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
700 // rdx[-size] = rdx[0] 328 assert_different_registers(temp1, rcx, rdx);
701 // argslot -= size; 329 assert_different_registers(temp2, rcx, rdx);
702 BLOCK_COMMENT("insert_arg_slots {"); 330 assert_different_registers(temp3, rcx, rdx);
703 __ mov(rdx_temp, rsp); // source pointer for copy 331 }
704 __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale())); 332 #endif
705 { 333 assert_different_registers(temp1, temp2, temp3, receiver_reg);
706 Label loop; 334 assert_different_registers(temp1, temp2, temp3, member_reg);
707 __ BIND(loop); 335 if (!for_compiler_entry)
708 // pull one word down each time through the loop 336 assert_different_registers(temp1, temp2, temp3, saved_last_sp_register()); // don't trash lastSP
709 __ movptr(rbx_temp, Address(rdx_temp, 0)); 337
710 __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp); 338 if (iid == vmIntrinsics::_invokeBasic) {
711 __ addptr(rdx_temp, wordSize); 339 // indirect through MH.form.vmentry.vmtarget
712 __ cmpptr(rdx_temp, rax_argslot); 340 jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);
713 __ jcc(Assembler::below, loop); 341
714 }
715
716 // Now move the argslot down, to point to the opened-up space.
717 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale()));
718 BLOCK_COMMENT("} insert_arg_slots");
719 }
720
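The commented pseudo-code of insert_arg_slots, runnable over a plain array with slot indices standing in for stack addresses:

    #include <cstdio>

    int main() {
      // Slots 2..5 are live words; lower index = closer to the stack top.
      long stack[8] = { 0, 0, 10, 20, 30, 40, 0, 0 };
      int rsp = 2, argslot = 5;
      const int size = 1;                       // open one new slot

      rsp -= size;                              // rsp -= size;
      for (int rdx = rsp + size; rdx < argslot; rdx++)
        stack[rdx - size] = stack[rdx];         // rdx[-size] = rdx[0]
      argslot -= size;                          // argslot -= size;

      // Slots 1..3 hold the shifted words; slot 4 is the opened gap.
      for (int i = rsp; i <= 5; i++) std::printf("%ld ", stack[i]);
      std::printf("(gap at slot %d)\n", argslot);
      return 0;
    }
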
721 // Helper to remove argument slots from the stack.
722 // arg_slots must be a multiple of stack_move_unit() and > 0
723 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
724 RegisterOrConstant arg_slots,
725 Register rax_argslot,
726 Register rbx_temp, Register rdx_temp) {
727 // allow constant zero
728 if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
729 return;
730 assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
731 (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
732 if (VerifyMethodHandles)
733 verify_argslots(_masm, arg_slots, rax_argslot, false,
734 "deleted argument(s) must fall within current frame");
735 if (VerifyMethodHandles)
736 verify_stack_move(_masm, arg_slots, +1);
737
738 BLOCK_COMMENT("remove_arg_slots {");
739 // Pull up everything shallower than rax_argslot.
740 // Then remove the excess space on the stack.
741 // The stacked return address gets pulled up with everything else.
742 // That is, copy [rsp, argslot) upward by size words. In pseudo-code:
743 // for (rdx = argslot-1; rdx >= rsp; --rdx)
744 // rdx[size] = rdx[0]
745 // argslot += size;
746 // rsp += size;
747 __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
748 {
749 Label loop;
750 __ BIND(loop);
751 // pull one word up each time through the loop
752 __ movptr(rbx_temp, Address(rdx_temp, 0));
753 __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp);
754 __ addptr(rdx_temp, -wordSize);
755 __ cmpptr(rdx_temp, rsp);
756 __ jcc(Assembler::aboveEqual, loop);
757 }
758
759 // Now move the argslot up, to point to the just-copied block.
760 __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale()));
761 // And adjust the argslot address to point at the deletion point.
762 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale()));
763 BLOCK_COMMENT("} remove_arg_slots");
764 }
765
766 // Helper to copy argument slots to the top of the stack.
767 // The sequence starts with rax_argslot and is counted by slot_count
768 // slot_count must be a multiple of stack_move_unit() and >= 0
769 // This function blows the temps but does not change rax_argslot.
770 void MethodHandles::push_arg_slots(MacroAssembler* _masm,
771 Register rax_argslot,
772 RegisterOrConstant slot_count,
773 int skip_words_count,
774 Register rbx_temp, Register rdx_temp) {
775 assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
776 (!slot_count.is_register() ? rbp : slot_count.as_register()),
777 rsp);
778 assert(Interpreter::stackElementSize == wordSize, "else change this code");
779
780 if (VerifyMethodHandles)
781 verify_stack_move(_masm, slot_count, 0);
782
783 // allow constant zero
784 if (slot_count.is_constant() && slot_count.as_constant() == 0)
785 return;
786
787 BLOCK_COMMENT("push_arg_slots {");
788
789 Register rbx_top = rbx_temp;
790
791 // There is at most 1 word to carry down with the TOS.
792 switch (skip_words_count) {
793 case 1: __ pop(rdx_temp); break;
794 case 0: break;
795 default: ShouldNotReachHere();
796 }
797
798 if (slot_count.is_constant()) {
799 for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
800 __ pushptr(Address(rax_argslot, i * wordSize));
801 }
802 } else { 342 } else {
803 Label L_plural, L_loop, L_break; 343 // The method is a member invoker used by direct method handles.
804 // Emit code to dynamically check for the common cases, zero and one slot. 344 if (VerifyMethodHandles) {
805 __ cmpl(slot_count.as_register(), (int32_t) 1); 345 // make sure the trailing argument really is a MemberName (caller responsibility)
806 __ jccb(Assembler::greater, L_plural); 346 verify_klass(_masm, member_reg, SystemDictionaryHandles::MemberName_klass(),
807 __ jccb(Assembler::less, L_break); 347 "MemberName required for invokeVirtual etc.");
808 __ pushptr(Address(rax_argslot, 0)); 348 }
809 __ jmpb(L_break); 349
810 __ BIND(L_plural); 350 Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
811 351 Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
812 // Loop for 2 or more: 352 Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
813 // rbx = &rax[slot_count] 353
814 // while (rbx > rax) *(--rsp) = *(--rbx) 354 Register temp1_recv_klass = temp1;
815 __ lea(rbx_top, Address(rax_argslot, slot_count, Address::times_ptr)); 355 if (iid != vmIntrinsics::_linkToStatic) {
816 __ BIND(L_loop); 356 __ verify_oop(receiver_reg);
817 __ subptr(rbx_top, wordSize); 357 if (iid == vmIntrinsics::_linkToSpecial) {
818 __ pushptr(Address(rbx_top, 0)); 358 // Don't actually load the klass; just null-check the receiver.
819 __ cmpptr(rbx_top, rax_argslot); 359 __ null_check(receiver_reg);
820 __ jcc(Assembler::above, L_loop); 360 } else {
821 __ bind(L_break); 361 // load receiver klass itself
822 } 362 __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
823 switch (skip_words_count) { 363 __ load_klass(temp1_recv_klass, receiver_reg);
824 case 1: __ push(rdx_temp); break; 364 __ verify_oop(temp1_recv_klass);
825 case 0: break; 365 }
826 default: ShouldNotReachHere(); 366 BLOCK_COMMENT("check_receiver {");
827 } 367 // The receiver for the MemberName must be in receiver_reg.
828 BLOCK_COMMENT("} push_arg_slots"); 368 // Check the receiver against the MemberName.clazz
829 } 369 if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
830 370 // Did not load it above...
831 // in-place movement; no change to rsp 371 __ load_klass(temp1_recv_klass, receiver_reg);
832 // blows rax_temp, rdx_temp 372 __ verify_oop(temp1_recv_klass);
833 void MethodHandles::move_arg_slots_up(MacroAssembler* _masm, 373 }
834 Register rbx_bottom, // invariant 374 if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
835 Address top_addr, // can use rax_temp 375 Label L_ok;
836 RegisterOrConstant positive_distance_in_slots, 376 Register temp2_defc = temp2;
837 Register rax_temp, Register rdx_temp) { 377 __ load_heap_oop(temp2_defc, member_clazz);
838 BLOCK_COMMENT("move_arg_slots_up {"); 378 load_klass_from_Class(_masm, temp2_defc);
839 assert_different_registers(rbx_bottom, 379 __ verify_oop(temp2_defc);
840 rax_temp, rdx_temp, 380 __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
841 positive_distance_in_slots.register_or_noreg()); 381 // If we get here, the type check failed!
842 Label L_loop, L_break; 382 __ STOP("receiver class disagrees with MemberName.clazz");
843 Register rax_top = rax_temp; 383 __ bind(L_ok);
844 if (!top_addr.is_same_address(Address(rax_top, 0))) 384 }
845 __ lea(rax_top, top_addr); 385 BLOCK_COMMENT("} check_receiver");
846 // Detect empty (or broken) loop: 386 }
847 #ifdef ASSERT 387 if (iid == vmIntrinsics::_linkToSpecial ||
848 if (VerifyMethodHandles) { 388 iid == vmIntrinsics::_linkToStatic) {
849 // Verify that &bottom < &top (non-empty interval) 389 DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass
850 Label L_ok, L_bad; 390 }
851 if (positive_distance_in_slots.is_register()) { 391
852 __ cmpptr(positive_distance_in_slots.as_register(), (int32_t) 0); 392 // Live registers at this point:
853 __ jcc(Assembler::lessEqual, L_bad); 393 // member_reg - MemberName that was the trailing argument
854 } 394 // temp1_recv_klass - klass of stacked receiver, if needed
855 __ cmpptr(rbx_bottom, rax_top); 395 // rsi/r13 - interpreter linkage (if interpreted)
856 __ jcc(Assembler::below, L_ok); 396 // rcx, rdx, rsi, rdi, r8, r8 - compiler arguments (if compiled)
857 __ bind(L_bad); 397
858 __ stop("valid bounds (copy up)"); 398 bool method_is_live = false;
859 __ BIND(L_ok); 399 switch (iid) {
860 } 400 case vmIntrinsics::_linkToSpecial:
861 #endif 401 if (VerifyMethodHandles) {
862 __ cmpptr(rbx_bottom, rax_top); 402 verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
863 __ jccb(Assembler::aboveEqual, L_break); 403 }
864 // work rax down to rbx, copying contiguous data upwards 404 __ load_heap_oop(rbx_method, member_vmtarget);
865 // In pseudo-code: 405 method_is_live = true;
866 // [rbx, rax) = &[bottom, top) 406 break;
867 // while (--rax >= rbx) *(rax + distance) = *(rax + 0), rax--; 407
868 __ BIND(L_loop); 408 case vmIntrinsics::_linkToStatic:
869 __ subptr(rax_top, wordSize); 409 if (VerifyMethodHandles) {
870 __ movptr(rdx_temp, Address(rax_top, 0)); 410 verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
871 __ movptr( Address(rax_top, positive_distance_in_slots, Address::times_ptr), rdx_temp); 411 }
872 __ cmpptr(rax_top, rbx_bottom); 412 __ load_heap_oop(rbx_method, member_vmtarget);
873 __ jcc(Assembler::above, L_loop); 413 method_is_live = true;
874 assert(Interpreter::stackElementSize == wordSize, "else change loop"); 414 break;
875 __ bind(L_break); 415
876 BLOCK_COMMENT("} move_arg_slots_up"); 416 case vmIntrinsics::_linkToVirtual:
877 } 417 {
878 418 // same as TemplateTable::invokevirtual,
879 // in-place movement; no change to rsp 419 // minus the CP setup and profiling:
880 // blows rax_temp, rdx_temp 420
881 void MethodHandles::move_arg_slots_down(MacroAssembler* _masm, 421 if (VerifyMethodHandles) {
882 Address bottom_addr, // can use rax_temp 422 verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
883 Register rbx_top, // invariant 423 }
884 RegisterOrConstant negative_distance_in_slots, 424
885 Register rax_temp, Register rdx_temp) { 425 // pick out the vtable index from the MemberName, and then we can discard it:
886 BLOCK_COMMENT("move_arg_slots_down {"); 426 Register temp2_index = temp2;
887 assert_different_registers(rbx_top, 427 __ movptr(temp2_index, member_vmindex);
888 negative_distance_in_slots.register_or_noreg(), 428
889 rax_temp, rdx_temp); 429 if (VerifyMethodHandles) {
890 Label L_loop, L_break; 430 Label L_index_ok;
891 Register rax_bottom = rax_temp; 431 __ cmpl(temp2_index, 0);
892 if (!bottom_addr.is_same_address(Address(rax_bottom, 0))) 432 __ jcc(Assembler::greaterEqual, L_index_ok);
893 __ lea(rax_bottom, bottom_addr); 433 __ STOP("no virtual index");
894 // Detect empty (or broken) loop: 434 __ BIND(L_index_ok);
895 #ifdef ASSERT 435 }
896 assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, ""); 436
897 if (VerifyMethodHandles) { 437 // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget
898 // Verify that &bottom < &top (non-empty interval) 438 // at this point. And VerifyMethodHandles has already checked clazz, if needed.
899 Label L_ok, L_bad; 439
900 if (negative_distance_in_slots.is_register()) { 440 // get target methodOop & entry point
901 __ cmpptr(negative_distance_in_slots.as_register(), (int32_t) 0); 441 __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
902 __ jcc(Assembler::greaterEqual, L_bad); 442 method_is_live = true;
903 } 443 break;
904 __ cmpptr(rax_bottom, rbx_top); 444 }
905 __ jcc(Assembler::below, L_ok); 445
906 __ bind(L_bad); 446 case vmIntrinsics::_linkToInterface:
907 __ stop("valid bounds (copy down)"); 447 {
908 __ BIND(L_ok); 448 // same as TemplateTable::invokeinterface
909 } 449 // (minus the CP setup and profiling, with different argument motion)
910 #endif 450 if (VerifyMethodHandles) {
911 __ cmpptr(rax_bottom, rbx_top); 451 verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
912 __ jccb(Assembler::aboveEqual, L_break); 452 }
913 // work rax up to rbx, copying contiguous data downwards 453
914 // In pseudo-code: 454 Register temp3_intf = temp3;
915 // [rax, rbx) = &[bottom, top) 455 __ load_heap_oop(temp3_intf, member_clazz);
916 // while (rax < rbx) *(rax - distance) = *(rax + 0), rax++; 456 load_klass_from_Class(_masm, temp3_intf);
917 __ BIND(L_loop); 457 __ verify_oop(temp3_intf);
918 __ movptr(rdx_temp, Address(rax_bottom, 0)); 458
919 __ movptr( Address(rax_bottom, negative_distance_in_slots, Address::times_ptr), rdx_temp); 459 Register rbx_index = rbx_method;
920 __ addptr(rax_bottom, wordSize); 460 __ movptr(rbx_index, member_vmindex);
921 __ cmpptr(rax_bottom, rbx_top); 461 if (VerifyMethodHandles) {
922 __ jcc(Assembler::below, L_loop); 462 Label L;
923 assert(Interpreter::stackElementSize == wordSize, "else change loop"); 463 __ cmpl(rbx_index, 0);
924 __ bind(L_break); 464 __ jcc(Assembler::greaterEqual, L);
925 BLOCK_COMMENT("} move_arg_slots_down"); 465 __ STOP("invalid vtable index for MH.invokeInterface");
926 } 466 __ bind(L);
927 467 }
928 // Copy from a field or array element to a stacked argument slot. 468
929 // is_element (ignored) says whether caller is loading an array element instead of an instance field. 469 // given intf, index, and recv klass, dispatch to the implementation method
930 void MethodHandles::move_typed_arg(MacroAssembler* _masm, 470 Label L_no_such_interface;
931 BasicType type, bool is_element, 471 __ lookup_interface_method(temp1_recv_klass, temp3_intf,
932 Address slot_dest, Address value_src, 472 // note: next two args must be the same:
933 Register rbx_temp, Register rdx_temp) { 473 rbx_index, rbx_method,
934 BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)"); 474 temp2,
935 if (type == T_OBJECT || type == T_ARRAY) { 475 L_no_such_interface);
936 __ load_heap_oop(rbx_temp, value_src); 476
937 __ movptr(slot_dest, rbx_temp); 477 __ verify_oop(rbx_method);
938 } else if (type != T_VOID) { 478 jump_from_method_handle(_masm, rbx_method, temp2, for_compiler_entry);
939 int arg_size = type2aelembytes(type); 479 __ hlt();
940 bool arg_is_signed = is_signed_subword_type(type); 480
941 int slot_size = (arg_size > wordSize) ? arg_size : wordSize; 481 __ bind(L_no_such_interface);
942 __ load_sized_value( rdx_temp, value_src, arg_size, arg_is_signed, rbx_temp); 482 __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
943 __ store_sized_value( slot_dest, rdx_temp, slot_size, rbx_temp); 483 break;
944 } 484 }
945 BLOCK_COMMENT("} move_typed_arg"); 485
946 } 486 default:
947 487 fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
948 void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, 488 break;
949 Address return_slot) { 489 }
950 BLOCK_COMMENT("move_return_value {"); 490
951 // Old versions of the JVM must clean the FPU stack after every return. 491 if (method_is_live) {
952 #ifndef _LP64 492 // live at this point: rbx_method, rsi/r13 (if interpreted)
953 #ifdef COMPILER2 493
954 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases 494 // After figuring out which concrete method to call, jump into it.
955 if ((type == T_FLOAT && UseSSE < 1) || (type == T_DOUBLE && UseSSE < 2)) { 495 // Note that this works in the interpreter with no data motion.
956 for (int i = 1; i < 8; i++) { 496 // But the compiled version will require that rcx_recv be shifted out.
957 __ ffree(i); 497 __ verify_oop(rbx_method);
958 } 498 jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);
959 } else if (UseSSE < 2) { 499 }
960 __ empty_FPU_stack(); 500 }
961 }
962 #endif //COMPILER2
963 #endif //!_LP64
964
965 // Look at the type and pull the value out of the corresponding register.
966 if (type == T_VOID) {
967 // nothing to do
968 } else if (type == T_OBJECT) {
969 __ movptr(return_slot, rax);
970 } else if (type == T_INT || is_subword_type(type)) {
971 // write the whole word, even if only 32 bits are significant
972 __ movptr(return_slot, rax);
973 } else if (type == T_LONG) {
974 // store the value by parts
975 // Note: We assume longs are contiguous (if misaligned) on the interpreter stack.
976 __ store_sized_value(return_slot, rax, BytesPerLong, rdx);
977 } else if (NOT_LP64((type == T_FLOAT && UseSSE < 1) ||
978 (type == T_DOUBLE && UseSSE < 2) ||)
979 false) {
980 // Use old x86 FPU registers:
981 if (type == T_FLOAT)
982 __ fstp_s(return_slot);
983 else
984 __ fstp_d(return_slot);
985 } else if (type == T_FLOAT) {
986 __ movflt(return_slot, xmm0);
987 } else if (type == T_DOUBLE) {
988 __ movdbl(return_slot, xmm0);
989 } else {
990 ShouldNotReachHere();
991 }
992 BLOCK_COMMENT("} move_return_value");
993 } 501 }
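Condensing the branches above: where the interpreter's return convention leaves each value on x86. A sketch with hypothetical enums that mirror the branch structure (not VM types):

    enum BT { BT_VOID, BT_INT, BT_LONG, BT_FLOAT, BT_DOUBLE, BT_OBJECT };
    enum RetLoc { RET_NONE, RET_GPR, RET_GPR_PAIR, RET_X87, RET_XMM };
    static RetLoc return_location(BT t, int use_sse, bool lp64) {
      if (t == BT_VOID)   return RET_NONE;
      if (t == BT_LONG)   return lp64 ? RET_GPR : RET_GPR_PAIR;  // rax or rax:rdx
      if (t == BT_FLOAT)  return (use_sse >= 1) ? RET_XMM : RET_X87;
      if (t == BT_DOUBLE) return (use_sse >= 2) ? RET_XMM : RET_X87;
      return RET_GPR;     // objects, ints, and subword types come back in rax
    }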
994 502
995 #ifndef PRODUCT 503 #ifndef PRODUCT
996 #define DESCRIBE_RICOCHET_OFFSET(rf, name) \
997 values.describe(frame_no, (intptr_t *) (((uintptr_t)rf) + MethodHandles::RicochetFrame::name##_offset_in_bytes()), #name)
998
999 void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) {
1000 address bp = (address) fr->fp();
1001 RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());
1002
1003 // ricochet slots
1004 DESCRIBE_RICOCHET_OFFSET(rf, exact_sender_sp);
1005 DESCRIBE_RICOCHET_OFFSET(rf, conversion);
1006 DESCRIBE_RICOCHET_OFFSET(rf, saved_args_base);
1007 DESCRIBE_RICOCHET_OFFSET(rf, saved_args_layout);
1008 DESCRIBE_RICOCHET_OFFSET(rf, saved_target);
1009 DESCRIBE_RICOCHET_OFFSET(rf, continuation);
1010
1011 // relevant ricochet targets (in caller frame)
1012 values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no));
1013 }
1014 #endif // PRODUCT
1015
1016 #ifndef PRODUCT
1017 extern "C" void print_method_handle(oop mh);
1018 void trace_method_handle_stub(const char* adaptername, 504 void trace_method_handle_stub(const char* adaptername,
1019 oop mh, 505 oop mh,
1020 intptr_t* saved_regs, 506 intptr_t* saved_regs,
1021 intptr_t* entry_sp) { 507 intptr_t* entry_sp) {
1022 // called as a leaf from native code: do not block the JVM! 508 // called as a leaf from native code: do not block the JVM!
1023 bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have rcx_mh 509 bool has_mh = (strstr(adaptername, "/static") == NULL &&
510 strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
1024 const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx"; 511 const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
1025 tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, adaptername, mh_reg_name, mh, entry_sp); 512 tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
513 adaptername, mh_reg_name,
514 mh, entry_sp);
1026 515
1027 if (Verbose) { 516 if (Verbose) {
1028 tty->print_cr("Registers:"); 517 tty->print_cr("Registers:");
1029 const int saved_regs_count = RegisterImpl::number_of_registers; 518 const int saved_regs_count = RegisterImpl::number_of_registers;
1030 for (int i = 0; i < saved_regs_count; i++) { 519 for (int i = 0; i < saved_regs_count; i++) {
1084 // Stack may not be walkable (invalid PC above FP): 573 // Stack may not be walkable (invalid PC above FP):
1085 // Add descriptions without building a Java frame to avoid issues 574 // Add descriptions without building a Java frame to avoid issues
1086 values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>"); 575 values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
1087 values.describe(-1, dump_sp, "sp for #1"); 576 values.describe(-1, dump_sp, "sp for #1");
1088 } 577 }
578 values.describe(-1, entry_sp, "raw top of stack");
1089 579
1090 tty->print_cr("Stack layout:"); 580 tty->print_cr("Stack layout:");
1091 values.print(p); 581 values.print(p);
1092 } 582 }
1093 if (has_mh) 583 if (has_mh && mh->is_oop()) {
1094 print_method_handle(mh); 584 mh->print();
585 if (java_lang_invoke_MethodHandle::is_instance(mh)) {
586 if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
587 java_lang_invoke_MethodHandle::form(mh)->print();
588 }
589 }
1095 } 590 }
1096 } 591 }
1097 592
1098 // The stub wraps the arguments in a struct on the stack to avoid 593 // The stub wraps the arguments in a struct on the stack to avoid
1099 // dealing with the different calling conventions for passing 6 594 // dealing with the different calling conventions for passing 6
1157 __ leave(); 652 __ leave();
1158 BLOCK_COMMENT("} trace_method_handle"); 653 BLOCK_COMMENT("} trace_method_handle");
1159 } 654 }
1160 #endif //PRODUCT 655 #endif //PRODUCT
1161 656
1162 // which conversion op types are implemented here?
1163 int MethodHandles::adapter_conversion_ops_supported_mask() {
1164 return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
1165 |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
1166 |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
1167 |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
1168 |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
1169 //OP_PRIM_TO_REF is below...
1170 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
1171 |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
1172 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
1173 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
1174 //OP_COLLECT_ARGS is below...
1175 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
1176 |(
1177 java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
1178 ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
1179 |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
1180 |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS)
1181 ))
1182 );
1183 }
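Callers probe this mask one bit per conversion op; a sketch of the test (hypothetical helper, treating the OP_* constants as bit positions, as the mask construction above does):

    static bool conversion_op_supported(int op) {
      assert(0 <= op && op < 32, "conversion op must fit in the 32-bit mask");
      return ((MethodHandles::adapter_conversion_ops_supported_mask() >> op) & 1) != 0;
    }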
1184
1185 //------------------------------------------------------------------------------
1186 // MethodHandles::generate_method_handle_stub
1187 //
1188 // Generate an "entry" field for a method handle.
1189 // This determines how the method handle will respond to calls.
1190 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
1191 MethodHandles::EntryKind ek_orig = ek_original_kind(ek);
1192
1193 // Here is the register state during an interpreted call,
1194 // as set up by generate_method_handle_interpreter_entry():
1195 // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
1196 // - rcx: receiver method handle
1197 // - rax: method handle type (only used by the check_mtype entry point)
1198 // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
1199 // - rdx: garbage temp, can blow away
1200
1201 const Register rcx_recv = rcx;
1202 const Register rax_argslot = rax;
1203 const Register rbx_temp = rbx;
1204 const Register rdx_temp = rdx;
1205 const Register rdi_temp = rdi;
1206
1207 // This register is set up by prepare_to_jump_from_interpreted (from interpreted calls)
1208 // and gen_c2i_adapter (from compiled calls):
1209 const Register saved_last_sp = saved_last_sp_register();
1210
1211 // Argument registers for _raise_exception.
1212 // 32-bit: Pass first two oop/int args in registers ECX and EDX.
1213 const Register rarg0_code = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
1214 const Register rarg1_actual = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
1215 const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
1216 assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp);
1217
1218 guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
1219
1220 // some handy addresses
1221 Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
1222 Address rcx_dmh_vmindex( rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );
1223
1224 Address rcx_bmh_vmargslot( rcx_recv, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes() );
1225 Address rcx_bmh_argument( rcx_recv, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes() );
1226
1227 Address rcx_amh_vmargslot( rcx_recv, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes() );
1228 Address rcx_amh_argument( rcx_recv, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes() );
1229 Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
1230 Address vmarg; // __ argument_address(vmargslot)
1231
1232 const int java_mirror_offset = in_bytes(Klass::java_mirror_offset());
1233
1234 if (have_entry(ek)) {
1235 __ nop(); // empty stubs make SG sick
1236 return;
1237 }
1238
1239 #ifdef ASSERT
1240 __ push((int32_t) 0xEEEEEEEE);
1241 __ push((int32_t) (intptr_t) entry_name(ek));
1242 LP64_ONLY(__ push((int32_t) high((intptr_t) entry_name(ek))));
1243 __ push((int32_t) 0x33333333);
1244 #endif //ASSERT
1245
1246 address interp_entry = __ pc();
1247
1248 trace_method_handle(_masm, entry_name(ek));
1249
1250 BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));
1251
1252 switch ((int) ek) {
1253 case _raise_exception:
1254 {
1255 // Not a real MH entry, but rather shared code for raising an
1256 // exception. Since we use the compiled entry, arguments are
1257 // expected in compiler argument registers.
1258 assert(raise_exception_method(), "must be set");
1259 assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
1260
1261 const Register rax_pc = rax;
1262 __ pop(rax_pc); // caller PC
1263 __ mov(rsp, saved_last_sp); // cut the stack back to where the caller started
1264
1265 Register rbx_method = rbx_temp;
1266 __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
1267
1268 const int jobject_oop_offset = 0;
1269 __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
1270
1271 __ movptr(saved_last_sp, rsp);
1272 __ subptr(rsp, 3 * wordSize);
1273 __ push(rax_pc); // restore caller PC
1274
1275 __ movl (__ argument_address(constant(2)), rarg0_code);
1276 __ movptr(__ argument_address(constant(1)), rarg1_actual);
1277 __ movptr(__ argument_address(constant(0)), rarg2_required);
1278 jump_from_method_handle(_masm, rbx_method, rax);
1279 }
1280 break;
1281
1282 case _invokestatic_mh:
1283 case _invokespecial_mh:
1284 {
1285 Register rbx_method = rbx_temp;
1286 __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop
1287 __ verify_oop(rbx_method);
1288 // same as TemplateTable::invokestatic or invokespecial,
1289 // minus the CP setup and profiling:
1290 if (ek == _invokespecial_mh) {
1291 // Must load & check the first argument before entering the target method.
1292 __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
1293 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
1294 __ null_check(rcx_recv);
1295 __ verify_oop(rcx_recv);
1296 }
1297 jump_from_method_handle(_masm, rbx_method, rax);
1298 }
1299 break;
1300
1301 case _invokevirtual_mh:
1302 {
1303 // same as TemplateTable::invokevirtual,
1304 // minus the CP setup and profiling:
1305
1306 // pick out the vtable index and receiver offset from the MH,
1307 // and then we can discard it:
1308 __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
1309 Register rbx_index = rbx_temp;
1310 __ movl(rbx_index, rcx_dmh_vmindex);
1311 // Note: The verifier allows us to ignore rcx_mh_vmtarget.
1312 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
1313 __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
1314
1315 // get receiver klass
1316 Register rax_klass = rax_argslot;
1317 __ load_klass(rax_klass, rcx_recv);
1318 __ verify_oop(rax_klass);
1319
1320 // get target methodOop & entry point
1321 const int base = instanceKlass::vtable_start_offset() * wordSize;
1322 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
1323 Address vtable_entry_addr(rax_klass,
1324 rbx_index, Address::times_ptr,
1325 base + vtableEntry::method_offset_in_bytes());
1326 Register rbx_method = rbx_temp;
1327 __ movptr(rbx_method, vtable_entry_addr);
1328
1329 __ verify_oop(rbx_method);
1330 jump_from_method_handle(_masm, rbx_method, rax);
1331 }
1332 break;
1333
1334 case _invokeinterface_mh:
1335 {
1336 // same as TemplateTable::invokeinterface,
1337 // minus the CP setup and profiling:
1338
1339 // pick out the interface and itable index from the MH.
1340 __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
1341 Register rdx_intf = rdx_temp;
1342 Register rbx_index = rbx_temp;
1343 __ load_heap_oop(rdx_intf, rcx_mh_vmtarget);
1344 __ movl(rbx_index, rcx_dmh_vmindex);
1345 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
1346 __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
1347
1348 // get receiver klass
1349 Register rax_klass = rax_argslot;
1350 __ load_klass(rax_klass, rcx_recv);
1351 __ verify_oop(rax_klass);
1352
1353 Register rbx_method = rbx_index;
1354
1355 // get interface klass
1356 Label no_such_interface;
1357 __ verify_oop(rdx_intf);
1358 __ lookup_interface_method(rax_klass, rdx_intf,
1359 // note: next two args must be the same:
1360 rbx_index, rbx_method,
1361 rdi_temp,
1362 no_such_interface);
1363
1364 __ verify_oop(rbx_method);
1365 jump_from_method_handle(_masm, rbx_method, rax);
1366 __ hlt();
1367
1368 __ bind(no_such_interface);
1369 // Throw an exception.
1370 // For historical reasons, it will be IncompatibleClassChangeError.
1371 __ mov(rbx_temp, rcx_recv); // rarg2_required might be RCX
1372 assert_different_registers(rarg2_required, rbx_temp);
1373 __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset)); // required interface
1374 __ mov( rarg1_actual, rbx_temp); // bad receiver
1375 __ movl( rarg0_code, (int) Bytecodes::_invokeinterface); // who is complaining?
1376 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1377 }
1378 break;
1379
1380 case _bound_ref_mh:
1381 case _bound_int_mh:
1382 case _bound_long_mh:
1383 case _bound_ref_direct_mh:
1384 case _bound_int_direct_mh:
1385 case _bound_long_direct_mh:
1386 {
1387 const bool direct_to_method = (ek >= _bound_ref_direct_mh);
1388 BasicType arg_type = ek_bound_mh_arg_type(ek);
1389 int arg_slots = type2size[arg_type];
1390
1391 // make room for the new argument:
1392 __ movl(rax_argslot, rcx_bmh_vmargslot);
1393 __ lea(rax_argslot, __ argument_address(rax_argslot));
1394
1395 insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp);
1396
1397 // store bound argument into the new stack slot:
1398 __ load_heap_oop(rbx_temp, rcx_bmh_argument);
1399 if (arg_type == T_OBJECT) {
1400 __ movptr(Address(rax_argslot, 0), rbx_temp);
1401 } else {
1402 Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
1403 move_typed_arg(_masm, arg_type, false,
1404 Address(rax_argslot, 0),
1405 prim_value_addr,
1406 rbx_temp, rdx_temp);
1407 }
1408
1409 if (direct_to_method) {
1410 Register rbx_method = rbx_temp;
1411 __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
1412 __ verify_oop(rbx_method);
1413 jump_from_method_handle(_masm, rbx_method, rax);
1414 } else {
1415 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1416 __ verify_oop(rcx_recv);
1417 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1418 }
1419 }
1420 break;
1421
1422 case _adapter_opt_profiling:
1423 if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) {
1424 Address rcx_mh_vmcount(rcx_recv, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes());
1425 __ incrementl(rcx_mh_vmcount);
1426 }
1427 // fall through
1428
1429 case _adapter_retype_only:
1430 case _adapter_retype_raw:
1431 // immediately jump to the next MH layer:
1432 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1433 __ verify_oop(rcx_recv);
1434 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1435 // This is OK when all parameter types widen.
1436 // It is also OK when a return type narrows.
1437 break;
1438
1439 case _adapter_check_cast:
1440 {
1441 // temps:
1442 Register rbx_klass = rbx_temp; // interesting AMH data
1443
1444 // check a reference argument before jumping to the next layer of MH:
1445 __ movl(rax_argslot, rcx_amh_vmargslot);
1446 vmarg = __ argument_address(rax_argslot);
1447
1448 // What class are we casting to?
1449 __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
1450 load_klass_from_Class(_masm, rbx_klass);
1451
1452 Label done;
1453 __ movptr(rdx_temp, vmarg);
1454 __ testptr(rdx_temp, rdx_temp);
1455 __ jcc(Assembler::zero, done); // no cast if null
1456 __ load_klass(rdx_temp, rdx_temp);
1457
1458 // live at this point:
1459 // - rbx_klass: klass required by the target method
1460 // - rdx_temp: argument klass to test
1461 // - rcx_recv: adapter method handle
1462 __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done);
1463
1464 // If we get here, the type check failed!
1465 // Call the wrong_method_type stub, passing the failing argument type in rax.
1466 Register rax_mtype = rax_argslot;
1467 __ movl(rax_argslot, rcx_amh_vmargslot); // reload argslot field
1468 __ movptr(rdx_temp, vmarg);
1469
1470 assert_different_registers(rarg2_required, rdx_temp);
1471 __ load_heap_oop(rarg2_required, rcx_amh_argument); // required class
1472 __ mov( rarg1_actual, rdx_temp); // bad object
1473 __ movl( rarg0_code, (int) Bytecodes::_checkcast); // who is complaining?
1474 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1475
1476 __ bind(done);
1477 // get the new MH:
1478 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1479 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1480 }
1481 break;
1482
1483 case _adapter_prim_to_prim:
1484 case _adapter_ref_to_prim:
1485 case _adapter_prim_to_ref:
1486 // handled completely by optimized cases
1487 __ stop("init_AdapterMethodHandle should not issue this");
1488 break;
1489
1490 case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim
1491 //case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim
1492 case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim
1493 case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim
1494 {
1495 // perform an in-place conversion to int or an int subword
1496 __ movl(rax_argslot, rcx_amh_vmargslot);
1497 vmarg = __ argument_address(rax_argslot);
1498
1499 switch (ek) {
1500 case _adapter_opt_i2i:
1501 __ movl(rdx_temp, vmarg);
1502 break;
1503 case _adapter_opt_l2i:
1504 {
1505 // just delete the extra slot; on a little-endian machine we keep the first
1506 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
1507 remove_arg_slots(_masm, -stack_move_unit(),
1508 rax_argslot, rbx_temp, rdx_temp);
1509 vmarg = Address(rax_argslot, -Interpreter::stackElementSize);
1510 __ movl(rdx_temp, vmarg);
1511 }
1512 break;
1513 case _adapter_opt_unboxi:
1514 {
1515 // Load the value up from the heap.
1516 __ movptr(rdx_temp, vmarg);
1517 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
1518 #ifdef ASSERT
1519 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
1520 if (is_subword_type(BasicType(bt)))
1521 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
1522 }
1523 #endif
1524 __ null_check(rdx_temp, value_offset);
1525 __ movl(rdx_temp, Address(rdx_temp, value_offset));
1526 // We load this as a word. Because we are little-endian,
1527 // the low bits will be correct, but the high bits may need cleaning.
1528 // The vminfo will guide us to clean those bits.
1529 }
1530 break;
1531 default:
1532 ShouldNotReachHere();
1533 }
1534
1535 // Do the requested conversion and store the value.
1536 Register rbx_vminfo = rbx_temp;
1537 load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
1538
1539 // get the new MH:
1540 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1541 // (now we are done with the old MH)
1542
1543 // original 32-bit vmdata word must be of this form:
1544 // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
1545 __ xchgptr(rcx, rbx_vminfo); // free rcx for shifts
1546 __ shll(rdx_temp /*, rcx*/);
1547 Label zero_extend, done;
1548 __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
1549 __ jccb(Assembler::zero, zero_extend);
1550
1551 // this path is taken for int->byte, int->short
1552 __ sarl(rdx_temp /*, rcx*/);
1553 __ jmpb(done);
1554
1555 __ bind(zero_extend);
1556 // this is taken for int->char
1557 __ shrl(rdx_temp /*, rcx*/);
1558
1559 __ bind(done);
1560 __ movl(vmarg, rdx_temp); // Store the value.
1561 __ xchgptr(rcx, rbx_vminfo); // restore rcx_recv
1562
1563 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1564 }
1565 break;
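The shll/sarl/shrl sequence above is the standard shift-pair truncation. The same conversion in plain C++ (a sketch; the shift distance is derived from the vminfo sign-bit count):

    #include <cstdint>
    static int32_t convert_subword(int32_t x, int value_bits, bool is_signed) {
      int n = 32 - value_bits;                     // 24 for byte, 16 for short/char
      uint32_t shifted = (uint32_t)x << n;         // shll: push out the high bits
      return is_signed ? (int32_t)shifted >> n     // sarl: int->byte, int->short
                       : (int32_t)(shifted >> n);  // shrl: int->char
    }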
1566
1567 case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim
1568 case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim
1569 {
1570 // perform an in-place int-to-long or ref-to-long conversion
1571 __ movl(rax_argslot, rcx_amh_vmargslot);
1572
1573 // on a little-endian machine we keep the first slot and add another after
1574 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
1575 insert_arg_slots(_masm, stack_move_unit(),
1576 rax_argslot, rbx_temp, rdx_temp);
1577 Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
1578 Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);
1579
1580 switch (ek) {
1581 case _adapter_opt_i2l:
1582 {
1583 #ifdef _LP64
1584 __ movslq(rdx_temp, vmarg1); // Load sign-extended
1585 __ movq(vmarg1, rdx_temp); // Store into first slot
1586 #else
1587 __ movl(rdx_temp, vmarg1);
1588 __ sarl(rdx_temp, BitsPerInt - 1); // __ extend_sign()
1589 __ movl(vmarg2, rdx_temp); // store second word
1590 #endif
1591 }
1592 break;
1593 case _adapter_opt_unboxl:
1594 {
1595 // Load the value up from the heap.
1596 __ movptr(rdx_temp, vmarg1);
1597 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
1598 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
1599 __ null_check(rdx_temp, value_offset);
1600 #ifdef _LP64
1601 __ movq(rbx_temp, Address(rdx_temp, value_offset));
1602 __ movq(vmarg1, rbx_temp);
1603 #else
1604 __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
1605 __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
1606 __ movl(vmarg1, rbx_temp);
1607 __ movl(vmarg2, rdx_temp);
1608 #endif
1609 }
1610 break;
1611 default:
1612 ShouldNotReachHere();
1613 }
1614
1615 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1616 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1617 }
1618 break;
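On 32-bit, the sarl by BitsPerInt - 1 manufactures the high word of the widened long out of the low word's sign bit. The same step as a sketch (hypothetical helper):

    #include <cstdint>
    static void widen_int_to_long(int32_t lo, int32_t* slot_lo, int32_t* slot_hi) {
      *slot_lo = lo;
      *slot_hi = lo >> 31;   // all copies of the sign bit: 0 or -1
    }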
1619
1620 case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim
1621 case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim
1622 {
1623 // perform an in-place floating primitive conversion
1624 __ movl(rax_argslot, rcx_amh_vmargslot);
1625 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
1626 if (ek == _adapter_opt_f2d) {
1627 insert_arg_slots(_masm, stack_move_unit(),
1628 rax_argslot, rbx_temp, rdx_temp);
1629 }
1630 Address vmarg(rax_argslot, -Interpreter::stackElementSize);
1631
1632 #ifdef _LP64
1633 if (ek == _adapter_opt_f2d) {
1634 __ movflt(xmm0, vmarg);
1635 __ cvtss2sd(xmm0, xmm0);
1636 __ movdbl(vmarg, xmm0);
1637 } else {
1638 __ movdbl(xmm0, vmarg);
1639 __ cvtsd2ss(xmm0, xmm0);
1640 __ movflt(vmarg, xmm0);
1641 }
1642 #else //_LP64
1643 if (ek == _adapter_opt_f2d) {
1644 __ fld_s(vmarg); // load float to ST0
1645 __ fstp_d(vmarg); // store double
1646 } else {
1647 __ fld_d(vmarg); // load double to ST0
1648 __ fstp_s(vmarg); // store single
1649 }
1650 #endif //_LP64
1651
1652 if (ek == _adapter_opt_d2f) {
1653 remove_arg_slots(_masm, -stack_move_unit(),
1654 rax_argslot, rbx_temp, rdx_temp);
1655 }
1656
1657 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1658 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1659 }
1660 break;
1661
1662 case _adapter_swap_args:
1663 case _adapter_rot_args:
1664 // handled completely by optimized cases
1665 __ stop("init_AdapterMethodHandle should not issue this");
1666 break;
1667
1668 case _adapter_opt_swap_1:
1669 case _adapter_opt_swap_2:
1670 case _adapter_opt_rot_1_up:
1671 case _adapter_opt_rot_1_down:
1672 case _adapter_opt_rot_2_up:
1673 case _adapter_opt_rot_2_down:
1674 {
1675 int swap_slots = ek_adapter_opt_swap_slots(ek);
1676 int rotate = ek_adapter_opt_swap_mode(ek);
1677
1678 // 'argslot' is the position of the first argument to swap
1679 __ movl(rax_argslot, rcx_amh_vmargslot);
1680 __ lea(rax_argslot, __ argument_address(rax_argslot));
1681
1682 // 'vminfo' gives the second swap position
1683 Register rbx_destslot = rbx_temp;
1684 load_conversion_vminfo(_masm, rbx_destslot, rcx_amh_conversion);
1685 __ lea(rbx_destslot, __ argument_address(rbx_destslot));
1686 if (VerifyMethodHandles)
1687 verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame");
1688
1689 assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here");
1690 if (!rotate) {
1691 // simple swap
1692 for (int i = 0; i < swap_slots; i++) {
1693 __ movptr(rdi_temp, Address(rax_argslot, i * wordSize));
1694 __ movptr(rdx_temp, Address(rbx_destslot, i * wordSize));
1695 __ movptr(Address(rax_argslot, i * wordSize), rdx_temp);
1696 __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp);
1697 }
1698 } else {
1699 // A rotate is actually a pair of moves, with an "odd slot" (or pair)
1700 // changing place with a series of other slots.
1701 // First, push the "odd slot", which is going to get overwritten
1702 for (int i = swap_slots - 1; i >= 0; i--) {
1703 // handle one with rdi_temp instead of a push:
1704 if (i == 0) __ movptr(rdi_temp, Address(rax_argslot, i * wordSize));
1705 else __ pushptr( Address(rax_argslot, i * wordSize));
1706 }
1707 if (rotate > 0) {
1708 // Here is rotate > 0:
1709 // (low mem) (high mem)
1710 // | dest: more_slots... | arg: odd_slot :arg+1 |
1711 // =>
1712 // | dest: odd_slot | dest+1: more_slots... :arg+1 |
1713 // work argslot down to destslot, copying contiguous data upwards
1714 // pseudo-code:
1715 // rax = src_addr - swap_bytes
1716 // rbx = dest_addr
1717 // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
1718 move_arg_slots_up(_masm,
1719 rbx_destslot,
1720 Address(rax_argslot, 0),
1721 swap_slots,
1722 rax_argslot, rdx_temp);
1723 } else {
1724 // Here is the other direction, rotate < 0:
1725 // (low mem) (high mem)
1726 // | arg: odd_slot | arg+1: more_slots... :dest+1 |
1727 // =>
1728 // | arg: more_slots... | dest: odd_slot :dest+1 |
1729 // work argslot up to destslot, copying contiguous data downwards
1730 // pseudo-code:
1731 // rax = src_addr + swap_bytes
1732 // rbx = dest_addr
1733 // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
1734 // dest_slot denotes an exclusive upper limit
1735 int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS;
1736 if (limit_bias != 0)
1737 __ addptr(rbx_destslot, - limit_bias * wordSize);
1738 move_arg_slots_down(_masm,
1739 Address(rax_argslot, swap_slots * wordSize),
1740 rbx_destslot,
1741 -swap_slots,
1742 rax_argslot, rdx_temp);
1743 __ subptr(rbx_destslot, swap_slots * wordSize);
1744 }
1745 // pop the original first chunk into the destination slot, now free
1746 for (int i = 0; i < swap_slots; i++) {
1747 if (i == 0) __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp);
1748 else __ popptr(Address(rbx_destslot, i * wordSize));
1749 }
1750 }
1751
1752 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1753 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1754 }
1755 break;
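For the rotate cases, the push/move/pop dance above is equivalent to saving the odd slot, sliding the contiguous run, and dropping the odd slot at its destination. A sketch of the rotate-up (rotate > 0) direction, assuming word-sized slots and 'arg' above 'dest' in memory (hypothetical helper, not VM code):

    #include <cstring>
    #include <cstdint>
    static void rotate_up(intptr_t* dest, intptr_t* arg, int swap_slots) {
      intptr_t odd[2];                                   // swap_slots is 1 or 2
      memcpy(odd, arg, swap_slots * sizeof(intptr_t));   // save the odd slot
      memmove(dest + swap_slots, dest, (arg - dest) * sizeof(intptr_t));
      memcpy(dest, odd, swap_slots * sizeof(intptr_t));  // park it at dest
    }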
1756
1757 case _adapter_dup_args:
1758 {
1759 // 'argslot' is the position of the first argument to duplicate
1760 __ movl(rax_argslot, rcx_amh_vmargslot);
1761 __ lea(rax_argslot, __ argument_address(rax_argslot));
1762
1763 // 'stack_move' is a negative count of words to duplicate
1764 Register rdi_stack_move = rdi_temp;
1765 load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
1766
1767 if (VerifyMethodHandles) {
1768 verify_argslots(_masm, rdi_stack_move, rax_argslot, true,
1769 "copied argument(s) must fall within current frame");
1770 }
1771
1772 // insert location is always the bottom of the argument list:
1773 Address insert_location = __ argument_address(constant(0));
1774 int pre_arg_words = insert_location.disp() / wordSize; // return PC is pushed
1775 assert(insert_location.base() == rsp, "");
1776
1777 __ negl(rdi_stack_move);
1778 push_arg_slots(_masm, rax_argslot, rdi_stack_move,
1779 pre_arg_words, rbx_temp, rdx_temp);
1780
1781 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1782 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1783 }
1784 break;
1785
1786 case _adapter_drop_args:
1787 {
1788 // 'argslot' is the position of the first argument to nuke
1789 __ movl(rax_argslot, rcx_amh_vmargslot);
1790 __ lea(rax_argslot, __ argument_address(rax_argslot));
1791
1792 // (must do previous push after argslot address is taken)
1793
1794 // 'stack_move' is the number of words to drop
1795 Register rdi_stack_move = rdi_temp;
1796 load_stack_move(_masm, rdi_stack_move, rcx_recv, false);
1797 remove_arg_slots(_masm, rdi_stack_move,
1798 rax_argslot, rbx_temp, rdx_temp);
1799
1800 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1801 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1802 }
1803 break;
1804
1805 case _adapter_collect_args:
1806 case _adapter_fold_args:
1807 case _adapter_spread_args:
1808 // handled completely by optimized cases
1809 __ stop("init_AdapterMethodHandle should not issue this");
1810 break;
1811
1812 case _adapter_opt_collect_ref:
1813 case _adapter_opt_collect_int:
1814 case _adapter_opt_collect_long:
1815 case _adapter_opt_collect_float:
1816 case _adapter_opt_collect_double:
1817 case _adapter_opt_collect_void:
1818 case _adapter_opt_collect_0_ref:
1819 case _adapter_opt_collect_1_ref:
1820 case _adapter_opt_collect_2_ref:
1821 case _adapter_opt_collect_3_ref:
1822 case _adapter_opt_collect_4_ref:
1823 case _adapter_opt_collect_5_ref:
1824 case _adapter_opt_filter_S0_ref:
1825 case _adapter_opt_filter_S1_ref:
1826 case _adapter_opt_filter_S2_ref:
1827 case _adapter_opt_filter_S3_ref:
1828 case _adapter_opt_filter_S4_ref:
1829 case _adapter_opt_filter_S5_ref:
1830 case _adapter_opt_collect_2_S0_ref:
1831 case _adapter_opt_collect_2_S1_ref:
1832 case _adapter_opt_collect_2_S2_ref:
1833 case _adapter_opt_collect_2_S3_ref:
1834 case _adapter_opt_collect_2_S4_ref:
1835 case _adapter_opt_collect_2_S5_ref:
1836 case _adapter_opt_fold_ref:
1837 case _adapter_opt_fold_int:
1838 case _adapter_opt_fold_long:
1839 case _adapter_opt_fold_float:
1840 case _adapter_opt_fold_double:
1841 case _adapter_opt_fold_void:
1842 case _adapter_opt_fold_1_ref:
1843 case _adapter_opt_fold_2_ref:
1844 case _adapter_opt_fold_3_ref:
1845 case _adapter_opt_fold_4_ref:
1846 case _adapter_opt_fold_5_ref:
1847 {
1848 // Given a fresh incoming stack frame, build a new ricochet frame.
1849 // On entry, TOS points at a return PC, and RBP is the caller's frame ptr.
1850 // RSI/R13 has the caller's exact stack pointer, which we must also preserve.
1851 // RCX contains an AdapterMethodHandle of the indicated kind.
1852
1853 // Relevant AMH fields:
1854 // amh.vmargslot:
1855 // points to the trailing edge of the arguments
1856 // to filter, collect, or fold. For a boxing operation,
1857 // it points just after the single primitive value.
1858 // amh.argument:
1859 // recursively called MH, on |collect| arguments
1860 // amh.vmtarget:
1861 // final destination MH, on return value, etc.
1862 // amh.conversion.dest:
1863 // gives the type of the return value
1864 // (not needed here, since dest is also derived from ek)
1865 // amh.conversion.vminfo:
1866 // points to the trailing edge of the return value
1867 // when the vmtarget is to be called; this is
1868 // equal to vmargslot + (retained ? |collect| : 0)
1869
1870 // Pass 0 or more argument slots to the recursive target.
1871 int collect_count_constant = ek_adapter_opt_collect_count(ek);
1872
1873 // The collected arguments are copied from the saved argument list:
1874 int collect_slot_constant = ek_adapter_opt_collect_slot(ek);
1875
1876 assert(ek_orig == _adapter_collect_args ||
1877 ek_orig == _adapter_fold_args, "");
1878 bool retain_original_args = (ek_orig == _adapter_fold_args);
1879
1880 // The return value is replaced (or inserted) at the 'vminfo' argslot.
1881 // Sometimes we can compute this statically.
1882 int dest_slot_constant = -1;
1883 if (!retain_original_args)
1884 dest_slot_constant = collect_slot_constant;
1885 else if (collect_slot_constant >= 0 && collect_count_constant >= 0)
1886 // We are preserving all the arguments, and the return value is prepended,
1887 // so the return slot is to the left (above) the |collect| sequence.
1888 dest_slot_constant = collect_slot_constant + collect_count_constant;
1889
1890 // Replace all those slots by the result of the recursive call.
1891 // The result type can be one of ref, int, long, float, double, void.
1892 // In the case of void, nothing is pushed on the stack after return.
1893 BasicType dest = ek_adapter_opt_collect_type(ek);
1894 assert(dest == type2wfield[dest], "dest is a stack slot type");
1895 int dest_count = type2size[dest];
1896 assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size");
1897
1898 // Choose a return continuation.
1899 EntryKind ek_ret = _adapter_opt_return_any;
1900 if (dest != T_CONFLICT && OptimizeMethodHandles) {
1901 switch (dest) {
1902 case T_INT : ek_ret = _adapter_opt_return_int; break;
1903 case T_LONG : ek_ret = _adapter_opt_return_long; break;
1904 case T_FLOAT : ek_ret = _adapter_opt_return_float; break;
1905 case T_DOUBLE : ek_ret = _adapter_opt_return_double; break;
1906 case T_OBJECT : ek_ret = _adapter_opt_return_ref; break;
1907 case T_VOID : ek_ret = _adapter_opt_return_void; break;
1908 default : ShouldNotReachHere();
1909 }
1910 if (dest == T_OBJECT && dest_slot_constant >= 0) {
1911 EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant);
1912 if (ek_try <= _adapter_opt_return_LAST &&
1913 ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) {
1914 ek_ret = ek_try;
1915 }
1916 }
1917 assert(ek_adapter_opt_return_type(ek_ret) == dest, "");
1918 }
1919
1920 // Already pushed: ... keep1 | collect | keep2 | sender_pc |
1921 // push(sender_pc);
1922
1923 // Compute argument base:
1924 Register rax_argv = rax_argslot;
1925 __ lea(rax_argv, __ argument_address(constant(0)));
1926
1927 // Push a few extra argument words, if we need them to store the return value.
1928 {
1929 int extra_slots = 0;
1930 if (retain_original_args) {
1931 extra_slots = dest_count;
1932 } else if (collect_count_constant == -1) {
1933 extra_slots = dest_count; // collect_count might be zero; be generous
1934 } else if (dest_count > collect_count_constant) {
1935 extra_slots = (dest_count - collect_count_constant);
1936 } else {
1937 // else we know we have enough dead space in |collect| to repurpose for return values
1938 }
1939 DEBUG_ONLY(extra_slots += 1);
1940 if (extra_slots > 0) {
1941 __ pop(rbx_temp); // return value
1942 __ subptr(rsp, (extra_slots * Interpreter::stackElementSize));
1943 // Push guard word #2 in debug mode.
1944 DEBUG_ONLY(__ movptr(Address(rsp, 0), (int32_t) RicochetFrame::MAGIC_NUMBER_2));
1945 __ push(rbx_temp);
1946 }
1947 }
1948
1949 RicochetFrame::enter_ricochet_frame(_masm, rcx_recv, rax_argv,
1950 entry(ek_ret)->from_interpreted_entry(), rbx_temp);
1951
1952 // Now pushed: ... keep1 | collect | keep2 | RF |
1953 // some handy frame slots:
1954 Address exact_sender_sp_addr = RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes());
1955 Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
1956 Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
1957
1958 #ifdef ASSERT
1959 if (VerifyMethodHandles && dest != T_CONFLICT) {
1960 BLOCK_COMMENT("verify AMH.conv.dest");
1961 load_conversion_dest_type(_masm, rbx_temp, conversion_addr);
1962 Label L_dest_ok;
1963 __ cmpl(rbx_temp, (int) dest);
1964 __ jcc(Assembler::equal, L_dest_ok);
1965 if (dest == T_INT) {
1966 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
1967 if (is_subword_type(BasicType(bt))) {
1968 __ cmpl(rbx_temp, (int) bt);
1969 __ jcc(Assembler::equal, L_dest_ok);
1970 }
1971 }
1972 }
1973 __ stop("bad dest in AMH.conv");
1974 __ BIND(L_dest_ok);
1975 }
1976 #endif //ASSERT
1977
1978 // Find out where the original copy of the recursive argument sequence begins.
1979 Register rax_coll = rax_argv;
1980 {
1981 RegisterOrConstant collect_slot = collect_slot_constant;
1982 if (collect_slot_constant == -1) {
1983 __ movl(rdi_temp, rcx_amh_vmargslot);
1984 collect_slot = rdi_temp;
1985 }
1986 if (collect_slot_constant != 0)
1987 __ lea(rax_coll, Address(rax_argv, collect_slot, Interpreter::stackElementScale()));
1988 // rax_coll now points at the trailing edge of |collect| and leading edge of |keep2|
1989 }
1990
1991 // Replace the old AMH with the recursive MH. (No going back now.)
1992 // In the case of a boxing call, the recursive call is to a 'boxer' method,
1993 // such as Integer.valueOf or Long.valueOf. In the case of a filter
1994 // or collect call, it will take one or more arguments, transform them,
1995 // and return some result, to store back into argument_base[vminfo].
1996 __ load_heap_oop(rcx_recv, rcx_amh_argument);
1997 if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv);
1998
1999 // Push a space for the recursively called MH first:
2000 __ push((int32_t)NULL_WORD);
2001
2002 // Calculate |collect|, the number of arguments we are collecting.
2003 Register rdi_collect_count = rdi_temp;
2004 RegisterOrConstant collect_count;
2005 if (collect_count_constant >= 0) {
2006 collect_count = collect_count_constant;
2007 } else {
2008 __ load_method_handle_vmslots(rdi_collect_count, rcx_recv, rdx_temp);
2009 collect_count = rdi_collect_count;
2010 }
2011 #ifdef ASSERT
2012 if (VerifyMethodHandles && collect_count_constant >= 0) {
2013 __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp);
2014 Label L_count_ok;
2015 __ cmpl(rbx_temp, collect_count_constant);
2016 __ jcc(Assembler::equal, L_count_ok);
2017 __ stop("bad vminfo in AMH.conv");
2018 __ BIND(L_count_ok);
2019 }
2020 #endif //ASSERT
2021
2022 // copy |collect| slots directly to TOS:
2023 push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp);
2024 // Now pushed: ... keep1 | collect | keep2 | RF... | collect |
2025 // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2|
2026
2027 // If necessary, adjust the saved arguments to make room for the eventual return value.
2028 // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
2029 // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect |
2030 // In the non-retaining case, this might move keep2 either up or down.
2031 // We don't have to copy the whole | RF... collect | complex,
2032 // but we must adjust RF.saved_args_base.
2033 // Also, from now on, we will forget about the original copy of |collect|.
2034 // If we are retaining it, we will treat it as part of |keep2|.
2035 // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
2036
2037 BLOCK_COMMENT("adjust trailing arguments {");
2038 // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
2039 int open_count = dest_count;
2040 RegisterOrConstant close_count = collect_count_constant;
2041 Register rdi_close_count = rdi_collect_count;
2042 if (retain_original_args) {
2043 close_count = constant(0);
2044 } else if (collect_count_constant == -1) {
2045 close_count = rdi_collect_count;
2046 }
2047
2048 // How many slots need moving? This is simply dest_slot (0 => no |keep3|).
2049 RegisterOrConstant keep3_count;
2050 Register rsi_keep3_count = rsi; // can repair from RF.exact_sender_sp
2051 if (dest_slot_constant >= 0) {
2052 keep3_count = dest_slot_constant;
2053 } else {
2054 load_conversion_vminfo(_masm, rsi_keep3_count, conversion_addr);
2055 keep3_count = rsi_keep3_count;
2056 }
2057 #ifdef ASSERT
2058 if (VerifyMethodHandles && dest_slot_constant >= 0) {
2059 load_conversion_vminfo(_masm, rbx_temp, conversion_addr);
2060 Label L_vminfo_ok;
2061 __ cmpl(rbx_temp, dest_slot_constant);
2062 __ jcc(Assembler::equal, L_vminfo_ok);
2063 __ stop("bad vminfo in AMH.conv");
2064 __ BIND(L_vminfo_ok);
2065 }
2066 #endif //ASSERT
2067
2068 // tasks remaining:
2069 bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
2070 bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
2071 bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
2072
2073 if (stomp_dest | fix_arg_base) {
2074 // we will probably need an updated rax_argv value
2075 if (collect_slot_constant >= 0) {
2076 // rax_coll already holds the leading edge of |keep2|, so tweak it
2077 assert(rax_coll == rax_argv, "elided a move");
2078 if (collect_slot_constant != 0)
2079 __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize);
2080 } else {
2081 // Just reload from RF.saved_args_base.
2082 __ movptr(rax_argv, saved_args_base_addr);
2083 }
2084 }
2085
2086 // Old and new argument locations (based at slot 0).
2087 // Net shift (&new_argv - &old_argv) is (close_count - open_count).
2088 bool zero_open_count = (open_count == 0); // remember this bit of info
2089 if (move_keep3 && fix_arg_base) {
2090 // It will be easier to have everything in one register:
2091 if (close_count.is_register()) {
2092 // Deduct open_count from close_count register to get a clean +/- value.
2093 __ subptr(close_count.as_register(), open_count);
2094 } else {
2095 close_count = close_count.as_constant() - open_count;
2096 }
2097 open_count = 0;
2098 }
2099 Address old_argv(rax_argv, 0);
2100 Address new_argv(rax_argv, close_count, Interpreter::stackElementScale(),
2101 - open_count * Interpreter::stackElementSize);
2102
2103 // First decide if any actual data are to be moved.
2104 // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
2105 // (As it happens, all movements involve an argument list size change.)
2106
2107 // If there are variable parameters, use dynamic checks to skip around the whole mess.
2108 Label L_done;
2109 if (!keep3_count.is_constant()) {
2110 __ testl(keep3_count.as_register(), keep3_count.as_register());
2111 __ jcc(Assembler::zero, L_done);
2112 }
2113 if (!close_count.is_constant()) {
2114 __ cmpl(close_count.as_register(), open_count);
2115 __ jcc(Assembler::equal, L_done);
2116 }
2117
2118 if (move_keep3 && fix_arg_base) {
2119 bool emit_move_down = false, emit_move_up = false, emit_guard = false;
2120 if (!close_count.is_constant()) {
2121 emit_move_down = emit_guard = !zero_open_count;
2122 emit_move_up = true;
2123 } else if (open_count != close_count.as_constant()) {
2124 emit_move_down = (open_count > close_count.as_constant());
2125 emit_move_up = !emit_move_down;
2126 }
2127 Label L_move_up;
2128 if (emit_guard) {
2129 __ cmpl(close_count.as_register(), open_count);
2130 __ jcc(Assembler::greater, L_move_up);
2131 }
2132
2133 if (emit_move_down) {
2134 // Move arguments down if |+dest+| > |-collect-|
2135 // (This is rare, except when arguments are retained.)
2136 // This opens space for the return value.
2137 if (keep3_count.is_constant()) {
2138 for (int i = 0; i < keep3_count.as_constant(); i++) {
2139 __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize));
2140 __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp);
2141 }
2142 } else {
2143 Register rbx_argv_top = rbx_temp;
2144 __ lea(rbx_argv_top, old_argv.plus_disp(keep3_count, Interpreter::stackElementScale()));
2145 move_arg_slots_down(_masm,
2146 old_argv, // beginning of old argv
2147 rbx_argv_top, // end of old argv
2148 close_count, // distance to move down (must be negative)
2149 rax_argv, rdx_temp);
2150 // Used argv as an iteration variable; reload from RF.saved_args_base.
2151 __ movptr(rax_argv, saved_args_base_addr);
2152 }
2153 }
2154
2155 if (emit_guard) {
2156 __ jmp(L_done); // assumes emit_move_up is true also
2157 __ BIND(L_move_up);
2158 }
2159
2160 if (emit_move_up) {
2161
2162 // Move arguments up if |+dest+| < |-collect-|
2163 // (This is usual, except when |keep3| is empty.)
2164 // This closes up the space occupied by the now-deleted collect values.
2165 if (keep3_count.is_constant()) {
2166 for (int i = keep3_count.as_constant() - 1; i >= 0; i--) {
2167 __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize));
2168 __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp);
2169 }
2170 } else {
2171 Address argv_top = old_argv.plus_disp(keep3_count, Interpreter::stackElementScale());
2172 move_arg_slots_up(_masm,
2173 rax_argv, // beginning of old argv
2174 argv_top, // end of old argv
2175 close_count, // distance to move up (must be positive)
2176 rbx_temp, rdx_temp);
2177 }
2178 }
2179 }
2180 __ BIND(L_done);
2181
2182 if (fix_arg_base) {
2183 // adjust RF.saved_args_base by adding (close_count - open_count)
2184 if (!new_argv.is_same_address(Address(rax_argv, 0)))
2185 __ lea(rax_argv, new_argv);
2186 __ movptr(saved_args_base_addr, rax_argv);
2187 }
2188
2189 if (stomp_dest) {
2190 // Stomp the return slot, so it doesn't hold garbage.
2191 // This isn't strictly necessary, but it may help detect bugs.
2192 int forty_two = RicochetFrame::RETURN_VALUE_PLACEHOLDER;
2193 __ movptr(Address(rax_argv, keep3_count, Address::times_ptr),
2194 (int32_t) forty_two);
2195 // uses rsi_keep3_count
2196 }
2197 BLOCK_COMMENT("} adjust trailing arguments");
2198
2199 BLOCK_COMMENT("do_recursive_call");
2200 __ mov(saved_last_sp, rsp); // set rsi/r13 for callee
2201 __ pushptr(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr());
2202 // The globally unique bounce address has two purposes:
2203 // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
2204 // 2. When returned to, it cuts back the stack and redirects control flow
2205 // to the return handler.
2206 // The return handler will further cut back the stack when it takes
2207 // down the RF. Perhaps there is a way to streamline this further.
2208
2209 // State during recursive call:
2210 // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
2211 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
2212
2213 break;
2214 }
2215
2216 case _adapter_opt_return_ref:
2217 case _adapter_opt_return_int:
2218 case _adapter_opt_return_long:
2219 case _adapter_opt_return_float:
2220 case _adapter_opt_return_double:
2221 case _adapter_opt_return_void:
2222 case _adapter_opt_return_S0_ref:
2223 case _adapter_opt_return_S1_ref:
2224 case _adapter_opt_return_S2_ref:
2225 case _adapter_opt_return_S3_ref:
2226 case _adapter_opt_return_S4_ref:
2227 case _adapter_opt_return_S5_ref:
2228 {
2229 BasicType dest_type_constant = ek_adapter_opt_return_type(ek);
2230 int dest_slot_constant = ek_adapter_opt_return_slot(ek);
2231
2232 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
2233
2234 if (dest_slot_constant == -1) {
2235 // The current stub is a general handler for this dest_type.
2236 // It can be called from _adapter_opt_return_any below.
2237 // Stash the address in a little table.
2238 assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob");
2239 address return_handler = __ pc();
2240 _adapter_return_handlers[dest_type_constant] = return_handler;
2241 if (dest_type_constant == T_INT) {
2242 // do the subword types too
2243 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
2244 if (is_subword_type(BasicType(bt)) &&
2245 _adapter_return_handlers[bt] == NULL) {
2246 _adapter_return_handlers[bt] = return_handler;
2247 }
2248 }
2249 }
2250 }
2251
2252 Register rbx_arg_base = rbx_temp;
2253 assert_different_registers(rax, rdx, // possibly live return value registers
2254 rdi_temp, rbx_arg_base);
2255
2256 Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
2257 Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
2258
2259 __ movptr(rbx_arg_base, saved_args_base_addr);
2260 RegisterOrConstant dest_slot = dest_slot_constant;
2261 if (dest_slot_constant == -1) {
2262 load_conversion_vminfo(_masm, rdi_temp, conversion_addr);
2263 dest_slot = rdi_temp;
2264 }
2265 // Store the result back into the argslot.
2266 // This code uses the interpreter calling sequence, in which the return value
2267 // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
2268 // There are certain irregularities with floating point values, which can be seen
2269 // in TemplateInterpreterGenerator::generate_return_entry_for.
2270 move_return_value(_masm, dest_type_constant, Address(rbx_arg_base, dest_slot, Interpreter::stackElementScale()));
2271
2272 RicochetFrame::leave_ricochet_frame(_masm, rcx_recv, rbx_arg_base, rdx_temp);
2273 __ push(rdx_temp); // repush the return PC
2274
2275 // Load the final target and go.
2276 if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv);
2277 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
2278 __ hlt(); // --------------------
2279 break;
2280 }
2281
2282 case _adapter_opt_return_any:
2283 {
2284 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
2285 Register rdi_conv = rdi_temp;
2286 assert_different_registers(rax, rdx, // possibly live return value registers
2287 rdi_conv, rbx_temp);
2288
2289 Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
2290 load_conversion_dest_type(_masm, rdi_conv, conversion_addr);
2291 __ lea(rbx_temp, ExternalAddress((address) &_adapter_return_handlers[0]));
2292 __ movptr(rbx_temp, Address(rbx_temp, rdi_conv, Address::times_ptr));
2293
2294 #ifdef ASSERT
2295 { Label L_badconv;
2296 __ testptr(rbx_temp, rbx_temp);
2297 __ jccb(Assembler::zero, L_badconv);
2298 __ jmp(rbx_temp);
2299 __ bind(L_badconv);
2300 __ stop("bad method handle return");
2301 }
2302 #else //ASSERT
2303 __ jmp(rbx_temp);
2304 #endif //ASSERT
2305 break;
2306 }
2307
2308 case _adapter_opt_spread_0:
2309 case _adapter_opt_spread_1_ref:
2310 case _adapter_opt_spread_2_ref:
2311 case _adapter_opt_spread_3_ref:
2312 case _adapter_opt_spread_4_ref:
2313 case _adapter_opt_spread_5_ref:
2314 case _adapter_opt_spread_ref:
2315 case _adapter_opt_spread_byte:
2316 case _adapter_opt_spread_char:
2317 case _adapter_opt_spread_short:
2318 case _adapter_opt_spread_int:
2319 case _adapter_opt_spread_long:
2320 case _adapter_opt_spread_float:
2321 case _adapter_opt_spread_double:
2322 {
2323 // spread an array out into a group of arguments
2324 int length_constant = ek_adapter_opt_spread_count(ek);
2325 bool length_can_be_zero = (length_constant == 0);
2326 if (length_constant < 0) {
2327 // some adapters with variable length must handle the zero case
2328 if (!OptimizeMethodHandles ||
2329 ek_adapter_opt_spread_type(ek) != T_OBJECT)
2330 length_can_be_zero = true;
2331 }
2332
2333 // find the address of the array argument
2334 __ movl(rax_argslot, rcx_amh_vmargslot);
2335 __ lea(rax_argslot, __ argument_address(rax_argslot));
2336
2337 // grab another temp
2338 Register rsi_temp = rsi;
2339
2340 // rax_argslot points both to the array and to the first output arg
2341 vmarg = Address(rax_argslot, 0);
2342
2343 // Get the array value.
2344 Register rdi_array = rdi_temp;
2345 Register rdx_array_klass = rdx_temp;
2346 BasicType elem_type = ek_adapter_opt_spread_type(ek);
2347 int elem_slots = type2size[elem_type]; // 1 or 2
2348 int array_slots = 1; // array is always a T_OBJECT
2349 int length_offset = arrayOopDesc::length_offset_in_bytes();
2350 int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type);
2351 __ movptr(rdi_array, vmarg);
2352
2353 Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
2354 if (length_can_be_zero) {
2355 // handle the null pointer case, if zero is allowed
2356 Label L_skip;
2357 if (length_constant < 0) {
2358 load_conversion_vminfo(_masm, rbx_temp, rcx_amh_conversion);
2359 __ testl(rbx_temp, rbx_temp);
2360 __ jcc(Assembler::notZero, L_skip);
2361 }
2362 __ testptr(rdi_array, rdi_array);
2363 __ jcc(Assembler::notZero, L_skip);
2364
2365 // If 'rsi' contains the 'saved_last_sp' (this is only the
2366 // case in a 32-bit version of the VM), we have to save 'rsi'
2367 // on the stack because later on (at 'L_array_is_empty') 'rsi'
2368 // will be overwritten.
2369 { if (rsi_temp == saved_last_sp) __ push(saved_last_sp); }
2370 // Also prepare a handy macro which restores 'rsi' if required.
2371 #define UNPUSH_RSI \
2372 { if (rsi_temp == saved_last_sp) __ pop(saved_last_sp); }
2373
2374 __ jmp(L_array_is_empty);
2375 __ bind(L_skip);
2376 }
2377 __ null_check(rdi_array, oopDesc::klass_offset_in_bytes());
2378 __ load_klass(rdx_array_klass, rdi_array);
2379
2380 // Save 'rsi' if required (see comment above). Do this only
2381 // after the null check, so that the exception handler which is
2382 // called in the case of a null pointer exception will not be
2383 // confused by the extra value on the stack (it expects the
2384 // return pointer on top of the stack).
2385 { if (rsi_temp == saved_last_sp) __ push(saved_last_sp); }
2386
2387 // Check the array type.
2388 Register rbx_klass = rbx_temp;
2389 __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
2390 load_klass_from_Class(_masm, rbx_klass);
2391
2392 Label ok_array_klass, bad_array_klass, bad_array_length;
2393 __ check_klass_subtype(rdx_array_klass, rbx_klass, rsi_temp, ok_array_klass);
2394 // If we get here, the type check failed!
2395 __ jmp(bad_array_klass);
2396 __ BIND(ok_array_klass);
2397
2398 // Check length.
2399 if (length_constant >= 0) {
2400 __ cmpl(Address(rdi_array, length_offset), length_constant);
2401 } else {
2402 Register rbx_vminfo = rbx_temp;
2403 load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
2404 __ cmpl(rbx_vminfo, Address(rdi_array, length_offset));
2405 }
2406 __ jcc(Assembler::notEqual, bad_array_length);
2407
2408 Register rdx_argslot_limit = rdx_temp;
2409
2410 // Array length checks out. Now insert any required stack slots.
2411 if (length_constant == -1) {
2412 // Form a pointer to the end of the affected region.
2413 __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
2414 // 'stack_move' is a negative count of words to insert
2415 // This number already accounts for elem_slots.
2416 Register rsi_stack_move = rsi_temp;
2417 load_stack_move(_masm, rsi_stack_move, rcx_recv, true);
2418 __ cmpptr(rsi_stack_move, 0);
2419 assert(stack_move_unit() < 0, "else change this comparison");
2420 __ jcc(Assembler::less, L_insert_arg_space);
2421 __ jcc(Assembler::equal, L_copy_args);
2422 // single argument case, with no array movement
2423 __ BIND(L_array_is_empty);
2424 remove_arg_slots(_masm, -stack_move_unit() * array_slots,
2425 rax_argslot, rbx_temp, rdx_temp);
2426 __ jmp(L_args_done); // no spreading to do
2427 __ BIND(L_insert_arg_space);
2428 // come here in the usual case, stack_move < 0 (2 or more spread arguments)
2429 Register rdi_temp = rdi_array; // spill this
2430 insert_arg_slots(_masm, rsi_stack_move,
2431 rax_argslot, rbx_temp, rdi_temp);
2432 // reload the array since rsi was killed
2433 // reload from rdx_argslot_limit since rax_argslot is now decremented
2434 __ movptr(rdi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
2435 } else if (length_constant >= 1) {
2436 int new_slots = (length_constant * elem_slots) - array_slots;
2437 insert_arg_slots(_masm, new_slots * stack_move_unit(),
2438 rax_argslot, rbx_temp, rdx_temp);
2439 } else if (length_constant == 0) {
2440 __ BIND(L_array_is_empty);
2441 remove_arg_slots(_masm, -stack_move_unit() * array_slots,
2442 rax_argslot, rbx_temp, rdx_temp);
2443 } else {
2444 ShouldNotReachHere();
2445 }
2446
2447 // Copy from the array to the new slots.
2448 // Note: Stack change code preserves integrity of rax_argslot pointer.
2449 // So even after slot insertions, rax_argslot still points to first argument.
2450 // Beware: Arguments that are shallow on the stack are deep in the array,
2451 // and vice versa. So a downward-growing stack (the usual) has to be copied
2452 // elementwise in reverse order from the source array.
2453 __ BIND(L_copy_args);
2454 if (length_constant == -1) {
2455 // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
2456 // Array element [0] goes at rdx_argslot_limit[-wordSize].
2457 Register rdi_source = rdi_array;
2458 __ lea(rdi_source, Address(rdi_array, elem0_offset));
2459 Register rdx_fill_ptr = rdx_argslot_limit;
2460 Label loop;
2461 __ BIND(loop);
2462 __ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots);
2463 move_typed_arg(_masm, elem_type, true,
2464 Address(rdx_fill_ptr, 0), Address(rdi_source, 0),
2465 rbx_temp, rsi_temp);
2466 __ addptr(rdi_source, type2aelembytes(elem_type));
2467 __ cmpptr(rdx_fill_ptr, rax_argslot);
2468 __ jcc(Assembler::above, loop);
2469 } else if (length_constant == 0) {
2470 // nothing to copy
2471 } else {
2472 int elem_offset = elem0_offset;
2473 int slot_offset = length_constant * Interpreter::stackElementSize;
2474 for (int index = 0; index < length_constant; index++) {
2475 slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward
2476 move_typed_arg(_masm, elem_type, true,
2477 Address(rax_argslot, slot_offset), Address(rdi_array, elem_offset),
2478 rbx_temp, rsi_temp);
2479 elem_offset += type2aelembytes(elem_type);
2480 }
2481 }
2482 __ BIND(L_args_done);
2483
2484 // Arguments are spread. Move to next method handle.
2485 UNPUSH_RSI;
2486 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
2487 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
2488
2489 __ bind(bad_array_klass);
2490 UNPUSH_RSI;
2491 assert(!vmarg.uses(rarg2_required), "must be different registers");
2492 __ load_heap_oop( rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type
2493 __ movptr( rarg1_actual, vmarg); // bad array
2494 __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining?
2495 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
2496
2497 __ bind(bad_array_length);
2498 UNPUSH_RSI;
2499 assert(!vmarg.uses(rarg2_required), "must be different registers");
2500 __ mov( rarg2_required, rcx_recv); // AMH requiring a certain length
2501 __ movptr( rarg1_actual, vmarg); // bad array
2502 __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining?
2503 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
2504 #undef UNPUSH_RSI
2505
2506 break;
2507 }
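To make the reverse-order copy concrete: the fill pointer starts at the slot limit and walks down while the array source walks up, so element [0] lands at the highest slot address on the downward-growing stack. A sketch for 1-slot elements (hypothetical helper, not VM code):

    #include <cstdint>
    static void spread_copy(intptr_t* argslot, intptr_t* argslot_limit,
                            const intptr_t* elems) {
      intptr_t* fill = argslot_limit;
      while (fill > argslot)
        *--fill = *elems++;    // element [0] goes at argslot_limit[-1]
    }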
2508
2509 default:
2510 // do not require all platforms to recognize all adapter types
2511 __ nop();
2512 return;
2513 }
2514 BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
2515 __ hlt();
2516
2517 address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
2518 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
2519
2520 init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
2521 }