comparison src/cpu/sparc/vm/methodHandles_sparc.cpp @ 3753:cba7b5c2d53f

7045514: SPARC assembly code for JSR 292 ricochet frames
Reviewed-by: kvn, jrose
author never
date Fri, 03 Jun 2011 22:31:43 -0700
parents fabcf26ee72f
children a9b8b43b115f
comparing 3752:f918d6096e23 with 3753:cba7b5c2d53f
67 me->set_end_address(__ pc()); 67 me->set_end_address(__ pc());
68 68
69 return me; 69 return me;
70 } 70 }
71 71
72 // stack walking support
73
74 frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
75 //RicochetFrame* f = RicochetFrame::from_frame(fr);
76 // Cf. is_interpreted_frame path of frame::sender
77 intptr_t* younger_sp = fr.sp();
78 intptr_t* sp = fr.sender_sp();
79 map->make_integer_regs_unsaved();
80 map->shift_window(sp, younger_sp);
81 bool this_frame_adjusted_stack = true; // I5_savedSP is live in this RF
82 return frame(sp, younger_sp, this_frame_adjusted_stack);
83 }
84
85 void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
86 ResourceMark rm;
87 RicochetFrame* f = RicochetFrame::from_frame(fr);
88
89 // pick up the argument type descriptor:
90 Thread* thread = Thread::current();
91 Handle cookie(thread, f->compute_saved_args_layout(true, true));
92
93 // process fixed part
94 blk->do_oop((oop*)f->saved_target_addr());
95 blk->do_oop((oop*)f->saved_args_layout_addr());
96
97 // process variable arguments:
98 if (cookie.is_null()) return; // no arguments to describe
99
100 // the cookie is actually the invokeExact method for my target
101 // its argument signature is what I'm interested in
102 assert(cookie->is_method(), "");
103 methodHandle invoker(thread, methodOop(cookie()));
104 assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
105 assert(!invoker->is_static(), "must have MH argument");
106 int slot_count = invoker->size_of_parameters();
107 assert(slot_count >= 1, "must include 'this'");
108 intptr_t* base = f->saved_args_base();
109 intptr_t* retval = NULL;
110 if (f->has_return_value_slot())
111 retval = f->return_value_slot_addr();
112 int slot_num = slot_count - 1;
113 intptr_t* loc = &base[slot_num];
114 //blk->do_oop((oop*) loc); // original target, which is irrelevant
115 int arg_num = 0;
116 for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
117 if (ss.at_return_type()) continue;
118 BasicType ptype = ss.type();
119 if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT
120 assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
121 slot_num -= type2size[ptype];
122 loc = &base[slot_num];
123 bool is_oop = (ptype == T_OBJECT && loc != retval);
124 if (is_oop) blk->do_oop((oop*)loc);
125 arg_num += 1;
126 }
127 assert(slot_num == 0, "must have processed all the arguments");
128 }
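// A standalone sketch of the slot walk above, with a plain array of
// parameter types standing in for the SignatureStream; the names here
// (BasicTypeSketch, slots_for, visit_oop_slots) are illustrative, not HotSpot's:
#include <cassert>
#include <vector>
enum BasicTypeSketch { BT_BOOLEAN, BT_INT, BT_LONG, BT_DOUBLE, BT_OBJECT };
static int slots_for(BasicTypeSketch t) {
  return (t == BT_LONG || t == BT_DOUBLE) ? 2 : 1;  // cf. type2size[]
}
// Collect the slot index of every reference argument. Slot 0 is the deepest
// slot; the receiver ('this') occupies the topmost slot and is skipped, just
// as the loop above never visits &base[slot_count - 1].
static void visit_oop_slots(const std::vector<BasicTypeSketch>& params,
                            std::vector<int>& oop_slots) {
  int slot_count = 1;                       // 'this' takes the topmost slot
  for (BasicTypeSketch t : params) slot_count += slots_for(t);
  int slot_num = slot_count - 1;            // start just below the receiver
  for (BasicTypeSketch t : params) {
    slot_num -= slots_for(t);               // walk down toward slot 0
    if (t == BT_OBJECT) oop_slots.push_back(slot_num);
  }
  assert(slot_num == 0 && "must have processed all the arguments");
}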
129
130 // Ricochet Frames
131 const Register MethodHandles::RicochetFrame::L1_continuation = L1;
132 const Register MethodHandles::RicochetFrame::L2_saved_target = L2;
133 const Register MethodHandles::RicochetFrame::L3_saved_args_layout = L3;
134 const Register MethodHandles::RicochetFrame::L4_saved_args_base = L4; // cf. Gargs = G4
135 const Register MethodHandles::RicochetFrame::L5_conversion = L5;
136 #ifdef ASSERT
137 const Register MethodHandles::RicochetFrame::L0_magic_number_1 = L0;
138 #endif //ASSERT
139
140 oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
141 if (read_cache) {
142 oop cookie = saved_args_layout();
143 if (cookie != NULL) return cookie;
144 }
145 oop target = saved_target();
146 oop mtype = java_lang_invoke_MethodHandle::type(target);
147 oop mtform = java_lang_invoke_MethodType::form(mtype);
148 oop cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
149 if (write_cache) {
150 (*saved_args_layout_addr()) = cookie;
151 }
152 return cookie;
153 }
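// compute_saved_args_layout is a read-through/write-through cache over the
// target->type->form->vmlayout chain. The same shape as a minimal sketch,
// with plain pointers standing in for oops (names illustrative):
struct LayoutCacheSketch {
  const void* cached = nullptr;             // stands in for saved_args_layout()
  const void* get(bool read_cache, bool write_cache, const void* (*compute)()) {
    if (read_cache && cached != nullptr) return cached;
    const void* cookie = compute();         // the MethodType chain walk
    if (write_cache) cached = cookie;
    return cookie;
  }
};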
154
155 void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
156 // output params:
157 int* bounce_offset,
158 int* exception_offset,
159 int* frame_size_in_words) {
160 (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
161
162 address start = __ pc();
163
164 #ifdef ASSERT
165 __ illtrap(0); __ illtrap(0); __ illtrap(0);
166 // here's a hint of something special:
167 __ set(MAGIC_NUMBER_1, G0);
168 __ set(MAGIC_NUMBER_2, G0);
169 #endif //ASSERT
170 __ illtrap(0); // not reached
171
172 // Return values are in registers.
173 // L1_continuation contains a cleanup continuation we must return
174 // to.
175
176 (*bounce_offset) = __ pc() - start;
177 BLOCK_COMMENT("ricochet_blob.bounce");
178
179 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
180 trace_method_handle(_masm, "ricochet_blob.bounce");
181
182 __ JMP(L1_continuation, 0);
183 __ delayed()->nop();
184 __ illtrap(0);
185
186 DEBUG_ONLY(__ set(MAGIC_NUMBER_2, G0));
187
188 (*exception_offset) = __ pc() - start;
189 BLOCK_COMMENT("ricochet_blob.exception");
190
191 // compare this to Interpreter::rethrow_exception_entry, which is parallel code
192 // for example, see TemplateInterpreterGenerator::generate_throw_exception
193 // Live registers in:
194 // Oexception (O0): exception
195 // Oissuing_pc (O1): return address/pc that threw exception (ignored, always equal to bounce addr)
196 __ verify_oop(Oexception);
197
198 // Take down the frame.
199
200 // Cf. InterpreterMacroAssembler::remove_activation.
201 leave_ricochet_frame(_masm, /*recv_reg=*/ noreg, I5_savedSP, I7);
202
203 // We are done with this activation frame; find out where to go next.
204 // The continuation point will be an exception handler, which expects
205 // the following registers set up:
206 //
207 // Oexception: exception
208 // Oissuing_pc: the local call that threw exception
209 // Other On: garbage
210 // In/Ln: the contents of the caller's register window
211 //
212 // We do the required restore at the last possible moment, because we
213 // need to preserve some state across a runtime call.
214 // (Remember that the caller activation is unknown--it might not be
215 // interpreted, so things like Lscratch are useless in the caller.)
216 __ mov(Oexception, Oexception ->after_save()); // get exception in I0 so it will be on O0 after restore
217 __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
218 __ call_VM_leaf(L7_thread_cache,
219 CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
220 G2_thread, Oissuing_pc->after_save());
221
222 // The caller's SP was adjusted upon method entry to accommodate
223 // the callee's non-argument locals. Undo that adjustment.
224 __ JMP(O0, 0); // return exception handler in caller
225 __ delayed()->restore(I5_savedSP, G0, SP);
226
227 // (same old exception object is already in Oexception; see above)
228 // Note that an "issuing PC" is actually the next PC after the call
229 }
230
231 void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
232 Register recv_reg,
233 Register argv_reg,
234 address return_handler) {
235 // does not include the __ save()
236 assert(argv_reg == Gargs, "");
237 Address G3_mh_vmtarget( recv_reg, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
238 Address G3_amh_conversion(recv_reg, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
239
240 // Create the RicochetFrame.
241 // Unlike on x86 we can store all required information in local
242 // registers.
243 BLOCK_COMMENT("push RicochetFrame {");
244 __ set(ExternalAddress(return_handler), L1_continuation);
245 __ load_heap_oop(G3_mh_vmtarget, L2_saved_target);
246 __ mov(G0, L3_saved_args_layout);
247 __ mov(Gargs, L4_saved_args_base);
248 __ lduw(G3_amh_conversion, L5_conversion); // 32-bit field
249 // I5, I6, I7 are already set up
250 DEBUG_ONLY(__ set((int32_t) MAGIC_NUMBER_1, L0_magic_number_1));
251 BLOCK_COMMENT("} RicochetFrame");
252 }
253
254 void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
255 Register recv_reg,
256 Register new_sp_reg,
257 Register sender_pc_reg) {
258 assert(new_sp_reg == I5_savedSP, "exact_sender_sp already in place");
259 assert(sender_pc_reg == I7, "in a fixed place");
260 // does not include the __ ret() & __ restore()
261 assert_different_registers(recv_reg, new_sp_reg, sender_pc_reg);
262 // Take down the frame.
263 // Cf. InterpreterMacroAssembler::remove_activation.
264 BLOCK_COMMENT("end_ricochet_frame {");
265 if (recv_reg->is_valid())
266 __ mov(L2_saved_target, recv_reg);
267 BLOCK_COMMENT("} end_ricochet_frame");
268 }
269
270 // Emit code to verify that FP is pointing at a valid ricochet frame.
271 #ifdef ASSERT
272 enum {
273 ARG_LIMIT = 255, SLOP = 35,
274 // use this parameter for checking for garbage stack movements:
275 UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
276 // the slop defends against false alarms due to fencepost errors
277 };
278
279 void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
280 // The stack should look like this:
281 // ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
282 // Check various invariants.
283
284 Register O7_temp = O7, O5_temp = O5;
285
286 Label L_ok_1, L_ok_2, L_ok_3, L_ok_4;
287 BLOCK_COMMENT("verify_clean {");
288 // Magic numbers must check out:
289 __ set((int32_t) MAGIC_NUMBER_1, O7_temp);
290 __ cmp(O7_temp, L0_magic_number_1);
291 __ br(Assembler::equal, false, Assembler::pt, L_ok_1);
292 __ delayed()->nop();
293 __ stop("damaged ricochet frame: MAGIC_NUMBER_1 not found");
294
295 __ BIND(L_ok_1);
296
297 // Arguments pointer must look reasonable:
298 #ifdef _LP64
299 Register FP_temp = O5_temp;
300 __ add(FP, STACK_BIAS, FP_temp);
301 #else
302 Register FP_temp = FP;
303 #endif
304 __ cmp(L4_saved_args_base, FP_temp);
305 __ br(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok_2);
306 __ delayed()->nop();
307 __ stop("damaged ricochet frame: L4 < FP");
308
309 __ BIND(L_ok_2);
310 __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp);
311 __ cmp(O7_temp, FP_temp);
312 __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3);
313 __ delayed()->nop();
314 __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP");
315
316 __ BIND(L_ok_3);
317 extract_conversion_dest_type(_masm, L5_conversion, O7_temp);
318 __ cmp(O7_temp, T_VOID);
319 __ br(Assembler::equal, false, Assembler::pt, L_ok_4);
320 __ delayed()->nop();
321 extract_conversion_vminfo(_masm, L5_conversion, O5_temp);
322 __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp);
323 assert(__ is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
324 __ cmp(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER);
325 __ brx(Assembler::equal, false, Assembler::pt, L_ok_4);
326 __ delayed()->nop();
327 __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found");
328 __ BIND(L_ok_4);
329 BLOCK_COMMENT("} verify_clean");
330 }
331 #endif //ASSERT
332
333 void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
334 if (VerifyMethodHandles)
335 verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), temp_reg, temp2_reg,
336 "AMH argument is a Class");
337 __ load_heap_oop(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
338 }
339
340 void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg) {
341 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
342 assert(CONV_VMINFO_MASK == right_n_bits(BitsPerByte), "else change type of following load");
343 __ ldub(conversion_field_addr.plus_disp(BytesPerInt - 1), reg);
344 }
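// The ldub above exploits SPARC's big-endian layout: with CONV_VMINFO_SHIFT
// == 0 and a one-byte mask, the vminfo field is exactly the least significant
// byte of the 32-bit conversion word, which sits at byte offset
// BytesPerInt - 1. A host-side sketch of that addressing rule (illustrative):
#include <cstdint>
static uint8_t ldub_vminfo_sketch(const uint32_t* conversion_field_addr) {
  const unsigned char* p =
      reinterpret_cast<const unsigned char*>(conversion_field_addr);
  // On a big-endian target p[3] == (*conversion_field_addr & 0xFF);
  // a little-endian host would need p[0] instead.
  return p[sizeof(uint32_t) - 1];
}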
345
346 void MethodHandles::extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
347 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
348 __ and3(conversion_field_reg, CONV_VMINFO_MASK, reg);
349 }
350
351 void MethodHandles::extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
352 __ srl(conversion_field_reg, CONV_DEST_TYPE_SHIFT, reg);
353 __ and3(reg, 0x0F, reg);
354 }
355
356 void MethodHandles::load_stack_move(MacroAssembler* _masm,
357 Address G3_amh_conversion,
358 Register stack_move_reg) {
359 BLOCK_COMMENT("load_stack_move {");
360 __ ldsw(G3_amh_conversion, stack_move_reg);
361 __ sra(stack_move_reg, CONV_STACK_MOVE_SHIFT, stack_move_reg);
362 if (VerifyMethodHandles) {
363 Label L_ok, L_bad;
364 int32_t stack_move_limit = 0x0800; // extra-large
365 __ cmp(stack_move_reg, stack_move_limit);
366 __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
367 __ delayed()->nop();
368 __ cmp(stack_move_reg, -stack_move_limit);
369 __ br(Assembler::greater, false, Assembler::pt, L_ok);
370 __ delayed()->nop();
371 __ BIND(L_bad);
372 __ stop("load_stack_move of garbage value");
373 __ BIND(L_ok);
374 }
375 BLOCK_COMMENT("} load_stack_move");
376 }
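// Taken together, the helpers above decode the packed 32-bit conversion word
// of an AdapterMethodHandle: vminfo in the low byte, a 4-bit destination
// type, and a signed stack-move count in the high bits. A host-side sketch;
// the shift amounts are passed in because the real CONV_* constants are
// defined elsewhere in HotSpot:
#include <cstdint>
struct ConversionSketch {
  uint8_t vminfo;      // conv & CONV_VMINFO_MASK (preshifted, shift == 0)
  uint8_t dest_type;   // (conv >> CONV_DEST_TYPE_SHIFT) & 0x0F
  int32_t stack_move;  // conv >> CONV_STACK_MOVE_SHIFT, sign preserved
};
static ConversionSketch decode_conversion_sketch(int32_t conv,
                                                 int dest_type_shift,
                                                 int stack_move_shift) {
  ConversionSketch c;
  c.vminfo     = static_cast<uint8_t>(conv & 0xFF);
  c.dest_type  = static_cast<uint8_t>((conv >> dest_type_shift) & 0x0F);
  c.stack_move = conv >> stack_move_shift;  // arithmetic shift, like sra
  return c;
}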
377
378 #ifdef ASSERT
379 void MethodHandles::RicochetFrame::verify() const {
380 assert(magic_number_1() == MAGIC_NUMBER_1, "");
381 if (!Universe::heap()->is_gc_active()) {
382 if (saved_args_layout() != NULL) {
383 assert(saved_args_layout()->is_method(), "must be valid oop");
384 }
385 if (saved_target() != NULL) {
386 assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value");
387 }
388 }
389 int conv_op = adapter_conversion_op(conversion());
390 assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS ||
391 conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS ||
392 conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF,
393 "must be a sane conversion");
394 if (has_return_value_slot()) {
395 assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, "");
396 }
397 }
398
399 void MethodHandles::verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
400 // Verify that argslot lies within (Gargs, FP].
401 Label L_ok, L_bad;
402 BLOCK_COMMENT("verify_argslot {");
403 __ add(FP, STACK_BIAS, temp_reg); // STACK_BIAS is zero on !_LP64
404 __ cmp(argslot_reg, temp_reg);
405 __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
406 __ delayed()->nop();
407 __ cmp(Gargs, argslot_reg);
408 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
409 __ delayed()->nop();
410 __ BIND(L_bad);
411 __ stop(error_message);
412 __ BIND(L_ok);
413 BLOCK_COMMENT("} verify_argslot");
414 }
415
416 void MethodHandles::verify_argslots(MacroAssembler* _masm,
417 RegisterOrConstant arg_slots,
418 Register arg_slot_base_reg,
419 Register temp_reg,
420 Register temp2_reg,
421 bool negate_argslots,
422 const char* error_message) {
423 // Verify that [argslot..argslot+size) lies within (Gargs, FP).
424 Label L_ok, L_bad;
425 BLOCK_COMMENT("verify_argslots {");
426 if (negate_argslots) {
427 if (arg_slots.is_constant()) {
428 arg_slots = -1 * arg_slots.as_constant();
429 } else {
430 __ neg(arg_slots.as_register(), temp_reg);
431 arg_slots = temp_reg;
432 }
433 }
434 __ add(arg_slot_base_reg, __ argument_offset(arg_slots, temp_reg), temp_reg);
435 __ add(FP, STACK_BIAS, temp2_reg); // STACK_BIAS is zero on !_LP64
436 __ cmp(temp_reg, temp2_reg);
437 __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
438 __ delayed()->nop();
439 // Gargs points to the first word so adjust by BytesPerWord
440 __ add(arg_slot_base_reg, BytesPerWord, temp_reg);
441 __ cmp(Gargs, temp_reg);
442 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
443 __ delayed()->nop();
444 __ BIND(L_bad);
445 __ stop(error_message);
446 __ BIND(L_ok);
447 BLOCK_COMMENT("} verify_argslots");
448 }
449
450 // Make sure that arg_slots has the same sign as the given direction.
451 // If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
452 void MethodHandles::verify_stack_move(MacroAssembler* _masm,
453 RegisterOrConstant arg_slots, int direction) {
454 enum { UNREASONABLE_STACK_MOVE = 256 * 4 }; // limit of 255 arguments
455 bool allow_zero = arg_slots.is_constant();
456 if (direction == 0) { direction = +1; allow_zero = true; }
457 assert(stack_move_unit() == -1, "else add extra checks here");
458 if (arg_slots.is_register()) {
459 Label L_ok, L_bad;
460 BLOCK_COMMENT("verify_stack_move {");
461 // __ btst(-stack_move_unit() - 1, arg_slots.as_register()); // no need
462 // __ br(Assembler::notZero, false, Assembler::pn, L_bad);
463 // __ delayed()->nop();
464 __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
465 if (direction > 0) {
466 __ br(allow_zero ? Assembler::less : Assembler::lessEqual, false, Assembler::pn, L_bad);
467 __ delayed()->nop();
468 __ cmp(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
469 __ br(Assembler::less, false, Assembler::pn, L_ok);
470 __ delayed()->nop();
471 } else {
472 __ br(allow_zero ? Assembler::greater : Assembler::greaterEqual, false, Assembler::pn, L_bad);
473 __ delayed()->nop();
474 __ cmp(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
475 __ br(Assembler::greater, false, Assembler::pn, L_ok);
476 __ delayed()->nop();
477 }
478 __ BIND(L_bad);
479 if (direction > 0)
480 __ stop("assert arg_slots > 0");
481 else
482 __ stop("assert arg_slots < 0");
483 __ BIND(L_ok);
484 BLOCK_COMMENT("} verify_stack_move");
485 } else {
486 intptr_t size = arg_slots.as_constant();
487 if (direction < 0) size = -size;
488 assert(size >= 0, "correct direction of constant move");
489 assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
490 }
491 }
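// A host-side restatement of the contract checked above: the slot count must
// agree in sign with 'direction' and stay inside the sanity bound; zero is
// tolerated only for assembly-time constants or direction == 0 (sketch only):
#include <cassert>
static void verify_stack_move_sketch(long arg_slots, int direction,
                                     bool is_constant) {
  const long UNREASONABLE_STACK_MOVE = 256 * 4;  // limit of 255 arguments
  bool allow_zero = is_constant;
  if (direction == 0) { direction = +1; allow_zero = true; }
  if (direction > 0) {
    assert(allow_zero ? arg_slots >= 0 : arg_slots > 0);
    assert(arg_slots < UNREASONABLE_STACK_MOVE);
  } else {
    assert(allow_zero ? arg_slots <= 0 : arg_slots < 0);
    assert(arg_slots > -UNREASONABLE_STACK_MOVE);
  }
}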
492
493 void MethodHandles::verify_klass(MacroAssembler* _masm,
494 Register obj_reg, KlassHandle klass,
495 Register temp_reg, Register temp2_reg,
496 const char* error_message) {
497 oop* klass_addr = klass.raw_value();
498 assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
499 klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
500 "must be one of the SystemDictionaryHandles");
501 Label L_ok, L_bad;
502 BLOCK_COMMENT("verify_klass {");
503 __ verify_oop(obj_reg);
504 __ br_null(obj_reg, false, Assembler::pn, L_bad);
505 __ delayed()->nop();
506 __ load_klass(obj_reg, temp_reg);
507 __ set(ExternalAddress(klass_addr), temp2_reg);
508 __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
509 __ cmp(temp_reg, temp2_reg);
510 __ brx(Assembler::equal, false, Assembler::pt, L_ok);
511 __ delayed()->nop();
512 intptr_t super_check_offset = klass->super_check_offset();
513 __ ld_ptr(Address(temp_reg, super_check_offset), temp_reg);
514 __ set(ExternalAddress(klass_addr), temp2_reg);
515 __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
516 __ cmp(temp_reg, temp2_reg);
517 __ brx(Assembler::equal, false, Assembler::pt, L_ok);
518 __ delayed()->nop();
519 __ BIND(L_bad);
520 __ stop(error_message);
521 __ BIND(L_ok);
522 BLOCK_COMMENT("} verify_klass");
523 }
524 #endif // ASSERT
72 525
73 // Code generation 526 // Code generation
74 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { 527 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
75 // I5_savedSP/O5_savedSP: sender SP (must preserve) 528 // I5_savedSP/O5_savedSP: sender SP (must preserve)
76 // G4 (Gargs): incoming argument list (must preserve) 529 // G4 (Gargs): incoming argument list (must preserve)
101 // here's where control starts out: 554 // here's where control starts out:
102 __ align(CodeEntryAlignment); 555 __ align(CodeEntryAlignment);
103 address entry_point = __ pc(); 556 address entry_point = __ pc();
104 557
105 // fetch the MethodType from the method handle 558 // fetch the MethodType from the method handle
559 // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
560 // This would simplify several touchy bits of code.
561 // See 6984712: JSR 292 method handle calls need a clean argument base pointer
106 { 562 {
107 Register tem = G5_method; 563 Register tem = G5_method;
108 for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { 564 for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
109 __ ld_ptr(Address(tem, *pchase), O0_mtype); 565 __ ld_ptr(Address(tem, *pchase), O0_mtype);
110 tem = O0_mtype; // in case there is another indirection 566 tem = O0_mtype; // in case there is another indirection
112 } 568 }
113 569
114 // given the MethodType, find out where the MH argument is buried 570 // given the MethodType, find out where the MH argument is buried
115 __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot); 571 __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot);
116 __ ldsw( Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot); 572 __ ldsw( Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
117 __ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase); 573 __ add(__ argument_address(O4_argslot, O4_argslot, 1), O4_argbase);
118 // Note: argument_address uses its input as a scratch register! 574 // Note: argument_address uses its input as a scratch register!
119 __ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle); 575 Address mh_receiver_slot_addr(O4_argbase, -Interpreter::stackElementSize);
576 __ ld_ptr(mh_receiver_slot_addr, G3_method_handle);
120 577
121 trace_method_handle(_masm, "invokeExact"); 578 trace_method_handle(_masm, "invokeExact");
122 579
123 __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type); 580 __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
581
582 // Nobody uses the MH receiver slot after this. Make sure.
583 DEBUG_ONLY(__ set((int32_t) 0x999999, O1_scratch); __ st_ptr(O1_scratch, mh_receiver_slot_addr));
584
124 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); 585 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
125 586
126 // for invokeGeneric (only), apply argument and result conversions on the fly 587 // for invokeGeneric (only), apply argument and result conversions on the fly
127 __ bind(invoke_generic_slow_path); 588 __ bind(invoke_generic_slow_path);
128 #ifdef ASSERT 589 #ifdef ASSERT
129 { Label L; 590 if (VerifyMethodHandles) {
591 Label L;
130 __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); 592 __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
131 __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric); 593 __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
132 __ brx(Assembler::equal, false, Assembler::pt, L); 594 __ brx(Assembler::equal, false, Assembler::pt, L);
133 __ delayed()->nop(); 595 __ delayed()->nop();
134 __ stop("bad methodOop::intrinsic_id"); 596 __ stop("bad methodOop::intrinsic_id");
135 __ bind(L); 597 __ bind(L);
136 } 598 }
137 #endif //ASSERT 599 #endif //ASSERT
138 600
139 // make room on the stack for another pointer: 601 // make room on the stack for another pointer:
140 insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, O4_argbase, O1_scratch, O2_scratch, O3_scratch); 602 insert_arg_slots(_masm, 2 * stack_move_unit(), O4_argbase, O1_scratch, O2_scratch, O3_scratch);
141 // load up an adapter from the calling type (Java weaves this) 603 // load up an adapter from the calling type (Java weaves this)
142 Register O2_form = O2_scratch; 604 Register O2_form = O2_scratch;
143 Register O3_adapter = O3_scratch; 605 Register O3_adapter = O3_scratch;
144 __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O2_form); 606 __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O2_form);
145 __ load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); 607 __ load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
155 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); 617 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
156 618
157 return entry_point; 619 return entry_point;
158 } 620 }
159 621
160 622 // Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
623 static RegisterOrConstant constant(int value) {
624 return RegisterOrConstant(value);
625 }
626
627 static void load_vmargslot(MacroAssembler* _masm, Address vmargslot_addr, Register result) {
628 __ ldsw(vmargslot_addr, result);
629 }
630
631 static RegisterOrConstant adjust_SP_and_Gargs_down_by_slots(MacroAssembler* _masm,
632 RegisterOrConstant arg_slots,
633 Register temp_reg, Register temp2_reg) {
634 // Keep the stack pointer 2*wordSize aligned.
635 const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
636 if (arg_slots.is_constant()) {
637 const int offset = arg_slots.as_constant() << LogBytesPerWord;
638 const int masked_offset = round_to(offset, 2 * BytesPerWord);
639 const int masked_offset2 = (offset + 1*BytesPerWord) & ~TwoWordAlignmentMask;
640 assert(masked_offset == masked_offset2, "must agree");
641 __ sub(Gargs, offset, Gargs);
642 __ sub(SP, masked_offset, SP );
643 return offset;
644 } else {
161 #ifdef ASSERT 645 #ifdef ASSERT
162 static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) { 646 {
163 // Verify that argslot lies within (Gargs, FP]. 647 Label L_ok;
164 Label L_ok, L_bad; 648 __ cmp(arg_slots.as_register(), 0);
165 BLOCK_COMMENT("{ verify_argslot"); 649 __ br(Assembler::greaterEqual, false, Assembler::pt, L_ok);
166 #ifdef _LP64 650 __ delayed()->nop();
167 __ add(FP, STACK_BIAS, temp_reg); 651 __ stop("negative arg_slots");
168 __ cmp(argslot_reg, temp_reg); 652 __ bind(L_ok);
169 #else 653 }
170 __ cmp(argslot_reg, FP);
171 #endif 654 #endif
172 __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad); 655 __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
173 __ delayed()->nop(); 656 __ add( temp_reg, 1*BytesPerWord, temp2_reg);
174 __ cmp(Gargs, argslot_reg); 657 __ andn(temp2_reg, TwoWordAlignmentMask, temp2_reg);
175 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok); 658 __ sub(Gargs, temp_reg, Gargs);
176 __ delayed()->nop(); 659 __ sub(SP, temp2_reg, SP );
177 __ bind(L_bad); 660 return temp_reg;
178 __ stop(error_message); 661 }
179 __ bind(L_ok); 662 }
180 BLOCK_COMMENT("} verify_argslot"); 663
181 } 664 static RegisterOrConstant adjust_SP_and_Gargs_up_by_slots(MacroAssembler* _masm,
182 #endif 665 RegisterOrConstant arg_slots,
183 666 Register temp_reg, Register temp2_reg) {
667 // Keep the stack pointer 2*wordSize aligned.
668 const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
669 if (arg_slots.is_constant()) {
670 const int offset = arg_slots.as_constant() << LogBytesPerWord;
671 const int masked_offset = offset & ~TwoWordAlignmentMask;
672 __ add(Gargs, offset, Gargs);
673 __ add(SP, masked_offset, SP );
674 return offset;
675 } else {
676 __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
677 __ andn(temp_reg, TwoWordAlignmentMask, temp2_reg);
678 __ add(Gargs, temp_reg, Gargs);
679 __ add(SP, temp2_reg, SP );
680 return temp_reg;
681 }
682 }
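// The masked_offset/masked_offset2 assert in adjust_SP_and_Gargs_down_by_slots
// above rests on an identity: for an offset that is already a multiple of
// wordSize, rounding up to 2*wordSize equals adding one word and masking.
// A quick host-side check (word size shown for _LP64; illustrative):
#include <cassert>
static void check_two_word_rounding() {
  const int w = 8;                 // BytesPerWord
  const int mask = 2 * w - 1;      // TwoWordAlignmentMask
  for (int slots = 0; slots < 64; slots++) {
    const int offset  = slots * w;
    const int rounded = ((offset + mask) / (2 * w)) * (2 * w);  // round_to
    const int masked  = (offset + w) & ~mask;
    assert(rounded == masked);
  }
}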
184 683
185 // Helper to insert argument slots into the stack. 684 // Helper to insert argument slots into the stack.
186 // arg_slots must be a multiple of stack_move_unit() and <= 0 685 // arg_slots must be a multiple of stack_move_unit() and < 0
686 // argslot_reg is decremented to point to the new (shifted) location of the argslot
687 // But, temp_reg ends up holding the original value of argslot_reg.
187 void MethodHandles::insert_arg_slots(MacroAssembler* _masm, 688 void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
188 RegisterOrConstant arg_slots, 689 RegisterOrConstant arg_slots,
189 int arg_mask,
190 Register argslot_reg, 690 Register argslot_reg,
191 Register temp_reg, Register temp2_reg, Register temp3_reg) { 691 Register temp_reg, Register temp2_reg, Register temp3_reg) {
192 assert(temp3_reg != noreg, "temp3 required"); 692 // allow constant zero
693 if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
694 return;
695
193 assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg, 696 assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
194 (!arg_slots.is_register() ? Gargs : arg_slots.as_register())); 697 (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
195 698
196 #ifdef ASSERT 699 BLOCK_COMMENT("insert_arg_slots {");
197 verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame"); 700 if (VerifyMethodHandles)
198 if (arg_slots.is_register()) { 701 verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
199 Label L_ok, L_bad; 702 if (VerifyMethodHandles)
200 __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD); 703 verify_stack_move(_masm, arg_slots, -1);
201 __ br(Assembler::greater, false, Assembler::pn, L_bad);
202 __ delayed()->nop();
203 __ btst(-stack_move_unit() - 1, arg_slots.as_register());
204 __ br(Assembler::zero, false, Assembler::pt, L_ok);
205 __ delayed()->nop();
206 __ bind(L_bad);
207 __ stop("assert arg_slots <= 0 and clear low bits");
208 __ bind(L_ok);
209 } else {
210 assert(arg_slots.as_constant() <= 0, "");
211 assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
212 }
213 #endif // ASSERT
214
215 #ifdef _LP64
216 if (arg_slots.is_register()) {
217 // Was arg_slots register loaded as signed int?
218 Label L_ok;
219 __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
220 __ sra(temp_reg, BitsPerInt, temp_reg);
221 __ cmp(arg_slots.as_register(), temp_reg);
222 __ br(Assembler::equal, false, Assembler::pt, L_ok);
223 __ delayed()->nop();
224 __ stop("arg_slots register not loaded as signed int");
225 __ bind(L_ok);
226 }
227 #endif
228 704
229 // Make space on the stack for the inserted argument(s). 705 // Make space on the stack for the inserted argument(s).
230 // Then pull down everything shallower than argslot_reg. 706 // Then pull down everything shallower than argslot_reg.
231 // The stacked return address gets pulled down with everything else. 707 // The stacked return address gets pulled down with everything else.
232 // That is, copy [sp, argslot) downward by -size words. In pseudo-code: 708 // That is, copy [sp, argslot) downward by -size words. In pseudo-code:
233 // sp -= size; 709 // sp -= size;
234 // for (temp = sp + size; temp < argslot; temp++) 710 // for (temp = sp + size; temp < argslot; temp++)
235 // temp[-size] = temp[0] 711 // temp[-size] = temp[0]
236 // argslot -= size; 712 // argslot -= size;
237 BLOCK_COMMENT("insert_arg_slots {"); 713
238 RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg); 714 // offset is temp3_reg in case of arg_slots being a register.
239 715 RegisterOrConstant offset = adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);
240 // Keep the stack pointer 2*wordSize aligned. 716 __ sub(Gargs, offset, temp_reg); // source pointer for copy
241 const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
242 RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
243 __ add(SP, masked_offset, SP);
244
245 __ mov(Gargs, temp_reg); // source pointer for copy
246 __ add(Gargs, offset, Gargs);
247 717
248 { 718 {
249 Label loop; 719 Label loop;
250 __ BIND(loop); 720 __ BIND(loop);
251 // pull one word down each time through the loop 721 // pull one word down each time through the loop
252 __ ld_ptr(Address(temp_reg, 0), temp2_reg); 722 __ ld_ptr( Address(temp_reg, 0 ), temp2_reg);
253 __ st_ptr(temp2_reg, Address(temp_reg, offset)); 723 __ st_ptr(temp2_reg, Address(temp_reg, offset) );
254 __ add(temp_reg, wordSize, temp_reg); 724 __ add(temp_reg, wordSize, temp_reg);
255 __ cmp(temp_reg, argslot_reg); 725 __ cmp(temp_reg, argslot_reg);
256 __ brx(Assembler::less, false, Assembler::pt, loop); 726 __ brx(Assembler::lessUnsigned, false, Assembler::pt, loop);
257 __ delayed()->nop(); // FILLME 727 __ delayed()->nop(); // FILLME
258 } 728 }
259 729
260 // Now move the argslot down, to point to the opened-up space. 730 // Now move the argslot down, to point to the opened-up space.
261 __ add(argslot_reg, offset, argslot_reg); 731 __ add(argslot_reg, offset, argslot_reg);
262 BLOCK_COMMENT("} insert_arg_slots"); 732 BLOCK_COMMENT("} insert_arg_slots");
263 } 733 }
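// The copy loop above, restated exactly as the pseudo-code suggests, on a
// stack modeled as a plain array (sketch only; a real stack grows downward
// with sp at the lowest address):
#include <cstdint>
static intptr_t* insert_slots_sketch(intptr_t* sp, intptr_t*& argslot,
                                     int size) {
  intptr_t* new_sp = sp - size;          // sp -= size
  for (intptr_t* temp = sp; temp < argslot; temp++)
    temp[-size] = temp[0];               // pull one word down per iteration
  argslot -= size;                       // point at the opened-up space
  return new_sp;
}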
264 734
265 735
266 // Helper to remove argument slots from the stack. 736 // Helper to remove argument slots from the stack.
267 // arg_slots must be a multiple of stack_move_unit() and >= 0 737 // arg_slots must be a multiple of stack_move_unit() and > 0
268 void MethodHandles::remove_arg_slots(MacroAssembler* _masm, 738 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
269 RegisterOrConstant arg_slots, 739 RegisterOrConstant arg_slots,
270 Register argslot_reg, 740 Register argslot_reg,
271 Register temp_reg, Register temp2_reg, Register temp3_reg) { 741 Register temp_reg, Register temp2_reg, Register temp3_reg) {
272 assert(temp3_reg != noreg, "temp3 required"); 742 // allow constant zero
743 if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
744 return;
273 assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg, 745 assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
274 (!arg_slots.is_register() ? Gargs : arg_slots.as_register())); 746 (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
275 747
276 RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
277
278 #ifdef ASSERT
279 // Verify that [argslot..argslot+size) lies within (Gargs, FP).
280 __ add(argslot_reg, offset, temp2_reg);
281 verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
282 if (arg_slots.is_register()) {
283 Label L_ok, L_bad;
284 __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
285 __ br(Assembler::less, false, Assembler::pn, L_bad);
286 __ delayed()->nop();
287 __ btst(-stack_move_unit() - 1, arg_slots.as_register());
288 __ br(Assembler::zero, false, Assembler::pt, L_ok);
289 __ delayed()->nop();
290 __ bind(L_bad);
291 __ stop("assert arg_slots >= 0 and clear low bits");
292 __ bind(L_ok);
293 } else {
294 assert(arg_slots.as_constant() >= 0, "");
295 assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
296 }
297 #endif // ASSERT
298
299 BLOCK_COMMENT("remove_arg_slots {"); 748 BLOCK_COMMENT("remove_arg_slots {");
749 if (VerifyMethodHandles)
750 verify_argslots(_masm, arg_slots, argslot_reg, temp_reg, temp2_reg, false,
751 "deleted argument(s) must fall within current frame");
752 if (VerifyMethodHandles)
753 verify_stack_move(_masm, arg_slots, +1);
754
300 // Pull up everything shallower than argslot. 755 // Pull up everything shallower than argslot.
301 // Then remove the excess space on the stack. 756 // Then remove the excess space on the stack.
302 // The stacked return address gets pulled up with everything else. 757 // The stacked return address gets pulled up with everything else.
303 // That is, copy [sp, argslot) upward by size words. In pseudo-code: 758 // That is, copy [sp, argslot) upward by size words. In pseudo-code:
304 // for (temp = argslot-1; temp >= sp; --temp) 759 // for (temp = argslot-1; temp >= sp; --temp)
305 // temp[size] = temp[0] 760 // temp[size] = temp[0]
306 // argslot += size; 761 // argslot += size;
307 // sp += size; 762 // sp += size;
763
764 RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
308 __ sub(argslot_reg, wordSize, temp_reg); // source pointer for copy 765 __ sub(argslot_reg, wordSize, temp_reg); // source pointer for copy
766
309 { 767 {
310 Label loop; 768 Label L_loop;
311 __ BIND(loop); 769 __ BIND(L_loop);
312 // pull one word up each time through the loop 770 // pull one word up each time through the loop
313 __ ld_ptr(Address(temp_reg, 0), temp2_reg); 771 __ ld_ptr( Address(temp_reg, 0 ), temp2_reg);
314 __ st_ptr(temp2_reg, Address(temp_reg, offset)); 772 __ st_ptr(temp2_reg, Address(temp_reg, offset) );
315 __ sub(temp_reg, wordSize, temp_reg); 773 __ sub(temp_reg, wordSize, temp_reg);
316 __ cmp(temp_reg, Gargs); 774 __ cmp(temp_reg, Gargs);
317 __ brx(Assembler::greaterEqual, false, Assembler::pt, loop); 775 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_loop);
318 __ delayed()->nop(); // FILLME 776 __ delayed()->nop(); // FILLME
319 } 777 }
320 778
321 // Now move the argslot up, to point to the just-copied block.
322 __ add(Gargs, offset, Gargs);
323 // And adjust the argslot address to point at the deletion point. 779 // And adjust the argslot address to point at the deletion point.
324 __ add(argslot_reg, offset, argslot_reg); 780 __ add(argslot_reg, offset, argslot_reg);
325 781
326 // Keep the stack pointer 2*wordSize aligned. 782 // We don't need the offset at this point anymore, just adjust SP and Gargs.
327 const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1); 783 (void) adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);
328 RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg); 784
329 __ add(SP, masked_offset, SP);
330 BLOCK_COMMENT("} remove_arg_slots"); 785 BLOCK_COMMENT("} remove_arg_slots");
331 } 786 }
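// And the mirror image for deletion, again following the pseudo-code above:
#include <cstdint>
static intptr_t* remove_slots_sketch(intptr_t* sp, intptr_t*& argslot,
                                     int size) {
  for (intptr_t* temp = argslot - 1; temp >= sp; --temp)
    temp[size] = temp[0];                // pull one word up per iteration
  argslot += size;                       // point at the deletion point
  return sp + size;                      // sp += size
}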
332 787
788 // Helper to copy argument slots to the top of the stack.
789 // The sequence starts with argslot_reg and is counted by slot_count
790 // slot_count must be a multiple of stack_move_unit() and >= 0
791 // This function blows the temps but does not change argslot_reg.
792 void MethodHandles::push_arg_slots(MacroAssembler* _masm,
793 Register argslot_reg,
794 RegisterOrConstant slot_count,
795 Register temp_reg, Register temp2_reg) {
796 // allow constant zero
797 if (slot_count.is_constant() && slot_count.as_constant() == 0)
798 return;
799 assert_different_registers(argslot_reg, temp_reg, temp2_reg,
800 (!slot_count.is_register() ? Gargs : slot_count.as_register()),
801 SP);
802 assert(Interpreter::stackElementSize == wordSize, "else change this code");
803
804 BLOCK_COMMENT("push_arg_slots {");
805 if (VerifyMethodHandles)
806 verify_stack_move(_masm, slot_count, 0);
807
808 RegisterOrConstant offset = adjust_SP_and_Gargs_down_by_slots(_masm, slot_count, temp2_reg, temp_reg);
809
810 if (slot_count.is_constant()) {
811 for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
812 __ ld_ptr( Address(argslot_reg, i * wordSize), temp_reg);
813 __ st_ptr(temp_reg, Address(Gargs, i * wordSize));
814 }
815 } else {
816 Label L_plural, L_loop, L_break;
817 // Emit code to dynamically check for the common cases, zero and one slot.
818 __ cmp(slot_count.as_register(), (int32_t) 1);
819 __ br(Assembler::greater, false, Assembler::pn, L_plural);
820 __ delayed()->nop();
821 __ br(Assembler::less, false, Assembler::pn, L_break);
822 __ delayed()->nop();
823 __ ld_ptr( Address(argslot_reg, 0), temp_reg);
824 __ st_ptr(temp_reg, Address(Gargs, 0));
825 __ ba(false, L_break);
826 __ delayed()->nop(); // FILLME
827 __ BIND(L_plural);
828
829 // Loop for 2 or more:
830 // top = &argslot[slot_count]
831 // while (top > argslot) *(--Gargs) = *(--top)
832 Register top_reg = temp_reg;
833 __ add(argslot_reg, offset, top_reg);
834 __ add(Gargs, offset, Gargs ); // move back up again so we can go down
835 __ BIND(L_loop);
836 __ sub(top_reg, wordSize, top_reg);
837 __ sub(Gargs, wordSize, Gargs );
838 __ ld_ptr( Address(top_reg, 0), temp2_reg);
839 __ st_ptr(temp2_reg, Address(Gargs, 0));
840 __ cmp(top_reg, argslot_reg);
841 __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
842 __ delayed()->nop(); // FILLME
843 __ BIND(L_break);
844 }
845 BLOCK_COMMENT("} push_arg_slots");
846 }
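// The dynamic path above peels off the common zero- and one-slot cases before
// falling into the general top-down copy. The same shape as a sketch, with
// the argument area modeled as an array that grows toward lower indices:
#include <cstdint>
static intptr_t* push_slots_sketch(intptr_t* gargs, const intptr_t* argslot,
                                   int slot_count) {
  gargs -= slot_count;                   // adjust_SP_and_Gargs_down_by_slots
  if (slot_count == 1) {
    gargs[0] = argslot[0];               // single slot: no loop needed
  } else if (slot_count > 1) {
    for (int i = slot_count - 1; i >= 0; i--)  // top down, like L_loop above
      gargs[i] = argslot[i];
  }
  return gargs;                          // slot_count == 0 falls through
}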
847
848 // in-place movement; no change to Gargs
849 // blows temp_reg, temp2_reg
850 void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
851 Register bottom_reg, // invariant
852 Address top_addr, // can use temp_reg
853 RegisterOrConstant positive_distance_in_slots, // destroyed if register
854 Register temp_reg, Register temp2_reg) {
855 assert_different_registers(bottom_reg,
856 temp_reg, temp2_reg,
857 positive_distance_in_slots.register_or_noreg());
858 BLOCK_COMMENT("move_arg_slots_up {");
859 Label L_loop, L_break;
860 Register top_reg = temp_reg;
861 if (!top_addr.is_same_address(Address(top_reg, 0))) {
862 __ add(top_addr, top_reg);
863 }
864 // Detect empty (or broken) loop:
865 #ifdef ASSERT
866 if (VerifyMethodHandles) {
867 // Verify that &bottom < &top (non-empty interval)
868 Label L_ok, L_bad;
869 if (positive_distance_in_slots.is_register()) {
870 __ cmp(positive_distance_in_slots.as_register(), (int32_t) 0);
871 __ br(Assembler::lessEqual, false, Assembler::pn, L_bad);
872 __ delayed()->nop();
873 }
874 __ cmp(bottom_reg, top_reg);
875 __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_ok);
876 __ delayed()->nop();
877 __ BIND(L_bad);
878 __ stop("valid bounds (copy up)");
879 __ BIND(L_ok);
880 }
881 #endif
882 __ cmp(bottom_reg, top_reg);
883 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pn, L_break);
884 __ delayed()->nop();
885 // work top down to bottom, copying contiguous data upwards
886 // In pseudo-code:
887 // while (--top >= bottom) *(top + distance) = *(top + 0);
888 RegisterOrConstant offset = __ argument_offset(positive_distance_in_slots, positive_distance_in_slots.register_or_noreg());
889 __ BIND(L_loop);
890 __ sub(top_reg, wordSize, top_reg);
891 __ ld_ptr( Address(top_reg, 0 ), temp2_reg);
892 __ st_ptr(temp2_reg, Address(top_reg, offset) );
893 __ cmp(top_reg, bottom_reg);
894 __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
895 __ delayed()->nop(); // FILLME
896 assert(Interpreter::stackElementSize == wordSize, "else change loop");
897 __ BIND(L_break);
898 BLOCK_COMMENT("} move_arg_slots_up");
899 }
900
901 // in-place movement; no change to Gargs
902 // blows temp_reg, temp2_reg
903 void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
904 Address bottom_addr, // can use temp_reg
905 Register top_reg, // invariant
906 RegisterOrConstant negative_distance_in_slots, // destroyed if register
907 Register temp_reg, Register temp2_reg) {
908 assert_different_registers(top_reg,
909 negative_distance_in_slots.register_or_noreg(),
910 temp_reg, temp2_reg);
911 BLOCK_COMMENT("move_arg_slots_down {");
912 Label L_loop, L_break;
913 Register bottom_reg = temp_reg;
914 if (!bottom_addr.is_same_address(Address(bottom_reg, 0))) {
915 __ add(bottom_addr, bottom_reg);
916 }
917 // Detect empty (or broken) loop:
918 #ifdef ASSERT
919 assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, "");
920 if (VerifyMethodHandles) {
921 // Verify that &bottom < &top (non-empty interval)
922 Label L_ok, L_bad;
923 if (negative_distance_in_slots.is_register()) {
924 __ cmp(negative_distance_in_slots.as_register(), (int32_t) 0);
925 __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
926 __ delayed()->nop();
927 }
928 __ cmp(bottom_reg, top_reg);
929 __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_ok);
930 __ delayed()->nop();
931 __ BIND(L_bad);
932 __ stop("valid bounds (copy down)");
933 __ BIND(L_ok);
934 }
935 #endif
936 __ cmp(bottom_reg, top_reg);
937 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pn, L_break);
938 __ delayed()->nop();
939 // work bottom up to top, copying contiguous data downwards
940 // In pseudo-code:
941 // while (bottom < top) *(bottom - distance) = *(bottom + 0), bottom++;
942 RegisterOrConstant offset = __ argument_offset(negative_distance_in_slots, negative_distance_in_slots.register_or_noreg());
943 __ BIND(L_loop);
944 __ ld_ptr( Address(bottom_reg, 0 ), temp2_reg);
945 __ st_ptr(temp2_reg, Address(bottom_reg, offset) );
946 __ add(bottom_reg, wordSize, bottom_reg);
947 __ cmp(bottom_reg, top_reg);
948 __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_loop);
949 __ delayed()->nop(); // FILLME
950 assert(Interpreter::stackElementSize == wordSize, "else change loop");
951 __ BIND(L_break);
952 BLOCK_COMMENT("} move_arg_slots_down");
953 }
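// Both movers are overlapping in-place copies, so the iteration order is what
// makes them correct (the same rule memmove follows): data moving to higher
// addresses must be walked top-down, data moving to lower addresses bottom-up.
// A sketch of both directions:
#include <cstdint>
static void move_slots_up_sketch(intptr_t* bottom, intptr_t* top,
                                 int positive_distance) {
  for (intptr_t* p = top; --p >= bottom; )   // top down: sources read first
    p[positive_distance] = p[0];
}
static void move_slots_down_sketch(intptr_t* bottom, intptr_t* top,
                                   int negative_distance) {
  for (intptr_t* p = bottom; p < top; p++)   // bottom up: sources read first
    p[negative_distance] = p[0];
}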
954
955 // Copy from a field or array element to a stacked argument slot.
956 // is_element (ignored) says whether caller is loading an array element instead of an instance field.
957 void MethodHandles::move_typed_arg(MacroAssembler* _masm,
958 BasicType type, bool is_element,
959 Address value_src, Address slot_dest,
960 Register temp_reg) {
961 assert(!slot_dest.uses(temp_reg), "must be different register");
962 BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)");
963 if (type == T_OBJECT || type == T_ARRAY) {
964 __ load_heap_oop(value_src, temp_reg);
965 __ verify_oop(temp_reg);
966 __ st_ptr(temp_reg, slot_dest);
967 } else if (type != T_VOID) {
968 int arg_size = type2aelembytes(type);
969 bool arg_is_signed = is_signed_subword_type(type);
970 int slot_size = is_subword_type(type) ? type2aelembytes(T_INT) : arg_size; // store int sub-words as int
971 __ load_sized_value( value_src, temp_reg, arg_size, arg_is_signed);
972 __ store_sized_value(temp_reg, slot_dest, slot_size );
973 }
974 BLOCK_COMMENT("} move_typed_arg");
975 }
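// The sub-word path above widens byte/short/char values to a full int slot,
// honoring signedness on the load and storing a 4-byte int. A host sketch of
// that widening rule (illustrative; cf. load_sized_value/store_sized_value):
#include <cstdint>
static int32_t widen_subword_sketch(const void* src, int size_in_bytes,
                                    bool is_signed) {
  switch (size_in_bytes) {
  case 1: return is_signed ? *static_cast<const int8_t*>(src)
                           : *static_cast<const uint8_t*>(src);
  case 2: return is_signed ? *static_cast<const int16_t*>(src)
                           : *static_cast<const uint16_t*>(src);
  default: return *static_cast<const int32_t*>(src);  // T_INT itself
  }
}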
976
977 // Cf. TemplateInterpreterGenerator::generate_return_entry_for and
978 // InterpreterMacroAssembler::save_return_value
979 void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
980 Address return_slot) {
981 BLOCK_COMMENT("move_return_value {");
982 // Look at the type and pull the value out of the corresponding register.
983 if (type == T_VOID) {
984 // nothing to do
985 } else if (type == T_OBJECT) {
986 __ verify_oop(O0);
987 __ st_ptr(O0, return_slot);
988 } else if (type == T_INT || is_subword_type(type)) {
989 int type_size = type2aelembytes(T_INT);
990 __ store_sized_value(O0, return_slot, type_size);
991 } else if (type == T_LONG) {
992 // store the value by parts
993 // Note: We assume longs are contiguous (if misaligned) on the interpreter stack.
994 #if !defined(_LP64) && defined(COMPILER2)
995 __ stx(G1, return_slot);
996 #else
997 #ifdef _LP64
998 __ stx(O0, return_slot);
999 #else
1000 if (return_slot.has_disp()) {
1001 // The displacement is a constant
1002 __ st(O0, return_slot);
1003 __ st(O1, return_slot.plus_disp(Interpreter::stackElementSize));
1004 } else {
1005 __ std(O0, return_slot);
1006 }
1007 #endif
1008 #endif
1009 } else if (type == T_FLOAT) {
1010 __ stf(FloatRegisterImpl::S, Ftos_f, return_slot);
1011 } else if (type == T_DOUBLE) {
1012 __ stf(FloatRegisterImpl::D, Ftos_f, return_slot);
1013 } else {
1014 ShouldNotReachHere();
1015 }
1016 BLOCK_COMMENT("} move_return_value");
1017 }
333 1018
334 #ifndef PRODUCT 1019 #ifndef PRODUCT
335 extern "C" void print_method_handle(oop mh); 1020 extern "C" void print_method_handle(oop mh);
336 void trace_method_handle_stub(const char* adaptername, 1021 void trace_method_handle_stub(const char* adaptername,
337 oopDesc* mh, 1022 oopDesc* mh,
338 intptr_t* saved_sp) { 1023 intptr_t* saved_sp) {
1024 bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have mh
339 tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp); 1025 tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp);
340 print_method_handle(mh); 1026 if (has_mh)
1027 print_method_handle(mh);
341 } 1028 }
342 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { 1029 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
343 if (!TraceMethodHandles) return; 1030 if (!TraceMethodHandles) return;
344 BLOCK_COMMENT("trace_method_handle {"); 1031 BLOCK_COMMENT("trace_method_handle {");
345 // save: Gargs, O5_savedSP 1032 // save: Gargs, O5_savedSP
365 return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY) 1052 return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
366 |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW) 1053 |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
367 |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST) 1054 |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
368 |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM) 1055 |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
369 |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM) 1056 |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
1057 // OP_PRIM_TO_REF is below...
370 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS) 1058 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
371 |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS) 1059 |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
372 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS) 1060 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
373 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS) 1061 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
374 //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG! 1062 // OP_COLLECT_ARGS is below...
1063 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
1064 |(!UseRicochetFrames ? 0 :
1065 java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
1066 ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
1067 |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
1068 |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS)
1069 )
1070 )
375 ); 1071 );
376 // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
377 } 1072 }
378 1073
379 //------------------------------------------------------------------------------ 1074 //------------------------------------------------------------------------------
380 // MethodHandles::generate_method_handle_stub 1075 // MethodHandles::generate_method_handle_stub
381 // 1076 //
382 // Generate an "entry" field for a method handle. 1077 // Generate an "entry" field for a method handle.
383 // This determines how the method handle will respond to calls. 1078 // This determines how the method handle will respond to calls.
384 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) { 1079 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
1080 MethodHandles::EntryKind ek_orig = ek_original_kind(ek);
1081
385 // Here is the register state during an interpreted call, 1082 // Here is the register state during an interpreted call,
386 // as set up by generate_method_handle_interpreter_entry(): 1083 // as set up by generate_method_handle_interpreter_entry():
387 // - G5: garbage temp (was MethodHandle.invoke methodOop, unused) 1084 // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
388 // - G3: receiver method handle 1085 // - G3: receiver method handle
389 // - O5_savedSP: sender SP (must preserve) 1086 // - O5_savedSP: sender SP (must preserve)
390 1087
391 const Register O0_argslot = O0; 1088 const Register O0_scratch = O0;
392 const Register O1_scratch = O1; 1089 const Register O1_scratch = O1;
393 const Register O2_scratch = O2; 1090 const Register O2_scratch = O2;
394 const Register O3_scratch = O3; 1091 const Register O3_scratch = O3;
395 const Register G5_index = G5; 1092 const Register O4_scratch = O4;
396 1093 const Register G5_scratch = G5;
397 // Argument registers for _raise_exception. 1094
1095 // Often used names:
1096 const Register O0_argslot = O0;
1097
1098 // Argument registers for _raise_exception:
398 const Register O0_code = O0; 1099 const Register O0_code = O0;
399 const Register O1_actual = O1; 1100 const Register O1_actual = O1;
400 const Register O2_required = O2; 1101 const Register O2_required = O2;
401 1102
402 guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); 1103 guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
424 } 1125 }
425 1126
426 address interp_entry = __ pc(); 1127 address interp_entry = __ pc();
427 1128
428 trace_method_handle(_masm, entry_name(ek)); 1129 trace_method_handle(_masm, entry_name(ek));
1130
1131 BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));
429 1132
430 switch ((int) ek) { 1133 switch ((int) ek) {
431 case _raise_exception: 1134 case _raise_exception:
432 { 1135 {
433 // Not a real MH entry, but rather shared code for raising an 1136 // Not a real MH entry, but rather shared code for raising an
470 // Same as TemplateTable::invokestatic or invokespecial, 1173 // Same as TemplateTable::invokestatic or invokespecial,
471 // minus the CP setup and profiling: 1174 // minus the CP setup and profiling:
472 if (ek == _invokespecial_mh) { 1175 if (ek == _invokespecial_mh) {
473 // Must load & check the first argument before entering the target method. 1176 // Must load & check the first argument before entering the target method.
474 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); 1177 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
475 __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle); 1178 __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
476 __ null_check(G3_method_handle); 1179 __ null_check(G3_method_handle);
477 __ verify_oop(G3_method_handle); 1180 __ verify_oop(G3_method_handle);
478 } 1181 }
479 __ jump_indirect_to(G5_method_fie, O1_scratch); 1182 __ jump_indirect_to(G5_method_fie, O1_scratch);
480 __ delayed()->nop(); 1183 __ delayed()->nop();
486 // Same as TemplateTable::invokevirtual, 1189 // Same as TemplateTable::invokevirtual,
487 // minus the CP setup and profiling: 1190 // minus the CP setup and profiling:
488 1191
489 // Pick out the vtable index and receiver offset from the MH, 1192 // Pick out the vtable index and receiver offset from the MH,
490 // and then we can discard it: 1193 // and then we can discard it:
1194 Register O2_index = O2_scratch;
491 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); 1195 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
492 __ ldsw(G3_dmh_vmindex, G5_index); 1196 __ ldsw(G3_dmh_vmindex, O2_index);
493 // Note: The verifier allows us to ignore G3_mh_vmtarget. 1197 // Note: The verifier allows us to ignore G3_mh_vmtarget.
494 __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle); 1198 __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
495 __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes()); 1199 __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
496 1200
497 // Get receiver klass: 1201 // Get receiver klass:
498 Register O0_klass = O0_argslot; 1202 Register O0_klass = O0_argslot;
499 __ load_klass(G3_method_handle, O0_klass); 1203 __ load_klass(G3_method_handle, O0_klass);
501 1205
502 // Get target methodOop & entry point: 1206 // Get target methodOop & entry point:
503 const int base = instanceKlass::vtable_start_offset() * wordSize; 1207 const int base = instanceKlass::vtable_start_offset() * wordSize;
504 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 1208 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
505 1209
506 __ sll_ptr(G5_index, LogBytesPerWord, G5_index); 1210 __ sll_ptr(O2_index, LogBytesPerWord, O2_index);
507 __ add(O0_klass, G5_index, O0_klass); 1211 __ add(O0_klass, O2_index, O0_klass);
508 Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes()); 1212 Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
509 __ ld_ptr(vtable_entry_addr, G5_method); 1213 __ ld_ptr(vtable_entry_addr, G5_method);
510 1214
511 __ verify_oop(G5_method); 1215 __ verify_oop(G5_method);
512 __ jump_indirect_to(G5_method_fie, O1_scratch); 1216 __ jump_indirect_to(G5_method_fie, O1_scratch);
518 { 1222 {
519 // Same as TemplateTable::invokeinterface, 1223 // Same as TemplateTable::invokeinterface,
520 // minus the CP setup and profiling: 1224 // minus the CP setup and profiling:
521 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); 1225 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
522 Register O1_intf = O1_scratch; 1226 Register O1_intf = O1_scratch;
1227 Register G5_index = G5_scratch;
523 __ load_heap_oop(G3_mh_vmtarget, O1_intf); 1228 __ load_heap_oop(G3_mh_vmtarget, O1_intf);
524 __ ldsw(G3_dmh_vmindex, G5_index); 1229 __ ldsw(G3_dmh_vmindex, G5_index);
525 __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle); 1230 __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
526 __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes()); 1231 __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
527 1232
528 // Get receiver klass: 1233 // Get receiver klass:
529 Register O0_klass = O0_argslot; 1234 Register O0_klass = O0_argslot;
530 __ load_klass(G3_method_handle, O0_klass); 1235 __ load_klass(G3_method_handle, O0_klass);
561 case _bound_ref_direct_mh: 1266 case _bound_ref_direct_mh:
562 case _bound_int_direct_mh: 1267 case _bound_int_direct_mh:
563 case _bound_long_direct_mh: 1268 case _bound_long_direct_mh:
564 { 1269 {
565 const bool direct_to_method = (ek >= _bound_ref_direct_mh); 1270 const bool direct_to_method = (ek >= _bound_ref_direct_mh);
566 BasicType arg_type = T_ILLEGAL; 1271 BasicType arg_type = ek_bound_mh_arg_type(ek);
567 int arg_mask = _INSERT_NO_MASK; 1272 int arg_slots = type2size[arg_type];
568 int arg_slots = -1;
569 get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
570 1273
571 // Make room for the new argument: 1274 // Make room for the new argument:
572 __ ldsw(G3_bmh_vmargslot, O0_argslot); 1275 load_vmargslot(_masm, G3_bmh_vmargslot, O0_argslot);
573 __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot); 1276 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
574 1277
575 insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index); 1278 insert_arg_slots(_masm, arg_slots * stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
576 1279
577 // Store bound argument into the new stack slot: 1280 // Store bound argument into the new stack slot:
578 __ load_heap_oop(G3_bmh_argument, O1_scratch); 1281 __ load_heap_oop(G3_bmh_argument, O1_scratch);
579 if (arg_type == T_OBJECT) { 1282 if (arg_type == T_OBJECT) {
580 __ st_ptr(O1_scratch, Address(O0_argslot, 0)); 1283 __ st_ptr(O1_scratch, Address(O0_argslot, 0));
581 } else { 1284 } else {
582 Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type)); 1285 Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
583 const int arg_size = type2aelembytes(arg_type); 1286 move_typed_arg(_masm, arg_type, false,
584 __ load_sized_value(prim_value_addr, O2_scratch, arg_size, is_signed_subword_type(arg_type)); 1287 prim_value_addr,
585 __ store_sized_value(O2_scratch, Address(O0_argslot, 0), arg_size); // long store uses O2/O3 on !_LP64 1288 Address(O0_argslot, 0),
1289 O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
586 } 1290 }
587 1291
588 if (direct_to_method) { 1292 if (direct_to_method) {
589 __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop 1293 __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop
590 __ verify_oop(G5_method); 1294 __ verify_oop(G5_method);
600 1304
601 case _adapter_retype_only: 1305 case _adapter_retype_only:
602 case _adapter_retype_raw: 1306 case _adapter_retype_raw:
603 // Immediately jump to the next MH layer: 1307 // Immediately jump to the next MH layer:
604 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); 1308 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
1309 __ verify_oop(G3_method_handle);
605 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); 1310 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
606 // This is OK when all parameter types widen. 1311 // This is OK when all parameter types widen.
607 // It is also OK when a return type narrows. 1312 // It is also OK when a return type narrows.
608 break; 1313 break;
609 1314
610 case _adapter_check_cast: 1315 case _adapter_check_cast:
611 { 1316 {
612 // Temps:
613 Register G5_klass = G5_index; // Interesting AMH data.
614
615 // Check a reference argument before jumping to the next layer of MH: 1317 // Check a reference argument before jumping to the next layer of MH:
616 __ ldsw(G3_amh_vmargslot, O0_argslot); 1318 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
617 Address vmarg = __ argument_address(O0_argslot); 1319 Address vmarg = __ argument_address(O0_argslot, O0_argslot);
618 1320
619 // What class are we casting to? 1321 // What class are we casting to?
620 __ load_heap_oop(G3_amh_argument, G5_klass); // This is a Class object! 1322 Register O1_klass = O1_scratch; // Interesting AMH data.
621 __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass); 1323 __ load_heap_oop(G3_amh_argument, O1_klass); // This is a Class object!
622 1324 load_klass_from_Class(_masm, O1_klass, O2_scratch, O3_scratch);
623 Label done; 1325
624 __ ld_ptr(vmarg, O1_scratch); 1326 Label L_done;
625 __ tst(O1_scratch); 1327 __ ld_ptr(vmarg, O2_scratch);
626 __ brx(Assembler::zero, false, Assembler::pn, done); // No cast if null. 1328 __ tst(O2_scratch);
1329 __ brx(Assembler::zero, false, Assembler::pn, L_done); // No cast if null.
627 __ delayed()->nop(); 1330 __ delayed()->nop();
628 __ load_klass(O1_scratch, O1_scratch); 1331 __ load_klass(O2_scratch, O2_scratch);
629 1332
630 // Live at this point: 1333 // Live at this point:
631 // - G5_klass : klass required by the target method
632 // - O0_argslot : argslot index in vmarg; may be required in the failing path 1334 // - O0_argslot : argslot index in vmarg; may be required in the failing path
633 // - O1_scratch : argument klass to test 1335 // - O1_klass : klass required by the target method
1336 // - O2_scratch : argument klass to test
634 // - G3_method_handle: adapter method handle 1337 // - G3_method_handle: adapter method handle
635 __ check_klass_subtype(O1_scratch, G5_klass, O2_scratch, O3_scratch, done); 1338 __ check_klass_subtype(O2_scratch, O1_klass, O3_scratch, O4_scratch, L_done);
636 1339
637 // If we get here, the type check failed! 1340 // If we get here, the type check failed!
638 __ load_heap_oop(G3_amh_argument, O2_required); // required class 1341 __ load_heap_oop(G3_amh_argument, O2_required); // required class
639 __ ld_ptr( vmarg, O1_actual); // bad object 1342 __ ld_ptr( vmarg, O1_actual); // bad object
640 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); 1343 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
641 __ delayed()->mov(Bytecodes::_checkcast, O0_code); // who is complaining? 1344 __ delayed()->mov(Bytecodes::_checkcast, O0_code); // who is complaining?
642 1345
643 __ bind(done); 1346 __ BIND(L_done);
644 // Get the new MH: 1347 // Get the new MH:
645 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); 1348 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
646 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); 1349 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
647 } 1350 }
648 break; 1351 break;
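// --------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset] Behavioral model of
// the _adapter_check_cast path above, with a toy klass hierarchy. The
// real check_klass_subtype uses cached super lookups; this linear walk
// only captures the semantics, and every name here is illustrative.
struct SketchKlass { const SketchKlass* super; };
static bool sketch_is_subtype(const SketchKlass* sub, const SketchKlass* sup) {
  for (const SketchKlass* k = sub; k != 0; k = k->super)
    if (k == sup) return true;   // subtype found somewhere up the chain
  return false;
}
// As in the generated code: a null argument branches straight to L_done
// (no cast needed), and a failing non-null argument reaches the
// _raise_exception stub with Bytecodes::_checkcast as the complaint.
// --------------------------------------------------------------------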
657 //case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim 1360 //case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim
658 case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim 1361 case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim
659 case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim 1362 case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim
660 { 1363 {
661 // Perform an in-place conversion to int or an int subword. 1364 // Perform an in-place conversion to int or an int subword.
662 __ ldsw(G3_amh_vmargslot, O0_argslot); 1365 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
663 Address value; 1366 Address value;
664 Address vmarg = __ argument_address(O0_argslot); 1367 Address vmarg;
665 bool value_left_justified = false; 1368 bool value_left_justified = false;
666 1369
667 switch (ek) { 1370 switch (ek) {
668 case _adapter_opt_i2i: 1371 case _adapter_opt_i2i:
669 value = vmarg; 1372 value = vmarg = __ argument_address(O0_argslot, O0_argslot);
670 break; 1373 break;
671 case _adapter_opt_l2i: 1374 case _adapter_opt_l2i:
672 { 1375 {
673 // just delete the extra slot 1376 // just delete the extra slot
674 #ifdef _LP64 1377 #ifdef _LP64
675 // In V9, longs are given 2 64-bit slots in the interpreter, but the 1378 // In V9, longs are given 2 64-bit slots in the interpreter, but the
676 // data is passed in only 1 slot. 1379 // data is passed in only 1 slot.
677 // Keep the second slot. 1380 // Keep the second slot.
678 __ add(Gargs, __ argument_offset(O0_argslot, -1), O0_argslot); 1381 __ add(__ argument_address(O0_argslot, O0_argslot, -1), O0_argslot);
679 remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); 1382 remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
680 value = Address(O0_argslot, 4); // Get least-significant 32-bit of 64-bit value. 1383 value = Address(O0_argslot, 4); // Get least-significant 32-bit of 64-bit value.
681 vmarg = Address(O0_argslot, Interpreter::stackElementSize); 1384 vmarg = Address(O0_argslot, Interpreter::stackElementSize);
682 #else 1385 #else
683 // Keep the first slot. 1386 // Keep the first slot.
684 __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot); 1387 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
685 remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); 1388 remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
686 value = Address(O0_argslot, 0); 1389 value = Address(O0_argslot, 0);
687 vmarg = value; 1390 vmarg = value;
688 #endif 1391 #endif
689 } 1392 }
690 break; 1393 break;
691 case _adapter_opt_unboxi: 1394 case _adapter_opt_unboxi:
692 { 1395 {
1396 vmarg = __ argument_address(O0_argslot, O0_argslot);
693 // Load the value up from the heap. 1397 // Load the value up from the heap.
694 __ ld_ptr(vmarg, O1_scratch); 1398 __ ld_ptr(vmarg, O1_scratch);
695 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); 1399 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
696 #ifdef ASSERT 1400 #ifdef ASSERT
697 for (int bt = T_BOOLEAN; bt < T_INT; bt++) { 1401 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
710 default: 1414 default:
711 ShouldNotReachHere(); 1415 ShouldNotReachHere();
712 } 1416 }
713 1417
714 // This check is required on _BIG_ENDIAN 1418 // This check is required on _BIG_ENDIAN
715 Register G5_vminfo = G5_index; 1419 Register G5_vminfo = G5_scratch;
716 __ ldsw(G3_amh_conversion, G5_vminfo); 1420 __ ldsw(G3_amh_conversion, G5_vminfo);
717 assert(CONV_VMINFO_SHIFT == 0, "preshifted"); 1421 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
718 1422
719 // Original 32-bit vmdata word must be of this form: 1423 // Original 32-bit vmdata word must be of this form:
720 // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 | 1424 // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
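// --------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset] Decoding the 32-bit
// vmdata word pictured above. The real code uses the CONV_* masks and
// shifts; these helpers merely restate the drawn layout for reference.
#include <cstdint>
static inline int sketch_conv_op(uint32_t w)        { return  w        & 0xff; } // conversionOp:8
static inline int sketch_src_dst_types(uint32_t w)  { return (w >>  8) & 0xff; } // srcDstTypes:8
static inline int sketch_sign_bit_count(uint32_t w) { return (w >> 16) & 0xff; } // signBitCount:8
// The remaining high bits are MBZ in this particular encoding.
// --------------------------------------------------------------------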
746 1450
747 case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim 1451 case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim
748 case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim 1452 case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim
749 { 1453 {
750 // Perform an in-place int-to-long or ref-to-long conversion. 1454 // Perform an in-place int-to-long or ref-to-long conversion.
751 __ ldsw(G3_amh_vmargslot, O0_argslot); 1455 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
752 1456
753 // On big-endian machine we duplicate the slot and store the MSW 1457 // On big-endian machine we duplicate the slot and store the MSW
754 // in the first slot. 1458 // in the first slot.
755 __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot); 1459 __ add(__ argument_address(O0_argslot, O0_argslot, 1), O0_argslot);
756 1460
757 insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index); 1461 insert_arg_slots(_masm, stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
758 1462
759 Address arg_lsw(O0_argslot, 0); 1463 Address arg_lsw(O0_argslot, 0);
760 Address arg_msw(O0_argslot, -Interpreter::stackElementSize); 1464 Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
761 1465
762 switch (ek) { 1466 switch (ek) {
814 case _adapter_opt_rot_1_up: 1518 case _adapter_opt_rot_1_up:
815 case _adapter_opt_rot_1_down: 1519 case _adapter_opt_rot_1_down:
816 case _adapter_opt_rot_2_up: 1520 case _adapter_opt_rot_2_up:
817 case _adapter_opt_rot_2_down: 1521 case _adapter_opt_rot_2_down:
818 { 1522 {
819 int swap_bytes = 0, rotate = 0; 1523 int swap_slots = ek_adapter_opt_swap_slots(ek);
820 get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate); 1524 int rotate = ek_adapter_opt_swap_mode(ek);
821 1525
822 // 'argslot' is the position of the first argument to swap. 1526 // 'argslot' is the position of the first argument to swap.
823 __ ldsw(G3_amh_vmargslot, O0_argslot); 1527 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
824 __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot); 1528 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
1529 if (VerifyMethodHandles)
1530 verify_argslot(_masm, O0_argslot, O2_scratch, "swap point must fall within current frame");
825 1531
826 // 'vminfo' is the second. 1532 // 'vminfo' is the second.
827 Register O1_destslot = O1_scratch; 1533 Register O1_destslot = O1_scratch;
828 __ ldsw(G3_amh_conversion, O1_destslot); 1534 load_conversion_vminfo(_masm, G3_amh_conversion, O1_destslot);
829 assert(CONV_VMINFO_SHIFT == 0, "preshifted"); 1535 __ add(__ argument_address(O1_destslot, O1_destslot), O1_destslot);
830 __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot); 1536 if (VerifyMethodHandles)
831 __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot); 1537 verify_argslot(_masm, O1_destslot, O2_scratch, "swap point must fall within current frame");
832 1538
1539 assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here");
833 if (!rotate) { 1540 if (!rotate) {
834 for (int i = 0; i < swap_bytes; i += wordSize) { 1541 // simple swap
835 __ ld_ptr(Address(O0_argslot, i), O2_scratch); 1542 for (int i = 0; i < swap_slots; i++) {
836 __ ld_ptr(Address(O1_destslot, i), O3_scratch); 1543 __ ld_ptr( Address(O0_argslot, i * wordSize), O2_scratch);
837 __ st_ptr(O3_scratch, Address(O0_argslot, i)); 1544 __ ld_ptr( Address(O1_destslot, i * wordSize), O3_scratch);
838 __ st_ptr(O2_scratch, Address(O1_destslot, i)); 1545 __ st_ptr(O3_scratch, Address(O0_argslot, i * wordSize));
1546 __ st_ptr(O2_scratch, Address(O1_destslot, i * wordSize));
839 } 1547 }
840 } else { 1548 } else {
841 // Save the first chunk, which is going to get overwritten. 1549 // A rotate is actually a pair of moves, with an "odd slot" (or pair)
842 switch (swap_bytes) { 1550 // changing place with a series of other slots.
843 case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break; 1551 // First, push the "odd slot", which is going to get overwritten
844 case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru 1552 switch (swap_slots) {
845 case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break; 1553 case 2 : __ ld_ptr(Address(O0_argslot, 1 * wordSize), O4_scratch); // fall-thru
846 default: ShouldNotReachHere(); 1554 case 1 : __ ld_ptr(Address(O0_argslot, 0 * wordSize), O3_scratch); break;
847 } 1555 default: ShouldNotReachHere();
848 1556 }
849 if (rotate > 0) { 1557 if (rotate > 0) {
850 // Rotate upward. 1558 // Here is rotate > 0:
851 __ sub(O0_argslot, swap_bytes, O0_argslot); 1559 // (low mem) (high mem)
852 #if ASSERT 1560 // | dest: more_slots... | arg: odd_slot :arg+1 |
853 { 1561 // =>
854 // Verify that argslot > destslot, by at least swap_bytes. 1562 // | dest: odd_slot | dest+1: more_slots... :arg+1 |
855 Label L_ok; 1563 // work argslot down to destslot, copying contiguous data upwards
856 __ cmp(O0_argslot, O1_destslot); 1564 // pseudo-code:
857 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
858 __ delayed()->nop();
859 __ stop("source must be above destination (upward rotation)");
860 __ bind(L_ok);
861 }
862 #endif
863 // Work argslot down to destslot, copying contiguous data upwards.
864 // Pseudo-code:
865 // argslot = src_addr - swap_bytes 1565 // argslot = src_addr - swap_bytes
866 // destslot = dest_addr 1566 // destslot = dest_addr
867 // while (argslot >= destslot) { 1567 // while (argslot >= destslot) *(argslot + swap_bytes) = *(argslot + 0), argslot--;
868 // *(argslot + swap_bytes) = *(argslot + 0); 1568 move_arg_slots_up(_masm,
869 // argslot--; 1569 O1_destslot,
870 // } 1570 Address(O0_argslot, 0),
871 Label loop; 1571 swap_slots,
872 __ bind(loop); 1572 O0_argslot, O2_scratch);
873 __ ld_ptr(Address(O0_argslot, 0), G5_index);
874 __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
875 __ sub(O0_argslot, wordSize, O0_argslot);
876 __ cmp(O0_argslot, O1_destslot);
877 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
878 __ delayed()->nop(); // FILLME
879 } else { 1573 } else {
880 __ add(O0_argslot, swap_bytes, O0_argslot); 1574 // Here is the other direction, rotate < 0:
881 #if ASSERT 1575 // (low mem) (high mem)
882 { 1576 // | arg: odd_slot | arg+1: more_slots... :dest+1 |
883 // Verify that argslot < destslot, by at least swap_bytes. 1577 // =>
884 Label L_ok; 1578 // | arg: more_slots... | dest: odd_slot :dest+1 |
885 __ cmp(O0_argslot, O1_destslot); 1579 // work argslot up to destslot, copying contiguous data downwards
886 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok); 1580 // pseudo-code:
887 __ delayed()->nop();
888 __ stop("source must be below destination (downward rotation)");
889 __ bind(L_ok);
890 }
891 #endif
892 // Work argslot up to destslot, copying contiguous data downwards.
893 // Pseudo-code:
894 // argslot = src_addr + swap_bytes 1581 // argslot = src_addr + swap_bytes
895 // destslot = dest_addr 1582 // destslot = dest_addr
896 // while (argslot >= destslot) { 1583 // while (argslot <= destslot) *(argslot - swap_bytes) = *(argslot + 0), argslot++;
897 // *(argslot - swap_bytes) = *(argslot + 0); 1584 __ add(O1_destslot, wordSize, O1_destslot);
898 // argslot++; 1585 move_arg_slots_down(_masm,
899 // } 1586 Address(O0_argslot, swap_slots * wordSize),
900 Label loop; 1587 O1_destslot,
901 __ bind(loop); 1588 -swap_slots,
902 __ ld_ptr(Address(O0_argslot, 0), G5_index); 1589 O0_argslot, O2_scratch);
903 __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes)); 1590
904 __ add(O0_argslot, wordSize, O0_argslot); 1591 __ sub(O1_destslot, wordSize, O1_destslot);
905 __ cmp(O0_argslot, O1_destslot); 1592 }
906 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop); 1593 // pop the original first chunk into the destination slot, now free
907 __ delayed()->nop(); // FILLME 1594 switch (swap_slots) {
908 } 1595 case 2 : __ st_ptr(O4_scratch, Address(O1_destslot, 1 * wordSize)); // fall-thru
909 1596 case 1 : __ st_ptr(O3_scratch, Address(O1_destslot, 0 * wordSize)); break;
910 // Store the original first chunk into the destination slot, now free. 1597 default: ShouldNotReachHere();
911 switch (swap_bytes) {
912 case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
913 case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
914 case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
915 default: ShouldNotReachHere();
916 } 1598 }
917 } 1599 }
918 1600
919 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); 1601 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
920 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); 1602 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
922 break; 1604 break;
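// --------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset] The rotate emitted
// above, modeled on an array of slots indexed by increasing address
// (so dest < arg in the rotate > 0 picture). swap_slots is 1 or 2,
// exactly as in the generated code; names are illustrative.
#include <cstring>
static void sketch_rotate_up(long* slots, int dest, int arg, int swap_slots) {
  long odd[2];
  std::memcpy(odd, &slots[arg], swap_slots * sizeof(long));   // save odd slot(s)
  std::memmove(&slots[dest + swap_slots], &slots[dest],
               (arg - dest) * sizeof(long));                  // move_arg_slots_up
  std::memcpy(&slots[dest], odd, swap_slots * sizeof(long));  // drop odd at dest
}
// The generated code performs the middle step word by word with
// ld_ptr/st_ptr, walking argslot down to destslot per the pseudo-code.
// --------------------------------------------------------------------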
923 1605
924 case _adapter_dup_args: 1606 case _adapter_dup_args:
925 { 1607 {
926 // 'argslot' is the position of the first argument to duplicate. 1608 // 'argslot' is the position of the first argument to duplicate.
927 __ ldsw(G3_amh_vmargslot, O0_argslot); 1609 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
928 __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot); 1610 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
929 1611
930 // 'stack_move' is negative number of words to duplicate. 1612 // 'stack_move' is negative number of words to duplicate.
931 Register G5_stack_move = G5_index; 1613 Register O1_stack_move = O1_scratch;
932 __ ldsw(G3_amh_conversion, G5_stack_move); 1614 load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
933 __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move); 1615
934 1616 if (VerifyMethodHandles) {
935 // Remember the old Gargs (argslot[0]). 1617 verify_argslots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, true,
936 Register O1_oldarg = O1_scratch; 1618 "copied argument(s) must fall within current frame");
937 __ mov(Gargs, O1_oldarg); 1619 }
938 1620
939 // Move Gargs down to make room for dups. 1621 // insert location is always the bottom of the argument list:
940 __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move); 1622 __ neg(O1_stack_move);
941 __ add(Gargs, G5_stack_move, Gargs); 1623 push_arg_slots(_masm, O0_argslot, O1_stack_move, O2_scratch, O3_scratch);
942
943 // Compute the new Gargs (argslot[0]).
944 Register O2_newarg = O2_scratch;
945 __ mov(Gargs, O2_newarg);
946
947 // Copy from oldarg[0...] down to newarg[0...]
948 // Pseudo-code:
949 // O1_oldarg = old-Gargs
950 // O2_newarg = new-Gargs
951 // O0_argslot = argslot
952 // while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
953 Label loop;
954 __ bind(loop);
955 __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
956 __ st_ptr(O3_scratch, Address(O2_newarg, 0));
957 __ add(O0_argslot, wordSize, O0_argslot);
958 __ add(O2_newarg, wordSize, O2_newarg);
959 __ cmp(O2_newarg, O1_oldarg);
960 __ brx(Assembler::less, false, Assembler::pt, loop);
961 __ delayed()->nop(); // FILLME
962 1624
963 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); 1625 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
964 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); 1626 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
965 } 1627 }
966 break; 1628 break;
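// --------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset] _adapter_dup_args in
// container form: push_arg_slots above copies the slots starting at the
// adapter's argslot onto the bottom (TOS end) of the argument list.
// Index 0 models the TOS end here; names are illustrative.
#include <vector>
static void sketch_dup_args(std::vector<long>& args, size_t first, size_t count) {
  std::vector<long> dup(args.begin() + first, args.begin() + first + count);
  args.insert(args.begin(), dup.begin(), dup.end());  // duplicates land at TOS
}
// --------------------------------------------------------------------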
967 1629
968 case _adapter_drop_args: 1630 case _adapter_drop_args:
969 { 1631 {
970 // 'argslot' is the position of the first argument to nuke. 1632 // 'argslot' is the position of the first argument to nuke.
971 __ ldsw(G3_amh_vmargslot, O0_argslot); 1633 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
972 __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot); 1634 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
973 1635
974 // 'stack_move' is number of words to drop. 1636 // 'stack_move' is number of words to drop.
975 Register G5_stack_move = G5_index; 1637 Register O1_stack_move = O1_scratch;
976 __ ldsw(G3_amh_conversion, G5_stack_move); 1638 load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
977 __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move); 1639
978 1640 remove_arg_slots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, O4_scratch);
979 remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
980 1641
981 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); 1642 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
982 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); 1643 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
983 } 1644 }
984 break; 1645 break;
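// --------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset] _adapter_drop_args in
// the same container model: remove_arg_slots deletes 'stack_move' slots
// at argslot and slides the slots nearer TOS over to close the gap.
#include <vector>
static void sketch_drop_args(std::vector<long>& args, size_t first, size_t count) {
  args.erase(args.begin() + first, args.begin() + first + count);
}
// --------------------------------------------------------------------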
985 1646
986 case _adapter_collect_args: 1647 case _adapter_collect_args:
987 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI 1648 case _adapter_fold_args:
988 break;
989
990 case _adapter_spread_args: 1649 case _adapter_spread_args:
991 // Handled completely by optimized cases. 1650 // Handled completely by optimized cases.
992 __ stop("init_AdapterMethodHandle should not issue this"); 1651 __ stop("init_AdapterMethodHandle should not issue this");
993 break; 1652 break;
994 1653
1654 case _adapter_opt_collect_ref:
1655 case _adapter_opt_collect_int:
1656 case _adapter_opt_collect_long:
1657 case _adapter_opt_collect_float:
1658 case _adapter_opt_collect_double:
1659 case _adapter_opt_collect_void:
1660 case _adapter_opt_collect_0_ref:
1661 case _adapter_opt_collect_1_ref:
1662 case _adapter_opt_collect_2_ref:
1663 case _adapter_opt_collect_3_ref:
1664 case _adapter_opt_collect_4_ref:
1665 case _adapter_opt_collect_5_ref:
1666 case _adapter_opt_filter_S0_ref:
1667 case _adapter_opt_filter_S1_ref:
1668 case _adapter_opt_filter_S2_ref:
1669 case _adapter_opt_filter_S3_ref:
1670 case _adapter_opt_filter_S4_ref:
1671 case _adapter_opt_filter_S5_ref:
1672 case _adapter_opt_collect_2_S0_ref:
1673 case _adapter_opt_collect_2_S1_ref:
1674 case _adapter_opt_collect_2_S2_ref:
1675 case _adapter_opt_collect_2_S3_ref:
1676 case _adapter_opt_collect_2_S4_ref:
1677 case _adapter_opt_collect_2_S5_ref:
1678 case _adapter_opt_fold_ref:
1679 case _adapter_opt_fold_int:
1680 case _adapter_opt_fold_long:
1681 case _adapter_opt_fold_float:
1682 case _adapter_opt_fold_double:
1683 case _adapter_opt_fold_void:
1684 case _adapter_opt_fold_1_ref:
1685 case _adapter_opt_fold_2_ref:
1686 case _adapter_opt_fold_3_ref:
1687 case _adapter_opt_fold_4_ref:
1688 case _adapter_opt_fold_5_ref:
1689 {
1690 // Given a fresh incoming stack frame, build a new ricochet frame.
1691 // On entry, the return PC is in O7, and the AdapterMethodHandle of the
1692 // indicated kind is in G3_method_handle; Gargs points at the incoming
1693 // arguments, and the caller's exact stack pointer must also be preserved.
1694
1695 // Relevant AMH fields:
1696 // amh.vmargslot:
1697 // points to the trailing edge of the arguments
1698 // to filter, collect, or fold. For a boxing operation,
1699 // it points just after the single primitive value.
1700 // amh.argument:
1701 // recursively called MH, on |collect| arguments
1702 // amh.vmtarget:
1703 // final destination MH, on return value, etc.
1704 // amh.conversion.dest:
1705 // gives the type of the return value
1706 // (not needed here, since dest is also derived from ek)
1707 // amh.conversion.vminfo:
1708 // points to the trailing edge of the return value
1709 // when the vmtarget is to be called; this is
1710 // equal to vmargslot + (retained ? |collect| : 0)
1711
1712 // Pass 0 or more argument slots to the recursive target.
1713 int collect_count_constant = ek_adapter_opt_collect_count(ek);
1714
1715 // The collected arguments are copied from the saved argument list:
1716 int collect_slot_constant = ek_adapter_opt_collect_slot(ek);
1717
1718 assert(ek_orig == _adapter_collect_args ||
1719 ek_orig == _adapter_fold_args, "");
1720 bool retain_original_args = (ek_orig == _adapter_fold_args);
1721
1722 // The return value is replaced (or inserted) at the 'vminfo' argslot.
1723 // Sometimes we can compute this statically.
1724 int dest_slot_constant = -1;
1725 if (!retain_original_args)
1726 dest_slot_constant = collect_slot_constant;
1727 else if (collect_slot_constant >= 0 && collect_count_constant >= 0)
1728 // We are preserving all the arguments, and the return value is prepended,
1729 // so the return slot is to the left (above) the |collect| sequence.
1730 dest_slot_constant = collect_slot_constant + collect_count_constant;
1731
1732 // Replace all those slots by the result of the recursive call.
1733 // The result type can be one of ref, int, long, float, double, void.
1734 // In the case of void, nothing is pushed on the stack after return.
1735 BasicType dest = ek_adapter_opt_collect_type(ek);
1736 assert(dest == type2wfield[dest], "dest is a stack slot type");
1737 int dest_count = type2size[dest];
1738 assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size");
1739
1740 // Choose a return continuation.
1741 EntryKind ek_ret = _adapter_opt_return_any;
1742 if (dest != T_CONFLICT && OptimizeMethodHandles) {
1743 switch (dest) {
1744 case T_INT : ek_ret = _adapter_opt_return_int; break;
1745 case T_LONG : ek_ret = _adapter_opt_return_long; break;
1746 case T_FLOAT : ek_ret = _adapter_opt_return_float; break;
1747 case T_DOUBLE : ek_ret = _adapter_opt_return_double; break;
1748 case T_OBJECT : ek_ret = _adapter_opt_return_ref; break;
1749 case T_VOID : ek_ret = _adapter_opt_return_void; break;
1750 default : ShouldNotReachHere();
1751 }
1752 if (dest == T_OBJECT && dest_slot_constant >= 0) {
1753 EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant);
1754 if (ek_try <= _adapter_opt_return_LAST &&
1755 ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) {
1756 ek_ret = ek_try;
1757 }
1758 }
1759 assert(ek_adapter_opt_return_type(ek_ret) == dest, "");
1760 }
1761
1762 // Already pushed: ... keep1 | collect | keep2 |
1763
1764 // Push a few extra argument words, if we need them to store the return value.
1765 {
1766 int extra_slots = 0;
1767 if (retain_original_args) {
1768 extra_slots = dest_count;
1769 } else if (collect_count_constant == -1) {
1770 extra_slots = dest_count; // collect_count might be zero; be generous
1771 } else if (dest_count > collect_count_constant) {
1772 extra_slots = (dest_count - collect_count_constant);
1773 } else {
1774 // else we know we have enough dead space in |collect| to repurpose for return values
1775 }
1776 if (extra_slots != 0) {
1777 __ sub(SP, round_to(extra_slots, 2) * Interpreter::stackElementSize, SP);
1778 }
1779 }
1780
1781 // Set up Ricochet Frame.
1782 __ mov(SP, O5_savedSP); // record SP for the callee
1783
1784 // One extra (empty) slot for outgoing target MH (see Gargs computation below).
1785 __ save_frame(2); // Note: we need to add 2 slots since frame::memory_parameter_word_sp_offset is 23.
1786
1787 // Note: Gargs is live throughout the following, until we make our recursive call.
1788 // And the RF saves a copy in L4_saved_args_base.
1789
1790 RicochetFrame::enter_ricochet_frame(_masm, G3_method_handle, Gargs,
1791 entry(ek_ret)->from_interpreted_entry());
1792
1793 // Compute argument base:
1794 // Set up Gargs for the current frame; the extra (empty) slot is for the outgoing target MH (space reserved by save_frame above).
1795 __ add(FP, STACK_BIAS - (1 * Interpreter::stackElementSize), Gargs);
1796
1797 // Now pushed: ... keep1 | collect | keep2 | extra | [RF]
1798
1799 #ifdef ASSERT
1800 if (VerifyMethodHandles && dest != T_CONFLICT) {
1801 BLOCK_COMMENT("verify AMH.conv.dest {");
1802 extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O1_scratch);
1803 Label L_dest_ok;
1804 __ cmp(O1_scratch, (int) dest);
1805 __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
1806 __ delayed()->nop();
1807 if (dest == T_INT) {
1808 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
1809 if (is_subword_type(BasicType(bt))) {
1810 __ cmp(O1_scratch, (int) bt);
1811 __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
1812 __ delayed()->nop();
1813 }
1814 }
1815 }
1816 __ stop("bad dest in AMH.conv");
1817 __ BIND(L_dest_ok);
1818 BLOCK_COMMENT("} verify AMH.conv.dest");
1819 }
1820 #endif //ASSERT
1821
1822 // Find out where the original copy of the recursive argument sequence begins.
1823 Register O0_coll = O0_scratch;
1824 {
1825 RegisterOrConstant collect_slot = collect_slot_constant;
1826 if (collect_slot_constant == -1) {
1827 load_vmargslot(_masm, G3_amh_vmargslot, O1_scratch);
1828 collect_slot = O1_scratch;
1829 }
1830 // collect_slot might be 0, but we need the move anyway.
1831 __ add(RicochetFrame::L4_saved_args_base, __ argument_offset(collect_slot, collect_slot.register_or_noreg()), O0_coll);
1832 // O0_coll now points at the trailing edge of |collect| and leading edge of |keep2|
1833 }
1834
1835 // Replace the old AMH with the recursive MH. (No going back now.)
1836 // In the case of a boxing call, the recursive call is to a 'boxer' method,
1837 // such as Integer.valueOf or Long.valueOf. In the case of a filter
1838 // or collect call, it will take one or more arguments, transform them,
1839 // and return some result, to store back into argument_base[vminfo].
1840 __ load_heap_oop(G3_amh_argument, G3_method_handle);
1841 if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O1_scratch, O2_scratch);
1842
1843 // Calculate |collect|, the number of arguments we are collecting.
1844 Register O1_collect_count = O1_scratch;
1845 RegisterOrConstant collect_count;
1846 if (collect_count_constant < 0) {
1847 __ load_method_handle_vmslots(O1_collect_count, G3_method_handle, O2_scratch);
1848 collect_count = O1_collect_count;
1849 } else {
1850 collect_count = collect_count_constant;
1851 #ifdef ASSERT
1852 if (VerifyMethodHandles) {
1853 BLOCK_COMMENT("verify collect_count_constant {");
1854 __ load_method_handle_vmslots(O3_scratch, G3_method_handle, O2_scratch);
1855 Label L_count_ok;
1856 __ cmp(O3_scratch, collect_count_constant);
1857 __ br(Assembler::equal, false, Assembler::pt, L_count_ok);
1858 __ delayed()->nop();
1859 __ stop("bad vminfo in AMH.conv");
1860 __ BIND(L_count_ok);
1861 BLOCK_COMMENT("} verify collect_count_constant");
1862 }
1863 #endif //ASSERT
1864 }
1865
1866 // copy |collect| slots directly to TOS:
1867 push_arg_slots(_masm, O0_coll, collect_count, O2_scratch, O3_scratch);
1868 // Now pushed: ... keep1 | collect | keep2 | RF... | collect |
1869 // O0_coll still points at the trailing edge of |collect| and leading edge of |keep2|
1870
1871 // If necessary, adjust the saved arguments to make room for the eventual return value.
1872 // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
1873 // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect |
1874 // In the non-retaining case, this might move keep2 either up or down.
1875 // We don't have to copy the whole | RF... collect | complex,
1876 // but we must adjust RF.saved_args_base.
1877 // Also, from now on, we will forget about the original copy of |collect|.
1878 // If we are retaining it, we will treat it as part of |keep2|.
1879 // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
1880
1881 BLOCK_COMMENT("adjust trailing arguments {");
1882 // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
1883 int open_count = dest_count;
1884 RegisterOrConstant close_count = collect_count_constant;
1885 Register O1_close_count = O1_collect_count;
1886 if (retain_original_args) {
1887 close_count = constant(0);
1888 } else if (collect_count_constant == -1) {
1889 close_count = O1_collect_count;
1890 }
1891
1892 // How many slots need moving? This is simply dest_slot (0 => no |keep3|).
1893 RegisterOrConstant keep3_count;
1894 Register O2_keep3_count = O2_scratch;
1895 if (dest_slot_constant < 0) {
1896 extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O2_keep3_count);
1897 keep3_count = O2_keep3_count;
1898 } else {
1899 keep3_count = dest_slot_constant;
1900 #ifdef ASSERT
1901 if (VerifyMethodHandles) {  // dest_slot_constant is >= 0 in this branch
1902 BLOCK_COMMENT("verify dest_slot_constant {");
1903 extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O3_scratch);
1904 Label L_vminfo_ok;
1905 __ cmp(O3_scratch, dest_slot_constant);
1906 __ br(Assembler::equal, false, Assembler::pt, L_vminfo_ok);
1907 __ delayed()->nop();
1908 __ stop("bad vminfo in AMH.conv");
1909 __ BIND(L_vminfo_ok);
1910 BLOCK_COMMENT("} verify dest_slot_constant");
1911 }
1912 #endif //ASSERT
1913 }
1914
1915 // tasks remaining:
1916 bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
1917 bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
1918 bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
1919
1920 // Old and new argument locations (based at slot 0).
1921 // Net shift (&new_argv - &old_argv) is (close_count - open_count).
1922 bool zero_open_count = (open_count == 0); // remember this bit of info
1923 if (move_keep3 && fix_arg_base) {
1924 // It will be easier to have everything in one register:
1925 if (close_count.is_register()) {
1926 // Deduct open_count from close_count register to get a clean +/- value.
1927 __ sub(close_count.as_register(), open_count, close_count.as_register());
1928 } else {
1929 close_count = close_count.as_constant() - open_count;
1930 }
1931 open_count = 0;
1932 }
1933 Register L4_old_argv = RicochetFrame::L4_saved_args_base;
1934 Register O3_new_argv = O3_scratch;
1935 if (fix_arg_base) {
1936 __ add(L4_old_argv, __ argument_offset(close_count, O4_scratch), O3_new_argv,
1937 -(open_count * Interpreter::stackElementSize));
1938 }
1939
1940 // First decide if any actual data are to be moved.
1941 // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
1942 // (As it happens, all movements involve an argument list size change.)
1943
1944 // If there are variable parameters, use dynamic checks to skip around the whole mess.
1945 Label L_done;
1946 if (keep3_count.is_register()) {
1947 __ tst(keep3_count.as_register());
1948 __ br(Assembler::zero, false, Assembler::pn, L_done);
1949 __ delayed()->nop();
1950 }
1951 if (close_count.is_register()) {
1952 __ cmp(close_count.as_register(), open_count);
1953 __ br(Assembler::equal, false, Assembler::pn, L_done);
1954 __ delayed()->nop();
1955 }
1956
1957 if (move_keep3 && fix_arg_base) {
1958 bool emit_move_down = false, emit_move_up = false, emit_guard = false;
1959 if (!close_count.is_constant()) {
1960 emit_move_down = emit_guard = !zero_open_count;
1961 emit_move_up = true;
1962 } else if (open_count != close_count.as_constant()) {
1963 emit_move_down = (open_count > close_count.as_constant());
1964 emit_move_up = !emit_move_down;
1965 }
1966 Label L_move_up;
1967 if (emit_guard) {
1968 __ cmp(close_count.as_register(), open_count);
1969 __ br(Assembler::greater, false, Assembler::pn, L_move_up);
1970 __ delayed()->nop();
1971 }
1972
1973 if (emit_move_down) {
1974 // Move arguments down if |+dest+| > |-collect-|
1975 // (This is rare, except when arguments are retained.)
1976 // This opens space for the return value.
1977 if (keep3_count.is_constant()) {
1978 for (int i = 0; i < keep3_count.as_constant(); i++) {
1979 __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
1980 __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) );
1981 }
1982 } else {
1983 // Live: O1_close_count, O2_keep3_count, O3_new_argv
1984 Register argv_top = O0_scratch;
1985 __ add(L4_old_argv, __ argument_offset(keep3_count, O4_scratch), argv_top);
1986 move_arg_slots_down(_masm,
1987 Address(L4_old_argv, 0), // beginning of old argv
1988 argv_top, // end of old argv
1989 close_count, // distance to move down (must be negative)
1990 O4_scratch, G5_scratch);
1991 }
1992 }
1993
1994 if (emit_guard) {
1995 __ ba(false, L_done); // assumes emit_move_up is true also
1996 __ delayed()->nop();
1997 __ BIND(L_move_up);
1998 }
1999
2000 if (emit_move_up) {
2001 // Move arguments up if |+dest+| < |-collect-|
2002 // (This is usual, except when |keep3| is empty.)
2003 // This closes up the space occupied by the now-deleted collect values.
2004 if (keep3_count.is_constant()) {
2005 for (int i = keep3_count.as_constant() - 1; i >= 0; i--) {
2006 __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
2007 __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) );
2008 }
2009 } else {
2010 Address argv_top(L4_old_argv, __ argument_offset(keep3_count, O4_scratch));
2011 // Live: O1_close_count, O2_keep3_count, O3_new_argv
2012 move_arg_slots_up(_masm,
2013 L4_old_argv, // beginning of old argv
2014 argv_top, // end of old argv
2015 close_count, // distance to move up (must be positive)
2016 O4_scratch, G5_scratch);
2017 }
2018 }
2019 }
2020 __ BIND(L_done);
2021
2022 if (fix_arg_base) {
2023 // adjust RF.saved_args_base
2024 __ mov(O3_new_argv, RicochetFrame::L4_saved_args_base);
2025 }
2026
2027 if (stomp_dest) {
2028 // Stomp the return slot, so it doesn't hold garbage.
2029 // This isn't strictly necessary, but it may help detect bugs.
2030 __ set(RicochetFrame::RETURN_VALUE_PLACEHOLDER, O4_scratch);
2031 __ st_ptr(O4_scratch, Address(RicochetFrame::L4_saved_args_base,
2032 __ argument_offset(keep3_count, keep3_count.register_or_noreg()))); // uses O2_keep3_count
2033 }
2034 BLOCK_COMMENT("} adjust trailing arguments");
2035
2036 BLOCK_COMMENT("do_recursive_call");
2037 __ mov(SP, O5_savedSP); // record SP for the callee
2038 __ set(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr() - frame::pc_return_offset), O7);
2039 // The globally unique bounce address has two purposes:
2040 // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
2041 // 2. When returned to, it cuts back the stack and redirects control flow
2042 // to the return handler.
2043 // The return handler will further cut back the stack when it takes
2044 // down the RF. Perhaps there is a way to streamline this further.
2045
2046 // State during recursive call:
2047 // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
2048 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
2049 }
2050 break;
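// --------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset] The bookkeeping of
// "adjust trailing arguments" above: the net argv shift is
// (close_count - open_count) slots, where open_count = dest_count and
// close_count drops to 0 when the original args are retained (fold).
static int sketch_net_argv_shift(int dest_count, int collect_count, bool retain) {
  int open_count  = dest_count;                  // slots opened for the return value
  int close_count = retain ? 0 : collect_count;  // slots closed over |collect|
  return close_count - open_count;  // > 0: keep3 moves up; < 0: keep3 moves down
}
// Worked example: a fold (retain) with collect_count = 2 and a long
// result (dest_count = 2) yields shift -2, so |keep3| moves down two
// slots and dest_slot = collect_slot + collect_count, matching the
// static computation near the top of this case.
// --------------------------------------------------------------------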
2051
2052 case _adapter_opt_return_ref:
2053 case _adapter_opt_return_int:
2054 case _adapter_opt_return_long:
2055 case _adapter_opt_return_float:
2056 case _adapter_opt_return_double:
2057 case _adapter_opt_return_void:
2058 case _adapter_opt_return_S0_ref:
2059 case _adapter_opt_return_S1_ref:
2060 case _adapter_opt_return_S2_ref:
2061 case _adapter_opt_return_S3_ref:
2062 case _adapter_opt_return_S4_ref:
2063 case _adapter_opt_return_S5_ref:
2064 {
2065 BasicType dest_type_constant = ek_adapter_opt_return_type(ek);
2066 int dest_slot_constant = ek_adapter_opt_return_slot(ek);
2067
2068 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
2069
2070 if (dest_slot_constant == -1) {
2071 // The current stub is a general handler for this dest_type.
2072 // It can be called from _adapter_opt_return_any below.
2073 // Stash the address in a little table.
2074 assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob");
2075 address return_handler = __ pc();
2076 _adapter_return_handlers[dest_type_constant] = return_handler;
2077 if (dest_type_constant == T_INT) {
2078 // do the subword types too
2079 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
2080 if (is_subword_type(BasicType(bt)) &&
2081 _adapter_return_handlers[bt] == NULL) {
2082 _adapter_return_handlers[bt] = return_handler;
2083 }
2084 }
2085 }
2086 }
2087
2088 // On entry to this continuation handler, make Gargs live again.
2089 __ mov(RicochetFrame::L4_saved_args_base, Gargs);
2090
2091 Register O7_temp = O7;
2092 Register O5_vminfo = O5;
2093
2094 RegisterOrConstant dest_slot = dest_slot_constant;
2095 if (dest_slot_constant == -1) {
2096 extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O5_vminfo);
2097 dest_slot = O5_vminfo;
2098 }
2099 // Store the result back into the argslot.
2100 // This code uses the interpreter calling sequence, in which the return value
2101 // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
2102 // There are certain irregularities with floating point values, which can be seen
2103 // in TemplateInterpreterGenerator::generate_return_entry_for.
2104 move_return_value(_masm, dest_type_constant, __ argument_address(dest_slot, O7_temp));
2105
2106 RicochetFrame::leave_ricochet_frame(_masm, G3_method_handle, I5_savedSP, I7);
2107
2108 // Load the final target and go.
2109 if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O0_scratch, O1_scratch);
2110 __ restore(I5_savedSP, G0, SP);
2111 __ jump_to_method_handle_entry(G3_method_handle, O0_scratch);
2112 __ illtrap(0);
2113 }
2114 break;
2115
2116 case _adapter_opt_return_any:
2117 {
2118 Register O7_temp = O7;
2119 Register O5_dest_type = O5;
2120
2121 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
2122 extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O5_dest_type);
2123 __ set(ExternalAddress((address) &_adapter_return_handlers[0]), O7_temp);
2124 __ sll_ptr(O5_dest_type, LogBytesPerWord, O5_dest_type);
2125 __ ld_ptr(O7_temp, O5_dest_type, O7_temp);
2126
2127 #ifdef ASSERT
2128 { Label L_ok;
2129 __ br_notnull(O7_temp, false, Assembler::pt, L_ok);
2130 __ delayed()->nop();
2131 __ stop("bad method handle return");
2132 __ BIND(L_ok);
2133 }
2134 #endif //ASSERT
2135 __ JMP(O7_temp, 0);
2136 __ delayed()->nop();
2137 }
2138 break;
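// --------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset] _adapter_opt_return_any
// above is a word-indexed table dispatch, roughly as below under
// illustrative names. The real table is _adapter_return_handlers, filled
// in by the _adapter_opt_return_<type> cases, with subword int types
// sharing T_INT's entry; a 4-bit CONV_TYPE_MASK is assumed here.
typedef void (*SketchReturnHandler)(void);
static SketchReturnHandler sketch_return_handlers[16];  // one slot per type tag
static void sketch_return_any(int dest_type) {
  SketchReturnHandler h = sketch_return_handlers[dest_type & 0x0f];
  if (h != 0) h();  // the generated code JMPs (tail jump) and stops on NULL
}
// --------------------------------------------------------------------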
2139
995 case _adapter_opt_spread_0: 2140 case _adapter_opt_spread_0:
996 case _adapter_opt_spread_1: 2141 case _adapter_opt_spread_1_ref:
997 case _adapter_opt_spread_more: 2142 case _adapter_opt_spread_2_ref:
2143 case _adapter_opt_spread_3_ref:
2144 case _adapter_opt_spread_4_ref:
2145 case _adapter_opt_spread_5_ref:
2146 case _adapter_opt_spread_ref:
2147 case _adapter_opt_spread_byte:
2148 case _adapter_opt_spread_char:
2149 case _adapter_opt_spread_short:
2150 case _adapter_opt_spread_int:
2151 case _adapter_opt_spread_long:
2152 case _adapter_opt_spread_float:
2153 case _adapter_opt_spread_double:
998 { 2154 {
999 // spread an array out into a group of arguments 2155 // spread an array out into a group of arguments
1000 __ unimplemented(entry_name(ek)); 2156 int length_constant = ek_adapter_opt_spread_count(ek);
2157 bool length_can_be_zero = (length_constant == 0);
2158 if (length_constant < 0) {
2159 // some adapters with variable length must handle the zero case
2160 if (!OptimizeMethodHandles ||
2161 ek_adapter_opt_spread_type(ek) != T_OBJECT)
2162 length_can_be_zero = true;
2163 }
2164
2165 // find the address of the array argument
2166 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
2167 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
2168
2169 // O0_argslot points both to the array and to the first output arg
2170 Address vmarg = Address(O0_argslot, 0);
2171
2172 // Get the array value.
2173 Register O1_array = O1_scratch;
2174 Register O2_array_klass = O2_scratch;
2175 BasicType elem_type = ek_adapter_opt_spread_type(ek);
2176 int elem_slots = type2size[elem_type]; // 1 or 2
2177 int array_slots = 1; // array is always a T_OBJECT
2178 int length_offset = arrayOopDesc::length_offset_in_bytes();
2179 int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type);
2180 __ ld_ptr(vmarg, O1_array);
2181
2182 Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
2183 if (length_can_be_zero) {
2184 // handle the null pointer case, if zero is allowed
2185 Label L_skip;
2186 if (length_constant < 0) {
2187 load_conversion_vminfo(_masm, G3_amh_conversion, O3_scratch);
2188 __ br_zero(Assembler::notZero, false, Assembler::pn, O3_scratch, L_skip);
2189 __ delayed()->nop();
2190 }
2191 __ br_null(O1_array, false, Assembler::pn, L_array_is_empty);
2192 __ delayed()->nop();
2193 __ BIND(L_skip);
2194 }
2195 __ null_check(O1_array, oopDesc::klass_offset_in_bytes());
2196 __ load_klass(O1_array, O2_array_klass);
2197
2198 // Check the array type.
2199 Register O3_klass = O3_scratch;
2200 __ load_heap_oop(G3_amh_argument, O3_klass); // this is a Class object!
2201 load_klass_from_Class(_masm, O3_klass, O4_scratch, G5_scratch);
2202
2203 Label L_ok_array_klass, L_bad_array_klass, L_bad_array_length;
2204 __ check_klass_subtype(O2_array_klass, O3_klass, O4_scratch, G5_scratch, L_ok_array_klass);
2205 // If we get here, the type check failed!
2206 __ ba(false, L_bad_array_klass);
2207 __ delayed()->nop();
2208 __ BIND(L_ok_array_klass);
2209
2210 // Check length.
2211 if (length_constant >= 0) {
2212 __ ldsw(Address(O1_array, length_offset), O4_scratch);
2213 __ cmp(O4_scratch, length_constant);
2214 } else {
2215 Register O3_vminfo = O3_scratch;
2216 load_conversion_vminfo(_masm, G3_amh_conversion, O3_vminfo);
2217 __ ldsw(Address(O1_array, length_offset), O4_scratch);
2218 __ cmp(O3_vminfo, O4_scratch);
2219 }
2220 __ br(Assembler::notEqual, false, Assembler::pn, L_bad_array_length);
2221 __ delayed()->nop();
2222
2223 Register O2_argslot_limit = O2_scratch;
2224
2225 // Array length checks out. Now insert any required stack slots.
2226 if (length_constant == -1) {
2227 // Form a pointer to the end of the affected region.
2228 __ add(O0_argslot, Interpreter::stackElementSize, O2_argslot_limit);
2229 // 'stack_move' is negative number of words to insert
2230 // This number already accounts for elem_slots.
2231 Register O3_stack_move = O3_scratch;
2232 load_stack_move(_masm, G3_amh_conversion, O3_stack_move);
2233 __ cmp(O3_stack_move, 0);
2234 assert(stack_move_unit() < 0, "else change this comparison");
2235 __ br(Assembler::less, false, Assembler::pn, L_insert_arg_space);
2236 __ delayed()->nop();
2237 __ br(Assembler::equal, false, Assembler::pn, L_copy_args);
2238 __ delayed()->nop();
2239 // single argument case, with no array movement
2240 __ BIND(L_array_is_empty);
2241 remove_arg_slots(_masm, -stack_move_unit() * array_slots,
2242 O0_argslot, O1_scratch, O2_scratch, O3_scratch);
2243 __ ba(false, L_args_done); // no spreading to do
2244 __ delayed()->nop();
2245 __ BIND(L_insert_arg_space);
2246 // come here in the usual case, stack_move < 0 (2 or more spread arguments)
2247 // Live: O1_array, O2_argslot_limit, O3_stack_move
2248 insert_arg_slots(_masm, O3_stack_move,
2249 O0_argslot, O4_scratch, G5_scratch, O1_scratch);
2250 // reload from O2_argslot_limit since O0_argslot is now decremented
2251 __ ld_ptr(Address(O2_argslot_limit, -Interpreter::stackElementSize), O1_array);
2252 } else if (length_constant >= 1) {
2253 int new_slots = (length_constant * elem_slots) - array_slots;
2254 insert_arg_slots(_masm, new_slots * stack_move_unit(),
2255 O0_argslot, O2_scratch, O3_scratch, O4_scratch);
2256 } else if (length_constant == 0) {
2257 __ BIND(L_array_is_empty);
2258 remove_arg_slots(_masm, -stack_move_unit() * array_slots,
2259 O0_argslot, O1_scratch, O2_scratch, O3_scratch);
2260 } else {
2261 ShouldNotReachHere();
2262 }
2263
2264 // Copy from the array to the new slots.
2265 // Note: Stack change code preserves integrity of O0_argslot pointer.
2266 // So even after slot insertions, O0_argslot still points to first argument.
2267 // Beware: Arguments that are shallow on the stack are deep in the array,
2268 // and vice versa. So a downward-growing stack (the usual) has to be copied
2269 // elementwise in reverse order from the source array.
2270 __ BIND(L_copy_args);
2271 if (length_constant == -1) {
2272 // [O0_argslot, O2_argslot_limit) is the area we are inserting into.
2273 // Array element [0] goes at O2_argslot_limit[-wordSize].
2274 Register O1_source = O1_array;
2275 __ add(Address(O1_array, elem0_offset), O1_source);
2276 Register O4_fill_ptr = O4_scratch;
2277 __ mov(O2_argslot_limit, O4_fill_ptr);
2278 Label L_loop;
2279 __ BIND(L_loop);
2280 __ add(O4_fill_ptr, -Interpreter::stackElementSize * elem_slots, O4_fill_ptr);
2281 move_typed_arg(_masm, elem_type, true,
2282 Address(O1_source, 0), Address(O4_fill_ptr, 0),
2283 O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
2284 __ add(O1_source, type2aelembytes(elem_type), O1_source);
2285 __ cmp(O4_fill_ptr, O0_argslot);
2286 __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
2287 __ delayed()->nop(); // FILLME
2288 } else if (length_constant == 0) {
2289 // nothing to copy
2290 } else {
2291 int elem_offset = elem0_offset;
2292 int slot_offset = length_constant * Interpreter::stackElementSize;
2293 for (int index = 0; index < length_constant; index++) {
2294 slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward
2295 move_typed_arg(_masm, elem_type, true,
2296 Address(O1_array, elem_offset), Address(O0_argslot, slot_offset),
2297 O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
2298 elem_offset += type2aelembytes(elem_type);
2299 }
2300 }
2301 __ BIND(L_args_done);
2302
2303 // Arguments are spread. Move to next method handle.
2304 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
2305 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
2306
2307 __ BIND(L_bad_array_klass);
2308 assert(!vmarg.uses(O2_required), "must be different registers");
2309 __ load_heap_oop(Address(O2_array_klass, java_mirror_offset), O2_required); // required class
2310 __ ld_ptr( vmarg, O1_actual); // bad object
2311 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
2312 __ delayed()->mov(Bytecodes::_aaload, O0_code); // who is complaining?
2313
2314 __ bind(L_bad_array_length);
2315 assert(!vmarg.uses(O2_required), "must be different registers");
2316 __ mov( G3_method_handle, O2_required); // required class
2317 __ ld_ptr(vmarg, O1_actual); // bad object
2318 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
2319 __ delayed()->mov(Bytecodes::_arraylength, O0_code); // who is complaining?
1001 } 2320 }
1002 break; 2321 break;
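// --------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset] The copy order of the
// spread loop above, for one-word elements (elem_slots == 1; the real
// code also handles two-slot longs/doubles via move_typed_arg). The
// stack grows downward, so element [0] must land deepest: the fill
// pointer walks down from the slot limit while the source walks up.
static void sketch_spread(const long* elems, int length, long* slot_limit) {
  long* fill = slot_limit;   // like O2_argslot_limit: one past the deepest slot
  for (int i = 0; i < length; i++) {
    fill -= 1;               // O4_fill_ptr steps down one slot per element
    *fill = elems[i];        // element [i] ends up shallower than [i-1]
  }
}
// --------------------------------------------------------------------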
1003 2322
1004 case _adapter_flyby:
1005 case _adapter_ricochet:
1006 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
1007 break;
1008
1009 default: 2323 default:
2324 DEBUG_ONLY(tty->print_cr("bad ek=%d (%s)", (int)ek, entry_name(ek)));
1010 ShouldNotReachHere(); 2325 ShouldNotReachHere();
1011 } 2326 }
2327 BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
1012 2328
1013 address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry); 2329 address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
1014 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI 2330 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
1015 2331
1016 init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie)); 2332 init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));