Mercurial > hg > truffle
comparison src/cpu/x86/vm/templateInterpreter_x86_32.cpp @ 6725:da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author | coleenp |
---|---|
date | Sat, 01 Sep 2012 13:25:18 -0400 |
parents | 1d7922586cf6 |
children | cd3d6a6b95d9 |
comparison
equal
deleted
inserted
replaced
6724:36d1d483d5d6 | 6725:da91efe96a93 |
---|---|
28 #include "interpreter/interpreter.hpp" | 28 #include "interpreter/interpreter.hpp" |
29 #include "interpreter/interpreterGenerator.hpp" | 29 #include "interpreter/interpreterGenerator.hpp" |
30 #include "interpreter/interpreterRuntime.hpp" | 30 #include "interpreter/interpreterRuntime.hpp" |
31 #include "interpreter/templateTable.hpp" | 31 #include "interpreter/templateTable.hpp" |
32 #include "oops/arrayOop.hpp" | 32 #include "oops/arrayOop.hpp" |
33 #include "oops/methodDataOop.hpp" | 33 #include "oops/methodData.hpp" |
34 #include "oops/methodOop.hpp" | 34 #include "oops/method.hpp" |
35 #include "oops/oop.inline.hpp" | 35 #include "oops/oop.inline.hpp" |
36 #include "prims/jvmtiExport.hpp" | 36 #include "prims/jvmtiExport.hpp" |
37 #include "prims/jvmtiThreadState.hpp" | 37 #include "prims/jvmtiThreadState.hpp" |
38 #include "runtime/arguments.hpp" | 38 #include "runtime/arguments.hpp" |
39 #include "runtime/deoptimization.hpp" | 39 #include "runtime/deoptimization.hpp" |
199 __ jcc(Assembler::equal, L_giant_index); | 199 __ jcc(Assembler::equal, L_giant_index); |
200 } | 200 } |
201 __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2)); | 201 __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2)); |
202 __ bind(L_got_cache); | 202 __ bind(L_got_cache); |
203 __ movl(rbx, Address(rbx, rcx, | 203 __ movl(rbx, Address(rbx, rcx, |
204 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + | 204 Address::times_ptr, ConstantPoolCache::base_offset() + |
205 ConstantPoolCacheEntry::flags_offset())); | 205 ConstantPoolCacheEntry::flags_offset())); |
206 __ andptr(rbx, 0xFF); | 206 __ andptr(rbx, 0xFF); |
207 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); | 207 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); |
208 __ dispatch_next(state, step); | 208 __ dispatch_next(state, step); |
209 | 209 |
341 // | 341 // |
342 // rbx,: method | 342 // rbx,: method |
343 // rcx: invocation counter | 343 // rcx: invocation counter |
344 // | 344 // |
345 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { | 345 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { |
346 const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) + | 346 const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) + |
347 in_bytes(InvocationCounter::counter_offset())); | 347 in_bytes(InvocationCounter::counter_offset())); |
348 // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not. | 348 // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. |
349 if (TieredCompilation) { | 349 if (TieredCompilation) { |
350 int increment = InvocationCounter::count_increment; | 350 int increment = InvocationCounter::count_increment; |
351 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; | 351 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; |
352 Label no_mdo, done; | 352 Label no_mdo, done; |
353 if (ProfileInterpreter) { | 353 if (ProfileInterpreter) { |
354 // Are we profiling? | 354 // Are we profiling? |
355 __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset())); | 355 __ movptr(rax, Address(rbx, Method::method_data_offset())); |
356 __ testptr(rax, rax); | 356 __ testptr(rax, rax); |
357 __ jccb(Assembler::zero, no_mdo); | 357 __ jccb(Assembler::zero, no_mdo); |
358 // Increment counter in the MDO | 358 // Increment counter in the MDO |
359 const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) + | 359 const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + |
360 in_bytes(InvocationCounter::counter_offset())); | 360 in_bytes(InvocationCounter::counter_offset())); |
361 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); | 361 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); |
362 __ jmpb(done); | 362 __ jmpb(done); |
363 } | 363 } |
364 __ bind(no_mdo); | 364 __ bind(no_mdo); |
365 // Increment counter in methodOop (we don't need to load it, it's in rcx). | 365 // Increment counter in Method* (we don't need to load it, it's in rcx). |
366 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); | 366 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); |
367 __ bind(done); | 367 __ bind(done); |
368 } else { | 368 } else { |
369 const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + | 369 const Address backedge_counter (rbx, Method::backedge_counter_offset() + |
370 InvocationCounter::counter_offset()); | 370 InvocationCounter::counter_offset()); |
371 | 371 |
372 if (ProfileInterpreter) { // %%% Merge this into methodDataOop | 372 if (ProfileInterpreter) { // %%% Merge this into MethodData* |
373 __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset())); | 373 __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset())); |
374 } | 374 } |
375 // Update standard invocation counters | 375 // Update standard invocation counters |
376 __ movl(rax, backedge_counter); // load backedge counter | 376 __ movl(rax, backedge_counter); // load backedge counter |
377 | 377 |
378 __ incrementl(rcx, InvocationCounter::count_increment); | 378 __ incrementl(rcx, InvocationCounter::count_increment); |
422 // rsp - sender_sp | 422 // rsp - sender_sp |
423 | 423 |
424 // C++ interpreter only | 424 // C++ interpreter only |
425 // rsi - previous interpreter state pointer | 425 // rsi - previous interpreter state pointer |
426 | 426 |
427 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset()); | 427 const Address size_of_parameters(rbx, Method::size_of_parameters_offset()); |
428 | 428 |
429 // InterpreterRuntime::frequency_counter_overflow takes one argument | 429 // InterpreterRuntime::frequency_counter_overflow takes one argument |
430 // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp). | 430 // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp). |
431 // The call returns the address of the verified entry point for the method or NULL | 431 // The call returns the address of the verified entry point for the method or NULL |
432 // if the compilation did not complete (either went background or bailed out). | 432 // if the compilation did not complete (either went background or bailed out). |
433 __ movptr(rax, (intptr_t)false); | 433 __ movptr(rax, (intptr_t)false); |
434 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax); | 434 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax); |
435 | 435 |
436 __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop | 436 __ movptr(rbx, Address(rbp, method_offset)); // restore Method* |
437 | 437 |
438 // Preserve invariant that rsi/rdi contain bcp/locals of sender frame | 438 // Preserve invariant that rsi/rdi contain bcp/locals of sender frame |
439 // and jump to the interpreted entry. | 439 // and jump to the interpreted entry. |
440 __ jmp(*do_continue, relocInfo::none); | 440 __ jmp(*do_continue, relocInfo::none); |
441 | 441 |
448 // | 448 // |
449 // Registers live on entry: | 449 // Registers live on entry: |
450 // | 450 // |
451 // Asm interpreter | 451 // Asm interpreter |
452 // rdx: number of additional locals this frame needs (what we must check) | 452 // rdx: number of additional locals this frame needs (what we must check) |
453 // rbx,: methodOop | 453 // rbx,: Method* |
454 | 454 |
455 // destroyed on exit | 455 // destroyed on exit |
456 // rax, | 456 // rax, |
457 | 457 |
458 // NOTE: since the additional locals are also always pushed (wasn't obvious in | 458 // NOTE: since the additional locals are also always pushed (wasn't obvious in |
540 | 540 |
541 __ bind(after_frame_check); | 541 __ bind(after_frame_check); |
542 } | 542 } |
543 | 543 |
544 // Allocate monitor and lock method (asm interpreter) | 544 // Allocate monitor and lock method (asm interpreter) |
545 // rbx, - methodOop | 545 // rbx, - Method* |
546 // | 546 // |
547 void InterpreterGenerator::lock_method(void) { | 547 void InterpreterGenerator::lock_method(void) { |
548 // synchronize method | 548 // synchronize method |
549 const Address access_flags (rbx, methodOopDesc::access_flags_offset()); | 549 const Address access_flags (rbx, Method::access_flags_offset()); |
550 const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); | 550 const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); |
551 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | 551 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; |
552 | 552 |
553 #ifdef ASSERT | 553 #ifdef ASSERT |
554 { Label L; | 554 { Label L; |
564 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); | 564 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); |
565 __ movl(rax, access_flags); | 565 __ movl(rax, access_flags); |
566 __ testl(rax, JVM_ACC_STATIC); | 566 __ testl(rax, JVM_ACC_STATIC); |
567 __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case) | 567 __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case) |
568 __ jcc(Assembler::zero, done); | 568 __ jcc(Assembler::zero, done); |
569 __ movptr(rax, Address(rbx, methodOopDesc::const_offset())); | 569 __ movptr(rax, Address(rbx, Method::const_offset())); |
570 __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset())); | 570 __ movptr(rax, Address(rax, ConstMethod::constants_offset())); |
571 __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); | 571 __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes())); |
572 __ movptr(rax, Address(rax, mirror_offset)); | 572 __ movptr(rax, Address(rax, mirror_offset)); |
573 __ bind(done); | 573 __ bind(done); |
574 } | 574 } |
575 // add space for monitor & lock | 575 // add space for monitor & lock |
576 __ subptr(rsp, entry_size); // add space for a monitor entry | 576 __ subptr(rsp, entry_size); // add space for a monitor entry |
590 __ enter(); // save old & set new rbp, | 590 __ enter(); // save old & set new rbp, |
591 | 591 |
592 | 592 |
593 __ push(rsi); // set sender sp | 593 __ push(rsi); // set sender sp |
594 __ push((int32_t)NULL_WORD); // leave last_sp as null | 594 __ push((int32_t)NULL_WORD); // leave last_sp as null |
595 __ movptr(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop | 595 __ movptr(rsi, Address(rbx,Method::const_offset())); // get ConstMethod* |
596 __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase | 596 __ lea(rsi, Address(rsi,ConstMethod::codes_offset())); // get codebase |
597 __ push(rbx); // save methodOop | 597 __ push(rbx); // save Method* |
598 if (ProfileInterpreter) { | 598 if (ProfileInterpreter) { |
599 Label method_data_continue; | 599 Label method_data_continue; |
600 __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); | 600 __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset()))); |
601 __ testptr(rdx, rdx); | 601 __ testptr(rdx, rdx); |
602 __ jcc(Assembler::zero, method_data_continue); | 602 __ jcc(Assembler::zero, method_data_continue); |
603 __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset())); | 603 __ addptr(rdx, in_bytes(MethodData::data_offset())); |
604 __ bind(method_data_continue); | 604 __ bind(method_data_continue); |
605 __ push(rdx); // set the mdp (method data pointer) | 605 __ push(rdx); // set the mdp (method data pointer) |
606 } else { | 606 } else { |
607 __ push(0); | 607 __ push(0); |
608 } | 608 } |
609 | 609 |
610 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); | 610 __ movptr(rdx, Address(rbx, Method::const_offset())); |
611 __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset())); | 611 __ movptr(rdx, Address(rdx, ConstMethod::constants_offset())); |
612 __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); | 612 __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes())); |
613 __ push(rdx); // set constant pool cache | 613 __ push(rdx); // set constant pool cache |
614 __ push(rdi); // set locals pointer | 614 __ push(rdi); // set locals pointer |
615 if (native_call) { | 615 if (native_call) { |
616 __ push(0); // no bcp | 616 __ push(0); // no bcp |
617 } else { | 617 } else { |
631 | 631 |
632 // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry | 632 // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry |
633 | 633 |
634 address InterpreterGenerator::generate_accessor_entry(void) { | 634 address InterpreterGenerator::generate_accessor_entry(void) { |
635 | 635 |
636 // rbx,: methodOop | 636 // rbx,: Method* |
637 // rcx: receiver (preserve for slow entry into asm interpreter) | 637 // rcx: receiver (preserve for slow entry into asm interpreter) |
638 | 638 |
639 // rsi: senderSP must be preserved for slow path, set SP to it on fast path | 639 // rsi: senderSP must be preserved for slow path, set SP to it on fast path |
640 | 640 |
641 address entry_point = __ pc(); | 641 address entry_point = __ pc(); |
662 // check if local 0 != NULL and read field | 662 // check if local 0 != NULL and read field |
663 __ testptr(rax, rax); | 663 __ testptr(rax, rax); |
664 __ jcc(Assembler::zero, slow_path); | 664 __ jcc(Assembler::zero, slow_path); |
665 | 665 |
666 // read first instruction word and extract bytecode @ 1 and index @ 2 | 666 // read first instruction word and extract bytecode @ 1 and index @ 2 |
667 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); | 667 __ movptr(rdx, Address(rbx, Method::const_offset())); |
668 __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset())); | 668 __ movptr(rdi, Address(rdx, ConstMethod::constants_offset())); |
669 __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset())); | 669 __ movl(rdx, Address(rdx, ConstMethod::codes_offset())); |
670 // Shift codes right to get the index on the right. | 670 // Shift codes right to get the index on the right. |
671 // The bytecode fetched looks like <index><0xb4><0x2a> | 671 // The bytecode fetched looks like <index><0xb4><0x2a> |
672 __ shrl(rdx, 2*BitsPerByte); | 672 __ shrl(rdx, 2*BitsPerByte); |
673 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); | 673 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); |
674 __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); | 674 __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes())); |
675 | 675 |
676 // rax,: local 0 | 676 // rax,: local 0 |
677 // rbx,: method | 677 // rbx,: method |
678 // rcx: receiver - do not destroy since it is needed for slow path! | 678 // rcx: receiver - do not destroy since it is needed for slow path! |
679 // rcx: scratch | 679 // rcx: scratch |
686 // contains Bytecode::_getfield in b1 byte. | 686 // contains Bytecode::_getfield in b1 byte. |
687 assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below"); | 687 assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below"); |
688 __ movl(rcx, | 688 __ movl(rcx, |
689 Address(rdi, | 689 Address(rdi, |
690 rdx, | 690 rdx, |
691 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); | 691 Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset())); |
692 __ shrl(rcx, 2*BitsPerByte); | 692 __ shrl(rcx, 2*BitsPerByte); |
693 __ andl(rcx, 0xFF); | 693 __ andl(rcx, 0xFF); |
694 __ cmpl(rcx, Bytecodes::_getfield); | 694 __ cmpl(rcx, Bytecodes::_getfield); |
695 __ jcc(Assembler::notEqual, slow_path); | 695 __ jcc(Assembler::notEqual, slow_path); |
696 | 696 |
697 // Note: constant pool entry is not valid before bytecode is resolved | 697 // Note: constant pool entry is not valid before bytecode is resolved |
698 __ movptr(rcx, | 698 __ movptr(rcx, |
699 Address(rdi, | 699 Address(rdi, |
700 rdx, | 700 rdx, |
701 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())); | 701 Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); |
702 __ movl(rdx, | 702 __ movl(rdx, |
703 Address(rdi, | 703 Address(rdi, |
704 rdx, | 704 rdx, |
705 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())); | 705 Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); |
706 | 706 |
707 Label notByte, notShort, notChar; | 707 Label notByte, notShort, notChar; |
708 const Address field_address (rax, rcx, Address::times_1); | 708 const Address field_address (rax, rcx, Address::times_1); |
709 | 709 |
710 // Need to differentiate between igetfield, agetfield, bgetfield etc. | 710 // Need to differentiate between igetfield, agetfield, bgetfield etc. |
787 // and so we don't need to call the G1 pre-barrier. Thus we can use the | 787 // and so we don't need to call the G1 pre-barrier. Thus we can use the |
788 // regular method entry code to generate the NPE. | 788 // regular method entry code to generate the NPE. |
789 // | 789 // |
790 // This code is based on generate_accessor_entry. | 790 // This code is based on generate_accessor_entry. |
791 | 791 |
792 // rbx,: methodOop | 792 // rbx,: Method* |
793 // rcx: receiver (preserve for slow entry into asm interpreter) | 793 // rcx: receiver (preserve for slow entry into asm interpreter) |
794 | 794 |
795 // rsi: senderSP must be preserved for slow path, set SP to it on fast path | 795 // rsi: senderSP must be preserved for slow path, set SP to it on fast path |
796 | 796 |
797 address entry = __ pc(); | 797 address entry = __ pc(); |
861 | 861 |
862 address InterpreterGenerator::generate_native_entry(bool synchronized) { | 862 address InterpreterGenerator::generate_native_entry(bool synchronized) { |
863 // determine code generation flags | 863 // determine code generation flags |
864 bool inc_counter = UseCompiler || CountCompiledCalls; | 864 bool inc_counter = UseCompiler || CountCompiledCalls; |
865 | 865 |
866 // rbx,: methodOop | 866 // rbx,: Method* |
867 // rsi: sender sp | 867 // rsi: sender sp |
868 // rsi: previous interpreter state (C++ interpreter) must preserve | 868 // rsi: previous interpreter state (C++ interpreter) must preserve |
869 address entry_point = __ pc(); | 869 address entry_point = __ pc(); |
870 | 870 |
871 | 871 |
872 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset()); | 872 const Address size_of_parameters(rbx, Method::size_of_parameters_offset()); |
873 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset()); | 873 const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); |
874 const Address access_flags (rbx, methodOopDesc::access_flags_offset()); | 874 const Address access_flags (rbx, Method::access_flags_offset()); |
875 | 875 |
876 // get parameter size (always needed) | 876 // get parameter size (always needed) |
877 __ load_unsigned_short(rcx, size_of_parameters); | 877 __ load_unsigned_short(rcx, size_of_parameters); |
878 | 878 |
879 // native calls don't need the stack size check since they have no expression stack | 879 // native calls don't need the stack size check since they have no expression stack |
880 // and the arguments are already on the stack and we only add a handful of words | 880 // and the arguments are already on the stack and we only add a handful of words |
881 // to the stack | 881 // to the stack |
882 | 882 |
883 // rbx,: methodOop | 883 // rbx,: Method* |
884 // rcx: size of parameters | 884 // rcx: size of parameters |
885 // rsi: sender sp | 885 // rsi: sender sp |
886 | 886 |
887 __ pop(rax); // get return address | 887 __ pop(rax); // get return address |
888 // for natives the size of locals is zero | 888 // for natives the size of locals is zero |
986 const Register thread = rdi; | 986 const Register thread = rdi; |
987 const Register t = rcx; | 987 const Register t = rcx; |
988 | 988 |
989 // allocate space for parameters | 989 // allocate space for parameters |
990 __ get_method(method); | 990 __ get_method(method); |
991 __ verify_oop(method); | 991 __ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset())); |
992 __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset())); | |
993 __ shlptr(t, Interpreter::logStackElementSize); | 992 __ shlptr(t, Interpreter::logStackElementSize); |
994 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror | 993 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror |
995 __ subptr(rsp, t); | 994 __ subptr(rsp, t); |
996 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics | 995 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics |
997 | 996 |
998 // get signature handler | 997 // get signature handler |
999 { Label L; | 998 { Label L; |
1000 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); | 999 __ movptr(t, Address(method, Method::signature_handler_offset())); |
1001 __ testptr(t, t); | 1000 __ testptr(t, t); |
1002 __ jcc(Assembler::notZero, L); | 1001 __ jcc(Assembler::notZero, L); |
1003 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); | 1002 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); |
1004 __ get_method(method); | 1003 __ get_method(method); |
1005 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); | 1004 __ movptr(t, Address(method, Method::signature_handler_offset())); |
1006 __ bind(L); | 1005 __ bind(L); |
1007 } | 1006 } |
1008 | 1007 |
1009 // call signature handler | 1008 // call signature handler |
1010 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code"); | 1009 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code"); |
1022 __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax); | 1021 __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax); |
1023 | 1022 |
1024 // pass mirror handle if static call | 1023 // pass mirror handle if static call |
1025 { Label L; | 1024 { Label L; |
1026 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); | 1025 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); |
1027 __ movl(t, Address(method, methodOopDesc::access_flags_offset())); | 1026 __ movl(t, Address(method, Method::access_flags_offset())); |
1028 __ testl(t, JVM_ACC_STATIC); | 1027 __ testl(t, JVM_ACC_STATIC); |
1029 __ jcc(Assembler::zero, L); | 1028 __ jcc(Assembler::zero, L); |
1030 // get mirror | 1029 // get mirror |
1031 __ movptr(t, Address(method, methodOopDesc:: const_offset())); | 1030 __ movptr(t, Address(method, Method:: const_offset())); |
1032 __ movptr(t, Address(t, constMethodOopDesc::constants_offset())); | 1031 __ movptr(t, Address(t, ConstMethod::constants_offset())); |
1033 __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); | 1032 __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes())); |
1034 __ movptr(t, Address(t, mirror_offset)); | 1033 __ movptr(t, Address(t, mirror_offset)); |
1035 // copy mirror into activation frame | 1034 // copy mirror into activation frame |
1036 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t); | 1035 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t); |
1037 // pass handle to mirror | 1036 // pass handle to mirror |
1038 __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); | 1037 __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); |
1040 __ bind(L); | 1039 __ bind(L); |
1041 } | 1040 } |
1042 | 1041 |
1043 // get native function entry point | 1042 // get native function entry point |
1044 { Label L; | 1043 { Label L; |
1045 __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); | 1044 __ movptr(rax, Address(method, Method::native_function_offset())); |
1046 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); | 1045 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); |
1047 __ cmpptr(rax, unsatisfied.addr()); | 1046 __ cmpptr(rax, unsatisfied.addr()); |
1048 __ jcc(Assembler::notEqual, L); | 1047 __ jcc(Assembler::notEqual, L); |
1049 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); | 1048 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); |
1050 __ get_method(method); | 1049 __ get_method(method); |
1051 __ verify_oop(method); | 1050 __ movptr(rax, Address(method, Method::native_function_offset())); |
1052 __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); | |
1053 __ bind(L); | 1051 __ bind(L); |
1054 } | 1052 } |
1055 | 1053 |
1056 // pass JNIEnv | 1054 // pass JNIEnv |
1057 __ get_thread(thread); | 1055 __ get_thread(thread); |
1216 | 1214 |
1217 // restore rsi to have legal interpreter frame, | 1215 // restore rsi to have legal interpreter frame, |
1218 // i.e., bci == 0 <=> rsi == code_base() | 1216 // i.e., bci == 0 <=> rsi == code_base() |
1219 // Can't call_VM until bcp is within reasonable. | 1217 // Can't call_VM until bcp is within reasonable. |
1220 __ get_method(method); // method is junk from thread_in_native to now. | 1218 __ get_method(method); // method is junk from thread_in_native to now. |
1221 __ verify_oop(method); | 1219 __ movptr(rsi, Address(method,Method::const_offset())); // get ConstMethod* |
1222 __ movptr(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop | 1220 __ lea(rsi, Address(rsi,ConstMethod::codes_offset())); // get codebase |
1223 __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase | |
1224 | 1221 |
1225 // handle exceptions (exception handling will handle unlocking!) | 1222 // handle exceptions (exception handling will handle unlocking!) |
1226 { Label L; | 1223 { Label L; |
1227 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); | 1224 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); |
1228 __ jcc(Assembler::zero, L); | 1225 __ jcc(Assembler::zero, L); |
1234 __ bind(L); | 1231 __ bind(L); |
1235 } | 1232 } |
1236 | 1233 |
1237 // do unlocking if necessary | 1234 // do unlocking if necessary |
1238 { Label L; | 1235 { Label L; |
1239 __ movl(t, Address(method, methodOopDesc::access_flags_offset())); | 1236 __ movl(t, Address(method, Method::access_flags_offset())); |
1240 __ testl(t, JVM_ACC_SYNCHRONIZED); | 1237 __ testl(t, JVM_ACC_SYNCHRONIZED); |
1241 __ jcc(Assembler::zero, L); | 1238 __ jcc(Assembler::zero, L); |
1242 // the code below should be shared with interpreter macro assembler implementation | 1239 // the code below should be shared with interpreter macro assembler implementation |
1243 { Label unlock; | 1240 { Label unlock; |
1244 // BasicObjectLock will be first in list, since this is a synchronized method. However, need | 1241 // BasicObjectLock will be first in list, since this is a synchronized method. However, need |
1294 // | 1291 // |
1295 address InterpreterGenerator::generate_normal_entry(bool synchronized) { | 1292 address InterpreterGenerator::generate_normal_entry(bool synchronized) { |
1296 // determine code generation flags | 1293 // determine code generation flags |
1297 bool inc_counter = UseCompiler || CountCompiledCalls; | 1294 bool inc_counter = UseCompiler || CountCompiledCalls; |
1298 | 1295 |
1299 // rbx,: methodOop | 1296 // rbx,: Method* |
1300 // rsi: sender sp | 1297 // rsi: sender sp |
1301 address entry_point = __ pc(); | 1298 address entry_point = __ pc(); |
1302 | 1299 |
1303 | 1300 |
1304 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset()); | 1301 const Address size_of_parameters(rbx, Method::size_of_parameters_offset()); |
1305 const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset()); | 1302 const Address size_of_locals (rbx, Method::size_of_locals_offset()); |
1306 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset()); | 1303 const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); |
1307 const Address access_flags (rbx, methodOopDesc::access_flags_offset()); | 1304 const Address access_flags (rbx, Method::access_flags_offset()); |
1308 | 1305 |
1309 // get parameter size (always needed) | 1306 // get parameter size (always needed) |
1310 __ load_unsigned_short(rcx, size_of_parameters); | 1307 __ load_unsigned_short(rcx, size_of_parameters); |
1311 | 1308 |
1312 // rbx,: methodOop | 1309 // rbx,: Method* |
1313 // rcx: size of parameters | 1310 // rcx: size of parameters |
1314 | 1311 |
1315 // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i ) | 1312 // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i ) |
1316 | 1313 |
1317 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words | 1314 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words |
1462 // When control flow reaches any of the entry types for the interpreter | 1459 // When control flow reaches any of the entry types for the interpreter |
1463 // the following holds -> | 1460 // the following holds -> |
1464 // | 1461 // |
1465 // Arguments: | 1462 // Arguments: |
1466 // | 1463 // |
1467 // rbx,: methodOop | 1464 // rbx,: Method* |
1468 // rcx: receiver | 1465 // rcx: receiver |
1469 // | 1466 // |
1470 // | 1467 // |
1471 // Stack layout immediately at entry | 1468 // Stack layout immediately at entry |
1472 // | 1469 // |
1488 // ... | 1485 // ... |
1489 // [ monitor entry ] | 1486 // [ monitor entry ] |
1490 // [ expr. stack bottom ] | 1487 // [ expr. stack bottom ] |
1491 // [ saved rsi ] | 1488 // [ saved rsi ] |
1492 // [ current rdi ] | 1489 // [ current rdi ] |
1493 // [ methodOop ] | 1490 // [ Method* ] |
1494 // [ saved rbp, ] <--- rbp, | 1491 // [ saved rbp, ] <--- rbp, |
1495 // [ return address ] | 1492 // [ return address ] |
1496 // [ local variable m ] | 1493 // [ local variable m ] |
1497 // ... | 1494 // ... |
1498 // [ local variable 1 ] | 1495 // [ local variable 1 ] |
1554 return true; | 1551 return true; |
1555 } | 1552 } |
1556 } | 1553 } |
1557 | 1554 |
1558 // How much stack a method activation needs in words. | 1555 // How much stack a method activation needs in words. |
1559 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) { | 1556 int AbstractInterpreter::size_top_interpreter_activation(Method* method) { |
1560 | 1557 |
1561 const int stub_code = 4; // see generate_call_stub | 1558 const int stub_code = 4; // see generate_call_stub |
1562 // Save space for one monitor to get into the interpreted method in case | 1559 // Save space for one monitor to get into the interpreted method in case |
1563 // the method is synchronized | 1560 // the method is synchronized |
1564 int monitor_size = method->is_synchronized() ? | 1561 int monitor_size = method->is_synchronized() ? |
1566 | 1563 |
1567 // total overhead size: entry_size + (saved rbp, thru expr stack bottom). | 1564 // total overhead size: entry_size + (saved rbp, thru expr stack bottom). |
1568 // be sure to change this if you add/subtract anything to/from the overhead area | 1565 // be sure to change this if you add/subtract anything to/from the overhead area |
1569 const int overhead_size = -frame::interpreter_frame_initial_sp_offset; | 1566 const int overhead_size = -frame::interpreter_frame_initial_sp_offset; |
1570 | 1567 |
1571 const int extra_stack = methodOopDesc::extra_stack_entries(); | 1568 const int extra_stack = Method::extra_stack_entries(); |
1572 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) * | 1569 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) * |
1573 Interpreter::stackElementWords; | 1570 Interpreter::stackElementWords; |
1574 return overhead_size + method_stack + stub_code; | 1571 return overhead_size + method_stack + stub_code; |
1575 } | 1572 } |
1576 | 1573 |
1577 // asm based interpreter deoptimization helpers | 1574 // asm based interpreter deoptimization helpers |
1578 | 1575 |
1579 int AbstractInterpreter::layout_activation(methodOop method, | 1576 int AbstractInterpreter::layout_activation(Method* method, |
1580 int tempcount, | 1577 int tempcount, |
1581 int popframe_extra_args, | 1578 int popframe_extra_args, |
1582 int moncount, | 1579 int moncount, |
1583 int caller_actual_parameters, | 1580 int caller_actual_parameters, |
1584 int callee_param_count, | 1581 int callee_param_count, |
1735 __ testl(rax, rax); | 1732 __ testl(rax, rax); |
1736 __ jcc(Assembler::notZero, caller_not_deoptimized); | 1733 __ jcc(Assembler::notZero, caller_not_deoptimized); |
1737 | 1734 |
1738 // Compute size of arguments for saving when returning to deoptimized caller | 1735 // Compute size of arguments for saving when returning to deoptimized caller |
1739 __ get_method(rax); | 1736 __ get_method(rax); |
1740 __ verify_oop(rax); | 1737 __ load_unsigned_short(rax, Address(rax, in_bytes(Method::size_of_parameters_offset()))); |
1741 __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset()))); | |
1742 __ shlptr(rax, Interpreter::logStackElementSize); | 1738 __ shlptr(rax, Interpreter::logStackElementSize); |
1743 __ restore_locals(); | 1739 __ restore_locals(); |
1744 __ subptr(rdi, rax); | 1740 __ subptr(rdi, rax); |
1745 __ addptr(rdi, wordSize); | 1741 __ addptr(rdi, wordSize); |
1746 // Save these arguments | 1742 // Save these arguments |
1813 __ movptr(Address(thread, JavaThread::vm_result_offset()), rax); | 1809 __ movptr(Address(thread, JavaThread::vm_result_offset()), rax); |
1814 // remove the activation (without doing throws on illegalMonitorExceptions) | 1810 // remove the activation (without doing throws on illegalMonitorExceptions) |
1815 __ remove_activation(vtos, rdx, false, true, false); | 1811 __ remove_activation(vtos, rdx, false, true, false); |
1816 // restore exception | 1812 // restore exception |
1817 __ get_thread(thread); | 1813 __ get_thread(thread); |
1818 __ movptr(rax, Address(thread, JavaThread::vm_result_offset())); | 1814 __ get_vm_result(rax, thread); |
1819 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); | |
1820 __ verify_oop(rax); | |
1821 | 1815 |
1822 // Inbetween activations - previous activation type unknown yet | 1816 // Inbetween activations - previous activation type unknown yet |
1823 // compute continuation point - the continuation point expects | 1817 // compute continuation point - the continuation point expects |
1824 // the following registers set up: | 1818 // the following registers set up: |
1825 // | 1819 // |