/*
 * Copyright 1998-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_relocInfo_sparc.cpp.incl"

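// pd_set_data_value patches the instruction(s) at addr() so that they
// materialize the value x (plus byte offset o).  On SPARC the value is
// spread across instruction immediates: either the simm13 field of a
// load/store/arithmetic instruction, or a sethi plus a low-10-bit
// immediate in the following instruction.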
void Relocation::pd_set_data_value(address x, intptr_t o) {
  NativeInstruction* ip = nativeInstruction_at(addr());
  jint inst = ip->long_at(0);
  assert(inst != NativeInstruction::illegal_instruction(), "no breakpoint");
  switch (Assembler::inv_op(inst)) {

  case Assembler::ldst_op:
    #ifdef ASSERT
      switch (Assembler::inv_op3(inst)) {
        case Assembler::lduw_op3:
        case Assembler::ldub_op3:
        case Assembler::lduh_op3:
        case Assembler::ldd_op3:
        case Assembler::ldsw_op3:
        case Assembler::ldsb_op3:
        case Assembler::ldsh_op3:
        case Assembler::ldx_op3:
        case Assembler::ldf_op3:
        case Assembler::lddf_op3:
        case Assembler::stw_op3:
        case Assembler::stb_op3:
        case Assembler::sth_op3:
        case Assembler::std_op3:
        case Assembler::stx_op3:
        case Assembler::stf_op3:
        case Assembler::stdf_op3:
        case Assembler::casa_op3:
        case Assembler::casxa_op3:
          break;
        default:
          ShouldNotReachHere();
      }
      goto do_non_sethi;
    #endif
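    // Note: in product builds this case falls through into arith_op;
    // under ASSERT the goto above skips the arith_op opcode check and
    // lands directly on the shared simm13 patching code below.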

  case Assembler::arith_op:
    #ifdef ASSERT
      switch (Assembler::inv_op3(inst)) {
        case Assembler::or_op3:
        case Assembler::add_op3:
        case Assembler::jmpl_op3:
          break;
        default:
          ShouldNotReachHere();
      }
    do_non_sethi:;
    #endif
    {
    guarantee(Assembler::inv_immed(inst), "must have a simm13 field");
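    // The new immediate is the low 10 bits of x plus the byte offset o;
    // for example, with x == 0x12345678, low10(x) == 0x278.  The guarantee
    // below catches the unlikely case where adding o overflows 13 bits.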
    int simm13 = Assembler::low10((intptr_t)x) + o;
    guarantee(Assembler::is_simm13(simm13), "offset can't overflow simm13");
    inst &= ~Assembler::simm(    -1, 13);
    inst |=  Assembler::simm(simm13, 13);
    ip->set_long_at(0, inst);
    }
    break;

  case Assembler::branch_op:
    {
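    // Constants are materialized with a sethi (high 22 bits) followed by
    // an instruction carrying the low 10 bits in its simm13 field, e.g.
    // in the 32-bit case:
    //   sethi %hi(x), Rd         ! bits 31..10
    //   or    Rd, %lo(x), Rd     ! bits 9..0
    // Full 64-bit constants use the longer sequence patched by
    // set_data64_sethi below.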
#ifdef _LP64
    jint inst2;
    guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
    if (format() != 0) {
      assert(type() == relocInfo::oop_type, "only narrow oops case");
      jint np = oopDesc::encode_heap_oop((oop)x);
      inst &= ~Assembler::hi22(-1);
      inst |=  Assembler::hi22((intptr_t)np);
      ip->set_long_at(0, inst);
      inst2 = ip->long_at( NativeInstruction::nop_instruction_size );
      guarantee(Assembler::inv_op(inst2)==Assembler::arith_op, "arith op");
      ip->set_long_at(NativeInstruction::nop_instruction_size, ip->set_data32_simm13( inst2, (intptr_t)np));
      break;
    }
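    // Full 64-bit constant: patch the multi-instruction sethi-based
    // sequence that materializes it.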
    ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
#ifdef COMPILER2
    // [RGV] Someone must have missed putting in a reloc entry for the
    // add in compiler2.
    inst2 = ip->long_at( NativeMovConstReg::add_offset );
    guarantee(Assembler::inv_op(inst2)==Assembler::arith_op, "arith op");
    ip->set_long_at(NativeMovConstReg::add_offset,ip->set_data32_simm13( inst2, (intptr_t)x+o));
#endif
#else
    guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
    inst &= ~Assembler::hi22(     -1);
    inst |=  Assembler::hi22((intptr_t)x);
    // (ignore offset; it doesn't play into the sethi)
    ip->set_long_at(0, inst);
#endif
    }
    break;

  default:
    guarantee(false, "instruction must perform arithmetic or memory access");
  }
}


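// Return the destination of the call or branch at addr().  If the
// instruction was just copied from orig_addr, compensate for the move so
// the result is the destination the original instruction referred to.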
address Relocation::pd_call_destination(address orig_addr) {
  intptr_t adj = 0;
  if (orig_addr != NULL) {
    // We just moved this call instruction from orig_addr to addr().
    // This means its target will appear to have grown by addr() - orig_addr.
    adj = -( addr() - orig_addr );
  }
  if (NativeCall::is_call_at(addr())) {
    NativeCall* call = nativeCall_at(addr());
    return call->destination() + adj;
  }
  if (NativeFarCall::is_call_at(addr())) {
    NativeFarCall* call = nativeFarCall_at(addr());
    return call->destination() + adj;
  }
  // Special case:  Patchable branch local to the code cache.
  // This will break badly if the code cache grows larger than a few MB,
  // since the branch displacement cannot reach more distant targets.
  NativeGeneralJump* br = nativeGeneralJump_at(addr());
  return br->jump_destination() + adj;
}


void Relocation::pd_set_call_destination(address x) {
  if (NativeCall::is_call_at(addr())) {
    NativeCall* call = nativeCall_at(addr());
    call->set_destination(x);
    return;
  }
  if (NativeFarCall::is_call_at(addr())) {
    NativeFarCall* call = nativeFarCall_at(addr());
    call->set_destination(x);
    return;
  }
  // Special case:  Patchable branch local to the code cache.
  // This will break badly if the code cache grows larger than a few MB.
  NativeGeneralJump* br = nativeGeneralJump_at(addr());
  br->set_jump_destination(x);
}


address* Relocation::pd_address_in_code() {
  // SPARC never embeds addresses in code, at present.
  //assert(type() == relocInfo::oop_type, "only oops are inlined at present");
  return (address*)addr();
}


address Relocation::pd_get_address_from_code() {
  // SPARC never embeds addresses in code, at present.
  //assert(type() == relocInfo::oop_type, "only oops are inlined at present");
  return *(address*)addr();
}


int Relocation::pd_breakpoint_size() {
  // minimum breakpoint size, in short words
  return NativeIllegalInstruction::instruction_size / sizeof(short);
}

void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) {
  Untested("pd_swap_in_breakpoint");
  // %%% probably do not need a general instrlen; just use the trap size
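  // Save the original instruction halves into the relocation data so
  // pd_swap_out_breakpoint can restore them later.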
  if (instrs != NULL) {
    assert(instrlen * sizeof(short) == NativeIllegalInstruction::instruction_size, "enough instrlen in reloc. data");
    for (int i = 0; i < instrlen; i++) {
      instrs[i] = ((short*)x)[i];
    }
  }
  NativeIllegalInstruction::insert(x);
}


void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) {
  Untested("pd_swap_out_breakpoint");
  assert(instrlen * sizeof(short) == sizeof(int), "enough buf");
  // Reassemble the saved 16-bit halves into one 32-bit instruction word.
  union { int l; short s[2]; } u;
  for (int i = 0; i < instrlen; i++) {
    u.s[i] = instrs[i];
  }
  NativeInstruction* ni = nativeInstruction_at(x);
  ni->set_long_at(0, u.l);
}

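// Nothing to fix up for safepoint polls: the poll instruction does not
// encode a code-relative address, so it is unaffected when code moves.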
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}

void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}