src/cpu/sparc/vm/icBuffer_sparc.cpp @ 20304:a22acf6d7598

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"

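// Size in bytes of one IC stub: a patchable set of the cached value into
// G5_inline_cache_reg, a jump to the entry point with a delay-slot nop,
// plus one extra byte so that code_end can be set in the CodeBuffer.  When
// TraceJumps is enabled the jump macros expand into much larger tracing
// sequences, hence the oversized estimates below.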
int InlineCacheBuffer::ic_stub_code_size() {
#ifdef _LP64
  if (TraceJumps) return 600 * wordSize;
  return (NativeMovConstReg::instruction_size +  // sethi; add
          NativeJump::instruction_size +         // sethi; jmp; delay slot
          (1*BytesPerInstWord) + 1);             // flush + 1 extra byte
#else
  if (TraceJumps) return 300 * wordSize;
  return (2+2+1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
#endif
}

void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
  ResourceMark rm;
  CodeBuffer     code(code_begin, ic_stub_code_size());
  MacroAssembler* masm            = new MacroAssembler(&code);
  // note: even though the code contains embedded metadata, we do not need reloc info
  // because
  // (1) the metadata is old (i.e., doesn't matter for scavenges)
  // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
  AddressLiteral cached_value_addrlit((address)cached_value, relocInfo::none);
  // Force the set to generate the fixed sequence so next_instruction_address works
  masm->patchable_set(cached_value_addrlit, G5_inline_cache_reg);
  assert(G3_scratch != G5_method, "Do not clobber the method oop in the transition stub");
  assert(G3_scratch != G5_inline_cache_reg, "Do not clobber the inline cache register in the transition stub");
  AddressLiteral entry(entry_point);
  masm->JUMP(entry, G3_scratch, 0);
  masm->delayed()->nop();
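  // commit the assembled code and flush the instruction cache so the new
  // stub is safely executable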
  masm->flush();
}


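// Decode the stub emitted by assemble_ic_buffer_code: the fixed-length set
// sequence comes first, followed by the jump whose target is the stub's
// entry point.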
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);   // creation also verifies the object
  NativeJump*        jump = nativeJump_at(move->next_instruction_address());
  return jump->jump_destination();
}


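// The cached value is the immediate operand of the leading set sequence
// (see the note in assemble_ic_buffer_code on why it needs no reloc info).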
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);   // creation also verifies the object
  NativeJump*        jump = nativeJump_at(move->next_instruction_address());
  return (void*)move->data();
}
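
// Usage sketch (the shared ICStub code in code/icBuffer.cpp, not this file):
//   InlineCacheBuffer::assemble_ic_buffer_code(code_begin, cached_value, entry_point);
//   address dest = InlineCacheBuffer::ic_buffer_entry_point(code_begin);
//   void* value  = InlineCacheBuffer::ic_buffer_cached_value(code_begin);
// dest and value must round-trip to the entry_point and cached_value that
// were assembled, which is why patchable_set forces the fixed sequence.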