0
|
1 /*
|
844
|
2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
0
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 *
|
|
5 * This code is free software; you can redistribute it and/or modify it
|
|
6 * under the terms of the GNU General Public License version 2 only, as
|
|
7 * published by the Free Software Foundation.
|
|
8 *
|
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 * version 2 for more details (a copy is included in the LICENSE file that
|
|
13 * accompanied this code).
|
|
14 *
|
|
15 * You should have received a copy of the GNU General Public License version
|
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 *
|
|
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
|
20 * CA 95054 USA or visit www.sun.com if you need additional information or
|
|
21 * have any questions.
|
|
22 *
|
|
23 */
|
|
24
|
|
25 #include "incls/_precompiled.incl"
|
|
26 #include "incls/_vtableStubs.cpp.incl"
|
|
27
|
|
28 // -----------------------------------------------------------------------------------------
|
|
29 // Implementation of VtableStub
|
|
30
|
|
// Current allocation chunk for stub storage and its exclusive end;
// managed by VtableStub::operator new below.
address VtableStub::_chunk             = NULL;
address VtableStub::_chunk_end         = NULL;
// Machine-dependent location of the receiver argument, shared by all stubs.
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();

// NOTE(review): incremented once per stub allocation in operator new,
// not once per chunk — the name is misleading.
static int num_vtable_chunks = 0;
|
|
36
|
|
37
|
|
38 void* VtableStub::operator new(size_t size, int code_size) {
|
|
39 assert(size == sizeof(VtableStub), "mismatched size");
|
|
40 num_vtable_chunks++;
|
|
41 // compute real VtableStub size (rounded to nearest word)
|
|
42 const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
|
|
43 // malloc them in chunks to minimize header overhead
|
|
44 const int chunk_factor = 32;
|
|
45 if (_chunk == NULL || _chunk + real_size > _chunk_end) {
|
|
46 const int bytes = chunk_factor * real_size + pd_code_alignment();
|
|
47 BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
|
|
48 if( blob == NULL ) vm_exit_out_of_memory1(bytes, "CodeCache: no room for %s", "vtable chunks");
|
|
49 _chunk = blob->instructions_begin();
|
|
50 _chunk_end = _chunk + bytes;
|
|
51 VTune::register_stub("vtable stub", _chunk, _chunk_end);
|
|
52 Forte::register_stub("vtable stub", _chunk, _chunk_end);
|
|
53 // Notify JVMTI about this stub. The event will be recorded by the enclosing
|
|
54 // JvmtiDynamicCodeEventCollector and posted when this thread has released
|
|
55 // all locks.
|
|
56 if (JvmtiExport::should_post_dynamic_code_generated()) {
|
|
57 JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
|
|
58 }
|
|
59 align_chunk();
|
|
60 }
|
|
61 assert(_chunk + real_size <= _chunk_end, "bad allocation");
|
|
62 void* res = _chunk;
|
|
63 _chunk += real_size;
|
|
64 align_chunk();
|
|
65 return res;
|
|
66 }
|
|
67
|
|
68
|
|
// Debug dump of this stub: index, receiver location, and the half-open
// code range [code_begin, code_end[.
// NOTE(review): receiver_location() is printed with %d — assumes VMReg
// is printable as an int on all platforms; confirm.
void VtableStub::print() {
  tty->print("vtable stub (index = %d, receiver_location = %d, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
             index(), receiver_location(), code_begin(), code_end());
}
|
|
73
|
|
74
|
|
75 // -----------------------------------------------------------------------------------------
|
|
76 // Implementation of VtableStubs
|
|
77 //
|
|
78 // For each hash value there's a linked list of vtable stubs (with that
|
|
79 // hash value). Each list is anchored in a little hash _table, indexed
|
|
80 // by that hash value.
|
|
81
|
|
// Hash table of all created stubs: N buckets, each anchoring a singly
// linked list chained through VtableStub::next().
VtableStub* VtableStubs::_table[VtableStubs::N];
// Total number of stubs entered into the table (see enter()).
int VtableStubs::_number_of_vtable_stubs = 0;
|
|
84
|
|
85
|
|
86 void VtableStubs::initialize() {
|
|
87 VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
|
|
88 {
|
|
89 MutexLocker ml(VtableStubs_lock);
|
|
90 assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
|
|
91 assert(is_power_of_2(N), "N must be a power of 2");
|
|
92 for (int i = 0; i < N; i++) {
|
|
93 _table[i] = NULL;
|
|
94 }
|
|
95 }
|
|
96 }
|
|
97
|
|
98
|
|
// Returns the entry point of a stub dispatching through vtable slot
// (is_vtable_stub) or itable index (otherwise) 'vtable_index'.  With
// ShareVtableStubs enabled, a previously created stub for the same
// (kind, index) pair is reused; otherwise a new platform-specific stub
// is generated and registered in the hash table.
address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, methodOop method) {
  assert(vtable_index >= 0, "must be positive");

  VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
  if (s == NULL) {
    if (is_vtable_stub) {
      s = create_vtable_stub(vtable_index);
    } else {
      s = create_itable_stub(vtable_index);
    }
    enter(is_vtable_stub, vtable_index, s);
    if (PrintAdapterHandlers) {
      tty->print_cr("Decoding VtableStub %s[%d]@%d",
                    is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location());
      Disassembler::decode(s->code_begin(), s->code_end());
    }
  }
  // NOTE(review): s is dereferenced unconditionally — this assumes the
  // platform create_vtable_stub/create_itable_stub never return NULL
  // (e.g. they guarantee on CodeCache exhaustion); confirm.
  return s->entry_point();
}
|
|
118
|
|
119
|
|
120 inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
|
|
121 // Assumption: receiver_location < 4 in most cases.
|
|
122 int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
|
|
123 return (is_vtable_stub ? ~hash : hash) & mask;
|
|
124 }
|
|
125
|
|
126
|
|
127 VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
|
|
128 MutexLocker ml(VtableStubs_lock);
|
|
129 unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
|
|
130 VtableStub* s = _table[hash];
|
|
131 while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
|
|
132 return s;
|
|
133 }
|
|
134
|
|
135
|
|
136 void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
|
|
137 MutexLocker ml(VtableStubs_lock);
|
|
138 assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
|
|
139 unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
|
|
140 // enter s at the beginning of the corresponding list
|
|
141 s->set_next(_table[h]);
|
|
142 _table[h] = s;
|
|
143 _number_of_vtable_stubs++;
|
|
144 }
|
|
145
|
|
146
|
|
147 bool VtableStubs::is_entry_point(address pc) {
|
|
148 MutexLocker ml(VtableStubs_lock);
|
|
149 VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
|
|
150 uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
|
|
151 VtableStub* s;
|
|
152 for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
|
|
153 return s == stub;
|
|
154 }
|
|
155
|
|
156
|
|
// Returns true iff pc lies inside the code of any registered stub.
bool VtableStubs::contains(address pc) {
  // simple solution for now - we may want to use
  // a faster way if this function is called often
  return stub_containing(pc) != NULL;
}
|
|
162
|
|
163
|
|
164 VtableStub* VtableStubs::stub_containing(address pc) {
|
|
165 // Note: No locking needed since any change to the data structure
|
|
166 // happens with an atomic store into it (we don't care about
|
|
167 // consistency with the _number_of_vtable_stubs counter).
|
|
168 for (int i = 0; i < N; i++) {
|
|
169 for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
|
|
170 if (s->contains(pc)) return s;
|
|
171 }
|
|
172 }
|
|
173 return NULL;
|
|
174 }
|
|
175
|
|
// Module initialization entry point: delegates to VtableStubs::initialize().
void vtableStubs_init() {
  VtableStubs::initialize();
}
|
|
179
|
|
180
|
|
181 //-----------------------------------------------------------------------------------------------------
|
|
182 // Non-product code
|
|
183 #ifndef PRODUCT
|
|
184
|
|
185 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
|
|
186 ResourceMark rm;
|
|
187 HandleMark hm;
|
|
188 klassOop klass = receiver->klass();
|
|
189 instanceKlass* ik = instanceKlass::cast(klass);
|
|
190 klassVtable* vt = ik->vtable();
|
|
191 klass->print();
|
|
192 fatal3("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", index %d (vtable length %d)", (address)receiver, index, vt->length());
|
|
193 }
|
|
194
|
|
195 #endif // Product
|