comparison src/share/vm/interpreter/rewriter.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children 9a25e0c45327
/*
 * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_rewriter.cpp.incl"

// Computes an index_map (original_index -> new_index) and an inverse_index_map
// (new_index -> original_index) for constant pool entries that are referred to
// by the interpreter at runtime via the constant pool cache.
void Rewriter::compute_index_maps(constantPoolHandle pool, intArray*& index_map, intStack*& inverse_index_map) {
  const int length = pool->length();
  index_map = new intArray(length, -1);
  // Choose an initial value large enough that we don't get frequent
  // calls to grow().
  inverse_index_map = new intStack(length / 2);
  for (int i = 0; i < length; i++) {
    switch (pool->tag_at(i).value()) {
      case JVM_CONSTANT_Fieldref          : // fall through
      case JVM_CONSTANT_Methodref         : // fall through
      case JVM_CONSTANT_InterfaceMethodref: {
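        // Record both directions of the mapping: index_map[original cp index]
        // holds the new cache entry index, and inverse_index_map[cache index]
        // holds the original constant pool index.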
        index_map->at_put(i, inverse_index_map->length());
        inverse_index_map->append(i);
      }
    }
  }
}


// Creates a constant pool cache given an inverse_index_map
constantPoolCacheHandle Rewriter::new_constant_pool_cache(intArray& inverse_index_map, TRAPS) {
  const int length = inverse_index_map.length();
  constantPoolCacheOop cache = oopFactory::new_constantPoolCache(length, CHECK_(constantPoolCacheHandle()));
  cache->initialize(inverse_index_map);
  return constantPoolCacheHandle(THREAD, cache);
}


// The new finalization semantics say that registration of
// finalizable objects must be performed on successful return from the
// Object.<init> constructor. We could implement this trivially if
// <init> were never rewritten but since JVMTI allows this to occur, a
// more complicated solution is required. A special return bytecode
// is used only by Object.<init> to signal the finalization
// registration point. Additionally local 0 must be preserved so it's
// available to pass to the registration function. For simplicity we
// require that local 0 is never overwritten so it's available as an
// argument for registration.

void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
  RawBytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.raw_next();
    switch (opcode) {
      case Bytecodes::_return: *bcs.bcp() = Bytecodes::_return_register_finalizer; break;

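      // Stores that carry an explicit local-variable index operand: only a
      // store into local 0 is a problem, so any other index is left alone.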
      case Bytecodes::_istore:
      case Bytecodes::_lstore:
      case Bytecodes::_fstore:
      case Bytecodes::_dstore:
      case Bytecodes::_astore:
        if (bcs.get_index() != 0) continue;

        // fall through
      case Bytecodes::_istore_0:
      case Bytecodes::_lstore_0:
      case Bytecodes::_fstore_0:
      case Bytecodes::_dstore_0:
      case Bytecodes::_astore_0:
        THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
                  "can't overwrite local 0 in Object.<init>");
        break;
    }
  }
}


// Rewrites a method given the index_map information
methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map, TRAPS) {

  int nof_jsrs = 0;
  bool has_monitor_bytecodes = false;

  {
    // We cannot tolerate a GC in this block, because we've
    // cached the bytecodes in 'code_base'. If the methodOop
    // moves, the bytecodes will also move.
    No_Safepoint_Verifier nsv;
    Bytecodes::Code c;

    // Bytecodes and their length
    const address code_base = method->code_base();
    const int code_length = method->code_size();

    int bc_length;
    for (int bci = 0; bci < code_length; bci += bc_length) {
      address bcp = code_base + bci;
      c = (Bytecodes::Code)(*bcp);

      // Since we have the code, see if we can get the length
      // directly. Some more complicated bytecodes will report
      // a length of zero, meaning we need to make another method
      // call to calculate the length.
      bc_length = Bytecodes::length_for(c);
      if (bc_length == 0) {
        bc_length = Bytecodes::length_at(bcp);

        // length_at will put us at the bytecode after the one modified
        // by 'wide'. We don't currently examine any of the bytecodes
        // modified by wide, but in case we do in the future...
        if (c == Bytecodes::_wide) {
          c = (Bytecodes::Code)bcp[1];
        }
      }

      assert(bc_length != 0, "impossible bytecode length");

      switch (c) {
        case Bytecodes::_lookupswitch   : {
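          // Choose between the template interpreter's two specialized switch
          // implementations; the C++ interpreter (CC_INTERP) keeps the
          // original lookupswitch bytecode.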
#ifndef CC_INTERP
          Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
          bc->set_code(
            bc->number_of_pairs() < BinarySwitchThreshold
            ? Bytecodes::_fast_linearswitch
            : Bytecodes::_fast_binaryswitch
          );
#endif
          break;
        }
        case Bytecodes::_getstatic      : // fall through
        case Bytecodes::_putstatic      : // fall through
        case Bytecodes::_getfield       : // fall through
        case Bytecodes::_putfield       : // fall through
        case Bytecodes::_invokevirtual  : // fall through
        case Bytecodes::_invokespecial  : // fall through
        case Bytecodes::_invokestatic   : // fall through
        case Bytecodes::_invokeinterface: {
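          // Replace the two-byte constant pool index operand (Java/big-endian
          // order) with the corresponding constant pool cache index, stored in
          // native byte order so the interpreter can read it directly.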
          address p = bcp + 1;
          Bytes::put_native_u2(p, index_map[Bytes::get_Java_u2(p)]);
          break;
        }
        case Bytecodes::_jsr            : // fall through
        case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
        case Bytecodes::_monitorenter   : // fall through
        case Bytecodes::_monitorexit    : has_monitor_bytecodes = true; break;
      }
    }
  }

  // Update access flags
  if (has_monitor_bytecodes) {
    method->set_has_monitor_bytecodes();
  }

  // The presence of a jsr bytecode implies that the method might potentially
  // have to be rewritten, so we run the oopMapGenerator on the method
  if (nof_jsrs > 0) {
    method->set_has_jsrs();
    ResolveOopMapConflicts romc(method);
    methodHandle original_method = method;
    method = romc.do_potential_rewrite(CHECK_(methodHandle()));
    if (method() != original_method()) {
      // Insert invalid bytecode into original methodOop and set
      // interpreter entrypoint, so that executing this method
      // will manifest itself in an easily recognizable form.
      address bcp = original_method->bcp_from(0);
      *bcp = (u1)Bytecodes::_shouldnotreachhere;
      int kind = Interpreter::method_kind(original_method);
      original_method->set_interpreter_kind(kind);
    }

    // Update monitor matching info.
    if (romc.monitor_safe()) {
      method->set_guaranteed_monitor_matching();
    }
  }

  // Setup method entrypoints for compiler and interpreter
  method->link_method(method, CHECK_(methodHandle()));

  return method;
}


void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
  // gather starting points
  ResourceMark rm(THREAD);
  constantPoolHandle pool    (THREAD, klass->constants());
  objArrayHandle     methods (THREAD, klass->methods());
  assert(pool->cache() == NULL, "constant pool cache must not be set yet");

  // determine index maps for methodOop rewriting
  intArray* index_map         = NULL;
  intStack* inverse_index_map = NULL;
  compute_index_maps(pool, index_map, inverse_index_map);

  // allocate constant pool cache
  constantPoolCacheHandle cache = new_constant_pool_cache(*inverse_index_map, CHECK);
  pool->set_cache(cache());
  cache->set_constant_pool(pool());
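  // The pool and its cache now reference each other; the bytecode rewriting
  // below replaces constant pool indices with indices into this cache.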

  if (RegisterFinalizersAtInit && klass->name() == vmSymbols::java_lang_Object()) {
    int i = methods->length();
    while (i-- > 0) {
      methodOop method = (methodOop)methods->obj_at(i);
      if (method->intrinsic_id() == vmIntrinsics::_Object_init) {
        // rewrite the return bytecodes of Object.<init> to register the
        // object for finalization if needed.
        methodHandle m(THREAD, method);
        rewrite_Object_init(m, CHECK);
        break;
      }
    }
  }

  // rewrite methods
  { int i = methods->length();
    while (i-- > 0) {
      methodHandle m(THREAD, (methodOop)methods->obj_at(i));
      m = rewrite_method(m, *index_map, CHECK);
      // Method might have gotten rewritten.
      methods->obj_at_put(i, m());
    }
  }
}