/*
 * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_parse3.cpp.incl"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
        access_OK = true;
      }
    }
  }

  return access_OK;
}


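// Shared handler for the getfield/putfield and getstatic/putstatic bytecodes.
// 'is_get' is true for the get* (load) forms and false for the put* (store)
// forms; 'is_field' is true for instance-field bytecodes and false for statics.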
void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = do_null_check(peek(obj_depth), T_OBJECT);
    // Compile-time detect of null-exception?
    if (stopped())  return;

    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");

    if (is_get) {
      --_sp;  // pop receiver before getting
      do_get_xxx(tjp, obj, field, is_field);
    } else {
      do_put_xxx(tjp, obj, field, is_field);
      --_sp;  // pop receiver after putting
    }
  } else {
    const TypeKlassPtr* tkp = TypeKlassPtr::make(field_holder);
    obj = _gvn.makecon(tkp);
    if (is_get) {
      do_get_xxx(tkp, obj, field, is_field);
    } else {
      do_put_xxx(tkp, obj, field, is_field);
    }
  }
}


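// Emit the load for a get* bytecode.  'obj' is the null-checked receiver for
// an instance field, or a klass constant for a static field; the loaded value
// is pushed onto the Java stack.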
void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant() && push_constant(field->constant_value()))  return;

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if( bt == T_OBJECT ) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  // Build the load.
  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    do_null_assert(peek(), T_OBJECT);
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}

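// Emit the store for a put* bytecode.  The value to store is popped from the
// Java stack here; volatile stores are bracketed by the appropriate memory
// barriers (a release barrier before, volatile membars after).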
void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol)  insert_mem_bar(Op_MemBarRelease);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypePtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
  } else {
    store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // First place the specific membar for THIS volatile index.  This first
    // membar is dependent on the store, keeping any other membars generated
    // below from floating up past the store.
    int adr_idx = C->get_alias_index(adr_type);
    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx);

    // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
    // volatile alias indices.  Skip this if the membar is redundant.
    if (adr_idx != Compile::AliasIdxBot) {
      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot);
    }

    // Finally, place alias-index-specific membars for each volatile index
    // that isn't the adr_idx membar.  Typically there's only 1 or 2.
    for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
      if (i != adr_idx && C->alias_type(i)->is_volatile()) {
        insert_mem_bar_volatile(Op_MemBarVolatile, i);
      }
    }
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  if (is_field && field->is_final()) {
    set_wrote_final(true);
  }
}


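// Push the value of a ciConstant onto the Java stack.  Returns true on
// success; returns false if the value cannot be represented as a compile-time
// constant (e.g. an oop with no encoding), in which case the caller falls
// back to a normal load.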
bool Parse::push_constant(ciConstant constant) {
  switch (constant.basic_type()) {
  case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
  case T_INT:      push( intcon(constant.as_int())     ); break;
  case T_CHAR:     push( intcon(constant.as_char())    ); break;
  case T_BYTE:     push( intcon(constant.as_byte())    ); break;
  case T_SHORT:    push( intcon(constant.as_short())   ); break;
  case T_FLOAT:    push( makecon(TypeF::make(constant.as_float())) );  break;
  case T_DOUBLE:   push_pair( makecon(TypeD::make(constant.as_double())) );  break;
  case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
  case T_ARRAY:
  case T_OBJECT: {
    // the oop is in perm space if the ciObject "has_encoding"
    ciObject* oop_constant = constant.as_object();
    if (oop_constant->is_null_object()) {
      push( zerocon(T_OBJECT) );
      break;
    } else if (oop_constant->has_encoding()) {
      push( makecon(TypeOopPtr::make_from_constant(oop_constant)) );
      break;
    } else {
      // we cannot inline the oop, but we can use it later to narrow a type
      return false;
    }
  }
  case T_ILLEGAL: {
    // Invalid ciConstant returned due to OutOfMemoryError in the CI
    assert(C->env()->failing(), "otherwise should not see this");
    // These always occur because of object types; we are going to
    // bail out anyway, so make the stack depths match up
    push( zerocon(T_OBJECT) );
    return false;
  }
  default:
    ShouldNotReachHere();
    return false;
  }

  // success
  return true;
}



//=============================================================================
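// Handle the 'anewarray' bytecode: allocate a one-dimensional array whose
// element class is the klass named at the current constant-pool index.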
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap when the class the array contains is not loaded:
  // we need the loaded class for the rest of the graph, but we must not
  // initialize the container class (see Java spec)!!!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val);
  push(obj);
}


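// Handle the 'newarray' bytecode: allocate a one-dimensional array of the
// given primitive element type.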
void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const Type*    elemtype = _gvn.type(array)->is_aryptr()->elem();
    const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerWord);
      Node*    eaddr  = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
    }
  }
  return array;
}

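// Handle the 'multianewarray' bytecode.  Small, constant outer dimensions are
// expanded inline via expand_multianewarray; otherwise the allocation is done
// with a call into the runtime (one of the OptoRuntime::multianewarray*_Java
// entry points below).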
void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  enum { MAX_DIMENSION = 5 };
  if (ndimensions > MAX_DIMENSION || ndimensions <= 0) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node* length[MAX_DIMENSION+1];
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con < 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use the inline [a]newarray expansion instead of a multianewarray
  // runtime call if there is only one dimension,
  // or if all non-final dimensions are small constants.
  if (expand_count == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = expand_multianewarray(array_klass, &length[0], ndimensions);
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  //case 1: Actually, there is no case 1.  It's handled by new_array.
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  default: ShouldNotReachHere();
  };

  Node* c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                              OptoRuntime::multianewarray_Type(ndimensions),
                              fun, NULL, TypeRawPtr::BOTTOM,
                              makecon(TypeKlassPtr::make(array_klass)),
                              length[0], length[1], length[2],
                              length[3], length[4]);
  Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}
|