annotate src/share/vm/opto/library_call.cpp @ 771:a77eddcd510c jdk7-b60

6843041: Remove duplicate README files in repositories (make/README)
Reviewed-by: robilad

author:   ohair
date:     Tue, 19 May 2009 17:40:10 -0700
parents:  9c6be3edf0dc
children: 93c14e5562c4

/*
 * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_library_call.cpp.incl"

class LibraryIntrinsic : public InlineCallGenerator {
  // Extend the set of intrinsics known to the runtime:
 public:
 private:
  bool             _is_virtual;
  vmIntrinsics::ID _intrinsic_id;

 public:
  LibraryIntrinsic(ciMethod* m, bool is_virtual, vmIntrinsics::ID id)
    : InlineCallGenerator(m),
      _is_virtual(is_virtual),
      _intrinsic_id(id)
  {
  }
  virtual bool is_intrinsic() const { return true; }
  virtual bool is_virtual()   const { return _is_virtual; }
  virtual JVMState* generate(JVMState* jvms);
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};


// Local helper class for LibraryIntrinsic:
class LibraryCallKit : public GraphKit {
 private:
  LibraryIntrinsic* _intrinsic;   // the library intrinsic being called

 public:
  LibraryCallKit(JVMState* caller, LibraryIntrinsic* intrinsic)
    : GraphKit(caller),
      _intrinsic(intrinsic)
  {
  }

  ciMethod*         caller()       const { return jvms()->method(); }
  int               bci()          const { return jvms()->bci(); }
  LibraryIntrinsic* intrinsic()    const { return _intrinsic; }
  vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  ciMethod*         callee()       const { return _intrinsic->method(); }
  ciSignature*      signature()    const { return callee()->signature(); }
  int               arg_size()     const { return callee()->arg_size(); }

  bool try_to_inline();

  // Helper functions to inline natives
  void push_result(RegionNode* region, PhiNode* value);
  Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  Node* generate_slow_guard(Node* test, RegionNode* region);
  Node* generate_fair_guard(Node* test, RegionNode* region);
  Node* generate_negative_guard(Node* index, RegionNode* region,
                                // resulting CastII of index:
                                Node* *pos_index = NULL);
  Node* generate_nonpositive_guard(Node* index, bool never_negative,
                                   // resulting CastII of index:
                                   Node* *pos_index = NULL);
  Node* generate_limit_guard(Node* offset, Node* subseq_length,
                             Node* array_length,
                             RegionNode* region);
  Node* generate_current_thread(Node* &tls_output);
  address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
                              bool disjoint_bases, const char* &name);
  Node* load_mirror_from_klass(Node* klass);
  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      int nargs,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null, int nargs,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null, nargs,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     int nargs,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null, nargs,
                                         region, null_path,
                                         offset);
  }
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, false);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, true);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, false);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, true);
  }
  Node* generate_array_guard_common(Node* kls, RegionNode* region,
                                    bool obj_array, bool not_array);
  Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                     bool is_virtual = false, bool is_static = false);
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }

  bool inline_string_compareTo();
  bool inline_string_indexOf();
  Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
  bool inline_string_equals();
  Node* pop_math_arg();
  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_trig(vmIntrinsics::ID id);
  bool inline_trans(vmIntrinsics::ID id);
  bool inline_abs(vmIntrinsics::ID id);
  bool inline_sqrt(vmIntrinsics::ID id);
  bool inline_pow(vmIntrinsics::ID id);
  bool inline_exp(vmIntrinsics::ID id);
  bool inline_min_max(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  // This returns Type::AnyPtr, RawPtr, or OopPtr.
  int classify_unsafe_addr(Node* &base, Node* &offset);
  Node* make_unsafe_address(Node* base, Node* offset);
  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
  bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
  bool inline_unsafe_allocate();
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();
  bool inline_native_time_funcs(bool isNano);
  bool inline_native_isInterrupted();
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();

  bool inline_native_newArray();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals();
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  bool inline_native_AtomicLong_get();
  bool inline_native_AtomicLong_attemptUpdate();
  bool is_method_invoke_or_aux_frame(JVMState* jvms);
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();

  // Helper functions for inlining arraycopy
  bool inline_arraycopy();
  void generate_arraycopy(const TypePtr* adr_type,
                          BasicType basic_elem_type,
                          Node* src,  Node* src_offset,
                          Node* dest, Node* dest_offset,
                          Node* copy_length,
                          int nargs,  // arguments on stack for debug info
                          bool disjoint_bases = false,
                          bool length_never_negative = false,
                          RegionNode* slow_region = NULL);
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  void generate_clear_array(const TypePtr* adr_type,
                            Node* dest,
                            BasicType basic_elem_type,
                            Node* slice_off,
                            Node* slice_len,
                            Node* slice_end);
  bool generate_block_arraycopy(const TypePtr* adr_type,
                                BasicType basic_elem_type,
                                AllocateNode* alloc,
                                Node* src,  Node* src_offset,
                                Node* dest, Node* dest_offset,
                                Node* dest_size);
  void generate_slow_arraycopy(const TypePtr* adr_type,
                               Node* src,  Node* src_offset,
                               Node* dest, Node* dest_offset,
                               Node* copy_length,
                               int nargs);
  Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
                                     Node* dest_elem_klass,
                                     Node* src,  Node* src_offset,
                                     Node* dest, Node* dest_offset,
                                     Node* copy_length, int nargs);
  Node* generate_generic_arraycopy(const TypePtr* adr_type,
                                   Node* src,  Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* copy_length, int nargs);
  void generate_unchecked_arraycopy(const TypePtr* adr_type,
                                    BasicType basic_elem_type,
                                    bool disjoint_bases,
                                    Node* src,  Node* src_offset,
                                    Node* dest, Node* dest_offset,
                                    Node* copy_length);
  bool inline_unsafe_CAS(BasicType type);
  bool inline_unsafe_ordered_store(BasicType type);
  bool inline_fp_conversions(vmIntrinsics::ID id);
  bool inline_bitCount(vmIntrinsics::ID id);
  bool inline_reverseBytes(vmIntrinsics::ID id);
};


//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (DisableIntrinsic[0] != '\0'
      && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) {
    // disabled by a user request on the command line:
    // example: -XX:DisableIntrinsic=_hashCode,_getClass
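    // (The test is a plain strstr over the flag string, so listing a longer
    // name also disables any intrinsic whose name is a substring of it.)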
    return NULL;
  }

  if (!m->is_loaded()) {
    // do not attempt to inline unloaded methods
    return NULL;
  }

  // Only a few intrinsics implement a virtual dispatch.
  // They are expensive calls which are also frequently overridden.
  if (is_virtual) {
    switch (id) {
    case vmIntrinsics::_hashCode:
    case vmIntrinsics::_clone:
      // OK, Object.hashCode and Object.clone intrinsics come in both flavors
      break;
    default:
      return NULL;
    }
  }

  // -XX:-InlineNatives disables nearly all intrinsics:
  if (!InlineNatives) {
    switch (id) {
    case vmIntrinsics::_indexOf:
    case vmIntrinsics::_compareTo:
    case vmIntrinsics::_equals:
    case vmIntrinsics::_equalsC:
      break;  // InlineNatives does not control String.compareTo
    default:
      return NULL;
    }
  }

  switch (id) {
  case vmIntrinsics::_compareTo:
    if (!SpecialStringCompareTo)  return NULL;
    break;
  case vmIntrinsics::_indexOf:
    if (!SpecialStringIndexOf)  return NULL;
    break;
  case vmIntrinsics::_equals:
    if (!SpecialStringEquals)  return NULL;
    break;
  case vmIntrinsics::_equalsC:
    if (!SpecialArraysEquals)  return NULL;
    break;
  case vmIntrinsics::_arraycopy:
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_copyMemory:
    if (StubRoutines::unsafe_arraycopy() == NULL)  return NULL;
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_hashCode:
    if (!InlineObjectHash)  return NULL;
    break;
  case vmIntrinsics::_clone:
  case vmIntrinsics::_copyOf:
  case vmIntrinsics::_copyOfRange:
    if (!InlineObjectCopy)  return NULL;
    // These also use the arraycopy intrinsic mechanism:
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_checkIndex:
    // We do not intrinsify this.  The optimizer does fine with it.
    return NULL;

  case vmIntrinsics::_get_AtomicLong:
  case vmIntrinsics::_attemptUpdate:
    if (!InlineAtomicLong)  return NULL;
    break;

  case vmIntrinsics::_Object_init:
  case vmIntrinsics::_invoke:
    // We do not intrinsify these; they are marked for other purposes.
    return NULL;

  case vmIntrinsics::_getCallerClass:
    if (!UseNewReflection)  return NULL;
    if (!InlineReflectionGetCallerClass)  return NULL;
    if (!JDK_Version::is_gte_jdk14x_version())  return NULL;
    break;

  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
    if (!UsePopCountInstruction)  return NULL;
    break;

  default:
    break;
  }

  // -XX:-InlineClassNatives disables natives from the Class class.
  // The flag applies to all reflective calls, notably Array.newArray
  // (visible to Java programmers as Array.newInstance).
  if (m->holder()->name() == ciSymbol::java_lang_Class() ||
      m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
    if (!InlineClassNatives)  return NULL;
  }

  // -XX:-InlineThreadNatives disables natives from the Thread class.
  if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
    if (!InlineThreadNatives)  return NULL;
  }

  // -XX:-InlineMathNatives disables natives from the Math, Float and Double classes.
  if (m->holder()->name() == ciSymbol::java_lang_Math() ||
      m->holder()->name() == ciSymbol::java_lang_Float() ||
      m->holder()->name() == ciSymbol::java_lang_Double()) {
    if (!InlineMathNatives)  return NULL;
  }

  // -XX:-InlineUnsafeOps disables natives from the Unsafe class.
  if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) {
    if (!InlineUnsafeOps)  return NULL;
  }

  return new LibraryIntrinsic(m, is_virtual, (vmIntrinsics::ID) id);
}
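
// Note: make_vm_intrinsic only decides *whether* a method may be treated as
// an intrinsic (flags, loadedness, virtual-dispatch rules); the returned
// LibraryIntrinsic object does the actual graph construction in generate().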

//----------------------register_library_intrinsics-----------------------
// Initialize this file's data structures, for each Compile instance.
void Compile::register_library_intrinsics() {
  // Nothing to do here.
}

JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  if (kit.try_to_inline()) {
    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
      tty->print("Inlining intrinsic %s%s at bci:%d in",
                 vmIntrinsics::name_at(intrinsic_id()),
                 (is_virtual() ? " (virtual)" : ""), kit.bci());
      kit.caller()->print_short_name(tty);
      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return kit.transfer_exceptions_into_jvms();
  }

  if (PrintIntrinsics) {
    switch (intrinsic_id()) {
    case vmIntrinsics::_invoke:
    case vmIntrinsics::_Object_init:
      // We do not expect to inline these, so do not produce any noise about them.
      break;
    default:
      tty->print("Did not inline intrinsic %s%s at bci:%d in",
                 vmIntrinsics::name_at(intrinsic_id()),
                 (is_virtual() ? " (virtual)" : ""), kit.bci());
      kit.caller()->print_short_name(tty);
      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}

bool LibraryCallKit::try_to_inline() {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store       = true;
  const bool is_native_ptr  = true;
  const bool is_static      = true;

  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:
    return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:
    return inline_native_hashcode(/*!virtual*/ false, is_static);
  case vmIntrinsics::_getClass:
    return inline_native_getClass();

  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:
    return inline_math_native(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max:
    return inline_min_max(intrinsic_id());

  case vmIntrinsics::_arraycopy:
    return inline_arraycopy();

  case vmIntrinsics::_compareTo:
    return inline_string_compareTo();
  case vmIntrinsics::_indexOf:
    return inline_string_indexOf();
  case vmIntrinsics::_equals:
    return inline_string_equals();

  case vmIntrinsics::_getObject:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, false);
  case vmIntrinsics::_getBoolean:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, false);
  case vmIntrinsics::_getByte:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, false);
  case vmIntrinsics::_getShort:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, false);
  case vmIntrinsics::_getChar:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, false);
  case vmIntrinsics::_getInt:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, false);
  case vmIntrinsics::_getLong:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, false);
  case vmIntrinsics::_getFloat:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, false);
  case vmIntrinsics::_getDouble:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, false);

  case vmIntrinsics::_putObject:
    return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, false);
  case vmIntrinsics::_putBoolean:
    return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, false);
  case vmIntrinsics::_putByte:
    return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, false);
  case vmIntrinsics::_putShort:
    return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, false);
  case vmIntrinsics::_putChar:
    return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, false);
  case vmIntrinsics::_putInt:
    return inline_unsafe_access(!is_native_ptr, is_store, T_INT, false);
  case vmIntrinsics::_putLong:
    return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, false);
  case vmIntrinsics::_putFloat:
    return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, false);
  case vmIntrinsics::_putDouble:
    return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, false);

  case vmIntrinsics::_getByte_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_BYTE, false);
  case vmIntrinsics::_getShort_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_SHORT, false);
  case vmIntrinsics::_getChar_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_CHAR, false);
  case vmIntrinsics::_getInt_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_INT, false);
  case vmIntrinsics::_getLong_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_LONG, false);
  case vmIntrinsics::_getFloat_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_FLOAT, false);
  case vmIntrinsics::_getDouble_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_DOUBLE, false);
  case vmIntrinsics::_getAddress_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_ADDRESS, false);

  case vmIntrinsics::_putByte_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_BYTE, false);
  case vmIntrinsics::_putShort_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_SHORT, false);
  case vmIntrinsics::_putChar_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_CHAR, false);
  case vmIntrinsics::_putInt_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_INT, false);
  case vmIntrinsics::_putLong_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_LONG, false);
  case vmIntrinsics::_putFloat_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_FLOAT, false);
  case vmIntrinsics::_putDouble_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_DOUBLE, false);
  case vmIntrinsics::_putAddress_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_ADDRESS, false);

  case vmIntrinsics::_getObjectVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, true);
  case vmIntrinsics::_getBooleanVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, true);
  case vmIntrinsics::_getByteVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, true);
  case vmIntrinsics::_getShortVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, true);
  case vmIntrinsics::_getCharVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, true);
  case vmIntrinsics::_getIntVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, true);
  case vmIntrinsics::_getLongVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, true);
  case vmIntrinsics::_getFloatVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, true);
  case vmIntrinsics::_getDoubleVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, true);

  case vmIntrinsics::_putObjectVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, true);
  case vmIntrinsics::_putBooleanVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, true);
  case vmIntrinsics::_putByteVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, true);
  case vmIntrinsics::_putShortVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, true);
  case vmIntrinsics::_putCharVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, true);
  case vmIntrinsics::_putIntVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_INT, true);
  case vmIntrinsics::_putLongVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, true);
  case vmIntrinsics::_putFloatVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, true);
  case vmIntrinsics::_putDoubleVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, true);

  case vmIntrinsics::_prefetchRead:
    return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
  case vmIntrinsics::_prefetchWrite:
    return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
  case vmIntrinsics::_prefetchReadStatic:
    return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
  case vmIntrinsics::_prefetchWriteStatic:
    return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);

  case vmIntrinsics::_compareAndSwapObject:
    return inline_unsafe_CAS(T_OBJECT);
  case vmIntrinsics::_compareAndSwapInt:
    return inline_unsafe_CAS(T_INT);
  case vmIntrinsics::_compareAndSwapLong:
    return inline_unsafe_CAS(T_LONG);

  case vmIntrinsics::_putOrderedObject:
    return inline_unsafe_ordered_store(T_OBJECT);
  case vmIntrinsics::_putOrderedInt:
    return inline_unsafe_ordered_store(T_INT);
  case vmIntrinsics::_putOrderedLong:
    return inline_unsafe_ordered_store(T_LONG);

  case vmIntrinsics::_currentThread:
    return inline_native_currentThread();
  case vmIntrinsics::_isInterrupted:
    return inline_native_isInterrupted();

  case vmIntrinsics::_currentTimeMillis:
    return inline_native_time_funcs(false);
  case vmIntrinsics::_nanoTime:
    return inline_native_time_funcs(true);
  case vmIntrinsics::_allocateInstance:
    return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:
    return inline_unsafe_copyMemory();
  case vmIntrinsics::_newArray:
    return inline_native_newArray();
  case vmIntrinsics::_getLength:
    return inline_native_getLength();
  case vmIntrinsics::_copyOf:
    return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:
    return inline_array_copyOf(true);
  case vmIntrinsics::_equalsC:
    return inline_array_equals();
  case vmIntrinsics::_clone:
    return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_isAssignableFrom:
    return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getComponentType:
  case vmIntrinsics::_getClassAccessFlags:
    return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:
    return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
    return inline_bitCount(intrinsic_id());

  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
    return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());

  case vmIntrinsics::_get_AtomicLong:
    return inline_native_AtomicLong_get();
  case vmIntrinsics::_attemptUpdate:
    return inline_native_AtomicLong_attemptUpdate();

  case vmIntrinsics::_getCallerClass:
    return inline_native_Reflection_getCallerClass();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}

//------------------------------push_result------------------------------
// Helper function for finishing intrinsics.
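// The region is recorded for IGVN so that control edges merged in while the
// intrinsic was built can be simplified in the final IGVN pass.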
void LibraryCallKit::push_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  BasicType value_type = value->type()->basic_type();
  push_node(value_type, _gvn.transform(value));
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// is appended to the region.
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return NULL;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform( new (C, 1) IfTrueNode(iff) );
  if (if_slow == top()) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  if (region != NULL)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform( new (C, 1) IfFalseNode(iff) );
  set_control(if_fast);

  return if_slow;
}

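// The probability constants below chiefly bias frequency-driven
// optimizations such as block layout: PROB_UNLIKELY_MAG(3) marks the slow
// edge as taken on the order of once in 10^3, while PROB_FAIR treats both
// edges as equally likely.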
inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}

inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) );
  Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}

inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative,
                                                        Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_le = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) );
  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
  Node* bol_le = _gvn.transform( new (C, 2) BoolNode(cmp_le, le_or_eq) );
  Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
  if (is_notp != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS1);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_notp;
}

// Make sure that 'position' is a valid limit index, in [0..length].
// There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable.  Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region) {
  if (stopped())
    return NULL;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && _gvn.eqv_uncast(subseq_length, array_length))
    return NULL;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)             // last += offset
    last = _gvn.transform( new (C, 3) AddINode(last, offset));
  Node* cmp_lt = _gvn.transform( new (C, 3) CmpUNode(array_length, last) );
  Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}

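// Illustrative sketch (not part of the original file): Plan A from the
// comment above, written out in plain C++.  Given non-negative 32-bit
// inputs, the sum offset + subseq_length always fits in an unsigned int,
// so a single unsigned comparison covers both the range check and the
// overflow case that Plan B sidesteps by subtracting instead.
static bool limit_guard_plan_a_sketch(int offset, int subseq_length,
                                      int array_length) {
  // Caller guarantees all three inputs are >= 0.
  return (unsigned)offset + (unsigned)subseq_length <= (unsigned)array_length;
}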

//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  ciKlass*    thread_klass = env()->Thread_klass();
  const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
  Node* thread = _gvn.transform(new (C, 1) ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT);
  tls_output = thread;
  return threadObj;
}


//------------------------------inline_string_compareTo------------------------
bool LibraryCallKit::inline_string_compareTo() {

  if (!Matcher::has_match_rule(Op_StrComp)) return false;

  const int value_offset  = java_lang_String::value_offset_in_bytes();
  const int count_offset  = java_lang_String::count_offset_in_bytes();
  const int offset_offset = java_lang_String::offset_offset_in_bytes();

  _sp += 2;
  Node *argument = pop();  // pop non-receiver first: it was pushed second
  Node *receiver = pop();

  // Null check on self without removing any arguments.  The argument
  // null check technically happens in the wrong place, which can lead to
  // invalid stack traces when string compare is inlined into a method
  // which handles NullPointerExceptions.
  _sp += 2;
  receiver = do_null_check(receiver, T_OBJECT);
  argument = do_null_check(argument, T_OBJECT);
  _sp -= 2;
  if (stopped()) {
    return true;
  }

  ciInstanceKlass* klass = env()->String_klass();
  const TypeInstPtr* string_type =
    TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);

  Node* compare =
    _gvn.transform(new (C, 7) StrCompNode(
                        control(),
                        memory(TypeAryPtr::CHARS),
                        memory(string_type->add_offset(value_offset)),
                        memory(string_type->add_offset(count_offset)),
                        memory(string_type->add_offset(offset_offset)),
                        receiver,
                        argument));
  push(compare);
  return true;
}

//------------------------------inline_string_equals------------------------
bool LibraryCallKit::inline_string_equals() {

  if (!Matcher::has_match_rule(Op_StrEquals)) return false;

  const int value_offset  = java_lang_String::value_offset_in_bytes();
  const int count_offset  = java_lang_String::count_offset_in_bytes();
  const int offset_offset = java_lang_String::offset_offset_in_bytes();

  _sp += 2;
  Node* argument = pop();  // pop non-receiver first: it was pushed second
  Node* receiver = pop();

  // Null check on self without removing any arguments.  The argument
  // null check technically happens in the wrong place, which can lead to
  // invalid stack traces when string compare is inlined into a method
  // which handles NullPointerExceptions.
  _sp += 2;
  receiver = do_null_check(receiver, T_OBJECT);
  // No null check on the argument: the String.equals() spec explicitly
  // allows null as the argument.
  _sp -= 2;

  if (stopped()) {
    return true;
  }

  // get String klass for instanceOf
  ciInstanceKlass* klass = env()->String_klass();

  // two paths (plus control) merge
  RegionNode* region = new (C, 3) RegionNode(3);
  Node* phi = new (C, 3) PhiNode(region, TypeInt::BOOL);

  Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
  Node* cmp  = _gvn.transform(new (C, 3) CmpINode(inst, intcon(1)));
  Node* bol  = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq));

  IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);

  Node* if_true = _gvn.transform(new (C, 1) IfTrueNode(iff));
  set_control(if_true);

  const TypeInstPtr* string_type =
    TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);

  // instanceOf == true
  Node* equals =
    _gvn.transform(new (C, 7) StrEqualsNode(
                        control(),
                        memory(TypeAryPtr::CHARS),
                        memory(string_type->add_offset(value_offset)),
                        memory(string_type->add_offset(count_offset)),
                        memory(string_type->add_offset(offset_offset)),
                        receiver,
                        argument));

  phi->init_req(1, _gvn.transform(equals));
  region->init_req(1, if_true);

  // instanceOf == false, fallthrough
  Node* if_false = _gvn.transform(new (C, 1) IfFalseNode(iff));
  set_control(if_false);

  phi->init_req(2, _gvn.transform(intcon(0)));
  region->init_req(2, if_false);

  // post merge
  set_control(_gvn.transform(region));
  record_for_igvn(region);

  push(_gvn.transform(phi));

  return true;
}

//------------------------------inline_array_equals----------------------------
bool LibraryCallKit::inline_array_equals() {

  if (!Matcher::has_match_rule(Op_AryEq)) return false;

  _sp += 2;
  Node *argument2 = pop();
  Node *argument1 = pop();

  Node* equals =
    _gvn.transform(new (C, 3) AryEqNode(control(),
                                        argument1,
                                        argument2)
                   );
  push(equals);
  return true;
}

// Java version of String.indexOf(constant string)
// class StringDecl {
//   StringDecl(char[] ca) {
//     offset = 0;
//     count = ca.length;
//     value = ca;
//   }
//   int offset;
//   int count;
//   char[] value;
// }
//
// static int string_indexOf_J(StringDecl string_object, char[] target_object,
//                             int targetOffset, int cache_i, int md2) {
//   int cache = cache_i;
//   int sourceOffset = string_object.offset;
//   int sourceCount = string_object.count;
//   int targetCount = target_object.length;
//
//   int targetCountLess1 = targetCount - 1;
//   int sourceEnd = sourceOffset + sourceCount - targetCountLess1;
//
//   char[] source = string_object.value;
//   char[] target = target_object;
//   int lastChar = target[targetCountLess1];
//
//  outer_loop:
//   for (int i = sourceOffset; i < sourceEnd; ) {
//     int src = source[i + targetCountLess1];
//     if (src == lastChar) {
//       // With random strings and a 4-character alphabet,
//       // reverse matching at this point sets up 0.8% fewer
//       // frames, but (paradoxically) makes 0.3% more probes.
//       // Since those probes are nearer the lastChar probe,
//       // there may be a net D$ win with reverse matching.
//       // But reversing the loop inhibits unrolling of the inner loop
//       // for an unknown reason.  So does running the outer loop from
//       // (sourceOffset - targetCountLess1) to (sourceOffset + sourceCount).
//       for (int j = 0; j < targetCountLess1; j++) {
//         if (target[targetOffset + j] != source[i+j]) {
//           if ((cache & (1 << source[i+j])) == 0) {
//             if (md2 < j+1) {
//               i += j+1;
//               continue outer_loop;
//             }
//           }
//           i += md2;
//           continue outer_loop;
//         }
//       }
//       return i - sourceOffset;
//     }
//     if ((cache & (1 << src)) == 0) {
//       i += targetCountLess1;
//     } // using "i += targetCount;" and an "else i++;" causes a jump to jump.
//     i++;
//   }
//   return -1;
// }

//------------------------------string_indexOf------------------------
Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_array, jint targetOffset_i,
                                     jint cache_i, jint md2_i) {

  Node* no_ctrl  = NULL;
  float likely   = PROB_LIKELY(0.9);
  float unlikely = PROB_UNLIKELY(0.9);

  const int value_offset  = java_lang_String::value_offset_in_bytes();
  const int count_offset  = java_lang_String::count_offset_in_bytes();
  const int offset_offset = java_lang_String::offset_offset_in_bytes();

  ciInstanceKlass* klass = env()->String_klass();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
  const TypeAryPtr*  source_type = TypeAryPtr::make(TypePtr::NotNull, TypeAry::make(TypeInt::CHAR,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, 0);

  Node* sourceOffseta = basic_plus_adr(string_object, string_object, offset_offset);
  Node* sourceOffset  = make_load(no_ctrl, sourceOffseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
  Node* sourceCounta  = basic_plus_adr(string_object, string_object, count_offset);
  Node* sourceCount   = make_load(no_ctrl, sourceCounta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
  Node* sourcea       = basic_plus_adr(string_object, string_object, value_offset);
  Node* source        = make_load(no_ctrl, sourcea, source_type, T_OBJECT, string_type->add_offset(value_offset));

  Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array)) );
  jint target_length = target_array->length();
  const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
  const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);

  IdealKit kit(gvn(), control(), merged_memory());
#define __ kit.
  Node* zero             = __ ConI(0);
  Node* one              = __ ConI(1);
  Node* cache            = __ ConI(cache_i);
  Node* md2              = __ ConI(md2_i);
  Node* lastChar         = __ ConI(target_array->char_at(target_length - 1));
  Node* targetCount      = __ ConI(target_length);
  Node* targetCountLess1 = __ ConI(target_length - 1);
  Node* targetOffset     = __ ConI(targetOffset_i);
  Node* sourceEnd        = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);

  IdealVariable rtn(kit), i(kit), j(kit); __ declares_done();
  Node* outer_loop = __ make_label(2 /* goto */);
  Node* return_    = __ make_label(1);

  __ set(rtn, __ ConI(-1));
  __ loop(i, sourceOffset, BoolTest::lt, sourceEnd); {
       Node* i2  = __ AddI(__ value(i), targetCountLess1);
       // pin to prohibit loading of "next iteration" value which may SEGV (rare)
       Node* src = load_array_element(__ ctrl(), source, i2, TypeAryPtr::CHARS);
       __ if_then(src, BoolTest::eq, lastChar, unlikely); {
         __ loop(j, zero, BoolTest::lt, targetCountLess1); {
              Node* tpj  = __ AddI(targetOffset, __ value(j));
              Node* targ = load_array_element(no_ctrl, target, tpj, target_type);
              Node* ipj  = __ AddI(__ value(i), __ value(j));
              Node* src2 = load_array_element(no_ctrl, source, ipj, TypeAryPtr::CHARS);
              __ if_then(targ, BoolTest::ne, src2); {
                __ if_then(__ AndI(cache, __ LShiftI(one, src2)), BoolTest::eq, zero); {
                  __ if_then(md2, BoolTest::lt, __ AddI(__ value(j), one)); {
                    __ increment(i, __ AddI(__ value(j), one));
                    __ goto_(outer_loop);
                  } __ end_if(); __ dead(j);
                }__ end_if(); __ dead(j);
                __ increment(i, md2);
                __ goto_(outer_loop);
              }__ end_if();
              __ increment(j, one);
         }__ end_loop(); __ dead(j);
         __ set(rtn, __ SubI(__ value(i), sourceOffset)); __ dead(i);
         __ goto_(return_);
       }__ end_if();
       __ if_then(__ AndI(cache, __ LShiftI(one, src)), BoolTest::eq, zero, likely); {
         __ increment(i, targetCountLess1);
       }__ end_if();
       __ increment(i, one);
       __ bind(outer_loop);
  }__ end_loop(); __ dead(i);
  __ bind(return_);
  __ drain_delay_transform();

  set_control(__ ctrl());
  Node* result = __ value(rtn);
#undef __
  C->set_has_loops(true);
  return result;
}

//------------------------------inline_string_indexOf------------------------
bool LibraryCallKit::inline_string_indexOf() {

  const int value_offset  = java_lang_String::value_offset_in_bytes();
  const int count_offset  = java_lang_String::count_offset_in_bytes();
  const int offset_offset = java_lang_String::offset_offset_in_bytes();

  _sp += 2;
  Node *argument = pop();  // pop non-receiver first: it was pushed second
  Node *receiver = pop();

  Node* result;
  if (Matcher::has_match_rule(Op_StrIndexOf) &&
      UseSSE42Intrinsics) {
    // Generate SSE4.2 version of indexOf
    // We currently only have match rules that use SSE4.2

    // Null check on self without removing any arguments.  The argument
    // null check technically happens in the wrong place, which can lead to
    // invalid stack traces when string compare is inlined into a method
    // which handles NullPointerExceptions.
    _sp += 2;
    receiver = do_null_check(receiver, T_OBJECT);
    argument = do_null_check(argument, T_OBJECT);
    _sp -= 2;

    if (stopped()) {
      return true;
    }

    ciInstanceKlass* klass = env()->String_klass();
    const TypeInstPtr* string_type =
      TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);

    result =
      _gvn.transform(new (C, 7)
                     StrIndexOfNode(control(),
                                    memory(TypeAryPtr::CHARS),
                                    memory(string_type->add_offset(value_offset)),
                                    memory(string_type->add_offset(count_offset)),
                                    memory(string_type->add_offset(offset_offset)),
                                    receiver,
                                    argument));
  } else { // Use LibraryCallKit::string_indexOf
    // don't intrinsify if the argument isn't a constant string.
    if (!argument->is_Con()) {
      return false;
    }
    const TypeOopPtr* str_type = _gvn.type(argument)->isa_oopptr();
    if (str_type == NULL) {
      return false;
    }
    ciInstanceKlass* klass = env()->String_klass();
    ciObject* str_const = str_type->const_oop();
    if (str_const == NULL || str_const->klass() != klass) {
      return false;
    }
    ciInstance* str = str_const->as_instance();
    assert(str != NULL, "must be instance");

    ciObject* v = str->field_value_by_offset(value_offset).as_object();
    int       o = str->field_value_by_offset(offset_offset).as_int();
    int       c = str->field_value_by_offset(count_offset).as_int();
    ciTypeArray* pat = v->as_type_array(); // pattern (argument) character array

    // Constant strings have no offset and count == length, which
    // simplifies the resulting code somewhat, so let's optimize for that.
    if (o != 0 || c != pat->length()) {
      return false;
    }

    // Null check on self without removing any arguments.  The argument
    // null check technically happens in the wrong place, which can lead to
    // invalid stack traces when string compare is inlined into a method
    // which handles NullPointerExceptions.
    _sp += 2;
    receiver = do_null_check(receiver, T_OBJECT);
    // No null check on the argument is needed since it's a constant String oop.
    _sp -= 2;
    if (stopped()) {
      return true;
    }

    // An empty string as a pattern always returns 0 (match at beginning of string)
    if (c == 0) {
      push(intcon(0));
      return true;
    }

    // Generate default indexOf
    jchar lastChar = pat->char_at(o + (c - 1));
    int cache = 0;
    int i;
    for (i = 0; i < c - 1; i++) {
      assert(i < pat->length(), "out of range");
      cache |= (1 << (pat->char_at(o + i) & (sizeof(cache) * BitsPerByte - 1)));
    }

    int md2 = c;
    for (i = 0; i < c - 1; i++) {
      assert(i < pat->length(), "out of range");
      if (pat->char_at(o + i) == lastChar) {
        md2 = (c - 1) - i;
      }
    }

    result = string_indexOf(receiver, pat, o, cache, md2);
  }

  push(result);
  return true;
}

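// Illustrative sketch (not part of the original file): the "cache" and
// "md2" tables above, computed for a concrete, hypothetical pattern.
// cache is a 32-bit Bloom-style filter over the first c-1 pattern chars
// (each char hashed to bit char & 31); md2 is the shift from the last
// occurrence of lastChar within those c-1 chars to the pattern's end.
static void string_indexof_tables_sketch() {
  const char pat[] = "abcab";           // hypothetical pattern, o == 0
  const int  c     = sizeof(pat) - 1;   // pattern length: 5
  const char lastChar = pat[c - 1];     // 'b'
  int cache = 0;
  for (int i = 0; i < c - 1; i++) {
    cache |= (1 << (pat[i] & 31));      // sets bits for 'a', 'b', 'c'
  }
  int md2 = c;                          // stays c if lastChar is absent
  for (int i = 0; i < c - 1; i++) {
    if (pat[i] == lastChar) {
      md2 = (c - 1) - i;                // last 'b' is at i == 1, so md2 == 3
    }
  }
  // Result: cache marks {'a','b','c'}; md2 == 3, the safe skip distance.
}
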
//--------------------------pop_math_arg--------------------------------
// Pop a double argument to a math function from the stack
// rounding it if necessary.
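// (On x87, i.e. UseSSE <= 1, intermediate doubles can carry extended
// precision, so Java's 64-bit double semantics need the explicit
// RoundDouble below to narrow the value back to 64 bits.)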
Node * LibraryCallKit::pop_math_arg() {
  Node *arg = pop_pair();
  if( Matcher::strict_fp_requires_explicit_rounding && UseSSE<=1 )
    arg = _gvn.transform( new (C, 2) RoundDoubleNode(0, arg) );
  return arg;
}

//------------------------------inline_trig----------------------------------
// Inline sin/cos/tan instructions, if possible.  If rounding is required, do
// argument reduction which will turn into a fast/slow diamond.
bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
  _sp += arg_size();  // restore stack pointer
  Node* arg = pop_math_arg();
  Node* trig = NULL;

  switch (id) {
  case vmIntrinsics::_dsin:
    trig = _gvn.transform((Node*)new (C, 2) SinDNode(arg));
    break;
  case vmIntrinsics::_dcos:
    trig = _gvn.transform((Node*)new (C, 2) CosDNode(arg));
    break;
  case vmIntrinsics::_dtan:
    trig = _gvn.transform((Node*)new (C, 2) TanDNode(arg));
    break;
  default:
    assert(false, "bad intrinsic was passed in");
    return false;
  }

  // Rounding required?  Check for argument reduction!
  if( Matcher::strict_fp_requires_explicit_rounding ) {

    static const double     pi_4 =  0.7853981633974483;
    static const double neg_pi_4 = -0.7853981633974483;
    // pi/2 in 80-bit extended precision
    // static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00};
    // -pi/2 in 80-bit extended precision
    // static const unsigned char neg_pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0xbf,0x00,0x00,0x00,0x00,0x00,0x00};
    // Cutoff value for using this argument reduction technique
    //static const double    pi_2_minus_epsilon =  1.564660403643354;
    //static const double neg_pi_2_plus_epsilon = -1.564660403643354;

    // Pseudocode for sin:
    // if (x <= Math.PI / 4.0) {
    //   if (x >= -Math.PI / 4.0) return  fsin(x);
    //   if (x >= -Math.PI / 2.0) return -fcos(x + Math.PI / 2.0);
    // } else {
    //   if (x <= Math.PI / 2.0) return fcos(x - Math.PI / 2.0);
    // }
    // return StrictMath.sin(x);

    // Pseudocode for cos:
    // if (x <= Math.PI / 4.0) {
    //   if (x >= -Math.PI / 4.0) return  fcos(x);
    //   if (x >= -Math.PI / 2.0) return fsin(x + Math.PI / 2.0);
    // } else {
    //   if (x <= Math.PI / 2.0) return -fsin(x - Math.PI / 2.0);
    // }
    // return StrictMath.cos(x);
1259 // Actually, sticking an 80-bit Intel value into C2 will be tough; it |
1260 // requires a special machine instruction to load it. Instead we'll try | |
1261 // the 'easy' case. If we really need the extra range +/- PI/2 we'll | |
1262 // probably do the math inside the SIN encoding. | |
1263 | |
1264 // Make the merge point | |
1265 RegionNode *r = new (C, 3) RegionNode(3); | |
1266 Node *phi = new (C, 3) PhiNode(r,Type::DOUBLE); | |
1267 | |
1268 // Flatten arg so we need only 1 test | |
1269 Node *abs = _gvn.transform(new (C, 2) AbsDNode(arg)); | |
1270 // Node for PI/4 constant | |
1271 Node *pi4 = makecon(TypeD::make(pi_4)); | |
1272 // Check PI/4 : abs(arg) | |
1273 Node *cmp = _gvn.transform(new (C, 3) CmpDNode(pi4,abs)); | |
1274 // Check: If PI/4 < abs(arg) then go slow | |
1275 Node *bol = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::lt ) ); | |
1276 // Branch either way | |
1277 IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); | |
1278 set_control(opt_iff(r,iff)); | |
1279 | |
1280 // Set fast path result | |
1281 phi->init_req(2,trig); | |
1282 | |
1283 // Slow path - non-blocking leaf call | |
1284 Node* call = NULL; | |
1285 switch (id) { | |
1286 case vmIntrinsics::_dsin: | |
1287 call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(), | |
1288 CAST_FROM_FN_PTR(address, SharedRuntime::dsin), | |
1289 "Sin", NULL, arg, top()); | |
1290 break; | |
1291 case vmIntrinsics::_dcos: | |
1292 call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(), | |
1293 CAST_FROM_FN_PTR(address, SharedRuntime::dcos), | |
1294 "Cos", NULL, arg, top()); | |
1295 break; | |
1296 case vmIntrinsics::_dtan: | |
1297 call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(), | |
1298 CAST_FROM_FN_PTR(address, SharedRuntime::dtan), | |
1299 "Tan", NULL, arg, top()); | |
1300 break; | |
1301 } | |
1302 assert(control()->in(0) == call, ""); | |
1303 Node* slow_result = _gvn.transform(new (C, 1) ProjNode(call,TypeFunc::Parms)); | |
1304 r->init_req(1,control()); | |
1305 phi->init_req(1,slow_result); | |
1306 | |
1307 // Post-merge | |
1308 set_control(_gvn.transform(r)); | |
1309 record_for_igvn(r); | |
1310 trig = _gvn.transform(phi); | |
1311 | |
1312 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
1313 } | |
1314 // Push result back on JVM stack | |
1315 push_pair(trig); | |
1316 return true; | |
1317 } | |
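// A sketch (illustration only) of the diamond just built; 'fsin' stands in
// for the hardware node, and the guard sends anything outside [-PI/4, PI/4]
// to the shared runtime, which performs the full argument reduction:
//
//   if (fabs(x) <= PI/4)
//     return fsin(x);                // fast path (SinDNode etc.)
//   return SharedRuntime::dsin(x);   // slow path: RC_LEAF runtime call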
1318 | |
1319 //------------------------------inline_sqrt------------------------------------- | |
1320 // Inline square root instruction, if possible. | |
1321 bool LibraryCallKit::inline_sqrt(vmIntrinsics::ID id) { | |
1322 assert(id == vmIntrinsics::_dsqrt, "Not square root"); | |
1323 _sp += arg_size(); // restore stack pointer | |
1324 push_pair(_gvn.transform(new (C, 2) SqrtDNode(0, pop_math_arg()))); | |
1325 return true; | |
1326 } | |
1327 | |
1328 //------------------------------inline_abs------------------------------------- | |
1329 // Inline absolute value instruction, if possible. | |
1330 bool LibraryCallKit::inline_abs(vmIntrinsics::ID id) { | |
1331 assert(id == vmIntrinsics::_dabs, "Not absolute value"); | |
1332 _sp += arg_size(); // restore stack pointer | |
1333 push_pair(_gvn.transform(new (C, 2) AbsDNode(pop_math_arg()))); | |
1334 return true; | |
1335 } | |
1336 | |
1337 //------------------------------inline_exp------------------------------------- | |
1338 // Inline exp instructions, if possible. The Intel hardware only misses | |
1339 // really odd corner cases (+/- Infinity). Just uncommon-trap them. | |
1340 bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) { | |
1341 assert(id == vmIntrinsics::_dexp, "Not exp"); | |
1342 | |
1343 // If this inlining ever returned NaN in the past, we do not intrinsify it |
1344 // ever again. NaN results require StrictMath.exp handling. |
1345 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; | |
1346 | |
1347 // Do not intrinsify on older platforms which lack cmove. | |
1348 if (ConditionalMoveLimit == 0) return false; | |
1349 | |
1350 _sp += arg_size(); // restore stack pointer | |
1351 Node *x = pop_math_arg(); | |
1352 Node *result = _gvn.transform(new (C, 2) ExpDNode(0,x)); | |
1353 | |
1354 //------------------- | |
1355 //result=(result.isNaN())? StrictMath::exp():result; | |
1356 // Check: If isNaN() by checking result!=result? then go to Strict Math | |
1357 Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result)); | |
1358 // Build the boolean node | |
1359 Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) ); | |
1360 | |
1361 { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT); | |
1362 // End the current control-flow path | |
1363 push_pair(x); | |
1364 // Math.exp intrinsic returned a NaN, which requires StrictMath.exp | |
1365 // to handle. Recompile without intrinsifying Math.exp | |
1366 uncommon_trap(Deoptimization::Reason_intrinsic, | |
1367 Deoptimization::Action_make_not_entrant); | |
1368 } | |
1369 | |
1370 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
1371 | |
1372 push_pair(result); | |
1373 | |
1374 return true; | |
1375 } | |
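// The cutout above, sketched in the style of the pseudocode comments used
// in this file (illustration only):
//
//   double r = fexp(x);   // ExpDNode fast path
//   if (r != r) {         // only NaN compares unequal to itself
//     // uncommon trap: reexecute in the interpreter and recompile
//     // without this intrinsic, ending up in StrictMath.exp
//   }
//   return r;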
1376 | |
1377 //------------------------------inline_pow------------------------------------- | |
1378 // Inline power instructions, if possible. | |
1379 bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) { | |
1380 assert(id == vmIntrinsics::_dpow, "Not pow"); | |
1381 | |
1382 // If this inlining ever returned NaN in the past, we do not intrinsify it |
1383 // ever again. NaN results require StrictMath.pow handling. |
1384 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; | |
1385 | |
1386 // Do not intrinsify on older platforms which lack cmove. | |
1387 if (ConditionalMoveLimit == 0) return false; | |
1388 | |
1389 // Pseudocode for pow | |
1390 // if (x <= 0.0) { | |
1391 // if ((double)((int)y)==y) { // if y is int | |
1392 // result = ((1&(int)y)==1)?-DPow(abs(x), y):DPow(abs(x), y) |
1393 // } else { | |
1394 // result = NaN; | |
1395 // } | |
1396 // } else { | |
1397 // result = DPow(x,y); | |
1398 // } | |
1399 // if (result != result)? { | |
605 | 1400 // uncommon_trap(); |
0 | 1401 // } |
1402 // return result; | |
1403 | |
1404 _sp += arg_size(); // restore stack pointer | |
1405 Node* y = pop_math_arg(); | |
1406 Node* x = pop_math_arg(); | |
1407 | |
1408 Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, x, y) ); | |
1409 | |
1410 // Short form: if not top-level (i.e., Math.pow is being inlined |
1411 // inside another method) then skip the fancy tests and just check |
1412 // for a NaN result. |
1413 Node *result = NULL; | |
1414 if( jvms()->depth() >= 1 ) { | |
1415 result = fast_result; | |
1416 } else { | |
1417 | |
1418 // Set the merge point for If node with condition of (x <= 0.0) | |
1419 // There are four possible paths to region node and phi node | |
1420 RegionNode *r = new (C, 4) RegionNode(4); | |
1421 Node *phi = new (C, 4) PhiNode(r, Type::DOUBLE); | |
1422 | |
1423 // Build the first if node: if (x <= 0.0) | |
1424 // Node for 0 constant | |
1425 Node *zeronode = makecon(TypeD::ZERO); | |
1426 // Check x:0 | |
1427 Node *cmp = _gvn.transform(new (C, 3) CmpDNode(x, zeronode)); | |
1428 // Check: If (x<=0) then go complex path | |
1429 Node *bol1 = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::le ) ); | |
1430 // Branch either way | |
1431 IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); | |
1432 Node *opt_test = _gvn.transform(if1); | |
1433 //assert( opt_test->is_If(), "Expect an IfNode"); | |
1434 IfNode *opt_if1 = (IfNode*)opt_test; | |
1435 // Fast path taken; set region slot 3 | |
1436 Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(opt_if1) ); | |
1437 r->init_req(3,fast_taken); // Capture fast-control | |
1438 | |
1439 // Fast path not-taken, i.e. slow path | |
1440 Node *complex_path = _gvn.transform( new (C, 1) IfTrueNode(opt_if1) ); | |
1441 | |
1442 // Set fast path result |
1443 // (fast_result was already computed above, before the short-form check) |
1444 phi->init_req(3, fast_result); |
1445 | |
1446 // Complex path | |
1447 // Build the second if node (if y is int) | |
1448 // Node for (int)y | |
1449 Node *inty = _gvn.transform( new (C, 2) ConvD2INode(y)); | |
1450 // Node for (double)((int) y) | |
1451 Node *doubleinty= _gvn.transform( new (C, 2) ConvI2DNode(inty)); | |
1452 // Check (double)((int) y) : y | |
1453 Node *cmpinty= _gvn.transform(new (C, 3) CmpDNode(doubleinty, y)); | |
1454 // Check if (y isn't int) then go to slow path | |
1455 | |
1456 Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) ); | |
605 | 1457 // Branch either way |
0 | 1458 IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); |
1459 Node *slow_path = opt_iff(r,if2); // Set region path 2 | |
1460 | |
1461 // Calculate DPow(abs(x), y)*(1 & (int)y) | |
1462 // Node for constant 1 | |
1463 Node *conone = intcon(1); | |
1464 // 1& (int)y | |
1465 Node *signnode= _gvn.transform( new (C, 3) AndINode(conone, inty) ); | |
1466 // zero node | |
1467 Node *conzero = intcon(0); | |
1468 // Check (1&(int)y)==0? | |
1469 Node *cmpeq1 = _gvn.transform(new (C, 3) CmpINode(signnode, conzero)); | |
1470 // Check if (1&(int)y)!=0?, if so the result is negative | |
1471 Node *bol3 = _gvn.transform( new (C, 2) BoolNode( cmpeq1, BoolTest::ne ) ); | |
1472 // abs(x) | |
1473 Node *absx=_gvn.transform( new (C, 2) AbsDNode(x)); | |
1474 // abs(x)^y | |
1475 Node *absxpowy = _gvn.transform( new (C, 3) PowDNode(0, absx, y) ); |
1476 // -abs(x)^y | |
1477 Node *negabsxpowy = _gvn.transform(new (C, 2) NegDNode (absxpowy)); | |
1478 // (1&(int)y)==1?-DPow(abs(x), y):DPow(abs(x), y) | |
1479 Node *signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE)); | |
1480 // Set complex path fast result | |
1481 phi->init_req(2, signresult); | |
1482 | |
1483 static const jlong nan_bits = CONST64(0x7ff8000000000000); | |
1484 Node *slow_result = makecon(TypeD::make(*(double*)&nan_bits)); // return NaN | |
1485 r->init_req(1,slow_path); | |
1486 phi->init_req(1,slow_result); | |
1487 | |
1488 // Post merge | |
1489 set_control(_gvn.transform(r)); | |
1490 record_for_igvn(r); | |
1491 result=_gvn.transform(phi); | |
1492 } | |
1493 | |
1494 //------------------- | |
1495 //result=(result.isNaN())? uncommon_trap():result; | |
1496 // Check: If isNaN() by checking result!=result? then go to Strict Math | |
1497 Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result)); | |
1498 // Build the boolean node | |
1499 Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) ); | |
1500 | |
1501 { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT); | |
1502 // End the current control-flow path | |
1503 push_pair(x); | |
1504 push_pair(y); | |
1505 // Math.pow intrinsic returned a NaN, which requires StrictMath.pow | |
1506 // to handle. Recompile without intrinsifying Math.pow. | |
1507 uncommon_trap(Deoptimization::Reason_intrinsic, | |
1508 Deoptimization::Action_make_not_entrant); | |
1509 } | |
1510 | |
1511 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
1512 | |
1513 push_pair(result); | |
1514 | |
1515 return true; | |
1516 } | |
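// A self-contained model (illustration, not VM code) of the complex path
// above for x <= 0: a negative base is only meaningful for an integral
// exponent, and an odd integer exponent flips the sign of DPow(abs(x), y).
static double pow_negative_base_sketch(double x, double y) {
  int inty = (int) y;
  if ((double) inty != y) {
    return 0.0 / 0.0;                   // y is not an integer: NaN
  }
  double m = pow(x < 0.0 ? -x : x, y);  // DPow(abs(x), y)
  return ((inty & 1) == 0) ? m : -m;    // the CMove keyed on (1 & (int)y)
}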
1517 | |
1518 //------------------------------inline_trans------------------------------------- | |
1519 // Inline transcendental instructions, if possible. The Intel hardware gets | |
1520 // these right, no funny corner cases missed. | |
1521 bool LibraryCallKit::inline_trans(vmIntrinsics::ID id) { | |
1522 _sp += arg_size(); // restore stack pointer | |
1523 Node* arg = pop_math_arg(); | |
1524 Node* trans = NULL; | |
1525 | |
1526 switch (id) { | |
1527 case vmIntrinsics::_dlog: | |
1528 trans = _gvn.transform((Node*)new (C, 2) LogDNode(arg)); | |
1529 break; | |
1530 case vmIntrinsics::_dlog10: | |
1531 trans = _gvn.transform((Node*)new (C, 2) Log10DNode(arg)); | |
1532 break; | |
1533 default: | |
1534 assert(false, "bad intrinsic was passed in"); | |
1535 return false; | |
1536 } | |
1537 | |
1538 // Push result back on JVM stack | |
1539 push_pair(trans); | |
1540 return true; | |
1541 } | |
1542 | |
1543 //------------------------------runtime_math----------------------------- | |
1544 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) { | |
1545 Node* a = NULL; | |
1546 Node* b = NULL; | |
1547 | |
1548 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(), | |
1549 "must be (DD)D or (D)D type"); | |
1550 | |
1551 // Inputs | |
1552 _sp += arg_size(); // restore stack pointer | |
1553 if (call_type == OptoRuntime::Math_DD_D_Type()) { | |
1554 b = pop_math_arg(); | |
1555 } | |
1556 a = pop_math_arg(); | |
1557 | |
1558 const TypePtr* no_memory_effects = NULL; | |
1559 Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName, | |
1560 no_memory_effects, | |
1561 a, top(), b, b ? top() : NULL); | |
1562 Node* value = _gvn.transform(new (C, 1) ProjNode(trig, TypeFunc::Parms+0)); | |
1563 #ifdef ASSERT | |
1564 Node* value_top = _gvn.transform(new (C, 1) ProjNode(trig, TypeFunc::Parms+1)); | |
1565 assert(value_top == top(), "second value must be top"); | |
1566 #endif | |
1567 | |
1568 push_pair(value); | |
1569 return true; | |
1570 } | |
1571 | |
1572 //------------------------------inline_math_native----------------------------- | |
1573 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) { | |
1574 switch (id) { | |
1575 // These intrinsics are not properly supported on all hardware | |
1576 case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) : | |
1577 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS"); | |
1578 case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) : | |
1579 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN"); | |
1580 case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) : | |
1581 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN"); | |
1582 | |
1583 case vmIntrinsics::_dlog: return Matcher::has_match_rule(Op_LogD) ? inline_trans(id) : | |
1584 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG"); | |
1585 case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_trans(id) : | |
1586 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10"); | |
1587 | |
1588 // These intrinsics are supported on all hardware | |
1589 case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_sqrt(id) : false; | |
1590 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_abs(id) : false; | |
1591 | |
1592 // These intrinsics don't work on X86. The ad implementation doesn't | |
1593 // handle NaN's properly. Instead of returning infinity, the ad | |
1594 // implementation returns a NaN on overflow. See bug: 6304089 | |
1595 // Once the ad implementations are fixed, change the code below | |
1596 // to match the intrinsics above | |
1597 | |
1598 case vmIntrinsics::_dexp: return | |
1599 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP"); | |
1600 case vmIntrinsics::_dpow: return | |
1601 runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW"); | |
1602 | |
1603 // These intrinsics are not yet correctly implemented | |
1604 case vmIntrinsics::_datan2: | |
1605 return false; | |
1606 | |
1607 default: | |
1608 ShouldNotReachHere(); | |
1609 return false; | |
1610 } | |
1611 } | |
1612 | |
1613 static bool is_simple_name(Node* n) { | |
1614 return (n->req() == 1 // constant | |
1615 || (n->is_Type() && n->as_Type()->type()->singleton()) | |
1616 || n->is_Proj() // parameter or return value | |
1617 || n->is_Phi() // local of some sort | |
1618 ); | |
1619 } | |
1620 | |
1621 //----------------------------inline_min_max----------------------------------- | |
1622 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) { | |
1623 push(generate_min_max(id, argument(0), argument(1))); | |
1624 | |
1625 return true; | |
1626 } | |
1627 | |
1628 Node* | |
1629 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) { | |
1630 // These are the candidate return values: |
1631 Node* xvalue = x0; | |
1632 Node* yvalue = y0; | |
1633 | |
1634 if (xvalue == yvalue) { | |
1635 return xvalue; | |
1636 } | |
1637 | |
1638 bool want_max = (id == vmIntrinsics::_max); | |
1639 | |
1640 const TypeInt* txvalue = _gvn.type(xvalue)->isa_int(); | |
1641 const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int(); | |
1642 if (txvalue == NULL || tyvalue == NULL) return top(); | |
1643 // This is not really necessary, but it is consistent with a | |
1644 // hypothetical MaxINode::Value method: | |
1645 int widen = MAX2(txvalue->_widen, tyvalue->_widen); | |
1646 | |
1647 // %%% This folding logic should (ideally) be in a different place. | |
1648 // Some of it should be inside IfNode, and there should be a more |
1649 // reliable transformation of ?:-style patterns into cmoves. We also |
1650 // want more powerful optimizations around cmove and min/max. |
1651 | |
1652 // Try to find a dominating comparison of these guys. | |
1653 // It can simplify the index computation for Arrays.copyOf | |
1654 // and similar uses of System.arraycopy. | |
1655 // First, compute the normalized version of CmpI(x, y). | |
1656 int cmp_op = Op_CmpI; | |
1657 Node* xkey = xvalue; | |
1658 Node* ykey = yvalue; | |
1659 Node* ideal_cmpxy = _gvn.transform( new(C, 3) CmpINode(xkey, ykey) ); | |
1660 if (ideal_cmpxy->is_Cmp()) { | |
1661 // E.g., if we have CmpI(length - offset, count), | |
1662 // it might idealize to CmpI(length, count + offset) | |
1663 cmp_op = ideal_cmpxy->Opcode(); | |
1664 xkey = ideal_cmpxy->in(1); | |
1665 ykey = ideal_cmpxy->in(2); | |
1666 } | |
1667 | |
1668 // Start by locating any relevant comparisons. | |
1669 Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey; | |
1670 Node* cmpxy = NULL; | |
1671 Node* cmpyx = NULL; | |
1672 for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) { | |
1673 Node* cmp = start_from->fast_out(k); | |
1674 if (cmp->outcnt() > 0 && // must have prior uses | |
1675 cmp->in(0) == NULL && // must be context-independent | |
1676 cmp->Opcode() == cmp_op) { // right kind of compare | |
1677 if (cmp->in(1) == xkey && cmp->in(2) == ykey) cmpxy = cmp; | |
1678 if (cmp->in(1) == ykey && cmp->in(2) == xkey) cmpyx = cmp; | |
1679 } | |
1680 } | |
1681 | |
1682 const int NCMPS = 2; | |
1683 Node* cmps[NCMPS] = { cmpxy, cmpyx }; | |
1684 int cmpn; | |
1685 for (cmpn = 0; cmpn < NCMPS; cmpn++) { | |
1686 if (cmps[cmpn] != NULL) break; // find a result | |
1687 } | |
1688 if (cmpn < NCMPS) { | |
1689 // Look for a dominating test that tells us the min and max. | |
1690 int depth = 0; // Limit search depth for speed | |
1691 Node* dom = control(); | |
1692 for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) { | |
1693 if (++depth >= 100) break; | |
1694 Node* ifproj = dom; | |
1695 if (!ifproj->is_Proj()) continue; | |
1696 Node* iff = ifproj->in(0); | |
1697 if (!iff->is_If()) continue; | |
1698 Node* bol = iff->in(1); | |
1699 if (!bol->is_Bool()) continue; | |
1700 Node* cmp = bol->in(1); | |
1701 if (cmp == NULL) continue; | |
1702 for (cmpn = 0; cmpn < NCMPS; cmpn++) | |
1703 if (cmps[cmpn] == cmp) break; | |
1704 if (cmpn == NCMPS) continue; | |
1705 BoolTest::mask btest = bol->as_Bool()->_test._test; | |
1706 if (ifproj->is_IfFalse()) btest = BoolTest(btest).negate(); | |
1707 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute(); | |
1708 // At this point, we know that 'x btest y' is true. | |
1709 switch (btest) { | |
1710 case BoolTest::eq: | |
1711 // They are proven equal, so we can collapse the min/max. | |
1712 // Either value is the answer. Choose the simpler. | |
1713 if (is_simple_name(yvalue) && !is_simple_name(xvalue)) | |
1714 return yvalue; | |
1715 return xvalue; | |
1716 case BoolTest::lt: // x < y | |
1717 case BoolTest::le: // x <= y | |
1718 return (want_max ? yvalue : xvalue); | |
1719 case BoolTest::gt: // x > y | |
1720 case BoolTest::ge: // x >= y | |
1721 return (want_max ? xvalue : yvalue); | |
1722 } | |
1723 } | |
1724 } | |
1725 | |
1726 // We failed to find a dominating test. | |
1727 // Let's pick a test that might GVN with prior tests. | |
1728 Node* best_bol = NULL; | |
1729 BoolTest::mask best_btest = BoolTest::illegal; | |
1730 for (cmpn = 0; cmpn < NCMPS; cmpn++) { | |
1731 Node* cmp = cmps[cmpn]; | |
1732 if (cmp == NULL) continue; | |
1733 for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) { | |
1734 Node* bol = cmp->fast_out(j); | |
1735 if (!bol->is_Bool()) continue; | |
1736 BoolTest::mask btest = bol->as_Bool()->_test._test; | |
1737 if (btest == BoolTest::eq || btest == BoolTest::ne) continue; | |
1738 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute(); | |
1739 if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) { | |
1740 best_bol = bol->as_Bool(); | |
1741 best_btest = btest; | |
1742 } | |
1743 } | |
1744 } | |
1745 | |
1746 Node* answer_if_true = NULL; | |
1747 Node* answer_if_false = NULL; | |
1748 switch (best_btest) { | |
1749 default: | |
1750 if (cmpxy == NULL) | |
1751 cmpxy = ideal_cmpxy; | |
1752 best_bol = _gvn.transform( new(C, 2) BoolNode(cmpxy, BoolTest::lt) ); | |
1753 // and fall through: | |
1754 case BoolTest::lt: // x < y | |
1755 case BoolTest::le: // x <= y | |
1756 answer_if_true = (want_max ? yvalue : xvalue); | |
1757 answer_if_false = (want_max ? xvalue : yvalue); | |
1758 break; | |
1759 case BoolTest::gt: // x > y | |
1760 case BoolTest::ge: // x >= y | |
1761 answer_if_true = (want_max ? xvalue : yvalue); | |
1762 answer_if_false = (want_max ? yvalue : xvalue); | |
1763 break; | |
1764 } | |
1765 | |
1766 jint hi, lo; | |
1767 if (want_max) { | |
1768 // We can sharpen the minimum. | |
1769 hi = MAX2(txvalue->_hi, tyvalue->_hi); | |
1770 lo = MAX2(txvalue->_lo, tyvalue->_lo); | |
1771 } else { | |
1772 // We can sharpen the maximum. | |
1773 hi = MIN2(txvalue->_hi, tyvalue->_hi); | |
1774 lo = MIN2(txvalue->_lo, tyvalue->_lo); | |
1775 } | |
1776 | |
1777 // Use a flow-free graph structure, to avoid creating excess control edges | |
1778 // which could hinder other optimizations. | |
1779 // Since Math.min/max is often used with arraycopy, we want | |
1780 // tightly_coupled_allocation to be able to see beyond min/max expressions. | |
1781 Node* cmov = CMoveNode::make(C, NULL, best_bol, | |
1782 answer_if_false, answer_if_true, | |
1783 TypeInt::make(lo, hi, widen)); | |
1784 | |
1785 return _gvn.transform(cmov); | |
1786 | |
1787 /* | |
1788 // This is not as desirable as it may seem, since Min and Max | |
1789 // nodes do not have a full set of optimizations. | |
1790 // And they would interfere, anyway, with 'if' optimizations | |
1791 // and with CMoveI canonical forms. | |
1792 switch (id) { | |
1793 case vmIntrinsics::_min: | |
1794 result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break; | |
1795 case vmIntrinsics::_max: | |
1796 result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break; | |
1797 default: | |
1798 ShouldNotReachHere(); | |
1799 } | |
1800 */ | |
1801 } | |
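// The interval arithmetic used above, as a self-contained model
// (illustration only): max(x, y) is bounded by the pairwise maxima of the
// input bounds, and dually for min.
struct IntRangeSketch { jint lo, hi; };
static IntRangeSketch sharpen_max_sketch(IntRangeSketch x, IntRangeSketch y) {
  IntRangeSketch r;
  r.lo = (x.lo > y.lo) ? x.lo : y.lo;  // MAX2 of the lower bounds
  r.hi = (x.hi > y.hi) ? x.hi : y.hi;  // MAX2 of the upper bounds
  return r;
}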
1802 | |
1803 inline int | |
1804 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) { | |
1805 const TypePtr* base_type = TypePtr::NULL_PTR; | |
1806 if (base != NULL) base_type = _gvn.type(base)->isa_ptr(); | |
1807 if (base_type == NULL) { | |
1808 // Unknown type. | |
1809 return Type::AnyPtr; | |
1810 } else if (base_type == TypePtr::NULL_PTR) { | |
1811 // Since this is a NULL+long form, we have to switch to a rawptr. | |
1812 base = _gvn.transform( new (C, 2) CastX2PNode(offset) ); | |
1813 offset = MakeConX(0); | |
1814 return Type::RawPtr; | |
1815 } else if (base_type->base() == Type::RawPtr) { | |
1816 return Type::RawPtr; | |
1817 } else if (base_type->isa_oopptr()) { | |
1818 // Base is never null => always a heap address. | |
1819 if (base_type->ptr() == TypePtr::NotNull) { | |
1820 return Type::OopPtr; | |
1821 } | |
1822 // Offset is small => always a heap address. | |
1823 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t(); | |
1824 if (offset_type != NULL && | |
1825 base_type->offset() == 0 && // (should always be?) | |
1826 offset_type->_lo >= 0 && | |
1827 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) { | |
1828 return Type::OopPtr; | |
1829 } | |
1830 // Otherwise, it might either be oop+off or NULL+addr. | |
1831 return Type::AnyPtr; | |
1832 } else { | |
1833 // No information: | |
1834 return Type::AnyPtr; | |
1835 } | |
1836 } | |
1837 | |
1838 inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) { | |
1839 int kind = classify_unsafe_addr(base, offset); | |
1840 if (kind == Type::RawPtr) { | |
1841 return basic_plus_adr(top(), base, offset); | |
1842 } else { | |
1843 return basic_plus_adr(base, offset); | |
1844 } | |
1845 } | |
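// The addressing shapes handled above, modelled with plain pointers
// (a sketch, not VM code):
static void* unsafe_address_sketch(char* base, intptr_t offset) {
  if (base == NULL) {
    return (void*) offset;  // NULL+long form: the offset is an absolute address
  }
  return base + offset;     // oop+offset or raw+offset: plain byte addressing
}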
1846 | |
643 | 1847 //----------------------------inline_bitCount_int/long----------------------- |
1848 // inline int Integer.bitCount(int) |
1849 // inline int Long.bitCount(long) |
1850 bool LibraryCallKit::inline_bitCount(vmIntrinsics::ID id) { |
1851 assert(id == vmIntrinsics::_bitCount_i || id == vmIntrinsics::_bitCount_l, "not bitCount"); |
1852 if (id == vmIntrinsics::_bitCount_i && !Matcher::has_match_rule(Op_PopCountI)) return false; |
1853 if (id == vmIntrinsics::_bitCount_l && !Matcher::has_match_rule(Op_PopCountL)) return false; |
1854 _sp += arg_size(); // restore stack pointer |
1855 switch (id) { |
1856 case vmIntrinsics::_bitCount_i: |
1857 push(_gvn.transform(new (C, 2) PopCountINode(pop()))); |
1858 break; |
1859 case vmIntrinsics::_bitCount_l: |
1860 push_pair(_gvn.transform(new (C, 2) PopCountLNode(pop_pair()))); |
1861 break; |
1862 default: |
1863 ShouldNotReachHere(); |
1864 } |
1865 return true; |
1866 } |
1867 |
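// When no PopCount match rule exists, the intrinsic is rejected above and
// the bytecode runs instead.  For reference, a self-contained sketch of
// the classic SWAR population count that Integer.bitCount computes:
static int bit_count_sketch(juint x) {
  x = x - ((x >> 1) & 0x55555555);                 // 2-bit partial sums
  x = (x & 0x33333333) + ((x >> 2) & 0x33333333);  // 4-bit partial sums
  x = (x + (x >> 4)) & 0x0f0f0f0f;                 // 8-bit partial sums
  return (int) ((x * 0x01010101) >> 24);           // add the four byte sums
}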
0 | 1868 //----------------------------inline_reverseBytes_int/long------------------- |
605 | 1869 // inline Integer.reverseBytes(int) |
1870 // inline Long.reverseBytes(long) | |
0 | 1871 bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) { |
1872 assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes"); | |
1873 if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false; | |
1874 if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false; | |
1875 _sp += arg_size(); // restore stack pointer | |
1876 switch (id) { | |
1877 case vmIntrinsics::_reverseBytes_i: | |
1878 push(_gvn.transform(new (C, 2) ReverseBytesINode(0, pop()))); | |
1879 break; | |
1880 case vmIntrinsics::_reverseBytes_l: | |
1881 push_pair(_gvn.transform(new (C, 2) ReverseBytesLNode(0, pop_pair()))); | |
1882 break; | |
1883 default: | |
1884 ; | |
1885 } | |
1886 return true; | |
1887 } | |
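// What ReverseBytesI computes, as a self-contained sketch; where the
// platform has a match rule this becomes a single bswap-style instruction:
static juint reverse_bytes_sketch(juint x) {
  return (x >> 24)
       | ((x >> 8) & 0x0000ff00)
       | ((x << 8) & 0x00ff0000)
       | (x << 24);
}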
1888 | |
1889 //----------------------------inline_unsafe_access---------------------------- | |
1890 | |
1891 const static BasicType T_ADDRESS_HOLDER = T_LONG; | |
1892 | |
1893 // Interpret Unsafe.fieldOffset cookies correctly: | |
1894 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset); | |
1895 | |
1896 bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) { | |
1897 if (callee()->is_static()) return false; // caller must have the capability! | |
1898 | |
1899 #ifndef PRODUCT | |
1900 { | |
1901 ResourceMark rm; | |
1902 // Check the signatures. | |
1903 ciSignature* sig = signature(); | |
1904 #ifdef ASSERT | |
1905 if (!is_store) { | |
1906 // Object getObject(Object base, int/long offset), etc. | |
1907 BasicType rtype = sig->return_type()->basic_type(); | |
1908 if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name()) | |
1909 rtype = T_ADDRESS; // it is really a C void* | |
1910 assert(rtype == type, "getter must return the expected value"); | |
1911 if (!is_native_ptr) { | |
1912 assert(sig->count() == 2, "oop getter has 2 arguments"); | |
1913 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object"); | |
1914 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct"); | |
1915 } else { | |
1916 assert(sig->count() == 1, "native getter has 1 argument"); | |
1917 assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long"); | |
1918 } | |
1919 } else { | |
1920 // void putObject(Object base, int/long offset, Object x), etc. | |
1921 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value"); | |
1922 if (!is_native_ptr) { | |
1923 assert(sig->count() == 3, "oop putter has 3 arguments"); | |
1924 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object"); | |
1925 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct"); | |
1926 } else { | |
1927 assert(sig->count() == 2, "native putter has 2 arguments"); | |
1928 assert(sig->type_at(0)->basic_type() == T_LONG, "putter base is long"); | |
1929 } | |
1930 BasicType vtype = sig->type_at(sig->count()-1)->basic_type(); | |
1931 if (vtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::putAddress_name()) | |
1932 vtype = T_ADDRESS; // it is really a C void* | |
1933 assert(vtype == type, "putter must accept the expected value"); | |
1934 } | |
1935 #endif // ASSERT | |
1936 } | |
1937 #endif //PRODUCT | |
1938 | |
1939 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | |
1940 | |
1941 int type_words = type2size[ (type == T_ADDRESS) ? T_LONG : type ]; | |
1942 | |
1943 // Argument words: "this" plus (oop/offset) or (lo/hi) args plus maybe 1 or 2 value words | |
1944 int nargs = 1 + (is_native_ptr ? 2 : 3) + (is_store ? type_words : 0); | |
1945 | |
1946 debug_only(int saved_sp = _sp); | |
1947 _sp += nargs; | |
1948 | |
1949 Node* val; | |
1950 debug_only(val = (Node*)(uintptr_t)-1); | |
1951 | |
1952 | |
1953 if (is_store) { | |
1954 // Get the value being stored. (Pop it first; it was pushed last.) | |
1955 switch (type) { | |
1956 case T_DOUBLE: | |
1957 case T_LONG: | |
1958 case T_ADDRESS: | |
1959 val = pop_pair(); | |
1960 break; | |
1961 default: | |
1962 val = pop(); | |
1963 } | |
1964 } | |
1965 | |
1966 // Build address expression. See the code in inline_unsafe_prefetch. | |
1967 Node *adr; | |
1968 Node *heap_base_oop = top(); | |
1969 if (!is_native_ptr) { | |
1970 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset | |
1971 Node* offset = pop_pair(); | |
1972 // The base is either a Java object or a value produced by Unsafe.staticFieldBase | |
1973 Node* base = pop(); | |
1974 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset | |
1975 // to be plain byte offsets, which are also the same as those accepted | |
1976 // by oopDesc::field_base. | |
1977 assert(Unsafe_field_offset_to_byte_offset(11) == 11, | |
1978 "fieldOffset must be byte-scaled"); | |
1979 // 32-bit machines ignore the high half! | |
1980 offset = ConvL2X(offset); | |
1981 adr = make_unsafe_address(base, offset); | |
1982 heap_base_oop = base; | |
1983 } else { | |
1984 Node* ptr = pop_pair(); | |
1985 // Adjust Java long to machine word: | |
1986 ptr = ConvL2X(ptr); | |
1987 adr = make_unsafe_address(NULL, ptr); | |
1988 } | |
1989 | |
1990 // Pop receiver last: it was pushed first. | |
1991 Node *receiver = pop(); | |
1992 | |
1993 assert(saved_sp == _sp, "must have correct argument count"); | |
1994 | |
1995 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); | |
1996 | |
1997 // First guess at the value type. | |
1998 const Type *value_type = Type::get_const_basic_type(type); | |
1999 | |
2000 // Try to categorize the address. If it comes up as TypeJavaPtr::BOTTOM, | |
2001 // there was not enough information to nail it down. | |
2002 Compile::AliasType* alias_type = C->alias_type(adr_type); | |
2003 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here"); | |
2004 | |
2005 // We will need memory barriers unless we can determine a unique | |
2006 // alias category for this reference. (Note: If for some reason | |
2007 // the barriers get omitted and the unsafe reference begins to "pollute" | |
2008 // the alias analysis of the rest of the graph, either Compile::can_alias | |
2009 // or Compile::must_alias will throw a diagnostic assert.) | |
2010 bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM); | |
2011 | |
2012 if (!is_store && type == T_OBJECT) { | |
2013 // Attempt to infer a sharper value type from the offset and base type. | |
2014 ciKlass* sharpened_klass = NULL; | |
2015 | |
2016 // See if it is an instance field, with an object type. | |
2017 if (alias_type->field() != NULL) { | |
2018 assert(!is_native_ptr, "native pointer op cannot use a java address"); | |
2019 if (alias_type->field()->type()->is_klass()) { | |
2020 sharpened_klass = alias_type->field()->type()->as_klass(); | |
2021 } | |
2022 } | |
2023 | |
2024 // See if it is a narrow oop array. | |
2025 if (adr_type->isa_aryptr()) { | |
113 | 2026 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes(type)) { |
0 | 2027 const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr(); |
2028 if (elem_type != NULL) { | |
2029 sharpened_klass = elem_type->klass(); | |
2030 } | |
2031 } | |
2032 } | |
2033 | |
2034 if (sharpened_klass != NULL) { | |
2035 const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass); | |
2036 | |
2037 // Sharpen the value type. | |
2038 value_type = tjp; | |
2039 | |
2040 #ifndef PRODUCT | |
2041 if (PrintIntrinsics || PrintInlining || PrintOptoInlining) { | |
2042 tty->print(" from base type: "); adr_type->dump(); | |
2043 tty->print(" sharpened value: "); value_type->dump(); | |
2044 } | |
2045 #endif | |
2046 } | |
2047 } | |
2048 | |
2049 // Null check on self without removing any arguments. The argument | |
2050 // null check technically happens in the wrong place, which can lead to | |
2051 // invalid stack traces when the primitive is inlined into a method | |
2052 // which handles NullPointerExceptions. | |
2053 _sp += nargs; | |
2054 do_null_check(receiver, T_OBJECT); | |
2055 _sp -= nargs; | |
2056 if (stopped()) { | |
2057 return true; | |
2058 } | |
2059 // Heap pointers get a null-check from the interpreter, | |
2060 // as a courtesy. However, this is not guaranteed by Unsafe, | |
2061 // and it is not possible to fully distinguish unintended nulls | |
2062 // from intended ones in this API. | |
2063 | |
2064 if (is_volatile) { | |
2065 // We need to emit leading and trailing CPU membars (see below) in | |
2066 // addition to memory membars when is_volatile. This is a little | |
2067 // too strong, but avoids the need to insert per-alias-type | |
2068 // volatile membars (for stores; compare Parse::do_put_xxx), which | |
605 | 2069 // we cannot do effectively here because we probably only have a |
0 | 2070 // rough approximation of type. |
2071 need_mem_bar = true; | |
2072 // For Stores, place a memory ordering barrier now. | |
2073 if (is_store) | |
2074 insert_mem_bar(Op_MemBarRelease); | |
2075 } | |
2076 | |
2077 // Memory barrier to prevent normal and 'unsafe' accesses from | |
2078 // bypassing each other. Happens after null checks, so the | |
2079 // exception paths do not take memory state from the memory barrier, | |
2080 // so there's no problems making a strong assert about mixing users | |
2081 // of safe & unsafe memory. Otherwise fails in a CTW of rt.jar | |
2082 // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl. | |
2083 if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder); | |
2084 | |
2085 if (!is_store) { | |
2086 Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile); | |
2087 // load value and push onto stack | |
2088 switch (type) { | |
2089 case T_BOOLEAN: | |
2090 case T_CHAR: | |
2091 case T_BYTE: | |
2092 case T_SHORT: | |
2093 case T_INT: | |
2094 case T_FLOAT: | |
2095 case T_OBJECT: | |
2096 push( p ); | |
2097 break; | |
2098 case T_ADDRESS: | |
2099 // Cast to an int type. | |
2100 p = _gvn.transform( new (C, 2) CastP2XNode(NULL,p) ); | |
2101 p = ConvX2L(p); | |
2102 push_pair(p); | |
2103 break; | |
2104 case T_DOUBLE: | |
2105 case T_LONG: | |
2106 push_pair( p ); | |
2107 break; | |
2108 default: ShouldNotReachHere(); | |
2109 } | |
2110 } else { | |
2111 // place effect of store into memory | |
2112 switch (type) { | |
2113 case T_DOUBLE: | |
2114 val = dstore_rounding(val); | |
2115 break; | |
2116 case T_ADDRESS: | |
2117 // Repackage the long as a pointer. | |
2118 val = ConvL2X(val); | |
2119 val = _gvn.transform( new (C, 2) CastX2PNode(val) ); | |
2120 break; | |
2121 } | |
2122 | |
2123 if (type != T_OBJECT ) { | |
2124 (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile); | |
2125 } else { | |
2126 // Possibly an oop being stored to Java heap or native memory | |
2127 if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) { | |
2128 // oop to Java heap. | |
2129 (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, val->bottom_type(), type); | |
2130 } else { | |
2131 | |
2132 // We can't tell at compile time if we are storing in the Java heap or outside | |
2133 // of it. So we need to emit code to conditionally do the proper type of | |
2134 // store. | |
2135 | |
2136 IdealKit kit(gvn(), control(), merged_memory()); | |
2137 kit.declares_done(); | |
2138 // QQQ who knows what probability is here?? | |
2139 kit.if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); { | |
2140 (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, val->bottom_type(), type); | |
2141 } kit.else_(); { | |
2142 (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile); | |
2143 } kit.end_if(); | |
2144 } | |
2145 } | |
2146 } | |
2147 | |
2148 if (is_volatile) { | |
2149 if (!is_store) | |
2150 insert_mem_bar(Op_MemBarAcquire); | |
2151 else | |
2152 insert_mem_bar(Op_MemBarVolatile); | |
2153 } | |
2154 | |
2155 if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder); | |
2156 | |
2157 return true; | |
2158 } | |
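// The barrier placement above, summarised with GCC builtins (an analogy
// only; the real membars are IR nodes, and a full fence is stronger than
// the acquire/release semantics actually required):
static int volatile_get_sketch(volatile int* p) {
  int v = *p;            // the load itself
  __sync_synchronize();  // MemBarAcquire + MemBarCPUOrder
  return v;
}
static void volatile_put_sketch(volatile int* p, int v) {
  __sync_synchronize();  // MemBarRelease + MemBarCPUOrder
  *p = v;                // the store itself
  __sync_synchronize();  // MemBarVolatile + MemBarCPUOrder
}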
2159 | |
2160 //----------------------------inline_unsafe_prefetch---------------------------- | |
2161 | |
2162 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) { | |
2163 #ifndef PRODUCT | |
2164 { | |
2165 ResourceMark rm; | |
2166 // Check the signatures. | |
2167 ciSignature* sig = signature(); | |
2168 #ifdef ASSERT | |
2169 // Object getObject(Object base, int/long offset), etc. | |
2170 BasicType rtype = sig->return_type()->basic_type(); | |
2171 if (!is_native_ptr) { | |
2172 assert(sig->count() == 2, "oop prefetch has 2 arguments"); | |
2173 assert(sig->type_at(0)->basic_type() == T_OBJECT, "prefetch base is object"); | |
2174 assert(sig->type_at(1)->basic_type() == T_LONG, "prefetch offset is correct"); |
2175 } else { | |
2176 assert(sig->count() == 1, "native prefetch has 1 argument"); | |
2177 assert(sig->type_at(0)->basic_type() == T_LONG, "prefetch base is long"); | |
2178 } | |
2179 #endif // ASSERT | |
2180 } | |
2181 #endif // !PRODUCT | |
2182 | |
2183 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | |
2184 | |
2185 // Argument words: "this" if not static, plus (oop/offset) or (lo/hi) args | |
2186 int nargs = (is_static ? 0 : 1) + (is_native_ptr ? 2 : 3); | |
2187 | |
2188 debug_only(int saved_sp = _sp); | |
2189 _sp += nargs; | |
2190 | |
2191 // Build address expression. See the code in inline_unsafe_access. | |
2192 Node *adr; | |
2193 if (!is_native_ptr) { | |
2194 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset | |
2195 Node* offset = pop_pair(); | |
2196 // The base is either a Java object or a value produced by Unsafe.staticFieldBase | |
2197 Node* base = pop(); | |
2198 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset | |
2199 // to be plain byte offsets, which are also the same as those accepted | |
2200 // by oopDesc::field_base. | |
2201 assert(Unsafe_field_offset_to_byte_offset(11) == 11, | |
2202 "fieldOffset must be byte-scaled"); | |
2203 // 32-bit machines ignore the high half! | |
2204 offset = ConvL2X(offset); | |
2205 adr = make_unsafe_address(base, offset); | |
2206 } else { | |
2207 Node* ptr = pop_pair(); | |
2208 // Adjust Java long to machine word: | |
2209 ptr = ConvL2X(ptr); | |
2210 adr = make_unsafe_address(NULL, ptr); | |
2211 } | |
2212 | |
2213 if (is_static) { | |
2214 assert(saved_sp == _sp, "must have correct argument count"); | |
2215 } else { | |
2216 // Pop receiver last: it was pushed first. | |
2217 Node *receiver = pop(); | |
2218 assert(saved_sp == _sp, "must have correct argument count"); | |
2219 | |
2220 // Null check on self without removing any arguments. The argument | |
2221 // null check technically happens in the wrong place, which can lead to | |
2222 // invalid stack traces when the primitive is inlined into a method | |
2223 // which handles NullPointerExceptions. | |
2224 _sp += nargs; | |
2225 do_null_check(receiver, T_OBJECT); | |
2226 _sp -= nargs; | |
2227 if (stopped()) { | |
2228 return true; | |
2229 } | |
2230 } | |
2231 | |
2232 // Generate the read or write prefetch | |
2233 Node *prefetch; | |
2234 if (is_store) { | |
2235 prefetch = new (C, 3) PrefetchWriteNode(i_o(), adr); | |
2236 } else { | |
2237 prefetch = new (C, 3) PrefetchReadNode(i_o(), adr); | |
2238 } | |
2239 prefetch->init_req(0, control()); | |
2240 set_i_o(_gvn.transform(prefetch)); | |
2241 | |
2242 return true; | |
2243 } | |
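// The Prefetch{Read,Write}Node generated above corresponds, roughly, to a
// single prefetch instruction; sketched with the GCC builtin as an analogy
// (its second argument must be a constant: 1 = prepare for a write):
static void prefetch_sketch(const void* adr, bool is_store) {
  if (is_store) {
    __builtin_prefetch(adr, 1);
  } else {
    __builtin_prefetch(adr, 0);
  }
}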
2244 | |
2245 //----------------------------inline_unsafe_CAS---------------------------- | |
2246 | |
2247 bool LibraryCallKit::inline_unsafe_CAS(BasicType type) { | |
2248 // This basic scheme here is the same as inline_unsafe_access, but | |
2249 // differs in enough details that combining them would make the code | |
2250 // overly confusing. (This is a true fact! I originally combined | |
2251 // them, but even I was confused by it!) As much code/comments as | |
2252 // possible are retained from inline_unsafe_access though to make | |
605 | 2253 // the correspondences clearer. - dl |
0 | 2254 |
2255 if (callee()->is_static()) return false; // caller must have the capability! | |
2256 | |
2257 #ifndef PRODUCT | |
2258 { | |
2259 ResourceMark rm; | |
2260 // Check the signatures. | |
2261 ciSignature* sig = signature(); | |
2262 #ifdef ASSERT | |
2263 BasicType rtype = sig->return_type()->basic_type(); | |
2264 assert(rtype == T_BOOLEAN, "CAS must return boolean"); | |
2265 assert(sig->count() == 4, "CAS has 4 arguments"); | |
2266 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object"); | |
2267 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long"); | |
2268 #endif // ASSERT | |
2269 } | |
2270 #endif //PRODUCT | |
2271 | |
2272 // number of stack slots per value argument (1 or 2) | |
2273 int type_words = type2size[type]; | |
2274 | |
2275 // Cannot inline wide CAS on machines that don't support it natively | |
29 | 2276 if (type2aelembytes(type) > BytesPerInt && !VM_Version::supports_cx8()) |
0 | 2277 return false; |
2278 | |
2279 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | |
2280 | |
2281 // Argument words: "this" plus oop plus offset plus oldvalue plus newvalue; | |
2282 int nargs = 1 + 1 + 2 + type_words + type_words; | |
2283 | |
2284 // pop arguments: newval, oldval, offset, base, and receiver | |
2285 debug_only(int saved_sp = _sp); | |
2286 _sp += nargs; | |
2287 Node* newval = (type_words == 1) ? pop() : pop_pair(); | |
2288 Node* oldval = (type_words == 1) ? pop() : pop_pair(); | |
2289 Node *offset = pop_pair(); | |
2290 Node *base = pop(); | |
2291 Node *receiver = pop(); | |
2292 assert(saved_sp == _sp, "must have correct argument count"); | |
2293 | |
2294 // Null check receiver. | |
2295 _sp += nargs; | |
2296 do_null_check(receiver, T_OBJECT); | |
2297 _sp -= nargs; | |
2298 if (stopped()) { | |
2299 return true; | |
2300 } | |
2301 | |
2302 // Build field offset expression. | |
2303 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset | |
2304 // to be plain byte offsets, which are also the same as those accepted | |
2305 // by oopDesc::field_base. | |
2306 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled"); | |
2307 // 32-bit machines ignore the high half of long offsets | |
2308 offset = ConvL2X(offset); | |
2309 Node* adr = make_unsafe_address(base, offset); | |
2310 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); | |
2311 | |
2312 // (Unlike inline_unsafe_access, there seems no point in trying | |
2313 // to refine types. Just use the coarse types here.) |
2314 const Type *value_type = Type::get_const_basic_type(type); | |
2315 Compile::AliasType* alias_type = C->alias_type(adr_type); | |
2316 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here"); | |
2317 int alias_idx = C->get_alias_index(adr_type); | |
2318 | |
2319 // Memory-model-wise, a CAS acts like a little synchronized block, | |
605 | 2320 // so needs barriers on each side. These don't translate into |
0 | 2321 // actual barriers on most machines, but we still need the rest of |
2322 // the compiler to respect ordering. |
2323 | |
2324 insert_mem_bar(Op_MemBarRelease); | |
2325 insert_mem_bar(Op_MemBarCPUOrder); | |
2326 | |
2327 // 4984716: MemBars must be inserted before this | |
2328 // memory node in order to avoid a false | |
2329 // dependency which will confuse the scheduler. | |
2330 Node *mem = memory(alias_idx); | |
2331 | |
2332 // For now, we handle only those cases that actually exist: ints, | |
2333 // longs, and Object. Adding others should be straightforward. | |
2334 Node* cas; | |
2335 switch(type) { | |
2336 case T_INT: | |
2337 cas = _gvn.transform(new (C, 5) CompareAndSwapINode(control(), mem, adr, newval, oldval)); | |
2338 break; | |
2339 case T_LONG: | |
2340 cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval)); | |
2341 break; | |
2342 case T_OBJECT: | |
113 | 2343 // reference stores need a store barrier. |
0 | 2344 // (They don't if CAS fails, but it isn't worth checking.) |
2345 pre_barrier(control(), base, adr, alias_idx, newval, value_type, T_OBJECT); |
113 | 2346 #ifdef _LP64 |
163 | 2347 if (adr->bottom_type()->is_ptr_to_narrowoop()) { |
221 | 2348 Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop())); |
2349 Node *oldval_enc = _gvn.transform(new (C, 2) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop())); |
113 | 2350 cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr, |
221 | 2351 newval_enc, oldval_enc)); |
113 | 2352 } else |
2353 #endif |
221 | 2354 { |
2355 cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval)); |
2356 } |
0 | 2357 post_barrier(control(), cas, base, adr, alias_idx, newval, T_OBJECT, true); |
2358 break; | |
2359 default: | |
2360 ShouldNotReachHere(); | |
2361 break; | |
2362 } | |
2363 | |
2364 // SCMemProjNodes represent the memory state of CAS. Their main | |
2365 // role is to prevent CAS nodes from being optimized away when their | |
2366 // results aren't used. | |
2367 Node* proj = _gvn.transform( new (C, 1) SCMemProjNode(cas)); | |
2368 set_memory(proj, alias_idx); | |
2369 | |
2370 // Add the trailing membar surrounding the access | |
2371 insert_mem_bar(Op_MemBarCPUOrder); | |
2372 insert_mem_bar(Op_MemBarAcquire); | |
2373 | |
2374 push(cas); | |
2375 return true; | |
2376 } | |
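// The CAS semantics in one line, via the GCC builtin (an analogy only;
// like the node built above, the builtin acts as a full two-way barrier):
static bool cas_int_sketch(volatile int* adr, int oldval, int newval) {
  return __sync_bool_compare_and_swap(adr, oldval, newval);
}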
2377 | |
2378 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) { | |
2379 // This is another variant of inline_unsafe_access, differing in | |
2380 // that it always issues a store-store ("release") barrier and ensures |
2381 // store-atomicity (which only matters for "long"). | |
2382 | |
2383 if (callee()->is_static()) return false; // caller must have the capability! | |
2384 | |
2385 #ifndef PRODUCT | |
2386 { | |
2387 ResourceMark rm; | |
2388 // Check the signatures. | |
2389 ciSignature* sig = signature(); | |
2390 #ifdef ASSERT | |
2391 BasicType rtype = sig->return_type()->basic_type(); | |
2392 assert(rtype == T_VOID, "must return void"); | |
2393 assert(sig->count() == 3, "has 3 arguments"); | |
2394 assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object"); | |
2395 assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long"); | |
2396 #endif // ASSERT | |
2397 } | |
2398 #endif //PRODUCT | |
2399 | |
2400 // number of stack slots per value argument (1 or 2) | |
2401 int type_words = type2size[type]; | |
2402 | |
2403 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | |
2404 | |
2405 // Argument words: "this" plus oop plus offset plus value; | |
2406 int nargs = 1 + 1 + 2 + type_words; | |
2407 | |
2408 // pop arguments: val, offset, base, and receiver | |
2409 debug_only(int saved_sp = _sp); | |
2410 _sp += nargs; | |
2411 Node* val = (type_words == 1) ? pop() : pop_pair(); | |
2412 Node *offset = pop_pair(); | |
2413 Node *base = pop(); | |
2414 Node *receiver = pop(); | |
2415 assert(saved_sp == _sp, "must have correct argument count"); | |
2416 | |
2417 // Null check receiver. | |
2418 _sp += nargs; | |
2419 do_null_check(receiver, T_OBJECT); | |
2420 _sp -= nargs; | |
2421 if (stopped()) { | |
2422 return true; | |
2423 } | |
2424 | |
2425 // Build field offset expression. | |
2426 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled"); | |
2427 // 32-bit machines ignore the high half of long offsets | |
2428 offset = ConvL2X(offset); | |
2429 Node* adr = make_unsafe_address(base, offset); | |
2430 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); | |
2431 const Type *value_type = Type::get_const_basic_type(type); | |
2432 Compile::AliasType* alias_type = C->alias_type(adr_type); | |
2433 | |
2434 insert_mem_bar(Op_MemBarRelease); | |
2435 insert_mem_bar(Op_MemBarCPUOrder); | |
2436 // Ensure that the store is atomic for longs: | |
2437 bool require_atomic_access = true; | |
2438 Node* store; | |
2439 if (type == T_OBJECT) { // reference stores need a store barrier. | |
2440 store = store_oop_to_unknown(control(), base, adr, adr_type, val, value_type, type); | |
2441 } else { | |
2442 store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access); | |
2443 } | |
2444 insert_mem_bar(Op_MemBarCPUOrder); | |
2445 return true; | |
2446 } | |
2447 | |
2448 bool LibraryCallKit::inline_unsafe_allocate() { | |
2449 if (callee()->is_static()) return false; // caller must have the capability! | |
2450 int nargs = 1 + 1; | |
2451 assert(signature()->size() == nargs-1, "alloc has 1 argument"); | |
2452 null_check_receiver(callee()); // check then ignore argument(0) | |
2453 _sp += nargs; // set original stack for use by uncommon_trap | |
2454 Node* cls = do_null_check(argument(1), T_OBJECT); | |
2455 _sp -= nargs; | |
2456 if (stopped()) return true; | |
2457 | |
2458 Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0); | |
2459 _sp += nargs; // set original stack for use by uncommon_trap | |
2460 kls = do_null_check(kls, T_OBJECT); | |
2461 _sp -= nargs; | |
2462 if (stopped()) return true; // argument was like int.class | |
2463 | |
2464 // Note: The argument might still be an illegal value like | |
2465 // Serializable.class or Object[].class. The runtime will handle it. | |
2466 // But we must make an explicit check for initialization. | |
2467 Node* insp = basic_plus_adr(kls, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)); | |
2468 Node* inst = make_load(NULL, insp, TypeInt::INT, T_INT); | |
2469 Node* bits = intcon(instanceKlass::fully_initialized); | |
2470 Node* test = _gvn.transform( new (C, 3) SubINode(inst, bits) ); | |
2471 // The 'test' is non-zero if we need to take a slow path. | |
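// Roughly (a sketch of the intent, not code from this file): if
// klass->init_state != fully_initialized, new_instance() takes the slow
// path, so Unsafe.allocateInstance() never hands out an object of a class
// whose initialization has not completed; the runtime initializes or throws.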
2472 | |
2473 Node* obj = new_instance(kls, test); | |
2474 push(obj); | |
2475 | |
2476 return true; | |
2477 } | |
2478 | |
2479 //------------------------inline_native_time_funcs-------------- | |
2480 // inline code for System.currentTimeMillis() and System.nanoTime() | |
2481 // these have the same type and signature | |
2482 bool LibraryCallKit::inline_native_time_funcs(bool isNano) { | |
2483 address funcAddr = isNano ? CAST_FROM_FN_PTR(address, os::javaTimeNanos) : | |
2484 CAST_FROM_FN_PTR(address, os::javaTimeMillis); | |
2485 const char * funcName = isNano ? "nanoTime" : "currentTimeMillis"; | |
2486 const TypeFunc *tf = OptoRuntime::current_time_millis_Type(); | |
2487 const TypePtr* no_memory_effects = NULL; | |
2488 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects); | |
2489 Node* value = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms+0)); | |
2490 #ifdef ASSERT | |
2491 Node* value_top = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms + 1)); | |
2492 assert(value_top == top(), "second value must be top"); | |
2493 #endif | |
2494 push_pair(value); | |
2495 return true; | |
2496 } | |
2497 | |
2498 //------------------------inline_native_currentThread------------------ | |
2499 bool LibraryCallKit::inline_native_currentThread() { | |
2500 Node* junk = NULL; | |
2501 push(generate_current_thread(junk)); | |
2502 return true; | |
2503 } | |
2504 | |
2505 //------------------------inline_native_isInterrupted------------------ | |
2506 bool LibraryCallKit::inline_native_isInterrupted() { | |
2507 const int nargs = 1+1; // receiver + boolean | |
2508 assert(nargs == arg_size(), "sanity"); | |
2509 // Add a fast path to t.isInterrupted(clear_int): | |
2510 // (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int)) | |
2511 // ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int) | |
2512 // So, in the common case that the interrupt bit is false, | |
2513 // we avoid making a call into the VM. Even if the interrupt bit | |
2514 // is true, if the clear_int argument is false, we avoid the VM call. | |
2515 // However, if the receiver is not currentThread, we must call the VM, | |
2516 // because there must be some locking done around the operation. | |
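// A Java-level sketch of the shape generated below (illustrative only;
// TLS.osthread.interrupted and isInterrupted0 stand in for the raw loads
// and the VM call built here, they are not real Java members):
//
//   boolean isInterrupted(boolean clearInt) {
//     if (this == Thread.currentThread()) {
//       if (!TLS.osthread.interrupted) return false;   // first fast path
//       if (!clearInt)                 return true;    // second fast path
//     }
//     return isInterrupted0(clearInt);                 // slow path: VM call
//   }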
2517 | |
2518 // We only go to the fast case code if we pass two guards. | |
2519 // Paths which do not pass are accumulated in the slow_region. | |
2520 RegionNode* slow_region = new (C, 1) RegionNode(1); | |
2521 record_for_igvn(slow_region); | |
2522 RegionNode* result_rgn = new (C, 4) RegionNode(1+3); // fast1, fast2, slow | |
2523 PhiNode* result_val = new (C, 4) PhiNode(result_rgn, TypeInt::BOOL); | |
2524 enum { no_int_result_path = 1, | |
2525 no_clear_result_path = 2, | |
2526 slow_result_path = 3 | |
2527 }; | |
2528 | |
2529 // (a) Receiving thread must be the current thread. | |
2530 Node* rec_thr = argument(0); | |
2531 Node* tls_ptr = NULL; | |
2532 Node* cur_thr = generate_current_thread(tls_ptr); | |
2533 Node* cmp_thr = _gvn.transform( new (C, 3) CmpPNode(cur_thr, rec_thr) ); | |
2534 Node* bol_thr = _gvn.transform( new (C, 2) BoolNode(cmp_thr, BoolTest::ne) ); | |
2535 | |
2536 bool known_current_thread = (_gvn.type(bol_thr) == TypeInt::ZERO); | |
2537 if (!known_current_thread) | |
2538 generate_slow_guard(bol_thr, slow_region); | |
2539 | |
2540 // (b) Interrupt bit on TLS must be false. | |
2541 Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset())); | |
2542 Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS); | |
2543 p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset())); | |
2544 Node* int_bit = make_load(NULL, p, TypeInt::BOOL, T_INT); | |
2545 Node* cmp_bit = _gvn.transform( new (C, 3) CmpINode(int_bit, intcon(0)) ); | |
2546 Node* bol_bit = _gvn.transform( new (C, 2) BoolNode(cmp_bit, BoolTest::ne) ); | |
2547 | |
2548 IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN); | |
2549 | |
2550 // First fast path: if (!TLS._interrupted) return false; | |
2551 Node* false_bit = _gvn.transform( new (C, 1) IfFalseNode(iff_bit) ); | |
2552 result_rgn->init_req(no_int_result_path, false_bit); | |
2553 result_val->init_req(no_int_result_path, intcon(0)); | |
2554 | |
2555 // drop through to next case | |
2556 set_control( _gvn.transform(new (C, 1) IfTrueNode(iff_bit)) ); | |
2557 | |
2558 // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path. | |
2559 Node* clr_arg = argument(1); | |
2560 Node* cmp_arg = _gvn.transform( new (C, 3) CmpINode(clr_arg, intcon(0)) ); | |
2561 Node* bol_arg = _gvn.transform( new (C, 2) BoolNode(cmp_arg, BoolTest::ne) ); | |
2562 IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN); | |
2563 | |
2564 // Second fast path: ... else if (!clear_int) return true; | |
2565 Node* false_arg = _gvn.transform( new (C, 1) IfFalseNode(iff_arg) ); | |
2566 result_rgn->init_req(no_clear_result_path, false_arg); | |
2567 result_val->init_req(no_clear_result_path, intcon(1)); | |
2568 | |
2569 // drop through to next case | |
2570 set_control( _gvn.transform(new (C, 1) IfTrueNode(iff_arg)) ); | |
2571 | |
2572 // (d) Otherwise, go to the slow path. | |
2573 slow_region->add_req(control()); | |
2574 set_control( _gvn.transform(slow_region) ); | |
2575 | |
2576 if (stopped()) { | |
2577 // There is no slow path. | |
2578 result_rgn->init_req(slow_result_path, top()); | |
2579 result_val->init_req(slow_result_path, top()); | |
2580 } else { | |
2581 // non-virtual because it is a private non-static | |
2582 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted); | |
2583 | |
2584 Node* slow_val = set_results_for_java_call(slow_call); | |
2585 // this->control() comes from set_results_for_java_call | |
2586 | |
2587 // If we know that the result of the slow call will be true, tell the optimizer! | |
2588 if (known_current_thread) slow_val = intcon(1); | |
2589 | |
2590 Node* fast_io = slow_call->in(TypeFunc::I_O); | |
2591 Node* fast_mem = slow_call->in(TypeFunc::Memory); | |
2592 // These two phis are pre-filled with copies of the fast IO and Memory | |
2593 Node* io_phi = PhiNode::make(result_rgn, fast_io, Type::ABIO); | |
2594 Node* mem_phi = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM); | |
2595 | |
2596 result_rgn->init_req(slow_result_path, control()); | |
2597 io_phi ->init_req(slow_result_path, i_o()); | |
2598 mem_phi ->init_req(slow_result_path, reset_memory()); | |
2599 result_val->init_req(slow_result_path, slow_val); | |
2600 | |
2601 set_all_memory( _gvn.transform(mem_phi) ); | |
2602 set_i_o( _gvn.transform(io_phi) ); | |
2603 } | |
2604 | |
2605 push_result(result_rgn, result_val); | |
2606 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
2607 | |
2608 return true; | |
2609 } | |
2610 | |
2611 //---------------------------load_mirror_from_klass---------------------------- | |
2612 // Given a klass oop, load its java mirror (a java.lang.Class oop). | |
2613 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) { | |
2614 Node* p = basic_plus_adr(klass, Klass::java_mirror_offset_in_bytes() + sizeof(oopDesc)); | |
2615 return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT); | |
2616 } | |
2617 | |
2618 //-----------------------load_klass_from_mirror_common------------------------- | |
2619 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop. | |
2620 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE), | |
2621 // and branch to the given path on the region. | |
2622 // If never_see_null, take an uncommon trap on null, so we can optimistically | |
2623 // compile for the non-null case. | |
2624 // If the region is NULL, force never_see_null = true. | |
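// Typical use, as seen later in this file (a sketch):
//
//   Node* kls = load_klass_from_mirror(mirror, never_see_null, nargs,
//                                      region, _prim_path);
//   phi->init_req(_prim_path, prim_return_value); // primitive mirror => null klass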
2625 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror, | |
2626 bool never_see_null, | |
2627 int nargs, | |
2628 RegionNode* region, | |
2629 int null_path, | |
2630 int offset) { | |
2631 if (region == NULL) never_see_null = true; | |
2632 Node* p = basic_plus_adr(mirror, offset); | |
2633 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL; | |
2634 Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) ); |
0 | 2635 _sp += nargs; // any deopt will start just before call to enclosing method |
2636 Node* null_ctl = top(); | |
2637 kls = null_check_oop(kls, &null_ctl, never_see_null); | |
2638 if (region != NULL) { | |
2639 // Set region->in(null_path) if the mirror is a primitive (e.g, int.class). | |
2640 region->init_req(null_path, null_ctl); | |
2641 } else { | |
2642 assert(null_ctl == top(), "no loose ends"); | |
2643 } | |
2644 _sp -= nargs; | |
2645 return kls; | |
2646 } | |
2647 | |
2648 //--------------------(inline_native_Class_query helpers)--------------------- | |
2649 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER. | |
2650 // Fall through if (mods & mask) == bits, take the guard otherwise. | |
2651 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) { | |
2652 // Branch around if the given klass has the given modifier bit set. | |
2653 // Like generate_guard, adds a new path onto the region. | |
2654 Node* modp = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)); | |
2655 Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT); | |
2656 Node* mask = intcon(modifier_mask); | |
2657 Node* bits = intcon(modifier_bits); | |
2658 Node* mbit = _gvn.transform( new (C, 3) AndINode(mods, mask) ); | |
2659 Node* cmp = _gvn.transform( new (C, 3) CmpINode(mbit, bits) ); | |
2660 Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) ); | |
2661 return generate_fair_guard(bol, region); | |
2662 } | |
2663 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) { | |
2664 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region); | |
2665 } | |
2666 | |
2667 //-------------------------inline_native_Class_query------------------- | |
2668 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { | |
2669 int nargs = 1+0; // just the Class mirror, in most cases | |
2670 const Type* return_type = TypeInt::BOOL; | |
2671 Node* prim_return_value = top(); // what happens if it's a primitive class? | |
2672 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); | |
2673 bool expect_prim = false; // most of these guys expect to work on refs | |
2674 | |
2675 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT }; | |
2676 | |
2677 switch (id) { | |
2678 case vmIntrinsics::_isInstance: | |
2679 nargs = 1+1; // the Class mirror, plus the object getting queried about | |
2680 // nothing is an instance of a primitive type | |
2681 prim_return_value = intcon(0); | |
2682 break; | |
2683 case vmIntrinsics::_getModifiers: | |
2684 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC); | |
2685 assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line"); | |
2686 return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin); | |
2687 break; | |
2688 case vmIntrinsics::_isInterface: | |
2689 prim_return_value = intcon(0); | |
2690 break; | |
2691 case vmIntrinsics::_isArray: | |
2692 prim_return_value = intcon(0); | |
2693 expect_prim = true; // cf. ObjectStreamClass.getClassSignature | |
2694 break; | |
2695 case vmIntrinsics::_isPrimitive: | |
2696 prim_return_value = intcon(1); | |
2697 expect_prim = true; // obviously | |
2698 break; | |
2699 case vmIntrinsics::_getSuperclass: | |
2700 prim_return_value = null(); | |
2701 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR); | |
2702 break; | |
2703 case vmIntrinsics::_getComponentType: | |
2704 prim_return_value = null(); | |
2705 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR); | |
2706 break; | |
2707 case vmIntrinsics::_getClassAccessFlags: | |
2708 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC); | |
2709 return_type = TypeInt::INT; // not bool! 6297094 | |
2710 break; | |
2711 default: | |
2712 ShouldNotReachHere(); | |
2713 } | |
2714 | |
2715 Node* mirror = argument(0); | |
2716 Node* obj = (nargs <= 1)? top(): argument(1); | |
2717 | |
2718 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr(); | |
2719 if (mirror_con == NULL) return false; // cannot happen? | |
2720 | |
2721 #ifndef PRODUCT | |
2722 if (PrintIntrinsics || PrintInlining || PrintOptoInlining) { | |
2723 ciType* k = mirror_con->java_mirror_type(); | |
2724 if (k) { | |
2725 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id())); | |
2726 k->print_name(); | |
2727 tty->cr(); | |
2728 } | |
2729 } | |
2730 #endif | |
2731 | |
2732 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive). | |
2733 RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT); | |
2734 record_for_igvn(region); | |
2735 PhiNode* phi = new (C, PATH_LIMIT) PhiNode(region, return_type); | |
2736 | |
2737 // The mirror will never be null for Reflection.getClassAccessFlags, however | |
2738 // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE | |
2739 // if it is. See bug 4774291. | |
2740 | |
2741 // For Reflection.getClassAccessFlags(), the null check occurs in | |
2742 // the wrong place; see inline_unsafe_access(), above, for a similar | |
2743 // situation. | |
2744 _sp += nargs; // set original stack for use by uncommon_trap | |
2745 mirror = do_null_check(mirror, T_OBJECT); | |
2746 _sp -= nargs; | |
2747 // If mirror or obj is dead, only null-path is taken. | |
2748 if (stopped()) return true; | |
2749 | |
2750 if (expect_prim) never_see_null = false; // expect nulls (meaning prims) | |
2751 | |
2752 // Now load the mirror's klass metaobject, and null-check it. | |
2753 // Side-effects region with the control path if the klass is null. | |
2754 Node* kls = load_klass_from_mirror(mirror, never_see_null, nargs, | |
2755 region, _prim_path); | |
2756 // If kls is null, we have a primitive mirror. | |
2757 phi->init_req(_prim_path, prim_return_value); | |
2758 if (stopped()) { push_result(region, phi); return true; } | |
2759 | |
2760 Node* p; // handy temp | |
2761 Node* null_ctl; | |
2762 | |
2763 // Now that we have the non-null klass, we can perform the real query. | |
2764 // For constant classes, the query will constant-fold in LoadNode::Value. | |
2765 Node* query_value = top(); | |
2766 switch (id) { | |
2767 case vmIntrinsics::_isInstance: | |
2768 // nothing is an instance of a primitive type | |
2769 query_value = gen_instanceof(obj, kls); | |
2770 break; | |
2771 | |
2772 case vmIntrinsics::_getModifiers: | |
2773 p = basic_plus_adr(kls, Klass::modifier_flags_offset_in_bytes() + sizeof(oopDesc)); | |
2774 query_value = make_load(NULL, p, TypeInt::INT, T_INT); | |
2775 break; | |
2776 | |
2777 case vmIntrinsics::_isInterface: | |
2778 // (To verify this code sequence, check the asserts in JVM_IsInterface.) | |
2779 if (generate_interface_guard(kls, region) != NULL) | |
2780 // A guard was added. If the guard is taken, it was an interface. | |
2781 phi->add_req(intcon(1)); | |
2782 // If we fall through, it's a plain class. | |
2783 query_value = intcon(0); | |
2784 break; | |
2785 | |
2786 case vmIntrinsics::_isArray: | |
2787 // (To verify this code sequence, check the asserts in JVM_IsArrayClass.) | |
2788 if (generate_array_guard(kls, region) != NULL) | |
2789 // A guard was added. If the guard is taken, it was an array. | |
2790 phi->add_req(intcon(1)); | |
2791 // If we fall through, it's a plain class. | |
2792 query_value = intcon(0); | |
2793 break; | |
2794 | |
2795 case vmIntrinsics::_isPrimitive: | |
2796 query_value = intcon(0); // "normal" path produces false | |
2797 break; | |
2798 | |
2799 case vmIntrinsics::_getSuperclass: | |
2800 // The rules here are somewhat unfortunate, but we can still do better | |
2801 // with random logic than with a JNI call. | |
2802 // Interfaces store null or Object as _super, but must report null. | |
2803 // Arrays store an intermediate super as _super, but must report Object. | |
2804 // Other types can report the actual _super. | |
2805 // (To verify this code sequence, check the asserts in JVM_IsInterface.) | |
2806 if (generate_interface_guard(kls, region) != NULL) | |
2807 // A guard was added. If the guard is taken, it was an interface. | |
2808 phi->add_req(null()); | |
2809 if (generate_array_guard(kls, region) != NULL) | |
2810 // A guard was added. If the guard is taken, it was an array. | |
2811 phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror()))); | |
2812 // If we fall through, it's a plain class. Get its _super. | |
2813 p = basic_plus_adr(kls, Klass::super_offset_in_bytes() + sizeof(oopDesc)); | |
2814 kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) ); |
0 | 2815 null_ctl = top(); |
2816 kls = null_check_oop(kls, &null_ctl); | |
2817 if (null_ctl != top()) { | |
2818 // If the guard is taken, Object.superClass is null (both klass and mirror). | |
2819 region->add_req(null_ctl); | |
2820 phi ->add_req(null()); | |
2821 } | |
2822 if (!stopped()) { | |
2823 query_value = load_mirror_from_klass(kls); | |
2824 } | |
2825 break; | |
2826 | |
2827 case vmIntrinsics::_getComponentType: | |
2828 if (generate_array_guard(kls, region) != NULL) { | |
2829 // Be sure to pin the oop load to the guard edge just created: | |
2830 Node* is_array_ctrl = region->in(region->req()-1); | |
2831 Node* cma = basic_plus_adr(kls, in_bytes(arrayKlass::component_mirror_offset()) + sizeof(oopDesc)); | |
2832 Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT); | |
2833 phi->add_req(cmo); | |
2834 } | |
2835 query_value = null(); // non-array case is null | |
2836 break; | |
2837 | |
2838 case vmIntrinsics::_getClassAccessFlags: | |
2839 p = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)); | |
2840 query_value = make_load(NULL, p, TypeInt::INT, T_INT); | |
2841 break; | |
2842 | |
2843 default: | |
2844 ShouldNotReachHere(); | |
2845 } | |
2846 | |
2847 // Fall-through is the normal case of a query to a real class. | |
2848 phi->init_req(1, query_value); | |
2849 region->init_req(1, control()); | |
2850 | |
2851 push_result(region, phi); | |
2852 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
2853 | |
2854 return true; | |
2855 } | |
2856 | |
2857 //--------------------------inline_native_subtype_check------------------------ | |
2858 // This intrinsic takes the JNI calls out of the heart of | |
2859 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc. | |
2860 bool LibraryCallKit::inline_native_subtype_check() { | |
2861 int nargs = 1+1; // the Class mirror, plus the other class getting examined | |
2862 | |
2863 // Pull both arguments off the stack. | |
2864 Node* args[2]; // two java.lang.Class mirrors: superc, subc | |
2865 args[0] = argument(0); | |
2866 args[1] = argument(1); | |
2867 Node* klasses[2]; // corresponding Klasses: superk, subk | |
2868 klasses[0] = klasses[1] = top(); | |
2869 | |
2870 enum { | |
2871 // A full decision tree on {superc is prim, subc is prim}: | |
2872 _prim_0_path = 1, // {P,N} => false | |
2873 // {P,P} & superc!=subc => false | |
2874 _prim_same_path, // {P,P} & superc==subc => true | |
2875 _prim_1_path, // {N,P} => false | |
2876 _ref_subtype_path, // {N,N} & subtype check wins => true | |
2877 _both_ref_path, // {N,N} & subtype check loses => false | |
2878 PATH_LIMIT | |
2879 }; | |
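// The Java-level contract being decided (a sketch):
//   superc.isAssignableFrom(subc) is true iff
//     - both mirrors are primitive and superc == subc (_prim_same_path), or
//     - neither is primitive and subc's klass is a subtype of superc's
//       klass (_ref_subtype_path);
//   all other combinations answer false.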
2880 | |
2881 RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT); | |
2882 Node* phi = new (C, PATH_LIMIT) PhiNode(region, TypeInt::BOOL); | |
2883 record_for_igvn(region); | |
2884 | |
2885 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads | |
2886 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL; | |
2887 int class_klass_offset = java_lang_Class::klass_offset_in_bytes(); | |
2888 | |
2889 // First null-check both mirrors and load each mirror's klass metaobject. | |
2890 int which_arg; | |
2891 for (which_arg = 0; which_arg <= 1; which_arg++) { | |
2892 Node* arg = args[which_arg]; | |
2893 _sp += nargs; // set original stack for use by uncommon_trap | |
2894 arg = do_null_check(arg, T_OBJECT); | |
2895 _sp -= nargs; | |
2896 if (stopped()) break; | |
2897 args[which_arg] = _gvn.transform(arg); | |
2898 | |
2899 Node* p = basic_plus_adr(arg, class_klass_offset); | |
2900 Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type); |
0 | 2901 klasses[which_arg] = _gvn.transform(kls); |
2902 } | |
2903 | |
2904 // Having loaded both klasses, test each for null. | |
2905 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); | |
2906 for (which_arg = 0; which_arg <= 1; which_arg++) { | |
2907 Node* kls = klasses[which_arg]; | |
2908 Node* null_ctl = top(); | |
2909 _sp += nargs; // set original stack for use by uncommon_trap | |
2910 kls = null_check_oop(kls, &null_ctl, never_see_null); | |
2911 _sp -= nargs; | |
2912 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path); | |
2913 region->init_req(prim_path, null_ctl); | |
2914 if (stopped()) break; | |
2915 klasses[which_arg] = kls; | |
2916 } | |
2917 | |
2918 if (!stopped()) { | |
2919 // now we have two reference types, in klasses[0..1] | |
2920 Node* subk = klasses[1]; // the argument to isAssignableFrom | |
2921 Node* superk = klasses[0]; // the receiver | |
2922 region->set_req(_both_ref_path, gen_subtype_check(subk, superk)); | |
2923 // now we have a successful reference subtype check | |
2924 region->set_req(_ref_subtype_path, control()); | |
2925 } | |
2926 | |
2927 // If both operands are primitive (both klasses null), then | |
2928 // we must return true when they are identical primitives. | |
2929 // It is convenient to test this after the first null klass check. | |
2930 set_control(region->in(_prim_0_path)); // go back to first null check | |
2931 if (!stopped()) { | |
2932 // Since superc is primitive, make a guard for the superc==subc case. | |
2933 Node* cmp_eq = _gvn.transform( new (C, 3) CmpPNode(args[0], args[1]) ); | |
2934 Node* bol_eq = _gvn.transform( new (C, 2) BoolNode(cmp_eq, BoolTest::eq) ); | |
2935 generate_guard(bol_eq, region, PROB_FAIR); | |
2936 if (region->req() == PATH_LIMIT+1) { | |
2937 // A guard was added. If the added guard is taken, superc==subc. | |
2938 region->swap_edges(PATH_LIMIT, _prim_same_path); | |
2939 region->del_req(PATH_LIMIT); | |
2940 } | |
2941 region->set_req(_prim_0_path, control()); // Not equal after all. | |
2942 } | |
2943 | |
2944 // these are the only paths that produce 'true': | |
2945 phi->set_req(_prim_same_path, intcon(1)); | |
2946 phi->set_req(_ref_subtype_path, intcon(1)); | |
2947 | |
2948 // pull together the cases: | |
2949 assert(region->req() == PATH_LIMIT, "sane region"); | |
2950 for (uint i = 1; i < region->req(); i++) { | |
2951 Node* ctl = region->in(i); | |
2952 if (ctl == NULL || ctl == top()) { | |
2953 region->set_req(i, top()); | |
2954 phi ->set_req(i, top()); | |
2955 } else if (phi->in(i) == NULL) { | |
2956 phi->set_req(i, intcon(0)); // all other paths produce 'false' | |
2957 } | |
2958 } | |
2959 | |
2960 set_control(_gvn.transform(region)); | |
2961 push(_gvn.transform(phi)); | |
2962 | |
2963 return true; | |
2964 } | |
2965 | |
2966 //---------------------generate_array_guard_common------------------------ | |
2967 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, | |
2968 bool obj_array, bool not_array) { | |
2969 // If obj_array/non_array==false/false: | |
2970 // Branch around if the given klass is in fact an array (either obj or prim). | |
2971 // If obj_array/non_array==false/true: | |
2972 // Branch around if the given klass is not an array klass of any kind. | |
2973 // If obj_array/non_array==true/true: | |
2974 // Branch around if the kls is not an oop array (kls is int[], String, etc.) | |
2975 // If obj_array/non_array==true/false: | |
2976 // Branch around if the kls is an oop array (Object[] or subtype) | |
2977 // | |
2978 // Like generate_guard, adds a new path onto the region. | |
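// The flag combinations correspond to the guard flavors used at call
// sites in this file (a sketch of the mapping):
//   generate_array_guard(kls, r)         obj_array=false, not_array=false
//   generate_non_array_guard(kls, r)     obj_array=false, not_array=true
//   generate_objArray_guard(kls, r)      obj_array=true,  not_array=false
//   generate_non_objArray_guard(kls, r)  obj_array=true,  not_array=true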
2979 jint layout_con = 0; | |
2980 Node* layout_val = get_layout_helper(kls, layout_con); | |
2981 if (layout_val == NULL) { | |
2982 bool query = (obj_array | |
2983 ? Klass::layout_helper_is_objArray(layout_con) | |
2984 : Klass::layout_helper_is_javaArray(layout_con)); | |
2985 if (query == not_array) { | |
2986 return NULL; // never a branch | |
2987 } else { // always a branch | |
2988 Node* always_branch = control(); | |
2989 if (region != NULL) | |
2990 region->add_req(always_branch); | |
2991 set_control(top()); | |
2992 return always_branch; | |
2993 } | |
2994 } | |
2995 // Now test the correct condition. | |
2996 jint nval = (obj_array | |
2997 ? ((jint)Klass::_lh_array_tag_type_value | |
2998 << Klass::_lh_array_tag_shift) | |
2999 : Klass::_lh_neutral_value); | |
3000 Node* cmp = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(nval)) ); | |
3001 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array | |
3002 // invert the test if we are looking for a non-array | |
3003 if (not_array) btest = BoolTest(btest).negate(); | |
3004 Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, btest) ); | |
3005 return generate_fair_guard(bol, region); | |
3006 } | |
3007 | |
3008 | |
3009 //-----------------------inline_native_newArray-------------------------- | |
3010 bool LibraryCallKit::inline_native_newArray() { | |
3011 int nargs = 2; | |
3012 Node* mirror = argument(0); | |
3013 Node* count_val = argument(1); | |
3014 | |
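// Java-level entry point served here (a sketch of the mapping; this
// intrinsic backs the reflective array-allocation path):
//
//   java.lang.reflect.Array.newInstance(componentType, length)
//
// The normal path uses the array klass cached in the java.lang.Class
// mirror; void.class or a not-yet-cached array klass takes the slow call.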
3015 _sp += nargs; // set original stack for use by uncommon_trap | |
3016 mirror = do_null_check(mirror, T_OBJECT); | |
3017 _sp -= nargs; | |
163 | 3018 // If mirror or obj is dead, only null-path is taken. |
3019 if (stopped()) return true; | |
0 | 3020 |
3021 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT }; | |
3022 RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); | |
3023 PhiNode* result_val = new(C, PATH_LIMIT) PhiNode(result_reg, | |
3024 TypeInstPtr::NOTNULL); | |
3025 PhiNode* result_io = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO); | |
3026 PhiNode* result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY, | |
3027 TypePtr::BOTTOM); | |
3028 | |
3029 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); | |
3030 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null, | |
3031 nargs, | |
3032 result_reg, _slow_path); | |
3033 Node* normal_ctl = control(); | |
3034 Node* no_array_ctl = result_reg->in(_slow_path); | |
3035 | |
3036 // Generate code for the slow case. We make a call to newArray(). | |
3037 set_control(no_array_ctl); | |
3038 if (!stopped()) { | |
3039 // Either the input type is void.class, or else the | |
3040 // array klass has not yet been cached. Either the | |
3041 // ensuing call will throw an exception, or else it | |
3042 // will cache the array klass for next time. | |
3043 PreserveJVMState pjvms(this); | |
3044 CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray); | |
3045 Node* slow_result = set_results_for_java_call(slow_call); | |
3046 // this->control() comes from set_results_for_java_call | |
3047 result_reg->set_req(_slow_path, control()); | |
3048 result_val->set_req(_slow_path, slow_result); | |
3049 result_io ->set_req(_slow_path, i_o()); | |
3050 result_mem->set_req(_slow_path, reset_memory()); | |
3051 } | |
3052 | |
3053 set_control(normal_ctl); | |
3054 if (!stopped()) { | |
3055 // Normal case: The array type has been cached in the java.lang.Class. | |
3056 // The following call works fine even if the array type is polymorphic. | |
3057 // It could be a dynamic mix of int[], boolean[], Object[], etc. | |
3058 Node* obj = new_array(klass_node, count_val, nargs); |
0 | 3059 result_reg->init_req(_normal_path, control()); |
3060 result_val->init_req(_normal_path, obj); | |
3061 result_io ->init_req(_normal_path, i_o()); | |
3062 result_mem->init_req(_normal_path, reset_memory()); | |
3063 } | |
3064 | |
3065 // Return the combined state. | |
3066 set_i_o( _gvn.transform(result_io) ); | |
3067 set_all_memory( _gvn.transform(result_mem) ); | |
3068 push_result(result_reg, result_val); | |
3069 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
3070 | |
3071 return true; | |
3072 } | |
3073 | |
3074 //----------------------inline_native_getLength-------------------------- | |
3075 bool LibraryCallKit::inline_native_getLength() { | |
3076 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; | |
3077 | |
3078 int nargs = 1; | |
3079 Node* array = argument(0); | |
3080 | |
3081 _sp += nargs; // set original stack for use by uncommon_trap | |
3082 array = do_null_check(array, T_OBJECT); | |
3083 _sp -= nargs; | |
3084 | |
3085 // If array is dead, only null-path is taken. | |
3086 if (stopped()) return true; | |
3087 | |
3088 // Deoptimize if it is a non-array. | |
3089 Node* non_array = generate_non_array_guard(load_object_klass(array), NULL); | |
3090 | |
3091 if (non_array != NULL) { | |
3092 PreserveJVMState pjvms(this); | |
3093 set_control(non_array); | |
3094 _sp += nargs; // push the arguments back on the stack | |
3095 uncommon_trap(Deoptimization::Reason_intrinsic, | |
3096 Deoptimization::Action_maybe_recompile); | |
3097 } | |
3098 | |
3099 // If control is dead, only non-array-path is taken. | |
3100 if (stopped()) return true; | |
3101 | |
3102 // This works fine even if the array type is polymorphic. | |
3103 // It could be a dynamic mix of int[], boolean[], Object[], etc. | |
3104 push( load_array_length(array) ); | |
3105 | |
3106 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
3107 | |
3108 return true; | |
3109 } | |
3110 | |
3111 //------------------------inline_array_copyOf---------------------------- | |
3112 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) { | |
3113 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; | |
3114 | |
3115 // Restore the stack and pop off the arguments. | |
3116 int nargs = 3 + (is_copyOfRange? 1: 0); | |
3117 Node* original = argument(0); | |
3118 Node* start = is_copyOfRange? argument(1): intcon(0); | |
3119 Node* end = is_copyOfRange? argument(2): argument(1); | |
3120 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2); | |
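// Java signatures handled here (a sketch, generics erased):
//   Arrays.copyOf(T[] original, int newLength, Class<? extends T[]> newType)
//   Arrays.copyOfRange(T[] original, int from, int to, Class<? extends T[]> newType)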
3121 | |
3122 _sp += nargs; // set original stack for use by uncommon_trap | |
3123 array_type_mirror = do_null_check(array_type_mirror, T_OBJECT); | |
3124 original = do_null_check(original, T_OBJECT); | |
3125 _sp -= nargs; | |
3126 | |
3127 // Check if a null path was taken unconditionally. | |
3128 if (stopped()) return true; | |
3129 | |
3130 Node* orig_length = load_array_length(original); | |
3131 | |
3132 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nargs, | |
3133 NULL, 0); | |
3134 _sp += nargs; // set original stack for use by uncommon_trap | |
3135 klass_node = do_null_check(klass_node, T_OBJECT); | |
3136 _sp -= nargs; | |
3137 | |
3138 RegionNode* bailout = new (C, 1) RegionNode(1); | |
3139 record_for_igvn(bailout); | |
3140 | |
3141 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc. | |
3142 // Bail out if that is so. | |
3143 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout); | |
3144 if (not_objArray != NULL) { | |
3145 // Improve the klass node's type from the new optimistic assumption: | |
3146 ciKlass* ak = ciArrayKlass::make(env()->Object_klass()); | |
3147 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/); | |
3148 Node* cast = new (C, 2) CastPPNode(klass_node, akls); | |
3149 cast->init_req(0, control()); | |
3150 klass_node = _gvn.transform(cast); | |
3151 } | |
3152 | |
3153 // Bail out if either start or end is negative. | |
3154 generate_negative_guard(start, bailout, &start); | |
3155 generate_negative_guard(end, bailout, &end); | |
3156 | |
3157 Node* length = end; | |
3158 if (_gvn.type(start) != TypeInt::ZERO) { | |
3159 length = _gvn.transform( new (C, 3) SubINode(end, start) ); | |
3160 } | |
3161 | |
3162 // Bail out if length is negative. | |
3163 // ...Not needed, since the new_array will throw the right exception. | |
3164 //generate_negative_guard(length, bailout, &length); | |
3165 | |
3166 if (bailout->req() > 1) { | |
3167 PreserveJVMState pjvms(this); | |
3168 set_control( _gvn.transform(bailout) ); | |
3169 _sp += nargs; // push the arguments back on the stack | |
3170 uncommon_trap(Deoptimization::Reason_intrinsic, | |
3171 Deoptimization::Action_maybe_recompile); | |
3172 } | |
3173 | |
3174 if (!stopped()) { | |
3175 // How many elements will we copy from the original? | |
3176 // The answer is MinI(orig_length - start, length). | |
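// Worked example: Arrays.copyOfRange(a, 5, 15) on a 12-element array has
// length = 10 and moved = min(12 - 5, 10) = 7; the remaining 3 slots of
// the new array keep the default zero/null values from new_array.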
3177 Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) ); | |
3178 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); | |
3179 | |
3180 Node* newcopy = new_array(klass_node, length, nargs); |
0 | 3181 |
3182 // Generate a direct call to the right arraycopy function(s). | |
3183 // We know the copy is disjoint but we might not know if the | |
3184 // oop stores need checking. | |
3185 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class). | |
3186 // This will fail a store-check if x contains any non-nulls. | |
3187 bool disjoint_bases = true; | |
3188 bool length_never_negative = true; | |
3189 generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT, | |
3190 original, start, newcopy, intcon(0), moved, | |
3191 nargs, disjoint_bases, length_never_negative); | |
3192 | |
3193 push(newcopy); | |
3194 } | |
3195 | |
3196 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
3197 | |
3198 return true; | |
3199 } | |
3200 | |
3201 | |
3202 //----------------------generate_virtual_guard--------------------------- | |
3203 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call. | |
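// Idea (a sketch): load klass->vtable[vtable_index].method() and compare
// it with the expected methodOop (e.g. Object.hashCode). If they differ,
// the receiver's class overrides the method and the slow call must be
// taken; if they match, the intrinsic fast path is valid.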
3204 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass, | |
3205 RegionNode* slow_region) { | |
3206 ciMethod* method = callee(); | |
3207 int vtable_index = method->vtable_index(); | |
3208 // Get the methodOop out of the appropriate vtable entry. | |
3209 int entry_offset = (instanceKlass::vtable_start_offset() + | |
3210 vtable_index*vtableEntry::size()) * wordSize + | |
3211 vtableEntry::method_offset_in_bytes(); | |
3212 Node* entry_addr = basic_plus_adr(obj_klass, entry_offset); | |
3213 Node* target_call = make_load(NULL, entry_addr, TypeInstPtr::NOTNULL, T_OBJECT); | |
3214 | |
3215 // Compare the target method with the expected method (e.g., Object.hashCode). | |
3216 const TypeInstPtr* native_call_addr = TypeInstPtr::make(method); | |
3217 | |
3218 Node* native_call = makecon(native_call_addr); | |
3219 Node* chk_native = _gvn.transform( new(C, 3) CmpPNode(target_call, native_call) ); | |
3220 Node* test_native = _gvn.transform( new(C, 2) BoolNode(chk_native, BoolTest::ne) ); | |
3221 | |
3222 return generate_slow_guard(test_native, slow_region); | |
3223 } | |
3224 | |
3225 //-----------------------generate_method_call---------------------------- | |
3226 // Use generate_method_call to make a slow-call to the real | |
3227 // method if the fast path fails. An alternative would be to | |
3228 // use a stub like OptoRuntime::slow_arraycopy_Java. | |
3229 // This only works for expanding the current library call, | |
3230 // not another intrinsic. (E.g., don't use this for making an | |
3231 // arraycopy call inside of the copyOf intrinsic.) | |
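// Typical call-site pattern for this helper, as used by the intrinsics
// in this file (a sketch):
//
//   CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_hashCode,
//                                                  is_virtual, is_static);
//   Node* slow_result = set_results_for_java_call(slow_call);
//   // then merge control(), i_o() and reset_memory() into the result region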
3232 CallJavaNode* | |
3233 LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) { | |
3234 // When compiling the intrinsic method itself, do not use this technique. | |
3235 guarantee(callee() != C->method(), "cannot make slow-call to self"); | |
3236 | |
3237 ciMethod* method = callee(); | |
3238 // ensure the JVMS we have will be correct for this call | |
3239 guarantee(method_id == method->intrinsic_id(), "must match"); | |
3240 | |
3241 const TypeFunc* tf = TypeFunc::make(method); | |
3242 int tfdc = tf->domain()->cnt(); | |
3243 CallJavaNode* slow_call; | |
3244 if (is_static) { | |
3245 assert(!is_virtual, ""); | |
3246 slow_call = new(C, tfdc) CallStaticJavaNode(tf, | |
3247 SharedRuntime::get_resolve_static_call_stub(), | |
3248 method, bci()); | |
3249 } else if (is_virtual) { | |
3250 null_check_receiver(method); | |
3251 int vtable_index = methodOopDesc::invalid_vtable_index; | |
3252 if (UseInlineCaches) { | |
3253 // Suppress the vtable call | |
3254 } else { | |
3255 // hashCode and clone are not miranda methods, | |
3256 // so the vtable index is fixed. | |
3257 // No need to use the linkResolver to get it. | |
3258 vtable_index = method->vtable_index(); | |
3259 } | |
3260 slow_call = new(C, tfdc) CallDynamicJavaNode(tf, | |
3261 SharedRuntime::get_resolve_virtual_call_stub(), | |
3262 method, vtable_index, bci()); | |
3263 } else { // neither virtual nor static: opt_virtual | |
3264 null_check_receiver(method); | |
3265 slow_call = new(C, tfdc) CallStaticJavaNode(tf, | |
3266 SharedRuntime::get_resolve_opt_virtual_call_stub(), | |
3267 method, bci()); | |
3268 slow_call->set_optimized_virtual(true); | |
3269 } | |
3270 set_arguments_for_java_call(slow_call); | |
3271 set_edges_for_java_call(slow_call); | |
3272 return slow_call; | |
3273 } | |
3274 | |
3275 | |
3276 //------------------------------inline_native_hashcode-------------------- | |
3277 // Build special case code for calls to hashCode on an object. | |
3278 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) { | |
3279 assert(is_static == callee()->is_static(), "correct intrinsic selection"); | |
3280 assert(!(is_virtual && is_static), "either virtual, special, or static"); | |
3281 | |
3282 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT }; | |
3283 | |
3284 RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); | |
3285 PhiNode* result_val = new(C, PATH_LIMIT) PhiNode(result_reg, | |
3286 TypeInt::INT); | |
3287 PhiNode* result_io = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO); | |
3288 PhiNode* result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY, | |
3289 TypePtr::BOTTOM); | |
3290 Node* obj = NULL; | |
3291 if (!is_static) { | |
3292 // Check for hashing null object | |
3293 obj = null_check_receiver(callee()); | |
3294 if (stopped()) return true; // unconditionally null | |
3295 result_reg->init_req(_null_path, top()); | |
3296 result_val->init_req(_null_path, top()); | |
3297 } else { | |
3298 // Do a null check, and return zero if null. | |
3299 // System.identityHashCode(null) == 0 | |
3300 obj = argument(0); | |
3301 Node* null_ctl = top(); | |
3302 obj = null_check_oop(obj, &null_ctl); | |
3303 result_reg->init_req(_null_path, null_ctl); | |
3304 result_val->init_req(_null_path, _gvn.intcon(0)); | |
3305 } | |
3306 | |
3307 // Unconditionally null? Then return right away. | |
3308 if (stopped()) { | |
3309 set_control( result_reg->in(_null_path) ); | |
3310 if (!stopped()) | |
3311 push( result_val ->in(_null_path) ); | |
3312 return true; | |
3313 } | |
3314 | |
3315 // After null check, get the object's klass. | |
3316 Node* obj_klass = load_object_klass(obj); | |
3317 | |
3318 // This call may be virtual (invokevirtual) or bound (invokespecial). | |
3319 // For each case we generate slightly different code. | |
3320 | |
3321 // We only go to the fast case code if we pass a number of guards. The | |
3322 // paths which do not pass are accumulated in the slow_region. | |
3323 RegionNode* slow_region = new (C, 1) RegionNode(1); | |
3324 record_for_igvn(slow_region); | |
3325 | |
3326 // If this is a virtual call, we generate a funny guard. We pull out | |
3327 // the vtable entry corresponding to hashCode() from the target object. | |
3328 // If the target method which we are calling happens to be the native | |
3329 // Object hashCode() method, we pass the guard. We do not need this | |
3330 // guard for non-virtual calls -- the callee is known to be the native | |
3331 // Object hashCode(). | |
3332 if (is_virtual) { | |
3333 generate_virtual_guard(obj_klass, slow_region); | |
3334 } | |
3335 | |
3336 // Get the header out of the object, use LoadMarkNode when available | |
3337 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); | |
3338 Node* header = make_load(NULL, header_addr, TypeRawPtr::BOTTOM, T_ADDRESS); | |
3339 header = _gvn.transform( new (C, 2) CastP2XNode(NULL, header) ); | |
3340 | |
3341 // Test the header to see if it is unlocked. | |
3342 Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place); | |
3343 Node *lmasked_header = _gvn.transform( new (C, 3) AndXNode(header, lock_mask) ); | |
3344 Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value); | |
3345 Node *chk_unlocked = _gvn.transform( new (C, 3) CmpXNode( lmasked_header, unlocked_val)); | |
3346 Node *test_unlocked = _gvn.transform( new (C, 2) BoolNode( chk_unlocked, BoolTest::ne) ); | |
3347 | |
3348 generate_slow_guard(test_unlocked, slow_region); | |
3349 | |
3350 // Get the hash value and check to see that it has been properly assigned. | |
3351 // We depend on hash_mask being at most 32 bits and avoid the use of | |
3352 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit | |
3353 // vm: see markOop.hpp. | |
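// The extraction being built, in C-like terms (a sketch):
//   hash = (int)(mark >> hash_shift) & hash_mask;
//   if (hash == no_hash)  -> slow path (hash not yet assigned)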
3354 Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask); | |
3355 Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift); | |
3356 Node *hshifted_header= _gvn.transform( new (C, 3) URShiftXNode(header, hash_shift) ); | |
3357 // This hack lets the hash bits live anywhere in the mark object now, as long | |
605 | 3358 // as the shift drops the relevant bits into the low 32 bits. Note that |
0 | 3359 // the Java spec says that hashCode is an int so there's no point in capturing |
3360 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build). | |
3361 hshifted_header = ConvX2I(hshifted_header); | |
3362 Node *hash_val = _gvn.transform( new (C, 3) AndINode(hshifted_header, hash_mask) ); | |
3363 | |
3364 Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash); | |
3365 Node *chk_assigned = _gvn.transform( new (C, 3) CmpINode( hash_val, no_hash_val)); | |
3366 Node *test_assigned = _gvn.transform( new (C, 2) BoolNode( chk_assigned, BoolTest::eq) ); | |
3367 | |
3368 generate_slow_guard(test_assigned, slow_region); | |
3369 | |
3370 Node* init_mem = reset_memory(); | |
3371 // fill in the rest of the null path: | |
3372 result_io ->init_req(_null_path, i_o()); | |
3373 result_mem->init_req(_null_path, init_mem); | |
3374 | |
3375 result_val->init_req(_fast_path, hash_val); | |
3376 result_reg->init_req(_fast_path, control()); | |
3377 result_io ->init_req(_fast_path, i_o()); | |
3378 result_mem->init_req(_fast_path, init_mem); | |
3379 | |
3380 // Generate code for the slow case. We make a call to hashCode(). | |
3381 set_control(_gvn.transform(slow_region)); | |
3382 if (!stopped()) { | |
3383 // No need for PreserveJVMState, because we're using up the present state. | |
3384 set_all_memory(init_mem); | |
3385 vmIntrinsics::ID hashCode_id = vmIntrinsics::_hashCode; | |
3386 if (is_static) hashCode_id = vmIntrinsics::_identityHashCode; | |
3387 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static); | |
3388 Node* slow_result = set_results_for_java_call(slow_call); | |
3389 // this->control() comes from set_results_for_java_call | |
3390 result_reg->init_req(_slow_path, control()); | |
3391 result_val->init_req(_slow_path, slow_result); | |
3392 result_io ->set_req(_slow_path, i_o()); | |
3393 result_mem ->set_req(_slow_path, reset_memory()); | |
3394 } | |
3395 | |
3396 // Return the combined state. | |
3397 set_i_o( _gvn.transform(result_io) ); | |
3398 set_all_memory( _gvn.transform(result_mem) ); | |
3399 push_result(result_reg, result_val); | |
3400 | |
3401 return true; | |
3402 } | |
3403 | |
3404 //---------------------------inline_native_getClass---------------------------- | |
605 | 3405 // Build special case code for calls to getClass on an object. |
0 | 3406 bool LibraryCallKit::inline_native_getClass() { |
3407 Node* obj = null_check_receiver(callee()); | |
3408 if (stopped()) return true; | |
3409 push( load_mirror_from_klass(load_object_klass(obj)) ); | |
3410 return true; | |
3411 } | |
3412 | |
3413 //-----------------inline_native_Reflection_getCallerClass--------------------- | |
3414 // In the presence of deep enough inlining, getCallerClass() becomes a no-op. | |
3415 // | |
3416 // NOTE that this code must perform the same logic as | |
3417 // vframeStream::security_get_caller_frame in that it must skip | |
3418 // Method.invoke() and auxiliary frames. | |
3419 | |
3420 | |
3421 | |
3422 | |
3423 bool LibraryCallKit::inline_native_Reflection_getCallerClass() { | |
3424 ciMethod* method = callee(); | |
3425 | |
3426 #ifndef PRODUCT | |
3427 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | |
3428 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass"); | |
3429 } | |
3430 #endif | |
3431 | |
3432 debug_only(int saved_sp = _sp); | |
3433 | |
3434 // Argument words: (int depth) | |
3435 int nargs = 1; | |
3436 | |
3437 _sp += nargs; | |
3438 Node* caller_depth_node = pop(); | |
3439 | |
3440 assert(saved_sp == _sp, "must have correct argument count"); | |
3441 | |
3442 // The depth value must be a constant in order for the runtime call | |
3443 // to be eliminated. | |
3444 const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int(); | |
3445 if (caller_depth_type == NULL || !caller_depth_type->is_con()) { | |
3446 #ifndef PRODUCT | |
3447 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | |
3448 tty->print_cr(" Bailing out because caller depth was not a constant"); | |
3449 } | |
3450 #endif | |
3451 return false; | |
3452 } | |
3453 // Note that the JVM state at this point does not include the | |
3454 // getCallerClass() frame which we are trying to inline. The | |
3455 // semantics of getCallerClass(), however, are that the "first" | |
3456 // frame is the getCallerClass() frame, so we subtract one from the | |
3457 // requested depth before continuing. We don't inline requests of | |
3458 // getCallerClass(0). | |
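// Worked example (a sketch): for a chain A() -> B() -> getCallerClass(2),
// caller_depth becomes 1 after the adjustment; the walk below skips one
// real frame (B) plus any Method.invoke()/MethodAccessorImpl frames and
// returns A.class.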
3459 int caller_depth = caller_depth_type->get_con() - 1; | |
3460 if (caller_depth < 0) { | |
3461 #ifndef PRODUCT | |
3462 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | |
3463 tty->print_cr(" Bailing out because caller depth was %d", caller_depth); | |
3464 } | |
3465 #endif | |
3466 return false; | |
3467 } | |
3468 | |
3469 if (!jvms()->has_method()) { | |
3470 #ifndef PRODUCT | |
3471 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | |
3472 tty->print_cr(" Bailing out because intrinsic was inlined at top level"); | |
3473 } | |
3474 #endif | |
3475 return false; | |
3476 } | |
3477 int _depth = jvms()->depth(); // cache call chain depth | |
3478 | |
3479 // Walk back up the JVM state to find the caller at the required | |
3480 // depth. NOTE that this code must perform the same logic as | |
3481 // vframeStream::security_get_caller_frame in that it must skip | |
3482 // Method.invoke() and auxiliary frames. Note also that depth is | |
3483 // 1-based (1 is the bottom of the inlining). | |
3484 int inlining_depth = _depth; | |
3485 JVMState* caller_jvms = NULL; | |
3486 | |
3487 if (inlining_depth > 0) { | |
3488 caller_jvms = jvms(); | |
3489 assert(caller_jvms == jvms()->of_depth(inlining_depth), "inlining_depth == our depth"); | |
3490 do { | |
3491 // The following if-tests should be performed in this order | |
3492 if (is_method_invoke_or_aux_frame(caller_jvms)) { | |
3493 // Skip a Method.invoke() or auxiliary frame | |
3494 } else if (caller_depth > 0) { | |
3495 // Skip real frame | |
3496 --caller_depth; | |
3497 } else { | |
3498 // We're done: reached desired caller after skipping. | |
3499 break; | |
3500 } | |
3501 caller_jvms = caller_jvms->caller(); | |
3502 --inlining_depth; | |
3503 } while (inlining_depth > 0); | |
3504 } | |
3505 | |
3506 if (inlining_depth == 0) { | |
3507 #ifndef PRODUCT | |
3508 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | |
3509 tty->print_cr(" Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth); | |
3510 tty->print_cr(" JVM state at this point:"); | |
3511 for (int i = _depth; i >= 1; i--) { | |
3512 tty->print_cr(" %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8()); | |
3513 } | |
3514 } | |
3515 #endif | |
3516 return false; // Reached end of inlining | |
3517 } | |
3518 | |
3519 // Acquire method holder as java.lang.Class | |
3520 ciInstanceKlass* caller_klass = caller_jvms->method()->holder(); | |
3521 ciInstance* caller_mirror = caller_klass->java_mirror(); | |
3522 // Push this as a constant | |
3523 push(makecon(TypeInstPtr::make(caller_mirror))); | |
3524 #ifndef PRODUCT | |
3525 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | |
3526 tty->print_cr(" Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth); | |
3527 tty->print_cr(" JVM state at this point:"); | |
3528 for (int i = _depth; i >= 1; i--) { | |
3529 tty->print_cr(" %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8()); | |
3530 } | |
3531 } | |
3532 #endif | |
3533 return true; | |
3534 } | |
3535 | |
3536 // Helper routine for above | |
3537 bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) { | |
3538 // Is this the Method.invoke method itself? | |
3539 if (jvms->method()->intrinsic_id() == vmIntrinsics::_invoke) | |
3540 return true; | |
3541 | |
3542 // Is this a helper defined somewhere underneath MethodAccessorImpl? | |
3543 ciKlass* k = jvms->method()->holder(); | |
3544 if (k->is_instance_klass()) { | |
3545 ciInstanceKlass* ik = k->as_instance_klass(); | |
3546 for (; ik != NULL; ik = ik->super()) { | |
3547 if (ik->name() == ciSymbol::sun_reflect_MethodAccessorImpl() && | |
3548 ik == env()->find_system_klass(ik->name())) { | |
3549 return true; | |
3550 } | |
3551 } | |
3552 } | |
3553 | |
3554 return false; | |
3555 } | |
3556 | |
3557 static int value_field_offset = -1; // offset of the "value" field of AtomicLongCSImpl. This is needed by | |
3558 // inline_native_AtomicLong_attemptUpdate() but it has no way of | |
3559 // computing it since there is no lookup field by name function in the | |
3560 // CI interface. This is computed and set by inline_native_AtomicLong_get(). | |
3561 // Using a static variable here is safe even if we have multiple compilation | |
3562 // threads because the offset is constant. At worst the same offset will be | |
3563 // computed and stored multiple times. | |
3564 | |
3565 bool LibraryCallKit::inline_native_AtomicLong_get() { | |
3566 // Restore the stack and pop off the argument | |
3567 _sp+=1; | |
3568 Node *obj = pop(); | |
3569 | |
3570 // Get the offset of the "value" field. Since the CI interface | |
3571 // does not provide a way to look up a field by name, we scan the bytecodes | |
3572 // to get the field index. We expect the first 2 instructions of the method | |
3573 // to be: | |
3574 // 0 aload_0 | |
3575 // 1 getfield "value" | |
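// i.e. the expected shape of AtomicLongCSImpl.get() (a sketch; the class
// is not visible through the CI, hence the bytecode scan):
//
//   public long get() { return value; }   // aload_0; getfield value; lreturn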
3576 ciMethod* method = callee(); | |
3577 if (value_field_offset == -1) | |
3578 { | |
3579 ciField* value_field; | |
3580 ciBytecodeStream iter(method); | |
3581 Bytecodes::Code bc = iter.next(); | |
3582 | |
3583 if ((bc != Bytecodes::_aload_0) && | |
3584 ((bc != Bytecodes::_aload) || (iter.get_index() != 0))) | |
3585 return false; | |
3586 bc = iter.next(); | |
3587 if (bc != Bytecodes::_getfield) | |
3588 return false; | |
3589 bool ignore; | |
3590 value_field = iter.get_field(ignore); | |
3591 value_field_offset = value_field->offset_in_bytes(); | |
3592 } | |
3593 | |
3594 // Null check without removing any arguments. | |
3595 _sp++; | |
3596 obj = do_null_check(obj, T_OBJECT); | |
3597 _sp--; | |
3598 // Check for a null object | |
3599 if (stopped()) return true; | |
3600 | |
3601 Node *adr = basic_plus_adr(obj, obj, value_field_offset); | |
3602 const TypePtr *adr_type = _gvn.type(adr)->is_ptr(); | |
3603 int alias_idx = C->get_alias_index(adr_type); | |
3604 | |
3605 Node *result = _gvn.transform(new (C, 3) LoadLLockedNode(control(), memory(alias_idx), adr)); | |
3606 | |
3607 push_pair(result); | |
3608 | |
3609 return true; | |
3610 } | |
3611 | |
3612 bool LibraryCallKit::inline_native_AtomicLong_attemptUpdate() { | |
3613 // Restore the stack and pop off the arguments | |
3614 _sp+=5; | |
3615 Node *newVal = pop_pair(); | |
3616 Node *oldVal = pop_pair(); | |
3617 Node *obj = pop(); | |
3618 | |
3619 // we need the offset of the "value" field which was computed when | |
3620 // inlining the get() method. Give up if we don't have it. | |
3621 if (value_field_offset == -1) | |
3622 return false; | |
3623 | |
3624 // Null check without removing any arguments. | |
3625 _sp+=5; | |
3626 obj = do_null_check(obj, T_OBJECT); | |
3627 _sp-=5; | |
3628 // Check for a null object | |
3629 if (stopped()) return true; | |
3630 | |
3631 Node *adr = basic_plus_adr(obj, obj, value_field_offset); | |
3632 const TypePtr *adr_type = _gvn.type(adr)->is_ptr(); | |
3633 int alias_idx = C->get_alias_index(adr_type); | |
3634 | |
3635 Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal)); |
3636 Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas)); |
0 | 3637 set_memory(store_proj, alias_idx); |
420 | 3638 Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) ); |
3639 |
3640 Node *result; |
3641 // A CMove node is not used here, so that any check code following the |
3642 // attemptUpdate() call can still be folded. Loop optimizations may later |
3643 // transform this diamond into a CMove node. |
3644 { |
3645 RegionNode *r = new (C, 3) RegionNode(3); |
3646 result = new (C, 3) PhiNode(r, TypeInt::BOOL); |
3647 |
3648 Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN); |
3649 Node *iftrue = opt_iff(r, iff); |
3650 r->init_req(1, iftrue); |
3651 result->init_req(1, intcon(1)); |
3652 result->init_req(2, intcon(0)); |
3653 |
3654 set_control(_gvn.transform(r)); |
3655 record_for_igvn(r); |
3656 |
3657 C->set_has_split_ifs(true); // Has chance for split-if optimization |
3658 } |
3659 |
3660 push(_gvn.transform(result)); |
0 | 3661 return true; |
3662 } | |
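// In sketch form, the ideal subgraph built above for attemptUpdate() is:
//
//   cas  = StoreLConditional(ctrl, mem, adr, newVal, oldVal)  // CAS on the field
//   mem' = SCMemProj(cas)                                     // memory effect of the CAS
//   bol  = Bool(cas, BoolTest::eq)                            // success flag
//   result = bol ? 1 : 0                                      // via the Region/Phi diamond above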
3663 | |
3664 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) { | |
3665 // restore the arguments | |
3666 _sp += arg_size(); | |
3667 | |
3668 switch (id) { | |
3669 case vmIntrinsics::_floatToRawIntBits: | |
3670 push(_gvn.transform( new (C, 2) MoveF2INode(pop()))); | |
3671 break; | |
3672 | |
3673 case vmIntrinsics::_intBitsToFloat: | |
3674 push(_gvn.transform( new (C, 2) MoveI2FNode(pop()))); | |
3675 break; | |
3676 | |
3677 case vmIntrinsics::_doubleToRawLongBits: | |
3678 push_pair(_gvn.transform( new (C, 2) MoveD2LNode(pop_pair()))); | |
3679 break; | |
3680 | |
3681 case vmIntrinsics::_longBitsToDouble: | |
3682 push_pair(_gvn.transform( new (C, 2) MoveL2DNode(pop_pair()))); | |
3683 break; | |
3684 | |
3685 case vmIntrinsics::_doubleToLongBits: { | |
3686 Node* value = pop_pair(); | |
3687 | |
3688 // two paths (plus control) merge in a wood | |
3689 RegionNode *r = new (C, 3) RegionNode(3); | |
3690 Node *phi = new (C, 3) PhiNode(r, TypeLong::LONG); | |
3691 | |
3692 Node *cmpisnan = _gvn.transform( new (C, 3) CmpDNode(value, value)); | |
3693 // Build the boolean node | |
3694 Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) ); | |
3695 | |
3696 // Branch either way. | |
3697 // NaN case is less traveled, which makes all the difference. | |
3698 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); | |
3699 Node *opt_isnan = _gvn.transform(ifisnan); | |
3700 assert( opt_isnan->is_If(), "Expect an IfNode"); | |
3701 IfNode *opt_ifisnan = (IfNode*)opt_isnan; | |
3702 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) ); | |
3703 | |
3704 set_control(iftrue); | |
3705 | |
3706 static const jlong nan_bits = CONST64(0x7ff8000000000000); | |
3707 Node *slow_result = longcon(nan_bits); // return NaN | |
3708 phi->init_req(1, _gvn.transform( slow_result )); | |
3709 r->init_req(1, iftrue); | |
3710 | |
3711 // Else fall through | |
3712 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) ); | |
3713 set_control(iffalse); | |
3714 | |
3715 phi->init_req(2, _gvn.transform( new (C, 2) MoveD2LNode(value))); | |
3716 r->init_req(2, iffalse); | |
3717 | |
3718 // Post merge | |
3719 set_control(_gvn.transform(r)); | |
3720 record_for_igvn(r); | |
3721 | |
3722 Node* result = _gvn.transform(phi); | |
3723 assert(result->bottom_type()->isa_long(), "must be"); | |
3724 push_pair(result); | |
3725 | |
3726 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
3727 | |
3728 break; | |
3729 } | |
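// Worked example of the NaN canonicalization above: doubleToLongBits must
// collapse every NaN to the single pattern 0x7ff8000000000000, whereas
// doubleToRawLongBits (the plain MoveD2L case above) preserves the payload.
// For a hypothetical NaN with raw bits 0x7ff8000000000001, the raw variant
// returns those bits unchanged, while this path returns nan_bits.
// Non-NaN values fall through to the MoveD2L on the other branch.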
3730 | |
3731 case vmIntrinsics::_floatToIntBits: { | |
3732 Node* value = pop(); | |
3733 | |
3734 // two paths (plus control) merge in a wood | |
3735 RegionNode *r = new (C, 3) RegionNode(3); | |
3736 Node *phi = new (C, 3) PhiNode(r, TypeInt::INT); | |
3737 | |
3738 Node *cmpisnan = _gvn.transform( new (C, 3) CmpFNode(value, value)); | |
3739 // Build the boolean node | |
3740 Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) ); | |
3741 | |
3742 // Branch either way. | |
3743 // NaN case is less traveled, which makes all the difference. | |
3744 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); | |
3745 Node *opt_isnan = _gvn.transform(ifisnan); | |
3746 assert( opt_isnan->is_If(), "Expect an IfNode"); | |
3747 IfNode *opt_ifisnan = (IfNode*)opt_isnan; | |
3748 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) ); | |
3749 | |
3750 set_control(iftrue); | |
3751 | |
3752 static const jint nan_bits = 0x7fc00000; | |
3753 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN | |
3754 phi->init_req(1, _gvn.transform( slow_result )); | |
3755 r->init_req(1, iftrue); | |
3756 | |
3757 // Else fall through | |
3758 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) ); | |
3759 set_control(iffalse); | |
3760 | |
3761 phi->init_req(2, _gvn.transform( new (C, 2) MoveF2INode(value))); | |
3762 r->init_req(2, iffalse); | |
3763 | |
3764 // Post merge | |
3765 set_control(_gvn.transform(r)); | |
3766 record_for_igvn(r); | |
3767 | |
3768 Node* result = _gvn.transform(phi); | |
3769 assert(result->bottom_type()->isa_int(), "must be"); | |
3770 push(result); | |
3771 | |
3772 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
3773 | |
3774 break; | |
3775 } | |
3776 | |
3777 default: | |
3778 ShouldNotReachHere(); | |
3779 } | |
3780 | |
3781 return true; | |
3782 } | |
3783 | |
3784 #ifdef _LP64 | |
3785 #define XTOP ,top() /*additional argument*/ | |
3786 #else //_LP64 | |
3787 #define XTOP /*no additional argument*/ | |
3788 #endif //_LP64 | |
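// XTOP supplies the top() operand that 64-bit builds expect as the second
// half of a long/pointer-sized call argument. A sketch of its expansion
// (as used in the runtime call below):
//
//   make_runtime_call(..., src, dst, size XTOP);
//   //   LP64:  ..., src, dst, size, top()
//   //   ILP32: ..., src, dst, size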
3789 | |
3790 //----------------------inline_unsafe_copyMemory------------------------- | |
3791 bool LibraryCallKit::inline_unsafe_copyMemory() { | |
3792 if (callee()->is_static()) return false; // caller must have the capability! | |
3793 int nargs = 1 + 5 + 3; // receiver + 5 args + 3 second halves of longs: (src: ptr,off, dst: ptr,off, size) | |
3794 assert(signature()->size() == nargs-1, "copy has 5 arguments"); | |
3795 null_check_receiver(callee()); // check then ignore argument(0) | |
3796 if (stopped()) return true; | |
3797 | |
3798 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | |
3799 | |
3800 Node* src_ptr = argument(1); | |
3801 Node* src_off = ConvL2X(argument(2)); | |
3802 assert(argument(3)->is_top(), "2nd half of long"); | |
3803 Node* dst_ptr = argument(4); | |
3804 Node* dst_off = ConvL2X(argument(5)); | |
3805 assert(argument(6)->is_top(), "2nd half of long"); | |
3806 Node* size = ConvL2X(argument(7)); | |
3807 assert(argument(8)->is_top(), "2nd half of long"); | |
3808 | |
3809 assert(Unsafe_field_offset_to_byte_offset(11) == 11, | |
3810 "fieldOffset must be byte-scaled"); | |
3811 | |
3812 Node* src = make_unsafe_address(src_ptr, src_off); | |
3813 Node* dst = make_unsafe_address(dst_ptr, dst_off); | |
3814 | |
3815 // Conservatively insert a memory barrier on all memory slices. | |
3816 // Do not let writes of the copy source or destination float below the copy. | |
3817 insert_mem_bar(Op_MemBarCPUOrder); | |
3818 | |
3819 // Call it. Note that the length argument is not scaled. | |
3820 make_runtime_call(RC_LEAF|RC_NO_FP, | |
3821 OptoRuntime::fast_arraycopy_Type(), | |
3822 StubRoutines::unsafe_arraycopy(), | |
3823 "unsafe_arraycopy", | |
3824 TypeRawPtr::BOTTOM, | |
3825 src, dst, size XTOP); | |
3826 | |
3827 // Do not let reads of the copy destination float above the copy. | |
3828 insert_mem_bar(Op_MemBarCPUOrder); | |
3829 | |
3830 return true; | |
3831 } | |
3832 | |
3833 | |
3834 //------------------------inline_native_clone---------------------------- | |
3835 // Here are the simple edge cases: | |
3836 // null receiver => normal trap | |
3837 // virtual and clone was overridden => slow path to out-of-line clone | |
3838 // not cloneable or finalizer => slow path to out-of-line Object.clone | |
3839 // | |
3840 // The general case has two steps, allocation and copying. | |
3841 // Allocation has two cases, and uses GraphKit::new_instance or new_array. | |
3842 // | |
3843 // Copying also has two cases, oop arrays and everything else. | |
3844 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy). | |
3845 // Everything else uses the tight inline loop supplied by CopyArrayNode. | |
3846 // | |
3847 // These steps fold up nicely if and when the cloned object's klass | |
3848 // can be sharply typed as an object array, a type array, or an instance. | |
3849 // | |
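// For example (illustrative): cloning an int[] takes the type-array
// allocation plus the block-copy fast path; cloning a String[] (when card
// marks are required) takes the object-array path via arrayof_oop_arraycopy;
// and a receiver whose class overrides clone(), reached through a virtual
// call, falls back to the out-of-line slow path.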
3850 bool LibraryCallKit::inline_native_clone(bool is_virtual) { | |
3851 int nargs = 1; | |
3852 Node* obj = null_check_receiver(callee()); | |
3853 if (stopped()) return true; | |
3854 Node* obj_klass = load_object_klass(obj); | |
3855 const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr(); | |
3856 const TypeOopPtr* toop = ((tklass != NULL) | |
3857 ? tklass->as_instance_type() | |
3858 : TypeInstPtr::NOTNULL); | |
3859 | |
3860 // Conservatively insert a memory barrier on all memory slices. | |
3861 // Do not let writes into the original float below the clone. | |
3862 insert_mem_bar(Op_MemBarCPUOrder); | |
3863 | |
3864 // paths into result_reg: | |
3865 enum { | |
3866 _slow_path = 1, // out-of-line call to clone method (virtual or not) | |
3867 _objArray_path, // plain allocation, plus arrayof_oop_arraycopy | |
3868 _fast_path, // plain allocation, plus a CopyArray operation | |
3869 PATH_LIMIT | |
3870 }; | |
3871 RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); | |
3872 PhiNode* result_val = new(C, PATH_LIMIT) PhiNode(result_reg, | |
3873 TypeInstPtr::NOTNULL); | |
3874 PhiNode* result_i_o = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO); | |
3875 PhiNode* result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY, | |
3876 TypePtr::BOTTOM); | |
3877 record_for_igvn(result_reg); | |
3878 | |
3879 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; | |
3880 int raw_adr_idx = Compile::AliasIdxRaw; | |
3881 const bool raw_mem_only = true; | |
3882 | |
3883 // paths into alloc_reg (on the fast path, just before the CopyArray): | |
3884 enum { _typeArray_alloc = 1, _instance_alloc, ALLOC_LIMIT }; | |
3885 RegionNode* alloc_reg = new(C, ALLOC_LIMIT) RegionNode(ALLOC_LIMIT); | |
3886 PhiNode* alloc_val = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, raw_adr_type); | |
3887 PhiNode* alloc_siz = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, TypeX_X); | |
3888 PhiNode* alloc_i_o = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, Type::ABIO); | |
3889 PhiNode* alloc_mem = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, Type::MEMORY, | |
3890 raw_adr_type); | |
3891 record_for_igvn(alloc_reg); | |
3892 | |
3893 bool card_mark = false; // (see below) | |
3894 | |
3895 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL); | |
3896 if (array_ctl != NULL) { | |
3897 // It's an array. | |
3898 PreserveJVMState pjvms(this); | |
3899 set_control(array_ctl); | |
3900 Node* obj_length = load_array_length(obj); | |
3901 Node* obj_size = NULL; | |
730 | 3902 Node* alloc_obj = new_array(obj_klass, obj_length, nargs, |
0 | 3903 raw_mem_only, &obj_size); |
3904 assert(obj_size != NULL, ""); | |
3905 Node* raw_obj = alloc_obj->in(1); | |
3906 assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), ""); | |
3907 if (ReduceBulkZeroing) { | |
3908 AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn); | |
3909 if (alloc != NULL) { | |
3910 // We will be completely responsible for initializing this object. | |
3911 alloc->maybe_set_complete(&_gvn); | |
3912 } | |
3913 } | |
3914 | |
3915 if (!use_ReduceInitialCardMarks()) { | |
3916 // If it is an oop array, it requires very special treatment, | |
3917 // because card marking is required on each card of the array. | |
3918 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); | |
3919 if (is_obja != NULL) { | |
3920 PreserveJVMState pjvms2(this); | |
3921 set_control(is_obja); | |
3922 // Generate a direct call to the right arraycopy function(s). | |
3923 bool disjoint_bases = true; | |
3924 bool length_never_negative = true; | |
3925 generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT, | |
3926 obj, intcon(0), alloc_obj, intcon(0), | |
3927 obj_length, nargs, | |
3928 disjoint_bases, length_never_negative); | |
3929 result_reg->init_req(_objArray_path, control()); | |
3930 result_val->init_req(_objArray_path, alloc_obj); | |
3931 result_i_o ->set_req(_objArray_path, i_o()); | |
3932 result_mem ->set_req(_objArray_path, reset_memory()); | |
3933 } | |
3934 } | |
3935 // We can dispense with card marks if we know the allocation | |
3936 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks | |
3937 // causes the non-eden paths to simulate a fresh allocation, | |
3938 // insofar as no further card marks are required to initialize | |
3939 // the object. | |
3940 | |
3941 // Otherwise, there are no card marks to worry about. | |
3942 alloc_val->init_req(_typeArray_alloc, raw_obj); | |
3943 alloc_siz->init_req(_typeArray_alloc, obj_size); | |
3944 alloc_reg->init_req(_typeArray_alloc, control()); | |
3945 alloc_i_o->init_req(_typeArray_alloc, i_o()); | |
3946 alloc_mem->init_req(_typeArray_alloc, memory(raw_adr_type)); | |
3947 } | |
3948 | |
3949 // We only go to the fast case code if we pass a number of guards. | |
3950 // The paths which do not pass are accumulated in the slow_region. | |
3951 RegionNode* slow_region = new (C, 1) RegionNode(1); | |
3952 record_for_igvn(slow_region); | |
3953 if (!stopped()) { | |
3954 // It's an instance. Make the slow-path tests. | |
3955 // If this is a virtual call, we generate a funny guard. We grab | |
3956 // the vtable entry corresponding to clone() from the target object. | |
3957 // If the target method which we are calling happens to be the | |
3958 // Object clone() method, we pass the guard. We do not need this | |
3959 // guard for non-virtual calls; the caller is known to be the native | |
3960 // Object clone(). | |
3961 if (is_virtual) { | |
3962 generate_virtual_guard(obj_klass, slow_region); | |
3963 } | |
3964 | |
3965 // The object must be cloneable and must not have a finalizer. | |
3966 // Both of these conditions may be checked in a single test. | |
3967 // We could optimize the cloneable test further, but we don't care. | |
3968 generate_access_flags_guard(obj_klass, | |
3969 // Test both conditions: | |
3970 JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER, | |
3971 // Must be cloneable but not finalizer: | |
3972 JVM_ACC_IS_CLONEABLE, | |
3973 slow_region); | |
3974 } | |
3975 | |
3976 if (!stopped()) { | |
3977 // It's an instance, and it passed the slow-path tests. | |
3978 PreserveJVMState pjvms(this); | |
3979 Node* obj_size = NULL; | |
3980 Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size); | |
3981 assert(obj_size != NULL, ""); | |
3982 Node* raw_obj = alloc_obj->in(1); | |
3983 assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), ""); | |
3984 if (ReduceBulkZeroing) { | |
3985 AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn); | |
3986 if (alloc != NULL && !alloc->maybe_set_complete(&_gvn)) | |
3987 alloc = NULL; | |
3988 } | |
3989 if (!use_ReduceInitialCardMarks()) { | |
3990 // Put in store barrier for any and all oops we are sticking | |
3991 // into this object. (We could avoid this if we could prove | |
3992 // that the object type contains no oop fields at all.) | |
3993 card_mark = true; | |
3994 } | |
3995 alloc_val->init_req(_instance_alloc, raw_obj); | |
3996 alloc_siz->init_req(_instance_alloc, obj_size); | |
3997 alloc_reg->init_req(_instance_alloc, control()); | |
3998 alloc_i_o->init_req(_instance_alloc, i_o()); | |
3999 alloc_mem->init_req(_instance_alloc, memory(raw_adr_type)); | |
4000 } | |
4001 | |
4002 // Generate code for the slow case. We make a call to clone(). | |
4003 set_control(_gvn.transform(slow_region)); | |
4004 if (!stopped()) { | |
4005 PreserveJVMState pjvms(this); | |
4006 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual); | |
4007 Node* slow_result = set_results_for_java_call(slow_call); | |
4008 // this->control() comes from set_results_for_java_call | |
4009 result_reg->init_req(_slow_path, control()); | |
4010 result_val->init_req(_slow_path, slow_result); | |
4011 result_i_o ->set_req(_slow_path, i_o()); | |
4012 result_mem ->set_req(_slow_path, reset_memory()); | |
4013 } | |
4014 | |
4015 // The object is allocated, as an array and/or an instance. Now copy it. | |
4016 set_control( _gvn.transform(alloc_reg) ); | |
4017 set_i_o( _gvn.transform(alloc_i_o) ); | |
4018 set_memory( _gvn.transform(alloc_mem), raw_adr_type ); | |
4019 Node* raw_obj = _gvn.transform(alloc_val); | |
4020 | |
4021 if (!stopped()) { | |
4022 // Copy the fastest available way. | |
4023 // (No need for PreserveJVMState, since we're using it all up now.) | |
163 | 4024 // TODO: generate fields/elements copies for small objects instead. |
0 | 4025 Node* src = obj; |
4026 Node* dest = raw_obj; | |
4027 Node* size = _gvn.transform(alloc_siz); | |
4028 | |
4029 // Exclude the header. | |
113 | 4030 int base_off = instanceOopDesc::base_offset_in_bytes(); |
4031 if (UseCompressedOops) { |
163 | 4032 assert(base_off % BytesPerLong != 0, "base with compressed oops"); |
4033 // With compressed oops, base_offset_in_bytes is 12, which creates a | |
4034 // gap, since countx is rounded to a multiple of 8 bytes below. | |
4035 // Copy klass and the gap. | |
4036 base_off = instanceOopDesc::klass_offset_in_bytes(); | |
113 | 4037 } |
0 | 4038 src = basic_plus_adr(src, base_off); |
4039 dest = basic_plus_adr(dest, base_off); | |
4040 | |
4041 // Compute the length also, if needed: | |
4042 Node* countx = size; | |
4043 countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) ); | |
4044 countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) )); | |
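// Worked example of countx (sizes are illustrative): for an instance of
// 32 bytes with base_off == 16, countx = (32 - 16) >> LogBytesPerLong == 2,
// so the copy below moves two 64-bit words of body data.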
4045 | |
4046 // Select an appropriate instruction to initialize the range. | |
4047 // The CopyArray instruction (if supported) can be optimized | |
4048 // into a discrete set of scalar loads and stores. | |
4049 bool disjoint_bases = true; | |
4050 generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases, | |
4051 src, NULL, dest, NULL, countx); | |
4052 | |
4053 // Now that the object is properly initialized, type it as an oop. | |
4054 // Use a secondary InitializeNode memory barrier. | |
4055 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, raw_adr_idx, | |
4056 raw_obj)->as_Initialize(); | |
4057 init->set_complete(&_gvn); // (there is no corresponding AllocateNode) | |
4058 Node* new_obj = new(C, 2) CheckCastPPNode(control(), raw_obj, | |
4059 TypeInstPtr::NOTNULL); | |
4060 new_obj = _gvn.transform(new_obj); | |
4061 | |
4062 // If necessary, emit some card marks afterwards. (Non-arrays only.) | |
4063 if (card_mark) { | |
4064 Node* no_particular_value = NULL; | |
4065 Node* no_particular_field = NULL; | |
4066 post_barrier(control(), | |
4067 memory(raw_adr_type), | |
4068 new_obj, | |
4069 no_particular_field, | |
4070 raw_adr_idx, | |
4071 no_particular_value, | |
4072 T_OBJECT, | |
4073 false); | |
4074 } | |
4075 // Present the results of the fast path. | |
4076 result_reg->init_req(_fast_path, control()); | |
4077 result_val->init_req(_fast_path, new_obj); | |
4078 result_i_o ->set_req(_fast_path, i_o()); | |
4079 result_mem ->set_req(_fast_path, reset_memory()); | |
4080 } | |
4081 | |
4082 // Return the combined state. | |
4083 set_control( _gvn.transform(result_reg) ); | |
4084 set_i_o( _gvn.transform(result_i_o) ); | |
4085 set_all_memory( _gvn.transform(result_mem) ); | |
4086 | |
4087 // Cast the result to a sharper type, since we know what clone does. | |
4088 Node* new_obj = _gvn.transform(result_val); | |
4089 Node* cast = new (C, 2) CheckCastPPNode(control(), new_obj, toop); | |
4090 push(_gvn.transform(cast)); | |
4091 | |
4092 return true; | |
4093 } | |
4094 | |
4095 | |
4096 // constants for computing the copy function | |
4097 enum { | |
4098 COPYFUNC_UNALIGNED = 0, | |
4099 COPYFUNC_ALIGNED = 1, // src, dest aligned to HeapWordSize | |
4100 COPYFUNC_CONJOINT = 0, | |
4101 COPYFUNC_DISJOINT = 2 // src != dest, or transfer can descend | |
4102 }; | |
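// Example of the selector encoding (see select_arraycopy_function below):
// an aligned, disjoint int copy yields selector
// COPYFUNC_ALIGNED + COPYFUNC_DISJOINT == 3, which for T_INT selects
// StubRoutines::arrayof_jint_disjoint_arraycopy().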
4103 | |
4104 // Note: The condition "disjoint" also applies to overlapping copies | |
4105 // where a descending copy is permitted (i.e., dest_offset <= src_offset). | |
4106 static address | |
4107 select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name) { | |
4108 int selector = | |
4109 (aligned ? COPYFUNC_ALIGNED : COPYFUNC_UNALIGNED) + | |
4110 (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT); | |
4111 | |
4112 #define RETURN_STUB(xxx_arraycopy) { \ | |
4113 name = #xxx_arraycopy; \ | |
4114 return StubRoutines::xxx_arraycopy(); } | |
4115 | |
4116 switch (t) { | |
4117 case T_BYTE: | |
4118 case T_BOOLEAN: | |
4119 switch (selector) { | |
4120 case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_arraycopy); | |
4121 case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_arraycopy); | |
4122 case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_disjoint_arraycopy); | |
4123 case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_disjoint_arraycopy); | |
4124 } | |
4125 case T_CHAR: | |
4126 case T_SHORT: | |
4127 switch (selector) { | |
4128 case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_arraycopy); | |
4129 case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_arraycopy); | |
4130 case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_disjoint_arraycopy); | |
4131 case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_disjoint_arraycopy); | |
4132 } | |
4133 case T_INT: | |
4134 case T_FLOAT: | |
4135 switch (selector) { | |
4136 case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_arraycopy); | |
4137 case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_arraycopy); | |
4138 case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_disjoint_arraycopy); | |
4139 case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_disjoint_arraycopy); | |
4140 } | |
4141 case T_DOUBLE: | |
4142 case T_LONG: | |
4143 switch (selector) { | |
4144 case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_arraycopy); | |
4145 case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_arraycopy); | |
4146 case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_disjoint_arraycopy); | |
4147 case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_disjoint_arraycopy); | |
4148 } | |
4149 case T_ARRAY: | |
4150 case T_OBJECT: | |
4151 switch (selector) { | |
4152 case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(oop_arraycopy); | |
4153 case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_oop_arraycopy); | |
4154 case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(oop_disjoint_arraycopy); | |
4155 case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_oop_disjoint_arraycopy); | |
4156 } | |
4157 default: | |
4158 ShouldNotReachHere(); | |
4159 return NULL; | |
4160 } | |
4161 | |
4162 #undef RETURN_STUB | |
4163 } | |
4164 | |
4165 //------------------------------basictype2arraycopy---------------------------- | |
4166 address LibraryCallKit::basictype2arraycopy(BasicType t, | |
4167 Node* src_offset, | |
4168 Node* dest_offset, | |
4169 bool disjoint_bases, | |
4170 const char* &name) { | |
4171 const TypeInt* src_offset_inttype = gvn().find_int_type(src_offset); | |
4172 const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset); | |
4173 | |
4174 bool aligned = false; | |
4175 bool disjoint = disjoint_bases; | |
4176 | |
4177 // If the offsets are the same, we can treat the memory regions as | |
4178 // disjoint, because either the memory regions are in different arrays, | |
4179 // or they are identical (which we can treat as disjoint). We can also | |
4180 // treat a copy with a destination index less than the source index | |
4181 // as disjoint, since a low->high copy will work correctly in this case. | |
4182 if (src_offset_inttype != NULL && src_offset_inttype->is_con() && | |
4183 dest_offset_inttype != NULL && dest_offset_inttype->is_con()) { | |
4184 // both indices are constants | |
4185 int s_offs = src_offset_inttype->get_con(); | |
4186 int d_offs = dest_offset_inttype->get_con(); | |
29 | 4187 int element_size = type2aelembytes(t); |
0 | 4188 aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) && |
4189 ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0); | |
4190 if (s_offs >= d_offs) disjoint = true; | |
4191 } else if (src_offset == dest_offset && src_offset != NULL) { | |
4192 // This can occur if the offsets are identical non-constants. | |
4193 disjoint = true; | |
4194 } | |
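// Worked example of the alignment test above (offsets illustrative): with
// HeapWordSize == 8, T_INT elements (4 bytes), and a 16-byte array base
// offset, s_offs == 2 gives 16 + 2*4 == 24, a multiple of 8, so that side
// is HeapWord-aligned; s_offs == 1 gives 20, which is not, so the
// unaligned stub would be selected.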
4195 | |
4196 return select_arraycopy_function(t, aligned, disjoint, name); | |
4197 } | |
4198 | |
4199 | |
4200 //------------------------------inline_arraycopy----------------------- | |
4201 bool LibraryCallKit::inline_arraycopy() { | |
4202 // Restore the stack and pop off the arguments. | |
4203 int nargs = 5; // 2 oops, 3 ints, no size_t or long | |
4204 assert(callee()->signature()->size() == nargs, "copy has 5 arguments"); | |
4205 | |
4206 Node *src = argument(0); | |
4207 Node *src_offset = argument(1); | |
4208 Node *dest = argument(2); | |
4209 Node *dest_offset = argument(3); | |
4210 Node *length = argument(4); | |
4211 | |
4212 // Compile time checks. If any of these checks cannot be verified at compile time, | |
4213 // we do not make a fast path for this call. Instead, we let the call remain as it | |
4214 // is. The checks we choose to mandate at compile time are: | |
4215 // | |
4216 // (1) src and dest are arrays. | |
4217 const Type* src_type = src->Value(&_gvn); | |
4218 const Type* dest_type = dest->Value(&_gvn); | |
4219 const TypeAryPtr* top_src = src_type->isa_aryptr(); | |
4220 const TypeAryPtr* top_dest = dest_type->isa_aryptr(); | |
4221 if (top_src == NULL || top_src->klass() == NULL || | |
4222 top_dest == NULL || top_dest->klass() == NULL) { | |
4223 // Conservatively insert a memory barrier on all memory slices. | |
4224 // Do not let writes into the source float below the arraycopy. | |
4225 insert_mem_bar(Op_MemBarCPUOrder); | |
4226 | |
4227 // Call StubRoutines::generic_arraycopy stub. | |
4228 generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT, | |
4229 src, src_offset, dest, dest_offset, length, | |
4230 nargs); | |
4231 | |
4232 // Do not let reads from the destination float above the arraycopy. | |
4233 // Since we cannot type the arrays, we don't know which slices | |
4234 // might be affected. We could restrict this barrier only to those | |
4235 // memory slices which pertain to array elements--but don't bother. | |
4236 if (!InsertMemBarAfterArraycopy) | |
4237 // (If InsertMemBarAfterArraycopy, there is already one in place.) | |
4238 insert_mem_bar(Op_MemBarCPUOrder); | |
4239 return true; | |
4240 } | |
4241 | |
4242 // (2) src and dest arrays must have elements of the same BasicType | |
4243 // Figure out the size and type of the elements we will be copying. | |
4244 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type(); | |
4245 BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type(); | |
4246 if (src_elem == T_ARRAY) src_elem = T_OBJECT; | |
4247 if (dest_elem == T_ARRAY) dest_elem = T_OBJECT; | |
4248 | |
4249 if (src_elem != dest_elem || dest_elem == T_VOID) { | |
4250 // The component types are not the same or are not recognized. Punt. | |
4251 // (But, avoid the native method wrapper to JVM_ArrayCopy.) | |
4252 generate_slow_arraycopy(TypePtr::BOTTOM, | |
4253 src, src_offset, dest, dest_offset, length, | |
4254 nargs); | |
4255 return true; | |
4256 } | |
4257 | |
4258 //--------------------------------------------------------------------------- | |
4259 // We will make a fast path for this call to arraycopy. | |
4260 | |
4261 // We have the following tests left to perform: | |
4262 // | |
4263 // (3) src and dest must not be null. | |
4264 // (4) src_offset must not be negative. | |
4265 // (5) dest_offset must not be negative. | |
4266 // (6) length must not be negative. | |
4267 // (7) src_offset + length must not exceed length of src. | |
4268 // (8) dest_offset + length must not exceed length of dest. | |
4269 // (9) each element of an oop array must be assignable | |
4270 | |
4271 RegionNode* slow_region = new (C, 1) RegionNode(1); | |
4272 record_for_igvn(slow_region); | |
4273 | |
4274 // (3) operands must not be null | |
4275 // We currently perform our null checks with the do_null_check routine. | |
4276 // This means that the null exceptions will be reported in the caller | |
4277 // rather than (correctly) reported inside of the native arraycopy call. | |
4278 // This should be corrected, given time. We do our null check with the | |
4279 // stack pointer restored. | |
4280 _sp += nargs; | |
4281 src = do_null_check(src, T_ARRAY); | |
4282 dest = do_null_check(dest, T_ARRAY); | |
4283 _sp -= nargs; | |
4284 | |
4285 // (4) src_offset must not be negative. | |
4286 generate_negative_guard(src_offset, slow_region); | |
4287 | |
4288 // (5) dest_offset must not be negative. | |
4289 generate_negative_guard(dest_offset, slow_region); | |
4290 | |
4291 // (6) length must not be negative (moved to generate_arraycopy()). | |
4292 // generate_negative_guard(length, slow_region); | |
4293 | |
4294 // (7) src_offset + length must not exceed length of src. | |
4295 generate_limit_guard(src_offset, length, | |
4296 load_array_length(src), | |
4297 slow_region); | |
4298 | |
4299 // (8) dest_offset + length must not exceed length of dest. | |
4300 generate_limit_guard(dest_offset, length, | |
4301 load_array_length(dest), | |
4302 slow_region); | |
4303 | |
4304 // (9) each element of an oop array must be assignable | |
4305 // The generate_arraycopy subroutine checks this. | |
4306 | |
4307 // This is where the memory effects are placed: | |
4308 const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem); | |
4309 generate_arraycopy(adr_type, dest_elem, | |
4310 src, src_offset, dest, dest_offset, length, | |
4311 nargs, false, false, slow_region); | |
4312 | |
4313 return true; | |
4314 } | |
4315 | |
4316 //-----------------------------generate_arraycopy---------------------- | |
4317 // Generate an optimized call to arraycopy. | |
4318 // Caller must guard against non-arrays. | |
4319 // Caller must determine a common array basic-type for both arrays. | |
4320 // Caller must validate offsets against array bounds. | |
4321 // The slow_region has already collected guard failure paths | |
4322 // (such as out of bounds length or non-conformable array types). | |
4323 // The generated code has this shape, in general: | |
4324 // | |
4325 // if (length == 0) return // via zero_path | |
4326 // slowval = -1 | |
4327 // if (types unknown) { | |
4328 // slowval = call generic copy loop | |
4329 // if (slowval == 0) return // via checked_path | |
4330 // } else if (indexes in bounds) { | |
4331 // if ((is object array) && !(array type check)) { | |
4332 // slowval = call checked copy loop | |
4333 // if (slowval == 0) return // via checked_path | |
4334 // } else { | |
4335 // call bulk copy loop | |
4336 // return // via fast_path | |
4337 // } | |
4338 // } | |
4339 // // adjust params for remaining work: | |
4340 // if (slowval != -1) { | |
4341 // n = -1^slowval; src_offset += n; dest_offset += n; length -= n | |
4342 // } | |
4343 // slow_region: | |
4344 // call slow arraycopy(src, src_offset, dest, dest_offset, length) | |
4345 // return // via slow_call_path | |
4346 // | |
4347 // This routine is used from several intrinsics: System.arraycopy, | |
4348 // Object.clone (the array subcase), and Arrays.copyOf[Range]. | |
4349 // | |
4350 void | |
4351 LibraryCallKit::generate_arraycopy(const TypePtr* adr_type, | |
4352 BasicType basic_elem_type, | |
4353 Node* src, Node* src_offset, | |
4354 Node* dest, Node* dest_offset, | |
4355 Node* copy_length, | |
4356 int nargs, | |
4357 bool disjoint_bases, | |
4358 bool length_never_negative, | |
4359 RegionNode* slow_region) { | |
4360 | |
4361 if (slow_region == NULL) { | |
4362 slow_region = new(C,1) RegionNode(1); | |
4363 record_for_igvn(slow_region); | |
4364 } | |
4365 | |
4366 Node* original_dest = dest; | |
4367 AllocateArrayNode* alloc = NULL; // used for zeroing, if needed | |
4368 Node* raw_dest = NULL; // used before zeroing, if needed | |
4369 bool must_clear_dest = false; | |
4370 | |
4371 // See if this is the initialization of a newly-allocated array. | |
4372 // If so, we will take responsibility here for initializing it to zero. | |
4373 // (Note: Because tightly_coupled_allocation performs checks on the | |
4374 // out-edges of the dest, we need to avoid making derived pointers | |
4375 // from it until we have checked its uses.) | |
4376 if (ReduceBulkZeroing | |
4377 && !ZeroTLAB // pointless if already zeroed | |
4378 && basic_elem_type != T_CONFLICT // avoid corner case | |
4379 && !_gvn.eqv_uncast(src, dest) | |
4380 && ((alloc = tightly_coupled_allocation(dest, slow_region)) | |
4381 != NULL) | |
34 | 4382 && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0 |
0 | 4383 && alloc->maybe_set_complete(&_gvn)) { |
4384 // "You break it, you buy it." | |
4385 InitializeNode* init = alloc->initialization(); | |
4386 assert(init->is_complete(), "we just did this"); | |
4387 assert(dest->Opcode() == Op_CheckCastPP, "sanity"); | |
4388 assert(dest->in(0)->in(0) == init, "dest pinned"); | |
4389 raw_dest = dest->in(1); // grab the raw pointer! | |
4390 original_dest = dest; | |
4391 dest = raw_dest; | |
4392 adr_type = TypeRawPtr::BOTTOM; // all initializations are into raw memory | |
4393 // Decouple the original InitializeNode, turning it into a simple membar. | |
4394 // We will build a new one at the end of this routine. | |
4395 init->set_req(InitializeNode::RawAddress, top()); | |
4396 // From this point on, every exit path is responsible for | |
4397 // initializing any non-copied parts of the object to zero. | |
4398 must_clear_dest = true; | |
4399 } else { | |
4400 // No zeroing elimination here. | |
4401 alloc = NULL; | |
4402 //original_dest = dest; | |
4403 //must_clear_dest = false; | |
4404 } | |
4405 | |
4406 // Results are placed here: | |
4407 enum { fast_path = 1, // normal void-returning assembly stub | |
4408 checked_path = 2, // special assembly stub with cleanup | |
4409 slow_call_path = 3, // something went wrong; call the VM | |
4410 zero_path = 4, // bypass when length of copy is zero | |
4411 bcopy_path = 5, // copy primitive array by 64-bit blocks | |
4412 PATH_LIMIT = 6 | |
4413 }; | |
4414 RegionNode* result_region = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); | |
4415 PhiNode* result_i_o = new(C, PATH_LIMIT) PhiNode(result_region, Type::ABIO); | |
4416 PhiNode* result_memory = new(C, PATH_LIMIT) PhiNode(result_region, Type::MEMORY, adr_type); | |
4417 record_for_igvn(result_region); | |
4418 _gvn.set_type_bottom(result_i_o); | |
4419 _gvn.set_type_bottom(result_memory); | |
4420 assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice"); | |
4421 | |
4422 // The slow_control path: | |
4423 Node* slow_control; | |
4424 Node* slow_i_o = i_o(); | |
4425 Node* slow_mem = memory(adr_type); | |
4426 debug_only(slow_control = (Node*) badAddress); | |
4427 | |
4428 // Checked control path: | |
4429 Node* checked_control = top(); | |
4430 Node* checked_mem = NULL; | |
4431 Node* checked_i_o = NULL; | |
4432 Node* checked_value = NULL; | |
4433 | |
4434 if (basic_elem_type == T_CONFLICT) { | |
4435 assert(!must_clear_dest, ""); | |
4436 Node* cv = generate_generic_arraycopy(adr_type, | |
4437 src, src_offset, dest, dest_offset, | |
4438 copy_length, nargs); | |
4439 if (cv == NULL) cv = intcon(-1); // failure (no stub available) | |
4440 checked_control = control(); | |
4441 checked_i_o = i_o(); | |
4442 checked_mem = memory(adr_type); | |
4443 checked_value = cv; | |
4444 set_control(top()); // no fast path | |
4445 } | |
4446 | |
4447 Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative); | |
4448 if (not_pos != NULL) { | |
4449 PreserveJVMState pjvms(this); | |
4450 set_control(not_pos); | |
4451 | |
4452 // (6) length must not be negative. | |
4453 if (!length_never_negative) { | |
4454 generate_negative_guard(copy_length, slow_region); | |
4455 } | |
4456 | |
4457 if (!stopped() && must_clear_dest) { | |
4458 Node* dest_length = alloc->in(AllocateNode::ALength); | |
4459 if (_gvn.eqv_uncast(copy_length, dest_length) | |
4460 || _gvn.find_int_con(dest_length, 1) <= 0) { | |
4461 // There is no zeroing to do. | |
4462 } else { | |
4463 // Clear the whole thing since there are no source elements to copy. | |
4464 generate_clear_array(adr_type, dest, basic_elem_type, | |
4465 intcon(0), NULL, | |
4466 alloc->in(AllocateNode::AllocSize)); | |
4467 } | |
4468 } | |
4469 | |
4470 // Present the results of the fast call. | |
4471 result_region->init_req(zero_path, control()); | |
4472 result_i_o ->init_req(zero_path, i_o()); | |
4473 result_memory->init_req(zero_path, memory(adr_type)); | |
4474 } | |
4475 | |
4476 if (!stopped() && must_clear_dest) { | |
4477 // We have to initialize the *uncopied* part of the array to zero. | |
4478 // The copy destination is the slice dest[off..off+len]. The other slices | |
4479 // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length]. | |
4480 Node* dest_size = alloc->in(AllocateNode::AllocSize); | |
4481 Node* dest_length = alloc->in(AllocateNode::ALength); | |
4482 Node* dest_tail = _gvn.transform( new(C,3) AddINode(dest_offset, | |
4483 copy_length) ); | |
4484 | |
4485 // If there is a head section that needs zeroing, do it now. | |
4486 if (find_int_con(dest_offset, -1) != 0) { | |
4487 generate_clear_array(adr_type, dest, basic_elem_type, | |
4488 intcon(0), dest_offset, | |
4489 NULL); | |
4490 } | |
4491 | |
4492 // Next, perform a dynamic check on the tail length. | |
4493 // It is often zero, and we can win big if we prove this. | |
4494 // There are two wins: Avoid generating the ClearArray | |
4495 // with its attendant messy index arithmetic, and upgrade | |
4496 // the copy to a more hardware-friendly word size of 64 bits. | |
4497 Node* tail_ctl = NULL; | |
4498 if (!stopped() && !_gvn.eqv_uncast(dest_tail, dest_length)) { | |
4499 Node* cmp_lt = _gvn.transform( new(C,3) CmpINode(dest_tail, dest_length) ); | |
4500 Node* bol_lt = _gvn.transform( new(C,2) BoolNode(cmp_lt, BoolTest::lt) ); | |
4501 tail_ctl = generate_slow_guard(bol_lt, NULL); | |
4502 assert(tail_ctl != NULL || !stopped(), "must be an outcome"); | |
4503 } | |
4504 | |
4505 // At this point, let's assume there is no tail. | |
4506 if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) { | |
4507 // There is no tail. Try an upgrade to a 64-bit copy. | |
4508 bool didit = false; | |
4509 { PreserveJVMState pjvms(this); | |
4510 didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc, | |
4511 src, src_offset, dest, dest_offset, | |
4512 dest_size); | |
4513 if (didit) { | |
4514 // Present the results of the block-copying fast call. | |
4515 result_region->init_req(bcopy_path, control()); | |
4516 result_i_o ->init_req(bcopy_path, i_o()); | |
4517 result_memory->init_req(bcopy_path, memory(adr_type)); | |
4518 } | |
4519 } | |
4520 if (didit) | |
4521 set_control(top()); // no regular fast path | |
4522 } | |
4523 | |
4524 // Clear the tail, if any. | |
4525 if (tail_ctl != NULL) { | |
4526 Node* notail_ctl = stopped() ? NULL : control(); | |
4527 set_control(tail_ctl); | |
4528 if (notail_ctl == NULL) { | |
4529 generate_clear_array(adr_type, dest, basic_elem_type, | |
4530 dest_tail, NULL, | |
4531 dest_size); | |
4532 } else { | |
4533 // Make a local merge. | |
4534 Node* done_ctl = new(C,3) RegionNode(3); | |
4535 Node* done_mem = new(C,3) PhiNode(done_ctl, Type::MEMORY, adr_type); | |
4536 done_ctl->init_req(1, notail_ctl); | |
4537 done_mem->init_req(1, memory(adr_type)); | |
4538 generate_clear_array(adr_type, dest, basic_elem_type, | |
4539 dest_tail, NULL, | |
4540 dest_size); | |
4541 done_ctl->init_req(2, control()); | |
4542 done_mem->init_req(2, memory(adr_type)); | |
4543 set_control( _gvn.transform(done_ctl) ); | |
4544 set_memory( _gvn.transform(done_mem), adr_type ); | |
4545 } | |
4546 } | |
4547 } | |
4548 | |
4549 BasicType copy_type = basic_elem_type; | |
4550 assert(basic_elem_type != T_ARRAY, "caller must fix this"); | |
4551 if (!stopped() && copy_type == T_OBJECT) { | |
4552 // If src and dest have compatible element types, we can copy bits. | |
4553 // Types S[] and D[] are compatible if D is a supertype of S. | |
4554 // | |
4555 // If they are not, we will use checked_oop_disjoint_arraycopy, | |
4556 // which performs a fast optimistic per-oop check, and backs off | |
4557 // further to JVM_ArrayCopy on the first per-oop check that fails. | |
4558 // (Actually, we don't move raw bits only; the GC requires card marks.) | |
4559 | |
4560 // Get the klassOop for both src and dest | |
4561 Node* src_klass = load_object_klass(src); | |
4562 Node* dest_klass = load_object_klass(dest); | |
4563 | |
4564 // Generate the subtype check. | |
4565 // This might fold up statically, or then again it might not. | |
4566 // | |
4567 // Non-static example: Copying List<String>.elements to a new String[]. | |
4568 // The backing store for a List<String> is always an Object[], | |
4569 // but its elements are always type String, if the generic types | |
4570 // are correct at the source level. | |
4571 // | |
4572 // Test S[] against D[], not S against D, because (probably) | |
4573 // the secondary supertype cache is less busy for S[] than S. | |
4574 // This usually only matters when D is an interface. | |
4575 Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass); | |
4576 // Plug failing path into checked_oop_disjoint_arraycopy | |
4577 if (not_subtype_ctrl != top()) { | |
4578 PreserveJVMState pjvms(this); | |
4579 set_control(not_subtype_ctrl); | |
4580 // (At this point we can assume disjoint_bases, since types differ.) | |
4581 int ek_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc); | |
4582 Node* p1 = basic_plus_adr(dest_klass, ek_offset); | |
164 | 4583 Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM); |
0 | 4584 Node* dest_elem_klass = _gvn.transform(n1); |
4585 Node* cv = generate_checkcast_arraycopy(adr_type, | |
4586 dest_elem_klass, | |
4587 src, src_offset, dest, dest_offset, | |
4588 copy_length, | |
4589 nargs); | |
4590 if (cv == NULL) cv = intcon(-1); // failure (no stub available) | |
4591 checked_control = control(); | |
4592 checked_i_o = i_o(); | |
4593 checked_mem = memory(adr_type); | |
4594 checked_value = cv; | |
4595 } | |
4596 // At this point we know we do not need type checks on oop stores. | |
4597 | |
4598 // Let's see if we need card marks: | |
4599 if (alloc != NULL && use_ReduceInitialCardMarks()) { | |
4600 // If we do not need card marks, copy using the jint or jlong stub. | |
113 | 4601 copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT); |
29 | 4602 assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type), |
0 | 4603 "sizes agree"); |
4604 } | |
4605 } | |
4606 | |
4607 if (!stopped()) { | |
4608 // Generate the fast path, if possible. | |
4609 PreserveJVMState pjvms(this); | |
4610 generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases, | |
4611 src, src_offset, dest, dest_offset, | |
4612 ConvI2X(copy_length)); | |
4613 | |
4614 // Present the results of the fast call. | |
4615 result_region->init_req(fast_path, control()); | |
4616 result_i_o ->init_req(fast_path, i_o()); | |
4617 result_memory->init_req(fast_path, memory(adr_type)); | |
4618 } | |
4619 | |
4620 // Here are all the slow paths up to this point, in one bundle: | |
4621 slow_control = top(); | |
4622 if (slow_region != NULL) | |
4623 slow_control = _gvn.transform(slow_region); | |
4624 debug_only(slow_region = (RegionNode*)badAddress); | |
4625 | |
4626 set_control(checked_control); | |
4627 if (!stopped()) { | |
4628 // Clean up after the checked call. | |
4629 // The returned value is either 0 or -1^K, | |
4630 // where K = number of partially transferred array elements. | |
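// Worked example of the encoding: if the checked stub stops after
// transferring K == 3 elements, it returns -1^3 == -4; the XorI with -1
// below recovers K == 3, and the slow path resumes with the offsets
// advanced by 3 and the length reduced by 3.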
4631 Node* cmp = _gvn.transform( new(C, 3) CmpINode(checked_value, intcon(0)) ); | |
4632 Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) ); | |
4633 IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN); | |
4634 | |
4635 // If it is 0, we are done, so transfer to the end. | |
4636 Node* checks_done = _gvn.transform( new(C, 1) IfTrueNode(iff) ); | |
4637 result_region->init_req(checked_path, checks_done); | |
4638 result_i_o ->init_req(checked_path, checked_i_o); | |
4639 result_memory->init_req(checked_path, checked_mem); | |
4640 | |
4641 // If it is not zero, merge into the slow call. | |
4642 set_control( _gvn.transform( new(C, 1) IfFalseNode(iff) )); | |
4643 RegionNode* slow_reg2 = new(C, 3) RegionNode(3); | |
4644 PhiNode* slow_i_o2 = new(C, 3) PhiNode(slow_reg2, Type::ABIO); | |
4645 PhiNode* slow_mem2 = new(C, 3) PhiNode(slow_reg2, Type::MEMORY, adr_type); | |
4646 record_for_igvn(slow_reg2); | |
4647 slow_reg2 ->init_req(1, slow_control); | |
4648 slow_i_o2 ->init_req(1, slow_i_o); | |
4649 slow_mem2 ->init_req(1, slow_mem); | |
4650 slow_reg2 ->init_req(2, control()); | |
4651 slow_i_o2 ->init_req(2, i_o()); | |
4652 slow_mem2 ->init_req(2, memory(adr_type)); | |
4653 | |
4654 slow_control = _gvn.transform(slow_reg2); | |
4655 slow_i_o = _gvn.transform(slow_i_o2); | |
4656 slow_mem = _gvn.transform(slow_mem2); | |
4657 | |
4658 if (alloc != NULL) { | |
4659 // We'll restart from the very beginning, after zeroing the whole thing. | |
4660 // This can cause double writes, but that's OK since dest is brand new. | |
4661 // So we ignore the low 31 bits of the value returned from the stub. | |
4662 } else { | |
4663 // We must continue the copy exactly where it failed, or else | |
4664 // another thread might see the wrong number of writes to dest. | |
4665 Node* checked_offset = _gvn.transform( new(C, 3) XorINode(checked_value, intcon(-1)) ); | |
4666 Node* slow_offset = new(C, 3) PhiNode(slow_reg2, TypeInt::INT); | |
4667 slow_offset->init_req(1, intcon(0)); | |
4668 slow_offset->init_req(2, checked_offset); | |
4669 slow_offset = _gvn.transform(slow_offset); | |
4670 | |
4671 // Adjust the arguments by the conditionally incoming offset. | |
4672 Node* src_off_plus = _gvn.transform( new(C, 3) AddINode(src_offset, slow_offset) ); | |
4673 Node* dest_off_plus = _gvn.transform( new(C, 3) AddINode(dest_offset, slow_offset) ); | |
4674 Node* length_minus = _gvn.transform( new(C, 3) SubINode(copy_length, slow_offset) ); | |
4675 | |
4676 // Tweak the node variables to adjust the code produced below: | |
4677 src_offset = src_off_plus; | |
4678 dest_offset = dest_off_plus; | |
4679 copy_length = length_minus; | |
4680 } | |
4681 } | |
4682 | |
4683 set_control(slow_control); | |
4684 if (!stopped()) { | |
4685 // Generate the slow path, if needed. | |
4686 PreserveJVMState pjvms(this); // replace_in_map may trash the map | |
4687 | |
4688 set_memory(slow_mem, adr_type); | |
4689 set_i_o(slow_i_o); | |
4690 | |
4691 if (must_clear_dest) { | |
4692 generate_clear_array(adr_type, dest, basic_elem_type, | |
4693 intcon(0), NULL, | |
4694 alloc->in(AllocateNode::AllocSize)); | |
4695 } | |
4696 | |
4697 if (dest != original_dest) { | |
4698 // Promote from rawptr to oop, so it looks right in the call's GC map. | |
4699 dest = _gvn.transform( new(C,2) CheckCastPPNode(control(), dest, | |
4700 TypeInstPtr::NOTNULL) ); | |
4701 | |
4702 // Edit the call's debug-info to avoid referring to original_dest. | |
4703 // (The problem with original_dest is that it isn't ready until | |
4704 // after the InitializeNode completes, but this stuff is before.) | |
4705 // Substitute in the locally valid dest_oop. | |
4706 replace_in_map(original_dest, dest); | |
4707 } | |
4708 | |
4709 generate_slow_arraycopy(adr_type, | |
4710 src, src_offset, dest, dest_offset, | |
4711 copy_length, nargs); | |
4712 | |
4713 result_region->init_req(slow_call_path, control()); | |
4714 result_i_o ->init_req(slow_call_path, i_o()); | |
4715 result_memory->init_req(slow_call_path, memory(adr_type)); | |
4716 } | |
4717 | |
4718 // Remove unused edges. | |
4719 for (uint i = 1; i < result_region->req(); i++) { | |
4720 if (result_region->in(i) == NULL) | |
4721 result_region->init_req(i, top()); | |
4722 } | |
4723 | |
4724 // Finished; return the combined state. | |
4725 set_control( _gvn.transform(result_region) ); | |
4726 set_i_o( _gvn.transform(result_i_o) ); | |
4727 set_memory( _gvn.transform(result_memory), adr_type ); | |
4728 | |
4729 if (dest != original_dest) { | |
4730 // Pin the "finished" array node after the arraycopy/zeroing operations. | |
4731 // Use a secondary InitializeNode memory barrier. | |
4732 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, | |
4733 Compile::AliasIdxRaw, | |
4734 raw_dest)->as_Initialize(); | |
4735 init->set_complete(&_gvn); // (there is no corresponding AllocateNode) | |
4736 _gvn.hash_delete(original_dest); | |
4737 original_dest->set_req(0, control()); | |
4738 _gvn.hash_find_insert(original_dest); // put back into GVN table | |
4739 } | |
4740 | |
4741 // The memory edges above are precise in order to model effects around | |
605 | 4742 // array copies accurately to allow value numbering of field loads around |
0 | 4743 // arraycopy. Such field loads, both before and after, are common in Java |
4744 // collections and similar classes involving header/array data structures. | |
4745 // | |
4746 // But with a low number of registers, or when some registers are used or | |
4747 // killed by arraycopy calls, this causes register spilling on the stack. See 6544710. | |
4748 // The next memory barrier is added to avoid that. If the arraycopy can be | |
4749 // optimized away (which it can, sometimes), then we can manually remove | |
4750 // the membar as well. | |
4751 if (InsertMemBarAfterArraycopy) | |
4752 insert_mem_bar(Op_MemBarCPUOrder); | |
4753 } | |
4754 | |
4755 | |
4756 // Helper function which determines if an arraycopy immediately follows | |
4757 // an allocation, with no intervening tests or other escapes for the object. | |
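// The Java-level pattern recognized here is, schematically:
//
//   int[] dest = new int[n];               // AllocateArray
//   System.arraycopy(src, 0, dest, 0, n);  // immediately follows
//
// with no intervening safepoint, call, or other escape of 'dest'.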
4758 AllocateArrayNode* | |
4759 LibraryCallKit::tightly_coupled_allocation(Node* ptr, | |
4760 RegionNode* slow_region) { | |
4761 if (stopped()) return NULL; // no fast path | |
4762 if (C->AliasLevel() == 0) return NULL; // no MergeMems around | |
4763 | |
4764 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn); | |
4765 if (alloc == NULL) return NULL; | |
4766 | |
4767 Node* rawmem = memory(Compile::AliasIdxRaw); | |
4768 // Is the allocation's memory state untouched? | |
4769 if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) { | |
4770 // Bail out if there have been raw-memory effects since the allocation. | |
4771 // (Example: There might have been a call or safepoint.) | |
4772 return NULL; | |
4773 } | |
4774 rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw); | |
4775 if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) { | |
4776 return NULL; | |
4777 } | |
4778 | |
4779 // There must be no unexpected observers of this allocation. | |
4780 for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) { | |
4781 Node* obs = ptr->fast_out(i); | |
4782 if (obs != this->map()) { | |
4783 return NULL; | |
4784 } | |
4785 } | |
4786 | |
4787 // This arraycopy must unconditionally follow the allocation of the ptr. | |
4788 Node* alloc_ctl = ptr->in(0); | |
4789 assert(just_allocated_object(alloc_ctl) == ptr, "most recent allocation"); | |
4790 | |
4791 Node* ctl = control(); | |
4792 while (ctl != alloc_ctl) { | |
4793 // There may be guards which feed into the slow_region. | |
4794 // Any other control flow means that we might not get a chance | |
4795 // to finish initializing the allocated object. | |
4796 if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) { | |
4797 IfNode* iff = ctl->in(0)->as_If(); | |
4798 Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con); | |
4799 assert(not_ctl != NULL && not_ctl != ctl, "found alternate"); | |
4800 if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) { | |
4801 ctl = iff->in(0); // This test feeds the known slow_region. | |
4802 continue; | |
4803 } | |
4804 // One more try: Various low-level checks bottom out in | |
4805 // uncommon traps. If the debug-info of the trap omits | |
4806 // any reference to the allocation, as we've already | |
4807 // observed, then there can be no objection to the trap. | |
4808 bool found_trap = false; | |
4809 for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) { | |
4810 Node* obs = not_ctl->fast_out(j); | |
4811 if (obs->in(0) == not_ctl && obs->is_Call() && | |
4812 (obs->as_Call()->entry_point() == | |
4813 SharedRuntime::uncommon_trap_blob()->instructions_begin())) { | |
4814 found_trap = true; break; | |
4815 } | |
4816 } | |
4817 if (found_trap) { | |
4818 ctl = iff->in(0); // This test feeds a harmless uncommon trap. | |
4819 continue; | |
4820 } | |
4821 } | |
4822 return NULL; | |
4823 } | |
4824 | |
4825 // If we get this far, we have an allocation which immediately | |
4826 // precedes the arraycopy, and we can take over zeroing the new object. | |
4827 // The arraycopy will finish the initialization, and provide | |
4828 // a new control state to which we will anchor the destination pointer. | |
4829 | |
4830 return alloc; | |
4831 } | |
4832 | |
4833 // Helper for initialization of arrays, creating a ClearArray. | |
4834 // It writes zero bits in [start..end), within the body of an array object. | |
4835 // The memory effects are all chained onto the 'adr_type' alias category. | |
4836 // | |
4837 // Since the object is otherwise uninitialized, we are free | |
4838 // to put a little "slop" around the edges of the cleared area, | |
4839 // as long as it does not go back into the array's header, | |
4840 // or beyond the array end within the heap. | |
4841 // | |
4842 // The lower edge can be rounded down to the nearest jint and the | |
4843 // upper edge can be rounded up to the nearest MinObjAlignmentInBytes. | |
4844 // | |
4845 // Arguments: | |
4846 // adr_type memory slice where writes are generated | |
4847 // dest oop of the destination array | |
4848 // basic_elem_type element type of the destination | |
4849 // slice_idx array index of first element to store | |
4850 // slice_len number of elements to store (or NULL) | |
4851 // dest_size total size in bytes of the array object | |
4852 // | |
4853 // Exactly one of slice_len or dest_size must be non-NULL. | |
4854 // If dest_size is non-NULL, zeroing extends to the end of the object. | |
4855 // If slice_len is non-NULL, the slice_idx value must be a constant. | |
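// Worked example of the slop rules (assuming a layout where a short array
// has abase == 12, so scale == 1 and clear_low == 2): clearing from element
// 3 gives a raw start of 12 + 3*2 == 18, which rounds down to 18 & ~2 == 16;
// the two extra zeroed bytes cover element 2, which is harmless because the
// object is otherwise uninitialized and a later copy may overwrite them.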
4856 void | |
4857 LibraryCallKit::generate_clear_array(const TypePtr* adr_type, | |
4858 Node* dest, | |
4859 BasicType basic_elem_type, | |
4860 Node* slice_idx, | |
4861 Node* slice_len, | |
4862 Node* dest_size) { | |
4863 // one or the other but not both of slice_len and dest_size: | |
4864 assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, ""); | |
4865 if (slice_len == NULL) slice_len = top(); | |
4866 if (dest_size == NULL) dest_size = top(); | |
4867 | |
4868 // operate on this memory slice: | |
4869 Node* mem = memory(adr_type); // memory slice to operate on | |
4870 | |
4871 // scaling and rounding of indexes: | |
29 | 4872 int scale = exact_log2(type2aelembytes(basic_elem_type)); | 
0 | 4873 int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type); |
4874 int clear_low = (-1 << scale) & (BytesPerInt - 1); | |
4875 int bump_bit = (-1 << scale) & BytesPerInt; | |
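// The resulting masks depend only on 'scale':
//   scale == 0 (bytes):  clear_low == 3, bump_bit == 4
//   scale == 1 (shorts): clear_low == 2, bump_bit == 4
//   scale == 2 (ints):   clear_low == 0, bump_bit == 4
//   scale == 3 (longs):  clear_low == 0, bump_bit == 0
// That is, clear_low masks the sub-jint bits of a start offset, and bump_bit
// records whether a stray jint store may be needed to reach jlong alignment.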
4876 | |
4877 // determine constant starts and ends | |
4878 const intptr_t BIG_NEG = -128; | |
4879 assert(BIG_NEG + 2*abase < 0, "neg enough"); | |
4880 intptr_t slice_idx_con = (intptr_t) find_int_con(slice_idx, BIG_NEG); | |
4881 intptr_t slice_len_con = (intptr_t) find_int_con(slice_len, BIG_NEG); | |
4882 if (slice_len_con == 0) { | |
4883 return; // nothing to do here | |
4884 } | |
4885 intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low; | |
4886 intptr_t end_con = find_intptr_t_con(dest_size, -1); | |
4887 if (slice_idx_con >= 0 && slice_len_con >= 0) { | |
4888 assert(end_con < 0, "not two cons"); | |
4889 end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale), | |
4890 BytesPerLong); | |
4891 } | |
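// E.g., assuming abase == 12 for an int array, idx == 0 and len == 2 give
// end_con == round_to(12 + 8, BytesPerLong) == 24: four bytes of slop past
// the slice, but still inside the object.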
4892 | |
4893 if (start_con >= 0 && end_con >= 0) { | |
4894 // Constant start and end. Simple. | |
4895 mem = ClearArrayNode::clear_memory(control(), mem, dest, | |
4896 start_con, end_con, &_gvn); | |
4897 } else if (start_con >= 0 && dest_size != top()) { | |
4898 // Constant start, pre-rounded end after the tail of the array. | |
4899 Node* end = dest_size; | |
4900 mem = ClearArrayNode::clear_memory(control(), mem, dest, | |
4901 start_con, end, &_gvn); | |
4902 } else if (start_con >= 0 && slice_len != top()) { | |
4903 // Constant start, non-constant end. End needs rounding up. | |
4904 // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8) | |
4905 intptr_t end_base = abase + (slice_idx_con << scale); | |
4906 int end_round = (-1 << scale) & (BytesPerLong - 1); | |
4907 Node* end = ConvI2X(slice_len); | |
4908 if (scale != 0) | |
4909 end = _gvn.transform( new(C,3) LShiftXNode(end, intcon(scale) )); | |
4910 end_base += end_round; | |
4911 end = _gvn.transform( new(C,3) AddXNode(end, MakeConX(end_base)) ); | |
4912 end = _gvn.transform( new(C,3) AndXNode(end, MakeConX(~end_round)) ); | |
4913 mem = ClearArrayNode::clear_memory(control(), mem, dest, | |
4914 start_con, end, &_gvn); | |
4915 } else if (start_con < 0 && dest_size != top()) { | |
4916 // Non-constant start, pre-rounded end after the tail of the array. | |
4917 // This is almost certainly a "round-to-end" operation. | |
4918 Node* start = slice_idx; | |
4919 start = ConvI2X(start); | |
4920 if (scale != 0) | |
4921 start = _gvn.transform( new(C,3) LShiftXNode( start, intcon(scale) )); | |
4922 start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(abase)) ); | |
4923 if ((bump_bit | clear_low) != 0) { | |
4924 int to_clear = (bump_bit | clear_low); | |
4925 // Align up mod 8, then store a jint zero unconditionally | |
4926 // just before the mod-8 boundary. | |
113 | 4927 if (((abase + bump_bit) & ~to_clear) - bump_bit | 
4928 < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) { | 
4929 bump_bit = 0; | 
4930 assert((abase & to_clear) == 0, "array base must be long-aligned"); | 
4931 } else { | 
4932 // Bump 'start' up to (or past) the next jint boundary: | 
4933 start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) ); | 
4934 assert((abase & clear_low) == 0, "array base must be int-aligned"); | 
4935 } | 
0 | 4936 // Round bumped 'start' down to jlong boundary in body of array. |
4937 start = _gvn.transform( new(C,3) AndXNode(start, MakeConX(~to_clear)) ); | |
113 | 4938 if (bump_bit != 0) { | 
4939 // Store a zero to the immediately preceding jint: | 
4940 Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-bump_bit)) ); | 
4941 Node* p1 = basic_plus_adr(dest, x1); | 
4942 mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT); | 
4943 mem = _gvn.transform(mem); | 
4944 } | 
0 | 4945 } |
4946 Node* end = dest_size; // pre-rounded | |
4947 mem = ClearArrayNode::clear_memory(control(), mem, dest, | |
4948 start, end, &_gvn); | |
4949 } else { | |
4950 // Non-constant start, unrounded non-constant end. | |
4951 // (Nobody zeroes a random midsection of an array using this routine.) | |
4952 ShouldNotReachHere(); // fix caller | |
4953 } | |
4954 | |
4955 // Done. | |
4956 set_memory(mem, adr_type); | |
4957 } | |
4958 | |
4959 | |
4960 bool | |
4961 LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type, | |
4962 BasicType basic_elem_type, | |
4963 AllocateNode* alloc, | |
4964 Node* src, Node* src_offset, | |
4965 Node* dest, Node* dest_offset, | |
4966 Node* dest_size) { | |
4967 // See if there is an advantage from block transfer. | |
29 | 4968 int scale = exact_log2(type2aelembytes(basic_elem_type)); | 
0 | 4969 if (scale >= LogBytesPerLong) |
4970 return false; // it is already a block transfer | |
4971 | |
4972 // Look at the alignment of the starting offsets. | |
4973 int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type); | |
4974 const intptr_t BIG_NEG = -128; | |
4975 assert(BIG_NEG + 2*abase < 0, "neg enough"); | |
4976 | |
4977 intptr_t src_off = abase + ((intptr_t) find_int_con(src_offset, -1) << scale); | |
4978 intptr_t dest_off = abase + ((intptr_t) find_int_con(dest_offset, -1) << scale); | |
4979 if (src_off < 0 || dest_off < 0) | |
4980 // At present, we can only understand constants. | |
4981 return false; | |
4982 | |
4983 if (((src_off | dest_off) & (BytesPerLong-1)) != 0) { | |
4984 // Non-aligned; too bad. | |
4985 // One more chance: Pick off an initial 32-bit word. | |
4986 // This is a common case, since abase can be odd mod 8. | |
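// For instance, assuming abase == 12 and both copies starting at element 0,
// src_off == dest_off == 12, which is 4 mod 8; copying a single jint first
// advances both offsets to 16, which is jlong-aligned.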
4987 if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt && | |
4988 ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) { | |
4989 Node* sptr = basic_plus_adr(src, src_off); | |
4990 Node* dptr = basic_plus_adr(dest, dest_off); | |
4991 Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type); | |
4992 store_to_memory(control(), dptr, sval, T_INT, adr_type); | |
4993 src_off += BytesPerInt; | |
4994 dest_off += BytesPerInt; | |
4995 } else { | |
4996 return false; | |
4997 } | |
4998 } | |
4999 assert(src_off % BytesPerLong == 0, ""); | |
5000 assert(dest_off % BytesPerLong == 0, ""); | |
5001 | |
5002 // Do this copy by giant steps. | |
5003 Node* sptr = basic_plus_adr(src, src_off); | |
5004 Node* dptr = basic_plus_adr(dest, dest_off); | |
5005 Node* countx = dest_size; | |
5006 countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(dest_off)) ); | |
5007 countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong)) ); | |
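// In effect countx == (dest_size - dest_off) >> LogBytesPerLong, the number
// of whole jlong words between the aligned copy start and the end of the
// object.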
5008 | |
5009 bool disjoint_bases = true; // since alloc != NULL | |
5010 generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases, | |
5011 sptr, NULL, dptr, NULL, countx); | |
5012 | |
5013 return true; | |
5014 } | |
5015 | |
5016 | |
5017 // Helper function; generates code for the slow case. | |
5018 // We make a call to a runtime method which emulates the native method, | |
5019 // but without the native wrapper overhead. | |
5020 void | |
5021 LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type, | |
5022 Node* src, Node* src_offset, | |
5023 Node* dest, Node* dest_offset, | |
5024 Node* copy_length, | |
5025 int nargs) { | |
5026 _sp += nargs; // any deopt will start just before call to enclosing method | |
5027 Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON, | |
5028 OptoRuntime::slow_arraycopy_Type(), | |
5029 OptoRuntime::slow_arraycopy_Java(), | |
5030 "slow_arraycopy", adr_type, | |
5031 src, src_offset, dest, dest_offset, | |
5032 copy_length); | |
5033 _sp -= nargs; | |
5034 | |
5035 // Handle exceptions thrown by this fellow: | |
5036 make_slow_call_ex(call, env()->Throwable_klass(), false); | |
5037 } | |
5038 | |
5039 // Helper function; generates code for cases requiring runtime checks. | |
5040 Node* | |
5041 LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type, | |
5042 Node* dest_elem_klass, | |
5043 Node* src, Node* src_offset, | |
5044 Node* dest, Node* dest_offset, | |
5045 Node* copy_length, | |
5046 int nargs) { | |
5047 if (stopped()) return NULL; | |
5048 | |
5049 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); | |
5050 if (copyfunc_addr == NULL) { // Stub was not generated, go slow path. | |
5051 return NULL; | |
5052 } | |
5053 | |
5054 // Pick out the parameters required to perform a store-check | |
5055 // for the target array. This is an optimistic check. It will | |
5056 // look in each non-null element's class, at the desired klass's | |
5057 // super_check_offset, for the desired klass. | |
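// Conceptually, the stub's per-element test is roughly (a sketch only; on a
// fast-path miss the real stub still performs the full subtype check):
//   for (each non-null element e copied from src to dest)
//     if (*(Klass**)((address)e->klass() + check_offset) != check_value)
//       fall back to the full check, or stop the copy part-way;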
5058 int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc); | |
5059 Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset); | |
5060 Node* n3 = new(C, 3) LoadINode(NULL, immutable_memory(), p3, TypeRawPtr::BOTTOM); | |
5061 Node* check_offset = _gvn.transform(n3); | |
5062 Node* check_value = dest_elem_klass; | |
5063 | |
5064 Node* src_start = array_element_address(src, src_offset, T_OBJECT); | |
5065 Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT); | |
5066 | |
5067 // (We know the arrays are never conjoint, because their types differ.) | |
5068 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, | |
5069 OptoRuntime::checkcast_arraycopy_Type(), | |
5070 copyfunc_addr, "checkcast_arraycopy", adr_type, | |
5071 // five arguments, of which two are | |
5072 // intptr_t (jlong in LP64) | |
5073 src_start, dest_start, | |
5074 copy_length XTOP, | |
5075 check_offset XTOP, | |
5076 check_value); | |
5077 | |
5078 return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms)); | |
5079 } | |
5080 | |
5081 | |
5082 // Helper function; generates code for cases requiring runtime checks. | |
5083 Node* | |
5084 LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type, | |
5085 Node* src, Node* src_offset, | |
5086 Node* dest, Node* dest_offset, | |
5087 Node* copy_length, | |
5088 int nargs) { | |
5089 if (stopped()) return NULL; | |
5090 | |
5091 address copyfunc_addr = StubRoutines::generic_arraycopy(); | |
5092 if (copyfunc_addr == NULL) { // Stub was not generated, go slow path. | |
5093 return NULL; | |
5094 } | |
5095 | |
5096 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, | |
5097 OptoRuntime::generic_arraycopy_Type(), | |
5098 copyfunc_addr, "generic_arraycopy", adr_type, | |
5099 src, src_offset, dest, dest_offset, copy_length); | |
5100 | |
5101 return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms)); | |
5102 } | |
5103 | |
5104 // Helper function; generates the fast out-of-line call to an arraycopy stub. | |
5105 void | |
5106 LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type, | |
5107 BasicType basic_elem_type, | |
5108 bool disjoint_bases, | |
5109 Node* src, Node* src_offset, | |
5110 Node* dest, Node* dest_offset, | |
5111 Node* copy_length) { | |
5112 if (stopped()) return; // nothing to do | |
5113 | |
5114 Node* src_start = src; | |
5115 Node* dest_start = dest; | |
5116 if (src_offset != NULL || dest_offset != NULL) { | |
5117 assert(src_offset != NULL && dest_offset != NULL, ""); | |
5118 src_start = array_element_address(src, src_offset, basic_elem_type); | |
5119 dest_start = array_element_address(dest, dest_offset, basic_elem_type); | |
5120 } | |
5121 | |
5122 // Figure out which arraycopy runtime method to call. | |
5123 const char* copyfunc_name = "arraycopy"; | |
5124 address copyfunc_addr = | |
5125 basictype2arraycopy(basic_elem_type, src_offset, dest_offset, | |
5126 disjoint_bases, copyfunc_name); | |
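// For example, a known-disjoint T_LONG copy can be routed to a disjoint
// jlong stub, while a possibly-overlapping copy needs a conjoint stub that
// copies backwards when the ranges overlap; the exact routing, including
// the aligned 'arrayof' variants, is basictype2arraycopy's business.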
5127 | |
5128 // Call it. Note that the copy_length value is not scaled to a byte-size. | 
5129 make_runtime_call(RC_LEAF|RC_NO_FP, | |
5130 OptoRuntime::fast_arraycopy_Type(), | |
5131 copyfunc_addr, copyfunc_name, adr_type, | |
5132 src_start, dest_start, copy_length XTOP); | |
5133 } |