comparison src/share/vm/graal/graalRuntime.cpp @ 7125:1baf7f1e3f23

decoupled C++ Graal runtime from C1
author Doug Simon <doug.simon@oracle.com>
date Mon, 03 Dec 2012 15:32:17 +0100
parents d5f7e737827f
children 6c46172c04bf 5a95c784febf
 */

#include "precompiled.hpp"
#include "runtime/interfaceSupport.hpp"
#include "prims/jvm.h"
#include "graal/graalRuntime.hpp"
#include "graal/graalVMToCompiler.hpp"
#include "asm/codeBuffer.hpp"
#include "runtime/biasedLocking.hpp"
// Implementation of GraalStubAssembler

GraalStubAssembler::GraalStubAssembler(CodeBuffer* code, const char* name, int stub_id) : MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void GraalStubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}


void GraalStubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}


void GraalStubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}

// Implementation of GraalRuntime

CodeBlob* GraalRuntime::_blobs[GraalRuntime::number_of_ids];
const char* GraalRuntime::_blob_names[] = {
  GRAAL_STUBS(STUB_NAME, LAST_STUB_NAME)
};
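
// The name table above is built with the X-macro pattern: GRAAL_STUBS
// (declared in graalRuntime.hpp) lists every stub once and is expanded here
// with name-stringifying arguments, so each StubID automatically gets a
// matching printable name. A minimal sketch of the pattern; the stub list and
// the exact macro bodies below are illustrative assumptions, not the real
// definitions:
//
//   #define GRAAL_STUBS(stub, last_stub) \
//     stub(graal_slow_subtype_check)     \
//     last_stub(graal_set_deopt_info)
//
//   #define STUB_NAME(x)      #x " GraalRuntime stub",
//   #define LAST_STUB_NAME(x) #x " GraalRuntime stub"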

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted() {
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

// Stress deoptimization
static void deopt_caller() {
  if (!caller_is_deopted()) {
    JavaThread* thread = JavaThread::current();
    RegisterMap reg_map(thread, false);
    frame runtime_frame = thread->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
    assert(caller_is_deopted(), "Must be deoptimized");
  }
}
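
// deopt_caller() is only expected to fire when the DeoptimizeALot develop
// flag is set (see the allocation entry points below); it pushes the compiled
// caller through the deoptimization machinery purely to stress that path.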

static bool setup_code_buffer(CodeBuffer* code) {
  // Preinitialize the relocation info buffer to a reasonable size:
  int locs_buffer_size = 1 * (relocInfo::length_limit + sizeof(relocInfo));
  char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
  code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
                                        locs_buffer_size / sizeof(relocInfo));

  // Global stubs have neither constants nor local stubs
  code->initialize_consts_size(0);
  code->initialize_stubs_size(0);

  return true;
}

void GraalRuntime::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  setup_code_buffer(&code);

  // create assembler for code generation
  GraalStubAssembler* sasm = new GraalStubAssembler(&code, name_for(id), id);
  // generate code for runtime stub
  OopMapSet* oop_maps;
  oop_maps = generate_code_for(id, sasm);
  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");

#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
    // These stubs don't need to have an oopmap
    case graal_slow_subtype_check_id:
#if defined(SPARC) || defined(PPC)
    case handle_exception_nofpu_id:  // Unused on sparc
#endif
#ifdef GRAAL
    case graal_verify_oop_id:
    case graal_unwind_exception_call_id:
    case graal_OSR_migration_end_id:
    case graal_arithmetic_frem_id:
    case graal_arithmetic_drem_id:
    case graal_set_deopt_info_id:
#endif
      break;

    // All other stubs should have oopmaps
    default:
      assert(oop_maps != NULL, "must have an oopmap");
  }
#endif

  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 sasm->frame_size(),
                                                 oop_maps,
                                                 sasm->must_gc_arguments());
  // install blob
  assert(blob != NULL, "blob must exist");
  _blobs[id] = blob;
}
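
// Note: generate_code_for(id, sasm) is the platform-specific half of stub
// generation. By the usual HotSpot layout it would live in a CPU directory
// (for example, a graalRuntime_x86.cpp); that location is an assumption here,
// as only the shared code is visible in this file.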


void GraalRuntime::initialize(BufferBlob* blob) {
  // generate stubs
  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      _blobs[id]->print();
      if (_blobs[id]->oop_maps() != NULL) {
        _blobs[id]->oop_maps()->print();
      }
    }
  }
#endif
}
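
// Typical startup sequence, as a sketch. The buffer-blob name and size and the
// stub id below are illustrative assumptions; only initialize() and the
// entry_for()/blob_for() accessors are taken from this file and its header:
//
//   BufferBlob* blob = BufferBlob::create("GraalRuntime stub buffer", 64 * K);
//   GraalRuntime::initialize(blob);  // generates all number_of_ids stubs once
//   address entry = GraalRuntime::entry_for(GraalRuntime::graal_slow_subtype_check_id);
//   // compiled code can then call 'entry' directly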


CodeBlob* GraalRuntime::blob_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blobs[id];
}


const char* GraalRuntime::name_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blob_names[id];
}

const char* GraalRuntime::name_for_address(address entry) {
  for (int id = 0; id < number_of_ids; id++) {
    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
  }

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
#ifdef TRACE_HAVE_INTRINSICS
  FUNCTION_CASE(entry, TRACE_TIME_METHOD);
#endif

#undef FUNCTION_CASE

  return "<unknown function>";
}
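
// name_for_address() lets tools that print generated code annotate raw call
// targets symbolically. A hypothetical disassembly line (the format is
// illustrative only):
//
//   0x7f3c: call 0x7f90   ;  {runtime_call SharedRuntime::drem}
//
// where the symbolic annotation comes from resolving the target address
// through this function.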


JRT_ENTRY(void, GraalRuntime::new_instance(JavaThread* thread, Klass* klass))
  assert(klass->is_klass(), "not a class");
  instanceKlassHandle h(thread, klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  thread->set_vm_result(obj);
JRT_END
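
// Convention sketch: JRT_ENTRY performs the thread-state transition into the
// VM, and the allocation entry points hand their result back through the
// thread-local vm_result slot rather than a C return value. The assembly stub
// that made the call is then expected to reload it, roughly as follows
// (hypothetical x86 fragment; the stub id name is an assumption):
//
//   __ call(RuntimeAddress(GraalRuntime::entry_for(<new_instance_stub_id>)));
//   __ get_vm_result(rax, r15_thread);  // fetch the new oop and clear vm_result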


JRT_ENTRY(void, GraalRuntime::new_type_array(JavaThread* thread, Klass* klass, jint length))
  // Note: no handle for klass needed since it is not used
  // anymore after new_typeArray() and no GC can happen before.
  // (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  thread->set_vm_result(obj);
  // Deoptimizing here is rare, but it stresses the deoptimization path,
  // so force a deopt under DeoptimizeALot to exercise it.
  if (DeoptimizeALot) {
    deopt_caller();
  }

JRT_END


JRT_ENTRY(void, GraalRuntime::new_object_array(JavaThread* thread, Klass* array_klass, jint length))
  // Note: no handle for array_klass needed since it is not used
  // anymore after new_objArray() and no GC can happen before.
  // (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  thread->set_vm_result(obj);
  // Deoptimizing here is rare, but it stresses the deoptimization path,
  // so force a deopt under DeoptimizeALot to exercise it.
  if (DeoptimizeALot) {
    deopt_caller();
  }
JRT_END


JRT_ENTRY(void, GraalRuntime::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be at least 1");
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  thread->set_vm_result(obj);
JRT_END

JRT_ENTRY(void, GraalRuntime::unimplemented_entry(JavaThread* thread, StubID id))
  tty->print_cr("GraalRuntime::entry_for(%d) returned unimplemented entry point", id);
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the VM. This simplifies the assembly code in
// the cpu directories.
//
// We are entering here from the exception stub (via the entry method below).
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame
// method. Note: we enter in Java using a special JRT wrapper. This wrapper allows
// us to control the area where we can allow a safepoint. After we exit the
// safepoint area we can check to see if the handler we are going to return to is
// now in an nmethod that has been deoptimized. If that is the case we return the
// deopt blob's unpack_with_exception entry instead. This makes life for the
// exception blob easier because making that same check and diverting is painful
// from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

  Handle exception(thread, ex);
  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(thread, false);
    frame exception_frame = thread->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
#ifdef ASSERT
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  assert(exception->is_oop(), "just checking");
  // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    if (ExitVMOnVerifyError) vm_exit(-1);
    ShouldNotReachHere();
  }
#endif

  // Check the stack guard pages and reenable them if necessary and there is
  // enough space on the stack to do so. Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here. If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site. This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(thread);
    frame stub_frame = thread->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
    assert(caller_is_deopted(), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      // Set flag if return address is a method handle call site.
      thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method. Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation==NULL).
  address continuation = NULL;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // debugging support
    // tracing
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      int offset = pc - nm->code_begin();
      tty->print_cr("Exception <%s> (0x%x) thrown in compiled method <%s> at PC " PTR_FORMAT " [" PTR_FORMAT "+%d] for thread 0x%x",
                    exception->print_value_string(), (address)exception(), nm->method()->print_value_string(), pc, nm->code_begin(), offset, thread);
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(exception));

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    thread->set_exception_oop(NULL);
    thread->set_exception_pc(NULL);

    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    thread->set_exception_oop(exception());
    thread->set_exception_pc(pc);

    // the exception cache is used only by non-implicit exceptions
    if (continuation != NULL && !SharedRuntime::deopt_blob()->contains(continuation)) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  thread->set_vm_result(exception());
  // Set flag if return address is a method handle call site.
  thread->set_is_method_handle_return(nm->is_method_handle_return(pc));

  if (TraceExceptions) {
    ttyLocker ttyl;
    ResourceMark rm;
    tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,
                  thread, continuation, pc);
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from the exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
address GraalRuntime::exception_handler_for_pc(JavaThread* thread) {
  oop exception = thread->exception_oop();
  address pc = thread->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(ResetNoHandleMark rnhm);
  nmethod* nm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
  }
  // Back in Java mode: use no oops and DON'T safepoint

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != NULL && caller_is_deopted()) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}

JRT_ENTRY(void, GraalRuntime::graal_create_null_exception(JavaThread* thread))
  thread->set_vm_result(Exceptions::new_exception(thread, vmSymbols::java_lang_NullPointerException(), NULL)());
JRT_END

JRT_ENTRY(void, GraalRuntime::graal_create_out_of_bounds_exception(JavaThread* thread, jint index))
  char message[jintAsStringSize];
  sprintf(message, "%d", index);
  thread->set_vm_result(Exceptions::new_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message)());
JRT_END

JRT_ENTRY_NO_ASYNC(void, GraalRuntime::graal_monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock))
  if (TraceGraal >= 3) {
    char type[O_BUFLEN];
    obj->klass()->name()->as_C_string(type, O_BUFLEN);
    markOop mark = obj->mark();
    tty->print_cr("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), obj, type, mark, lock);
    tty->flush();
  }
#ifdef ASSERT
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
#endif
  Handle h_obj(thread, obj);
  assert(h_obj()->is_oop(), "must be an object");
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
  } else {
    if (UseFastLocking) {
      // When using fast locking, the compiled code has already tried the fast case
      ObjectSynchronizer::slow_enter(h_obj, lock, THREAD);
    } else {
      ObjectSynchronizer::fast_enter(h_obj, lock, false, THREAD);
    }
  }
  if (TraceGraal >= 3) {
    tty->print_cr("%s: exited locking slow case with obj=" INTPTR_FORMAT, thread->name(), obj);
  }
JRT_END


JRT_LEAF(void, GraalRuntime::graal_monitorexit(JavaThread* thread, oopDesc* obj, BasicLock* lock))
  assert(thread == JavaThread::current(), "threads must correspond");
  assert(thread->last_Java_sp(), "last_Java_sp must be set");
  // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
  EXCEPTION_MARK;

#ifdef ASSERT
  if (!obj->is_oop()) {
    ResetNoHandleMark rhm;
    nmethod* method = thread->last_frame().cb()->as_nmethod_or_null();
    if (method != NULL) {
      tty->print_cr("ERROR in monitorexit in method %s wrong obj " INTPTR_FORMAT, method->name(), obj);
    }
    thread->print_stack_on(tty);
    assert(false, "invalid lock object pointer detected");
  }
#endif

  if (UseFastLocking) {
    // When using fast locking, the compiled code has already tried the fast case
    ObjectSynchronizer::slow_exit(obj, lock, THREAD);
  } else {
    ObjectSynchronizer::fast_exit(obj, lock, THREAD);
  }
  if (TraceGraal >= 3) {
    char type[O_BUFLEN];
    obj->klass()->name()->as_C_string(type, O_BUFLEN);
    tty->print_cr("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), obj, type, obj->mark(), lock);
    tty->flush();
  }
JRT_END

JRT_ENTRY(void, GraalRuntime::graal_log_object(JavaThread* thread, oop obj, jint flags))
  bool string  = mask_bits_are_true(flags, LOG_OBJECT_STRING);
  bool address = mask_bits_are_true(flags, LOG_OBJECT_ADDRESS);
  bool newline = mask_bits_are_true(flags, LOG_OBJECT_NEWLINE);
  if (!string) {
    if (!address && obj->is_oop_or_null(true)) {
      char buf[O_BUFLEN];
      tty->print("%s@%p", obj->klass()->name()->as_C_string(buf, O_BUFLEN), obj);
    } else {
      tty->print("%p", obj);
    }
  } else {
    ResourceMark rm;
    assert(obj != NULL && java_lang_String::is_instance(obj), "must be");
    char* buf = java_lang_String::as_utf8_string(obj);
    tty->print("%s", buf);  // print verbatim; the string must not be treated as a format
  }
  if (newline) {
    tty->cr();
  }
JRT_END
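
// The flags argument is a small bitmask decoded at the top of graal_log_object.
// The authoritative LOG_OBJECT_* constants live in graalRuntime.hpp; the
// values below are an assumed layout, for illustration only:
//
//   enum {
//     LOG_OBJECT_NEWLINE = 0x01,  // emit a trailing newline
//     LOG_OBJECT_STRING  = 0x02,  // obj is a java.lang.String; print its chars
//     LOG_OBJECT_ADDRESS = 0x04   // print only the raw address, skip the type
//   };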

JRT_ENTRY(void, GraalRuntime::graal_vm_error(JavaThread* thread, oop where, oop format, jlong value))
  ResourceMark rm;
  assert(where == NULL || java_lang_String::is_instance(where), "must be");
  const char* error_msg = where == NULL ? "<internal Graal error>" : java_lang_String::as_utf8_string(where);
  char* detail_msg = NULL;
  if (format != NULL) {
    const char* buf = java_lang_String::as_utf8_string(format);
    size_t detail_msg_length = strlen(buf) * 2;
    detail_msg = (char *) NEW_RESOURCE_ARRAY(u_char, detail_msg_length);
    jio_snprintf(detail_msg, detail_msg_length, buf, value);
  }
  report_vm_error(__FILE__, __LINE__, error_msg, detail_msg);
JRT_END

JRT_ENTRY(void, GraalRuntime::graal_log_printf(JavaThread* thread, oop format, jlong val))
  ResourceMark rm;
  assert(format != NULL && java_lang_String::is_instance(format), "must be");
  char* buf = java_lang_String::as_utf8_string(format);
  tty->print(buf, val);
JRT_END

JRT_ENTRY(void, GraalRuntime::graal_log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline))
  union {
    jlong l;
    jdouble d;
    jfloat f;
  } uu;
  uu.l = value;
  switch (typeChar) {
    case 'z': tty->print(value == 0 ? "false" : "true"); break;
    case 'b': tty->print("%d", (jbyte) value); break;
    case 'c': tty->print("%c", (jchar) value); break;
    case 's': tty->print("%d", (jshort) value); break;
    case 'i': tty->print("%d", (jint) value); break;
    case 'f': tty->print("%f", uu.f); break;
    case 'j': tty->print(INT64_FORMAT, value); break;
    case 'd': tty->print("%lf", uu.d); break;
    default: assert(false, "unknown typeChar"); break;
  }
  if (newline) {
    tty->cr();
  }
JRT_END
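
// The union in graal_log_primitive reinterprets the raw jlong bit pattern
// rather than converting the value, so float and double arguments must be
// passed bit-for-bit. A hypothetical caller (jlong_cast is the standard
// HotSpot bit-copy helper from globalDefinitions.hpp):
//
//   jlong bits = jlong_cast(3.14);  // bit copy, not (jlong) 3.14
//   GraalRuntime::graal_log_primitive(thread, 'd', bits, true /* newline */);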

// JVM_InitializeGraalRuntime
JVM_ENTRY(jobject, JVM_InitializeGraalRuntime(JNIEnv *env, jclass graalclass))
  return VMToCompiler::graalRuntimePermObject();
JVM_END