comparison src/share/vm/c1/c1_LIRGenerator.cpp @ 1579:e9ff18c4ace7

Merge
author jrose
date Wed, 02 Jun 2010 22:45:42 -0700
parents c18cbe5936b8 87fc6aca31ab
children b812ff5abc73
comparing 1562:dfe27f03244a with 1579:e9ff18c4ace7
@@ -1307,11 +1307,11 @@
   LIR_Opr pre_val = new_register(T_OBJECT);
 
   __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
   if (!addr_opr->is_address()) {
     assert(addr_opr->is_register(), "must be");
-    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT));
+    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
   }
   CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
                                         info);
   __ branch(lir_cond_notEqual, T_INT, slow);
   __ branch_destination(slow->continuation());
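The hunk above emits C1's G1 pre-barrier (the snapshot-at-the-beginning, or SATB, barrier); the change itself only switches to the two-argument LIR_Address constructor, which, assuming the constructor's default displacement of zero, is equivalent to the explicit 0 it replaces. For orientation, a minimal C++ sketch of the runtime logic the generated LIR and its G1PreBarrierStub slow path correspond to; satb_marking_active and enqueue_in_satb_buffer are illustrative names, not VM symbols:

    // Illustrative only: the runtime effect of the LIR emitted above.
    if (satb_marking_active != 0) {        // __ cmp(flag_val, 0) + conditional branch
      oop pre_val = *field_addr;           // loaded in the G1PreBarrierStub slow path
      if (pre_val != NULL) {
        enqueue_in_satb_buffer(pre_val);   // hypothetical helper: log the old value
      }
    }
    // execution resumes at slow->continuation()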
@@ -1323,11 +1323,11 @@
   // If the "new_val" is a constant NULL, no barrier is necessary.
   if (new_val->is_constant() &&
       new_val->as_constant_ptr()->as_jobject() == NULL) return;
 
   if (!new_val->is_register()) {
-    LIR_Opr new_val_reg = new_pointer_register();
+    LIR_Opr new_val_reg = new_register(T_OBJECT);
     if (new_val->is_constant()) {
       __ move(new_val, new_val_reg);
     } else {
       __ leal(new_val, new_val_reg);
     }
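This hunk, and two more below, retype a temporary from new_pointer_register() to new_register(T_OBJECT). The distinction matters on 64-bit: new_pointer_register() produces a pointer-width integer register, while T_OBJECT marks the virtual register as holding an oop, so the register allocator and GC maps treat the value as a root. A paraphrase of the helper being replaced (its shape in c1_LIRGenerator.hpp of this vintage; not a verbatim copy):

    LIR_Opr new_pointer_register() {
    #ifdef _LP64
      return new_register(T_LONG);   // pointer-width integer, invisible to GC
    #else
      return new_register(T_INT);
    #endif
    }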
@@ -1335,11 +1335,11 @@
   }
   assert(new_val->is_register(), "must be a register at this point");
 
   if (addr->is_address()) {
     LIR_Address* address = addr->as_address_ptr();
-    LIR_Opr ptr = new_pointer_register();
+    LIR_Opr ptr = new_register(T_OBJECT);
     if (!address->index()->is_valid() && address->disp() == 0) {
       __ move(address->base(), ptr);
     } else {
       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
       __ leal(addr, ptr);
@@ -1348,11 +1348,10 @@
   }
   assert(addr->is_register(), "must be a register at this point");
 
   LIR_Opr xor_res = new_pointer_register();
   LIR_Opr xor_shift_res = new_pointer_register();
-
   if (TwoOperandLIRForm ) {
     __ move(addr, xor_res);
     __ logical_xor(xor_res, new_val, xor_res);
     __ move(xor_res, xor_shift_res);
     __ unsigned_shift_right(xor_shift_res,
@@ -1366,20 +1365,20 @@
                             xor_shift_res,
                             LIR_OprDesc::illegalOpr());
   }
 
   if (!new_val->is_register()) {
-    LIR_Opr new_val_reg = new_pointer_register();
+    LIR_Opr new_val_reg = new_register(T_OBJECT);
     __ leal(new_val, new_val_reg);
     new_val = new_val_reg;
   }
   assert(new_val->is_register(), "must be a register at this point");
 
   __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
 
   CodeStub* slow = new G1PostBarrierStub(addr, new_val);
-  __ branch(lir_cond_notEqual, T_INT, slow);
+  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
   __ branch_destination(slow->continuation());
 }
 
 #endif // SERIALGC
 ////////////////////////////////////////////////////////////////////////
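The post-barrier above decides whether a store is cross-region by XORing the field address with the new value and shifting right by the log of the G1 region size; only a nonzero result (combined with the non-NULL new_val filtered earlier) reaches the G1PostBarrierStub. That is also why the merged side branches on LP64_ONLY(T_LONG) NOT_LP64(T_INT): xor_shift_res is pointer-sized, so the compare and branch must use the pointer-width type on 64-bit. A plain C++ rendering of the test, assuming HeapRegion::LogOfHRGrainBytes (the log2 of the region size G1 uses for this shift):

    // Sketch of the cross-region test the LIR above computes.
    bool needs_g1_post_barrier(void* field_addr, void* new_val) {
      uintptr_t diff = (uintptr_t)field_addr ^ (uintptr_t)new_val;
      // Addresses in the same region agree on all bits above the
      // region-offset bits, so the shifted XOR is zero for same-region stores.
      return (diff >> HeapRegion::LogOfHRGrainBytes) != 0;
    }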
@@ -2369,33 +2368,41 @@
 
   // emit invoke code
   bool optimized = x->target_is_loaded() && x->target_is_final();
   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
 
+  // JSR 292
+  // Preserve the SP over MethodHandle call sites.
+  ciMethod* target = x->target();
+  if (target->is_method_handle_invoke()) {
+    info->set_is_method_handle_invoke(true);
+    __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
+  }
+
   switch (x->code()) {
     case Bytecodes::_invokestatic:
-      __ call_static(x->target(), result_register,
+      __ call_static(target, result_register,
                      SharedRuntime::get_resolve_static_call_stub(),
                      arg_list, info);
       break;
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokeinterface:
       // for final target we still produce an inline cache, in order
       // to be able to call mixed mode
       if (x->code() == Bytecodes::_invokespecial || optimized) {
-        __ call_opt_virtual(x->target(), receiver, result_register,
+        __ call_opt_virtual(target, receiver, result_register,
                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
                             arg_list, info);
       } else if (x->vtable_index() < 0) {
-        __ call_icvirtual(x->target(), receiver, result_register,
+        __ call_icvirtual(target, receiver, result_register,
                           SharedRuntime::get_resolve_virtual_call_stub(),
                           arg_list, info);
       } else {
         int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
-        __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
+        __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
       }
       break;
     case Bytecodes::_invokedynamic: {
       ciBytecodeStream bcs(x->scope()->method());
       bcs.force_bci(x->bci());
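In the invokevirtual arm above, a resolved vtable index becomes a fixed byte offset into the receiver's klass, computed once at compile time. A self-contained worked example of that arithmetic, with stand-in layout constants (the real values come from instanceKlass and vtableEntry and vary by platform):

    #include <cstdio>

    int main() {
      // Stand-in constants for illustration, not the VM's actual layout:
      const int vtable_start_offset = 28;  // instanceKlass::vtable_start_offset(), in words
      const int vtable_entry_size   = 1;   // vtableEntry::size(), words per entry
      const int word_size           = 8;   // wordSize on a 64-bit VM
      const int method_offset       = 0;   // vtableEntry::method_offset_in_bytes()
      const int vtable_index        = 5;   // x->vtable_index()

      int entry_offset  = vtable_start_offset + vtable_index * vtable_entry_size;
      int vtable_offset = entry_offset * word_size + method_offset;
      // Prints entry_offset=33 words, vtable_offset=264 bytes: the byte offset
      // from the klass base at which call_virtual loads the target method.
      printf("entry_offset=%d words, vtable_offset=%d bytes\n", entry_offset, vtable_offset);
      return 0;
    }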
@@ -2430,18 +2437,24 @@
       __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
 
       // Load target MethodHandle from CallSite object.
       __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
 
-      __ call_dynamic(x->target(), receiver, result_register,
+      __ call_dynamic(target, receiver, result_register,
                       SharedRuntime::get_resolve_opt_virtual_call_stub(),
                       arg_list, info);
       break;
     }
     default:
       ShouldNotReachHere();
       break;
+  }
+
+  // JSR 292
+  // Restore the SP after MethodHandle call sites.
+  if (target->is_method_handle_invoke()) {
+    __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
   }
 
   if (x->type()->is_float() || x->type()->is_double()) {
     // Force rounding of results from non-strictfp when in strictfp
     // scope (or when we don't know the strictness of the callee, to
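The JSR 292 additions in this merge bracket every call that might be a MethodHandle invoke: the SP is saved before the dispatch switch and restored after it. The likely reason, reading the comments (an inference, not stated in the diff), is that MethodHandle adapters may adjust the caller's SP, and the saved value lets stack walking and deoptimization locate the frame. Schematically, with emit_the_call() as a hypothetical stand-in for the whole switch:

    if (target->is_method_handle_invoke()) {
      // save SP where the MethodHandle machinery can restore it
      __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
    }
    emit_the_call();  // hypothetical stand-in for the dispatch switch above
    if (target->is_method_handle_invoke()) {
      // undo any SP adjustment made by the MethodHandle adapter chain
      __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
    }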