comparison src/cpu/x86/vm/sharedRuntime_x86_64.cpp @ 5905:2ee7dcc77c63

7145024: Crashes in ucrypto related to C2 Reviewed-by: kvn
author never
date Tue, 28 Feb 2012 10:04:01 -0800
parents bf7796b7367a
children 031df0387c09
comparing 5904:bf7796b7367a (left) with 5905:2ee7dcc77c63 (right)
1179 OopMap* map, 1179 OopMap* map,
1180 VMRegPair* in_regs, 1180 VMRegPair* in_regs,
1181 BasicType* in_sig_bt) { 1181 BasicType* in_sig_bt) {
1182 // if map is non-NULL then the code should store the values, 1182 // if map is non-NULL then the code should store the values,
1183 // otherwise it should load them. 1183 // otherwise it should load them.
1184 int handle_index = 0; 1184 int slot = arg_save_area;
1185 // Save down double word first 1185 // Save down double word first
1186 for ( int i = 0; i < total_in_args; i++) { 1186 for ( int i = 0; i < total_in_args; i++) {
1187 if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) { 1187 if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1188 int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1189 int offset = slot * VMRegImpl::stack_slot_size; 1188 int offset = slot * VMRegImpl::stack_slot_size;
1190 handle_index += 2; 1189 slot += VMRegImpl::slots_per_word;
1191 assert(handle_index <= stack_slots, "overflow"); 1190 assert(slot <= stack_slots, "overflow");
1192 if (map != NULL) { 1191 if (map != NULL) {
1193 __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister()); 1192 __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1194 } else { 1193 } else {
1195 __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset)); 1194 __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1196 } 1195 }
1197 } 1196 }
1198 if (in_regs[i].first()->is_Register() && 1197 if (in_regs[i].first()->is_Register() &&
1199 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) { 1198 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1200 int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1201 int offset = slot * VMRegImpl::stack_slot_size; 1199 int offset = slot * VMRegImpl::stack_slot_size;
1202 handle_index += 2; 1200 slot += VMRegImpl::slots_per_word;
1203 assert(handle_index <= stack_slots, "overflow");
1204 if (map != NULL) { 1201 if (map != NULL) {
1205 __ movq(Address(rsp, offset), in_regs[i].first()->as_Register()); 1202 __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1206 if (in_sig_bt[i] == T_ARRAY) { 1203 if (in_sig_bt[i] == T_ARRAY) {
1207 map->set_oop(VMRegImpl::stack2reg(slot));; 1204 map->set_oop(VMRegImpl::stack2reg(slot));;
1208 } 1205 }
1212 } 1209 }
1213 } 1210 }
1214 // Save or restore single word registers 1211 // Save or restore single word registers
1215 for ( int i = 0; i < total_in_args; i++) { 1212 for ( int i = 0; i < total_in_args; i++) {
1216 if (in_regs[i].first()->is_Register()) { 1213 if (in_regs[i].first()->is_Register()) {
1217 int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1218 int offset = slot * VMRegImpl::stack_slot_size; 1214 int offset = slot * VMRegImpl::stack_slot_size;
1219 assert(handle_index <= stack_slots, "overflow"); 1215 slot++;
1216 assert(slot <= stack_slots, "overflow");
1220 1217
1221 // Value is in an input register; we must flush it to the stack 1218 // Value is in an input register; we must flush it to the stack
1222 const Register reg = in_regs[i].first()->as_Register(); 1219 const Register reg = in_regs[i].first()->as_Register();
1223 switch (in_sig_bt[i]) { 1220 switch (in_sig_bt[i]) {
1224 case T_BOOLEAN: 1221 case T_BOOLEAN:
1239 case T_OBJECT: 1236 case T_OBJECT:
1240 default: ShouldNotReachHere(); 1237 default: ShouldNotReachHere();
1241 } 1238 }
1242 } else if (in_regs[i].first()->is_XMMRegister()) { 1239 } else if (in_regs[i].first()->is_XMMRegister()) {
1243 if (in_sig_bt[i] == T_FLOAT) { 1240 if (in_sig_bt[i] == T_FLOAT) {
1244 int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1245 int offset = slot * VMRegImpl::stack_slot_size; 1241 int offset = slot * VMRegImpl::stack_slot_size;
1246 assert(handle_index <= stack_slots, "overflow"); 1242 slot++;
1243 assert(slot <= stack_slots, "overflow");
1247 if (map != NULL) { 1244 if (map != NULL) {
1248 __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister()); 1245 __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1249 } else { 1246 } else {
1250 __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset)); 1247 __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1251 } 1248 }
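
In this hunk the word-granular handle_index bookkeeping is replaced by a single running slot counter that starts at arg_save_area and advances by exactly as many stack slots as each value occupies, so the overflow assert now compares like units (a slot count against stack_slots). The standalone sketch below contrasts the two accounting schemes; the constants and helper names are stand-ins for the VMRegImpl values, not HotSpot code.

#include <cstdio>

// Hypothetical stand-ins for the HotSpot constants referenced above.
static const int slots_per_word  = 2;  // VMRegImpl::slots_per_word on x86_64
static const int stack_slot_size = 4;  // VMRegImpl::stack_slot_size in bytes

// Old scheme: offsets derived from a word-granular handle_index, so even a
// 32-bit value consumed a whole word of save area, and a double consumed two.
int old_offset(int& handle_index, int arg_save_area, bool double_word) {
  int slot = handle_index * slots_per_word + arg_save_area;
  handle_index += double_word ? 2 : 1;
  return slot * stack_slot_size;
}

// New scheme (this change): one running slot counter, advanced by exactly the
// number of stack slots the value occupies.
int new_offset(int& slot, bool double_word) {
  int offset = slot * stack_slot_size;
  slot += double_word ? slots_per_word : 1;
  return offset;
}

int main() {
  const int arg_save_area = 4;        // first free slot, as passed by the callers
  int handle_index = 0;
  int slot = arg_save_area;

  const bool args[] = { true, false, false };   // a double followed by two ints
  for (bool dw : args) {
    std::printf("old offset %2d   new offset %2d\n",
                old_offset(handle_index, arg_save_area, dw),
                new_offset(slot, dw));
  }
  // The new counter packs the two ints into adjacent 4-byte slots, so the
  // save area is smaller and the overflow assert checks slots against slots.
  return 0;
}
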
1365 __ xorptr(tmp_reg, tmp_reg); 1362 __ xorptr(tmp_reg, tmp_reg);
1366 move_ptr(masm, tmp, body_arg); 1363 move_ptr(masm, tmp, body_arg);
1367 move32_64(masm, tmp, length_arg); 1364 move32_64(masm, tmp, length_arg);
1368 __ bind(done); 1365 __ bind(done);
1369 } 1366 }
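
The short fragment above looks like the tail of the helper that expands a Java array argument for a critical native into a (length, body) pair, zeroing both when the reference is NULL. For orientation only, here is a hypothetical view of what that convention means on the C side; the class and method names are invented, and jint is spelled out so the sketch builds without jni.h.

#include <cstdio>

// Hypothetical illustration: how a critical native for a Java method
// "static native int sum(int[] a)" might look. Real code includes <jni.h>.
typedef int jint;

extern "C" jint JavaCritical_example_Sums_sum(jint a_len, jint* a_body) {
  jint s = 0;
  for (jint i = 0; i < a_len; i++) s += a_body[i];   // a_body is the raw element data
  return s;
}

int main() {
  jint data[] = { 1, 2, 3 };
  // The wrapper generated by this file is what supplies the (length, body)
  // pair; here we just call the function directly to show the convention.
  std::printf("%d\n", JavaCritical_example_Sums_sum(3, data));
  return 0;
}
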
1367
1368
1369 class ComputeMoveOrder: public StackObj {
1370 class MoveOperation: public ResourceObj {
1371 friend class ComputeMoveOrder;
1372 private:
1373 VMRegPair _src;
1374 VMRegPair _dst;
1375 int _src_index;
1376 int _dst_index;
1377 bool _processed;
1378 MoveOperation* _next;
1379 MoveOperation* _prev;
1380
1381 static int get_id(VMRegPair r) {
1382 return r.first()->value();
1383 }
1384
1385 public:
1386 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1387 _src(src)
1388 , _src_index(src_index)
1389 , _dst(dst)
1390 , _dst_index(dst_index)
1391 , _next(NULL)
1392 , _prev(NULL)
1393 , _processed(false) {
1394 }
1395
1396 VMRegPair src() const { return _src; }
1397 int src_id() const { return get_id(src()); }
1398 int src_index() const { return _src_index; }
1399 VMRegPair dst() const { return _dst; }
1400 void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
1401 int dst_index() const { return _dst_index; }
1402 int dst_id() const { return get_id(dst()); }
1403 MoveOperation* next() const { return _next; }
1404 MoveOperation* prev() const { return _prev; }
1405 void set_processed() { _processed = true; }
1406 bool is_processed() const { return _processed; }
1407
1408 // insert
1409 void break_cycle(VMRegPair temp_register) {
1410 // create a new store following the last store
1411 // to move from the temp_register to the original
1412 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1413
1414 // break the cycle of links and insert new_store at the end
1415 // break the reverse link.
1416 MoveOperation* p = prev();
1417 assert(p->next() == this, "must be");
1418 _prev = NULL;
1419 p->_next = new_store;
1420 new_store->_prev = p;
1421
1422 // change the original store to save its value in the temp.
1423 set_dst(-1, temp_register);
1424 }
1425
1426 void link(GrowableArray<MoveOperation*>& killer) {
1427 // link this store in front of the store that it depends on
1428 MoveOperation* n = killer.at_grow(src_id(), NULL);
1429 if (n != NULL) {
1430 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1431 _next = n;
1432 n->_prev = this;
1433 }
1434 }
1435 };
1436
1437 private:
1438 GrowableArray<MoveOperation*> edges;
1439
1440 public:
1441 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1442 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1443 // Move operations where the dest is the stack can all be
1444 // scheduled first since they can't interfere with the other moves.
1445 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1446 if (in_sig_bt[i] == T_ARRAY) {
1447 c_arg--;
1448 if (out_regs[c_arg].first()->is_stack() &&
1449 out_regs[c_arg + 1].first()->is_stack()) {
1450 arg_order.push(i);
1451 arg_order.push(c_arg);
1452 } else {
1453 if (out_regs[c_arg].first()->is_stack() ||
1454 in_regs[i].first() == out_regs[c_arg].first()) {
1455 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1456 } else {
1457 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1458 }
1459 }
1460 } else if (in_sig_bt[i] == T_VOID) {
1461 arg_order.push(i);
1462 arg_order.push(c_arg);
1463 } else {
1464 if (out_regs[c_arg].first()->is_stack() ||
1465 in_regs[i].first() == out_regs[c_arg].first()) {
1466 arg_order.push(i);
1467 arg_order.push(c_arg);
1468 } else {
1469 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1470 }
1471 }
1472 }
1473 // Break any cycles in the register moves and emit them in the
1474 // proper order.
1475 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1476 for (int i = 0; i < stores->length(); i++) {
1477 arg_order.push(stores->at(i)->src_index());
1478 arg_order.push(stores->at(i)->dst_index());
1479 }
1480 }
1481
1482 // Collect all the move operations
1483 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1484 if (src.first() == dst.first()) return;
1485 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1486 }
1487
1488 // Walk the edges breaking cycles between moves. The result list
1489 // can be walked in order to produce the proper set of loads
1490 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1491 // Record which moves kill which values
1492 GrowableArray<MoveOperation*> killer;
1493 for (int i = 0; i < edges.length(); i++) {
1494 MoveOperation* s = edges.at(i);
1495 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1496 killer.at_put_grow(s->dst_id(), s, NULL);
1497 }
1498 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1499 "make sure temp isn't in the registers that are killed");
1500
1501 // create links between loads and stores
1502 for (int i = 0; i < edges.length(); i++) {
1503 edges.at(i)->link(killer);
1504 }
1505
1506 // at this point, all the move operations are chained together
1507 // in a doubly linked list. Processing it backwards finds
1508 // the beginning of the chain, forwards finds the end. If there's
1509 // a cycle it can be broken at any point, so pick an edge and walk
1510 // backward until the list ends or we end where we started.
1511 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1512 for (int e = 0; e < edges.length(); e++) {
1513 MoveOperation* s = edges.at(e);
1514 if (!s->is_processed()) {
1515 MoveOperation* start = s;
1516 // search for the beginning of the chain or cycle
1517 while (start->prev() != NULL && start->prev() != s) {
1518 start = start->prev();
1519 }
1520 if (start->prev() == s) {
1521 start->break_cycle(temp_register);
1522 }
1523 // walk the chain forward inserting to store list
1524 while (start != NULL) {
1525 stores->append(start);
1526 start->set_processed();
1527 start = start->next();
1528 }
1529 }
1530 }
1531 return stores;
1532 }
1533 };
1534
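
ComputeMoveOrder turns the parallel set of Java-to-C argument moves into a sequential schedule: every move is linked in front of the move that overwrites its source, each chain is emitted from its head, and a cycle is broken by routing one destination through a temporary register. The sketch below reproduces that linking and cycle-breaking idea over plain register names; Move, order_moves and the example registers are illustrative stand-ins for the HotSpot types, and it assumes, as the asserts above do, that each register is written by at most one move and read by at most one.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Minimal stand-in for one parallel move: dst <- src.
struct Move {
  std::string src, dst;
  Move* next = nullptr;     // the move that clobbers our source; must run after us
  Move* prev = nullptr;
  bool processed = false;
};

// Order parallel moves so every source is read before it is overwritten,
// breaking cycles through 'temp'.
std::vector<Move*> order_moves(std::vector<Move*>& moves, const std::string& temp) {
  std::map<std::string, Move*> killer;            // register -> the move writing it
  for (Move* m : moves) killer[m->dst] = m;

  for (Move* m : moves) {                         // link each move before its source's killer
    auto it = killer.find(m->src);
    if (it != killer.end() && it->second != m) { m->next = it->second; it->second->prev = m; }
  }

  std::vector<Move> spills;                       // storage for cycle-breaking moves
  spills.reserve(moves.size());
  std::vector<Move*> order;
  for (Move* s : moves) {
    if (s->processed) continue;
    Move* start = s;
    while (start->prev != nullptr && start->prev != s) start = start->prev;
    if (start->prev == s) {                       // a cycle: detour through temp
      spills.push_back(Move{temp, start->dst});   // final "dst <- temp" fix-up move
      Move* fixup = &spills.back();
      Move* last = start->prev;
      start->prev = nullptr;
      last->next = fixup;
      fixup->prev = last;
      start->dst = temp;                          // the cycle entry now targets temp
    }
    for (Move* m = start; m != nullptr; m = m->next) {
      order.push_back(m);
      m->processed = true;
    }
  }
  return order;
}

int main() {
  // A swap (rdi <-> rsi), as the rotate-by-one calling conventions can produce,
  // plus one independent move.
  std::vector<Move> in = { {"rsi", "rdi"}, {"rdi", "rsi"}, {"rdx", "rcx"} };
  std::vector<Move*> moves;
  for (Move& m : in) moves.push_back(&m);

  for (const Move* m : order_moves(moves, "rbx"))
    std::printf("%s <- %s\n", m->dst.c_str(), m->src.c_str());
  return 0;
}

For the swap in the example the schedule comes out as rbx <- rdi, rdi <- rsi, rsi <- rbx, rcx <- rdx, which is the shape of fix the rotated critical-native arguments need.
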
1370 1535
1371 // --------------------------------------------------------------------------- 1536 // ---------------------------------------------------------------------------
1372 // Generate a native wrapper for a given method. The method takes arguments 1537 // Generate a native wrapper for a given method. The method takes arguments
1373 // in the Java compiled code convention, marshals them to the native 1538 // in the Java compiled code convention, marshals them to the native
1374 // convention (handlizes oops, etc), transitions to native, makes the call, 1539 // convention (handlizes oops, etc), transitions to native, makes the call,
1486 int single_slots = 0; 1651 int single_slots = 0;
1487 for ( int i = 0; i < total_in_args; i++) { 1652 for ( int i = 0; i < total_in_args; i++) {
1488 if (in_regs[i].first()->is_Register()) { 1653 if (in_regs[i].first()->is_Register()) {
1489 const Register reg = in_regs[i].first()->as_Register(); 1654 const Register reg = in_regs[i].first()->as_Register();
1490 switch (in_sig_bt[i]) { 1655 switch (in_sig_bt[i]) {
1491 case T_ARRAY:
1492 case T_BOOLEAN: 1656 case T_BOOLEAN:
1493 case T_BYTE: 1657 case T_BYTE:
1494 case T_SHORT: 1658 case T_SHORT:
1495 case T_CHAR: 1659 case T_CHAR:
1496 case T_INT: single_slots++; break; 1660 case T_INT: single_slots++; break;
1661 case T_ARRAY:
1497 case T_LONG: double_slots++; break; 1662 case T_LONG: double_slots++; break;
1498 default: ShouldNotReachHere(); 1663 default: ShouldNotReachHere();
1499 } 1664 }
1500 } else if (in_regs[i].first()->is_XMMRegister()) { 1665 } else if (in_regs[i].first()->is_XMMRegister()) {
1501 switch (in_sig_bt[i]) { 1666 switch (in_sig_bt[i]) {
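
This is where the fix shows up in the stack-layout sizing: T_ARRAY moves from the single-slot bucket to the double-slot bucket, since for a critical native the array argument is saved as a 64-bit pointer and needs a full word of save area. A rough sketch of that accounting follows; it folds the general-purpose and XMM cases together, and the alignment step is an assumption rather than the exact HotSpot formula.

#include <cstdio>

enum BasicType { T_BOOLEAN, T_BYTE, T_SHORT, T_CHAR, T_INT,
                 T_FLOAT, T_ARRAY, T_LONG, T_DOUBLE };

int save_area_slots(const BasicType* sig, int n) {
  const int slots_per_word = 2;       // stand-in for VMRegImpl::slots_per_word
  int single_slots = 0, double_slots = 0;
  for (int i = 0; i < n; i++) {
    switch (sig[i]) {
      case T_BOOLEAN: case T_BYTE: case T_SHORT:
      case T_CHAR:    case T_INT:  case T_FLOAT:
        single_slots++; break;
      case T_ARRAY:                   // 64-bit pointer: a whole word (the fix)
      case T_LONG:    case T_DOUBLE:
        double_slots++; break;
    }
  }
  int total = double_slots * slots_per_word + single_slots;
  if (total % slots_per_word) total++;   // keep the area word-aligned (assumed here)
  return total;
}

int main() {
  const BasicType sig[] = { T_ARRAY, T_ARRAY, T_INT, T_LONG };
  std::printf("save area: %d slots\n", save_area_slots(sig, 4));  // 2+2+1+2 -> 8 after alignment
  return 0;
}
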
1688 freg_destroyed[f] = false; 1853 freg_destroyed[f] = false;
1689 } 1854 }
1690 1855
1691 #endif /* ASSERT */ 1856 #endif /* ASSERT */
1692 1857
1693 if (is_critical_native) {
1694 // The mapping of Java and C arguments passed in registers is
1695 // rotated by one, which helps when passing arguments to a regular
1696 // Java method, but for critical natives that creates a cycle which
1697 // can cause arguments to be killed before they are used. Break
1698 // the cycle by moving the first argument into a temporary
1699 // register.
1700 for (int i = 0; i < total_c_args; i++) {
1701 if (in_regs[i].first()->is_Register() &&
1702 in_regs[i].first()->as_Register() == rdi) {
1703 __ mov(rbx, rdi);
1704 in_regs[i].set1(rbx->as_VMReg());
1705 }
1706 }
1707 }
1708
1709 // This may iterate in two different directions depending on the 1858 // This may iterate in two different directions depending on the
1710 // kind of native it is. The reason is that for regular JNI natives 1859 // kind of native it is. The reason is that for regular JNI natives
1711 // the incoming and outgoing registers are offset upwards and for 1860 // the incoming and outgoing registers are offset upwards and for
1712 // critical natives they are offset down. 1861 // critical natives they are offset down.
1713 int c_arg = total_c_args - 1; 1862 GrowableArray<int> arg_order(2 * total_in_args);
1714 int stride = -1; 1863 VMRegPair tmp_vmreg;
1715 int init = total_in_args - 1; 1864 tmp_vmreg.set1(rbx->as_VMReg());
1716 if (is_critical_native) { 1865
1717 // stride forwards 1866 if (!is_critical_native) {
1718 c_arg = 0; 1867 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1719 stride = 1; 1868 arg_order.push(i);
1720 init = 0; 1869 arg_order.push(c_arg);
1721 } 1870 }
1722 for (int i = init, count = 0; count < total_in_args; i += stride, c_arg += stride, count++ ) { 1871 } else {
1872 // Compute a valid move order, using tmp_vmreg to break any cycles
1873 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
1874 }
1875
1876 int temploc = -1;
1877 for (int ai = 0; ai < arg_order.length(); ai += 2) {
1878 int i = arg_order.at(ai);
1879 int c_arg = arg_order.at(ai + 1);
1880 __ block_comment(err_msg("move %d -> %d", i, c_arg));
1881 if (c_arg == -1) {
1882 assert(is_critical_native, "should only be required for critical natives");
1883 // This arg needs to be moved to a temporary
1884 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
1885 in_regs[i] = tmp_vmreg;
1886 temploc = i;
1887 continue;
1888 } else if (i == -1) {
1889 assert(is_critical_native, "should only be required for critical natives");
1890 // Read from the temporary location
1891 assert(temploc != -1, "must be valid");
1892 i = temploc;
1893 temploc = -1;
1894 }
1723 #ifdef ASSERT 1895 #ifdef ASSERT
1724 if (in_regs[i].first()->is_Register()) { 1896 if (in_regs[i].first()->is_Register()) {
1725 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!"); 1897 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1726 } else if (in_regs[i].first()->is_XMMRegister()) { 1898 } else if (in_regs[i].first()->is_XMMRegister()) {
1727 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!"); 1899 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
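
In the rewritten loop above, the pairs produced by ComputeMoveOrder use -1 as a sentinel: a pair whose destination is -1 parks the Java argument in tmp_vmreg, and a later pair whose source is -1 reads it back (tracked through temploc). The sketch below consumes a hypothetical order list the same way, with prints standing in for the actual register moves.

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Hypothetical order for arguments whose registers form a swap: Java arg 0
  // is parked in the temp first ({0,-1}), arg 1 is moved normally, and the
  // parked value is then delivered to C arg 1 ({-1,1}).
  std::vector<std::pair<int, int>> arg_order = { {0, -1}, {1, 0}, {-1, 1}, {2, 2} };

  int temploc = -1;
  for (const auto& p : arg_order) {
    int i = p.first, c_arg = p.second;
    if (c_arg == -1) {
      // Destination sentinel: stash Java arg i in the temporary register.
      std::printf("tmp      <- java_arg[%d]\n", i);
      temploc = i;
      continue;
    } else if (i == -1) {
      // Source sentinel: the value for this C arg was stashed earlier.
      i = temploc;
      temploc = -1;
      std::printf("c_arg[%d] <- tmp        (originally java_arg[%d])\n", c_arg, i);
      continue;
    }
    std::printf("c_arg[%d] <- java_arg[%d]\n", c_arg, i);
  }
  return 0;
}
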
1777 } 1949 }
1778 } 1950 }
1779 1951
1780 // point c_arg at the first arg that is already loaded in case we 1952 // point c_arg at the first arg that is already loaded in case we
1781 // need to spill before we call out 1953 // need to spill before we call out
1782 c_arg++; 1954 int c_arg = total_c_args - total_in_args;
1783 1955
1784 // Pre-load a static method's oop into r14. Used both by locking code and 1956 // Pre-load a static method's oop into r14. Used both by locking code and
1785 // the normal JNI call code. 1957 // the normal JNI call code.
1786 if (method->is_static() && !is_critical_native) { 1958 if (method->is_static() && !is_critical_native) {
1787 1959