comparison src/share/vm/oops/instanceKlass.cpp @ 113:ba764ed4b6f2

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
author coleenp
date Sun, 13 Apr 2008 17:43:42 -0400
parents 75b0f3cb1943
children d1605aabd0a1 37f87013dfd8
comparison
110:a49a647afe9a 113:ba764ed4b6f2
1253 return false; 1253 return false;
1254 } 1254 }
1255 #endif //PRODUCT 1255 #endif //PRODUCT
1256 1256
1257 1257
1258 #ifdef ASSERT
1259 template <class T> void assert_is_in(T *p) {
1260 T heap_oop = oopDesc::load_heap_oop(p);
1261 if (!oopDesc::is_null(heap_oop)) {
1262 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1263 assert(Universe::heap()->is_in(o), "should be in heap");
1264 }
1265 }
1266 template <class T> void assert_is_in_closed_subset(T *p) {
1267 T heap_oop = oopDesc::load_heap_oop(p);
1268 if (!oopDesc::is_null(heap_oop)) {
1269 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1270 assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
1271 }
1272 }
1273 template <class T> void assert_is_in_reserved(T *p) {
1274 T heap_oop = oopDesc::load_heap_oop(p);
1275 if (!oopDesc::is_null(heap_oop)) {
1276 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1277 assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
1278 }
1279 }
1280 template <class T> void assert_nothing(T *p) {}
1281
1282 #else
1283 template <class T> void assert_is_in(T *p) {}
1284 template <class T> void assert_is_in_closed_subset(T *p) {}
1285 template <class T> void assert_is_in_reserved(T *p) {}
1286 template <class T> void assert_nothing(T *p) {}
1287 #endif // ASSERT
1288
1289 //
1290 // Macros that iterate over areas of oops, specialized on the type of
1291 // oop pointer (either narrow or wide), depending on UseCompressedOops
1292 //
1293 // Parameters are:
1294 // T - type of oop to point to (either oop or narrowOop)
1295 // start_p - starting pointer for region to iterate over
1296 // count - number of oops or narrowOops to iterate over
1297 // do_oop - action to perform on each oop (it's arbitrary C code which
1298 // makes it more efficient to put in a macro rather than making
1299 // it a template function)
1300 // assert_fn - assert function, which is a template function because performance
1301 // doesn't matter when asserts are enabled (see the expansion sketch below).
1302 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
1303 T, start_p, count, do_oop, \
1304 assert_fn) \
1305 { \
1306 T* p = (T*)(start_p); \
1307 T* const end = p + (count); \
1308 while (p < end) { \
1309 (assert_fn)(p); \
1310 do_oop; \
1311 ++p; \
1312 } \
1313 }
1314
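For orientation, a rough hand-expansion of InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, addr, len, closure->do_oop(p), assert_is_in), where addr, len, and closure are illustrative names (not part of this change) and the closure is assumed to have a do_oop(narrowOop*) overload like the closures later in this file:

  {
    narrowOop* p = (narrowOop*)(addr);
    narrowOop* const end = p + (len);
    while (p < end) {
      (assert_is_in)(p);    // heap-membership check in debug builds, empty inline in product
      closure->do_oop(p);   // the caller-supplied do_oop text, expanded with p in scope
      ++p;
    }
  }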
1315 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
1316 T, start_p, count, do_oop, \
1317 assert_fn) \
1318 { \
1319 T* const start = (T*)(start_p); \
1320 T* p = start + (count); \
1321 while (start < p) { \
1322 --p; \
1323 (assert_fn)(p); \
1324 do_oop; \
1325 } \
1326 }
1327
1328 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
1329 T, start_p, count, low, high, \
1330 do_oop, assert_fn) \
1331 { \
1332 T* const l = (T*)(low); \
1333 T* const h = (T*)(high); \
1334 assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
1335 mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
1336 "bounded region must be properly aligned"); \
1337 T* p = (T*)(start_p); \
1338 T* end = p + (count); \
1339 if (p < l) p = l; \
1340 if (end > h) end = h; \
1341 while (p < end) { \
1342 (assert_fn)(p); \
1343 do_oop; \
1344 ++p; \
1345 } \
1346 }
1347
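A worked illustration of the bounding logic above, with made-up addresses: take T = narrowOop (4 bytes), start_p = 0x1000, count = 4, low = 0x1008, high = 0x1020.

  // p   = 0x1000,  end = 0x1010   (the block's four narrowOop slots)
  // l   = 0x1008,  h   = 0x1020   (both 4-byte aligned, so the assert passes)
  // p < l      ->  p   = 0x1008
  // end <= h   ->  end stays 0x1010
  // the loop therefore visits only the two slots at 0x1008 and 0x100c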
1348
1349 // The following macros call specialized macros, passing either oop or
1350 // narrowOop as the specialization type. These test the UseCompressedOops
1351 // flag.
1352 #define InstanceKlass_OOP_ITERATE(start_p, count, \
1353 do_oop, assert_fn) \
1354 { \
1355 if (UseCompressedOops) { \
1356 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1357 start_p, count, \
1358 do_oop, assert_fn) \
1359 } else { \
1360 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
1361 start_p, count, \
1362 do_oop, assert_fn) \
1363 } \
1364 }
1365
1366 #define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
1367 do_oop, assert_fn) \
1368 { \
1369 if (UseCompressedOops) { \
1370 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1371 start_p, count, \
1372 low, high, \
1373 do_oop, assert_fn) \
1374 } else { \
1375 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
1376 start_p, count, \
1377 low, high, \
1378 do_oop, assert_fn) \
1379 } \
1380 }
1381
1382 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
1383 { \
1384 /* Compute oopmap block range. The common case \
1385 is nonstatic_oop_map_size == 1. */ \
1386 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1387 OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
1388 if (UseCompressedOops) { \
1389 while (map < end_map) { \
1390 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1391 obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
1392 do_oop, assert_fn) \
1393 ++map; \
1394 } \
1395 } else { \
1396 while (map < end_map) { \
1397 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
1398 obj->obj_field_addr<oop>(map->offset()), map->length(), \
1399 do_oop, assert_fn) \
1400 ++map; \
1401 } \
1402 } \
1403 }
1404
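As a hedged sketch of what this generates in the common single-block case (offsets are hypothetical): suppose a class has one OopMapBlock with offset() == 12 and length() == 2, and UseCompressedOops is on; inside an instanceKlass method the macro above then reduces to roughly:

  OopMapBlock* map = start_of_nonstatic_oop_maps();              // the single block: offset 12, length 2
  narrowOop* p = obj->obj_field_addr<narrowOop>(map->offset());  // address of the first reference field
  narrowOop* const end = p + map->length();                      // one past the second reference field
  while (p < end) {
    // assert_fn(p); do_oop;  -- the caller-supplied action runs once per field
    ++p;
  }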
1405 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
1406 { \
1407 OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \
1408 OopMapBlock* map = start_map + nonstatic_oop_map_size(); \
1409 if (UseCompressedOops) { \
1410 while (start_map < map) { \
1411 --map; \
1412 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \
1413 obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
1414 do_oop, assert_fn) \
1415 } \
1416 } else { \
1417 while (start_map < map) { \
1418 --map; \
1419 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \
1420 obj->obj_field_addr<oop>(map->offset()), map->length(), \
1421 do_oop, assert_fn) \
1422 } \
1423 } \
1424 }
1425
1426 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \
1427 assert_fn) \
1428 { \
1429 /* Compute oopmap block range. The common case is \
1430 nonstatic_oop_map_size == 1, so we accept the \
1431 usually non-existent extra overhead of examining \
1432 all the maps. */ \
1433 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1434 OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
1435 if (UseCompressedOops) { \
1436 while (map < end_map) { \
1437 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1438 obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
1439 low, high, \
1440 do_oop, assert_fn) \
1441 ++map; \
1442 } \
1443 } else { \
1444 while (map < end_map) { \
1445 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
1446 obj->obj_field_addr<oop>(map->offset()), map->length(), \
1447 low, high, \
1448 do_oop, assert_fn) \
1449 ++map; \
1450 } \
1451 } \
1452 }
1453
1258 void instanceKlass::follow_static_fields() { 1454 void instanceKlass::follow_static_fields() {
1259 oop* start = start_of_static_fields(); 1455 InstanceKlass_OOP_ITERATE( \
1260 oop* end = start + static_oop_field_size(); 1456 start_of_static_fields(), static_oop_field_size(), \
1261 while (start < end) { 1457 MarkSweep::mark_and_push(p), \
1262 if (*start != NULL) { 1458 assert_is_in_closed_subset)
1263 assert(Universe::heap()->is_in_closed_subset(*start),
1264 "should be in heap");
1265 MarkSweep::mark_and_push(start);
1266 }
1267 start++;
1268 }
1269 } 1459 }
1270 1460
1271 #ifndef SERIALGC 1461 #ifndef SERIALGC
1272 void instanceKlass::follow_static_fields(ParCompactionManager* cm) { 1462 void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
1273 oop* start = start_of_static_fields(); 1463 InstanceKlass_OOP_ITERATE( \
1274 oop* end = start + static_oop_field_size(); 1464 start_of_static_fields(), static_oop_field_size(), \
1275 while (start < end) { 1465 PSParallelCompact::mark_and_push(cm, p), \
1276 if (*start != NULL) { 1466 assert_is_in)
1277 assert(Universe::heap()->is_in(*start), "should be in heap");
1278 PSParallelCompact::mark_and_push(cm, start);
1279 }
1280 start++;
1281 }
1282 } 1467 }
1283 #endif // SERIALGC 1468 #endif // SERIALGC
1284 1469
1285
1286 void instanceKlass::adjust_static_fields() { 1470 void instanceKlass::adjust_static_fields() {
1287 oop* start = start_of_static_fields(); 1471 InstanceKlass_OOP_ITERATE( \
1288 oop* end = start + static_oop_field_size(); 1472 start_of_static_fields(), static_oop_field_size(), \
1289 while (start < end) { 1473 MarkSweep::adjust_pointer(p), \
1290 MarkSweep::adjust_pointer(start); 1474 assert_nothing)
1291 start++;
1292 }
1293 } 1475 }
1294 1476
1295 #ifndef SERIALGC 1477 #ifndef SERIALGC
1296 void instanceKlass::update_static_fields() { 1478 void instanceKlass::update_static_fields() {
1297 oop* const start = start_of_static_fields(); 1479 InstanceKlass_OOP_ITERATE( \
1298 oop* const beg_oop = start; 1480 start_of_static_fields(), static_oop_field_size(), \
1299 oop* const end_oop = start + static_oop_field_size(); 1481 PSParallelCompact::adjust_pointer(p), \
1300 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) { 1482 assert_nothing)
1301 PSParallelCompact::adjust_pointer(cur_oop); 1483 }
1302 } 1484
1303 } 1485 void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
1304 1486 InstanceKlass_BOUNDED_OOP_ITERATE( \
1305 void 1487 start_of_static_fields(), static_oop_field_size(), \
1306 instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) { 1488 beg_addr, end_addr, \
1307 oop* const start = start_of_static_fields(); 1489 PSParallelCompact::adjust_pointer(p), \
1308 oop* const beg_oop = MAX2((oop*)beg_addr, start); 1490 assert_nothing )
1309 oop* const end_oop = MIN2((oop*)end_addr, start + static_oop_field_size());
1310 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
1311 PSParallelCompact::adjust_pointer(cur_oop);
1312 }
1313 } 1491 }
1314 #endif // SERIALGC 1492 #endif // SERIALGC
1315 1493
1316 void instanceKlass::oop_follow_contents(oop obj) { 1494 void instanceKlass::oop_follow_contents(oop obj) {
1317 assert (obj!=NULL, "can't follow the content of NULL object"); 1495 assert(obj != NULL, "can't follow the content of NULL object");
1318 obj->follow_header(); 1496 obj->follow_header();
1319 OopMapBlock* map = start_of_nonstatic_oop_maps(); 1497 InstanceKlass_OOP_MAP_ITERATE( \
1320 OopMapBlock* end_map = map + nonstatic_oop_map_size(); 1498 obj, \
1321 while (map < end_map) { 1499 MarkSweep::mark_and_push(p), \
1322 oop* start = obj->obj_field_addr(map->offset()); 1500 assert_is_in_closed_subset)
1323 oop* end = start + map->length();
1324 while (start < end) {
1325 if (*start != NULL) {
1326 assert(Universe::heap()->is_in_closed_subset(*start),
1327 "should be in heap");
1328 MarkSweep::mark_and_push(start);
1329 }
1330 start++;
1331 }
1332 map++;
1333 }
1334 } 1501 }
1335 1502
1336 #ifndef SERIALGC 1503 #ifndef SERIALGC
1337 void instanceKlass::oop_follow_contents(ParCompactionManager* cm, 1504 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
1338 oop obj) { 1505 oop obj) {
1339 assert (obj!=NULL, "can't follow the content of NULL object"); 1506 assert(obj != NULL, "can't follow the content of NULL object");
1340 obj->follow_header(cm); 1507 obj->follow_header(cm);
1341 OopMapBlock* map = start_of_nonstatic_oop_maps(); 1508 InstanceKlass_OOP_MAP_ITERATE( \
1342 OopMapBlock* end_map = map + nonstatic_oop_map_size(); 1509 obj, \
1343 while (map < end_map) { 1510 PSParallelCompact::mark_and_push(cm, p), \
1344 oop* start = obj->obj_field_addr(map->offset()); 1511 assert_is_in)
1345 oop* end = start + map->length();
1346 while (start < end) {
1347 if (*start != NULL) {
1348 assert(Universe::heap()->is_in(*start), "should be in heap");
1349 PSParallelCompact::mark_and_push(cm, start);
1350 }
1351 start++;
1352 }
1353 map++;
1354 }
1355 } 1512 }
1356 #endif // SERIALGC 1513 #endif // SERIALGC
1357
1358 #define invoke_closure_on(start, closure, nv_suffix) { \
1359 oop obj = *(start); \
1360 if (obj != NULL) { \
1361 assert(Universe::heap()->is_in_closed_subset(obj), "should be in heap"); \
1362 (closure)->do_oop##nv_suffix(start); \
1363 } \
1364 }
1365 1514
1366 // closure's do_header() method dictates whether the given closure should be 1515 // closure's do_header() method dictates whether the given closure should be
1367 // applied to the klass ptr in the object header. 1516 // applied to the klass ptr in the object header.
1368 1517
1369 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ 1518 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
1370 \ 1519 \
1371 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, \ 1520 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, \
1372 OopClosureType* closure) { \ 1521 OopClosureType* closure) {\
1373 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \ 1522 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1374 /* header */ \ 1523 /* header */ \
1375 if (closure->do_header()) { \ 1524 if (closure->do_header()) { \
1376 obj->oop_iterate_header(closure); \ 1525 obj->oop_iterate_header(closure); \
1377 } \ 1526 } \
1378 /* instance variables */ \ 1527 InstanceKlass_OOP_MAP_ITERATE( \
1379 OopMapBlock* map = start_of_nonstatic_oop_maps(); \ 1528 obj, \
1380 OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \ 1529 SpecializationStats:: \
1381 const intx field_offset = PrefetchFieldsAhead; \ 1530 record_do_oop_call##nv_suffix(SpecializationStats::ik); \
1382 if (field_offset > 0) { \ 1531 (closure)->do_oop##nv_suffix(p), \
1383 while (map < end_map) { \ 1532 assert_is_in_closed_subset) \
1384 oop* start = obj->obj_field_addr(map->offset()); \ 1533 return size_helper(); \
1385 oop* const end = start + map->length(); \ 1534 }
1386 while (start < end) { \ 1535
1387 prefetch_beyond(start, (oop*)end, field_offset, \ 1536 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
1388 closure->prefetch_style()); \ 1537 \
1389 SpecializationStats:: \ 1538 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
1390 record_do_oop_call##nv_suffix(SpecializationStats::ik); \ 1539 OopClosureType* closure, \
1391 invoke_closure_on(start, closure, nv_suffix); \ 1540 MemRegion mr) { \
1392 start++; \ 1541 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1393 } \ 1542 if (closure->do_header()) { \
1394 map++; \ 1543 obj->oop_iterate_header(closure, mr); \
1395 } \ 1544 } \
1396 } else { \ 1545 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
1397 while (map < end_map) { \ 1546 obj, mr.start(), mr.end(), \
1398 oop* start = obj->obj_field_addr(map->offset()); \ 1547 (closure)->do_oop##nv_suffix(p), \
1399 oop* const end = start + map->length(); \ 1548 assert_is_in_closed_subset) \
1400 while (start < end) { \ 1549 return size_helper(); \
1401 SpecializationStats:: \
1402 record_do_oop_call##nv_suffix(SpecializationStats::ik); \
1403 invoke_closure_on(start, closure, nv_suffix); \
1404 start++; \
1405 } \
1406 map++; \
1407 } \
1408 } \
1409 return size_helper(); \
1410 }
1411
1412 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
1413 \
1414 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
1415 OopClosureType* closure, \
1416 MemRegion mr) { \
1417 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
1418 /* header */ \
1419 if (closure->do_header()) { \
1420 obj->oop_iterate_header(closure, mr); \
1421 } \
1422 /* instance variables */ \
1423 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1424 OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
1425 HeapWord* bot = mr.start(); \
1426 HeapWord* top = mr.end(); \
1427 oop* start = obj->obj_field_addr(map->offset()); \
1428 HeapWord* end = MIN2((HeapWord*)(start + map->length()), top); \
1429 /* Find the first map entry that extends onto mr. */ \
1430 while (map < end_map && end <= bot) { \
1431 map++; \
1432 start = obj->obj_field_addr(map->offset()); \
1433 end = MIN2((HeapWord*)(start + map->length()), top); \
1434 } \
1435 if (map != end_map) { \
1436 /* The current map's end is past the start of "mr". Skip up to the first \
1437 entry on "mr". */ \
1438 while ((HeapWord*)start < bot) { \
1439 start++; \
1440 } \
1441 const intx field_offset = PrefetchFieldsAhead; \
1442 for (;;) { \
1443 if (field_offset > 0) { \
1444 while ((HeapWord*)start < end) { \
1445 prefetch_beyond(start, (oop*)end, field_offset, \
1446 closure->prefetch_style()); \
1447 invoke_closure_on(start, closure, nv_suffix); \
1448 start++; \
1449 } \
1450 } else { \
1451 while ((HeapWord*)start < end) { \
1452 invoke_closure_on(start, closure, nv_suffix); \
1453 start++; \
1454 } \
1455 } \
1456 /* Go to the next map. */ \
1457 map++; \
1458 if (map == end_map) { \
1459 break; \
1460 } \
1461 /* Otherwise, */ \
1462 start = obj->obj_field_addr(map->offset()); \
1463 if ((HeapWord*)start >= top) { \
1464 break; \
1465 } \
1466 end = MIN2((HeapWord*)(start + map->length()), top); \
1467 } \
1468 } \
1469 return size_helper(); \
1470 } 1550 }
1471 1551
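For orientation, a rough expansion of the new InstanceKlass_OOP_OOP_ITERATE_DEFN above with nv_suffix = _v and a plain OopClosure (a hypothetical instantiation; the SpecializationStats bookkeeping is elided):

  int instanceKlass::oop_oop_iterate_v(oop obj, OopClosure* closure) {
    if (closure->do_header()) {
      obj->oop_iterate_header(closure);   // also hand the header's klass pointer to the closure
    }
    InstanceKlass_OOP_MAP_ITERATE(obj,
      closure->do_oop_v(p),               // runs once per instance oop field, p in scope
      assert_is_in_closed_subset)
    return size_helper();
  }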
1472 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN) 1552 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1473 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN) 1553 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1474 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) 1554 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1475 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) 1555 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1476 1556
1477
1478 void instanceKlass::iterate_static_fields(OopClosure* closure) { 1557 void instanceKlass::iterate_static_fields(OopClosure* closure) {
1479 oop* start = start_of_static_fields(); 1558 InstanceKlass_OOP_ITERATE( \
1480 oop* end = start + static_oop_field_size(); 1559 start_of_static_fields(), static_oop_field_size(), \
1481 while (start < end) { 1560 closure->do_oop(p), \
1482 assert(Universe::heap()->is_in_reserved_or_null(*start), "should be in heap"); 1561 assert_is_in_reserved)
1483 closure->do_oop(start);
1484 start++;
1485 }
1486 } 1562 }
1487 1563
1488 void instanceKlass::iterate_static_fields(OopClosure* closure, 1564 void instanceKlass::iterate_static_fields(OopClosure* closure,
1489 MemRegion mr) { 1565 MemRegion mr) {
1490 oop* start = start_of_static_fields(); 1566 InstanceKlass_BOUNDED_OOP_ITERATE( \
1491 oop* end = start + static_oop_field_size(); 1567 start_of_static_fields(), static_oop_field_size(), \
1492 // I gather that the static fields of reference types come first, 1568 mr.start(), mr.end(), \
1493 // hence the name of "oop_field_size", and that is what makes this safe. 1569 (closure)->do_oop_v(p), \
1494 assert((intptr_t)mr.start() == 1570 assert_is_in_closed_subset)
1495 align_size_up((intptr_t)mr.start(), sizeof(oop)) && 1571 }
1496 (intptr_t)mr.end() == align_size_up((intptr_t)mr.end(), sizeof(oop)),
1497 "Memregion must be oop-aligned.");
1498 if ((HeapWord*)start < mr.start()) start = (oop*)mr.start();
1499 if ((HeapWord*)end > mr.end()) end = (oop*)mr.end();
1500 while (start < end) {
1501 invoke_closure_on(start, closure,_v);
1502 start++;
1503 }
1504 }
1505
1506 1572
1507 int instanceKlass::oop_adjust_pointers(oop obj) { 1573 int instanceKlass::oop_adjust_pointers(oop obj) {
1508 int size = size_helper(); 1574 int size = size_helper();
1509 1575 InstanceKlass_OOP_MAP_ITERATE( \
1510 // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1. 1576 obj, \
1511 OopMapBlock* map = start_of_nonstatic_oop_maps(); 1577 MarkSweep::adjust_pointer(p), \
1512 OopMapBlock* const end_map = map + nonstatic_oop_map_size(); 1578 assert_is_in)
1513 // Iterate over oopmap blocks
1514 while (map < end_map) {
1515 // Compute oop range for this block
1516 oop* start = obj->obj_field_addr(map->offset());
1517 oop* end = start + map->length();
1518 // Iterate over oops
1519 while (start < end) {
1520 assert(Universe::heap()->is_in_or_null(*start), "should be in heap");
1521 MarkSweep::adjust_pointer(start);
1522 start++;
1523 }
1524 map++;
1525 }
1526
1527 obj->adjust_header(); 1579 obj->adjust_header();
1528 return size; 1580 return size;
1529 } 1581 }
1530 1582
1531 #ifndef SERIALGC 1583 #ifndef SERIALGC
1532 void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) { 1584 void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
1533 assert(!pm->depth_first(), "invariant"); 1585 assert(!pm->depth_first(), "invariant");
1534 // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1. 1586 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1535 OopMapBlock* start_map = start_of_nonstatic_oop_maps(); 1587 obj, \
1536 OopMapBlock* map = start_map + nonstatic_oop_map_size(); 1588 if (PSScavenge::should_scavenge(p)) { \
1537 1589 pm->claim_or_forward_breadth(p); \
1538 // Iterate over oopmap blocks 1590 }, \
1539 while (start_map < map) { 1591 assert_nothing )
1540 --map;
1541 // Compute oop range for this block
1542 oop* start = obj->obj_field_addr(map->offset());
1543 oop* curr = start + map->length();
1544 // Iterate over oops
1545 while (start < curr) {
1546 --curr;
1547 if (PSScavenge::should_scavenge(*curr)) {
1548 assert(Universe::heap()->is_in(*curr), "should be in heap");
1549 pm->claim_or_forward_breadth(curr);
1550 }
1551 }
1552 }
1553 } 1592 }
1554 1593
1555 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 1594 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
1556 assert(pm->depth_first(), "invariant"); 1595 assert(pm->depth_first(), "invariant");
1557 // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1. 1596 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1558 OopMapBlock* start_map = start_of_nonstatic_oop_maps(); 1597 obj, \
1559 OopMapBlock* map = start_map + nonstatic_oop_map_size(); 1598 if (PSScavenge::should_scavenge(p)) { \
1560 1599 pm->claim_or_forward_depth(p); \
1561 // Iterate over oopmap blocks 1600 }, \
1562 while (start_map < map) { 1601 assert_nothing )
1563 --map;
1564 // Compute oop range for this block
1565 oop* start = obj->obj_field_addr(map->offset());
1566 oop* curr = start + map->length();
1567 // Iterate over oops
1568 while (start < curr) {
1569 --curr;
1570 if (PSScavenge::should_scavenge(*curr)) {
1571 assert(Universe::heap()->is_in(*curr), "should be in heap");
1572 pm->claim_or_forward_depth(curr);
1573 }
1574 }
1575 }
1576 } 1602 }
1577 1603
1578 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { 1604 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
1579 // Compute oopmap block range. The common case is nonstatic_oop_map_size==1. 1605 InstanceKlass_OOP_MAP_ITERATE( \
1580 OopMapBlock* map = start_of_nonstatic_oop_maps(); 1606 obj, \
1581 OopMapBlock* const end_map = map + nonstatic_oop_map_size(); 1607 PSParallelCompact::adjust_pointer(p), \
1582 // Iterate over oopmap blocks 1608 assert_nothing)
1583 while (map < end_map) {
1584 // Compute oop range for this oopmap block.
1585 oop* const map_start = obj->obj_field_addr(map->offset());
1586 oop* const beg_oop = map_start;
1587 oop* const end_oop = map_start + map->length();
1588 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
1589 PSParallelCompact::adjust_pointer(cur_oop);
1590 }
1591 ++map;
1592 }
1593
1594 return size_helper(); 1609 return size_helper();
1595 } 1610 }
1596 1611
1597 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj, 1612 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
1598 HeapWord* beg_addr, HeapWord* end_addr) { 1613 HeapWord* beg_addr, HeapWord* end_addr) {
1599 // Compute oopmap block range. The common case is nonstatic_oop_map_size==1. 1614 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
1600 OopMapBlock* map = start_of_nonstatic_oop_maps(); 1615 obj, beg_addr, end_addr, \
1601 OopMapBlock* const end_map = map + nonstatic_oop_map_size(); 1616 PSParallelCompact::adjust_pointer(p), \
1602 // Iterate over oopmap blocks 1617 assert_nothing)
1603 while (map < end_map) {
1604 // Compute oop range for this oopmap block.
1605 oop* const map_start = obj->obj_field_addr(map->offset());
1606 oop* const beg_oop = MAX2((oop*)beg_addr, map_start);
1607 oop* const end_oop = MIN2((oop*)end_addr, map_start + map->length());
1608 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
1609 PSParallelCompact::adjust_pointer(cur_oop);
1610 }
1611 ++map;
1612 }
1613
1614 return size_helper(); 1618 return size_helper();
1615 } 1619 }
1616 1620
1617 void instanceKlass::copy_static_fields(PSPromotionManager* pm) { 1621 void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
1618 assert(!pm->depth_first(), "invariant"); 1622 assert(!pm->depth_first(), "invariant");
1619 // Compute oop range 1623 InstanceKlass_OOP_ITERATE( \
1620 oop* start = start_of_static_fields(); 1624 start_of_static_fields(), static_oop_field_size(), \
1621 oop* end = start + static_oop_field_size(); 1625 if (PSScavenge::should_scavenge(p)) { \
1622 // Iterate over oops 1626 pm->claim_or_forward_breadth(p); \
1623 while (start < end) { 1627 }, \
1624 if (PSScavenge::should_scavenge(*start)) { 1628 assert_nothing )
1625 assert(Universe::heap()->is_in(*start), "should be in heap");
1626 pm->claim_or_forward_breadth(start);
1627 }
1628 start++;
1629 }
1630 } 1629 }
1631 1630
1632 void instanceKlass::push_static_fields(PSPromotionManager* pm) { 1631 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
1633 assert(pm->depth_first(), "invariant"); 1632 assert(pm->depth_first(), "invariant");
1634 // Compute oop range 1633 InstanceKlass_OOP_ITERATE( \
1635 oop* start = start_of_static_fields(); 1634 start_of_static_fields(), static_oop_field_size(), \
1636 oop* end = start + static_oop_field_size(); 1635 if (PSScavenge::should_scavenge(p)) { \
1637 // Iterate over oops 1636 pm->claim_or_forward_depth(p); \
1638 while (start < end) { 1637 }, \
1639 if (PSScavenge::should_scavenge(*start)) { 1638 assert_nothing )
1640 assert(Universe::heap()->is_in(*start), "should be in heap");
1641 pm->claim_or_forward_depth(start);
1642 }
1643 start++;
1644 }
1645 } 1639 }
1646 1640
1647 void instanceKlass::copy_static_fields(ParCompactionManager* cm) { 1641 void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
1648 // Compute oop range 1642 InstanceKlass_OOP_ITERATE( \
1649 oop* start = start_of_static_fields(); 1643 start_of_static_fields(), static_oop_field_size(), \
1650 oop* end = start + static_oop_field_size(); 1644 PSParallelCompact::adjust_pointer(p), \
1651 // Iterate over oops 1645 assert_is_in)
1652 while (start < end) {
1653 if (*start != NULL) {
1654 assert(Universe::heap()->is_in(*start), "should be in heap");
1655 // *start = (oop) cm->summary_data()->calc_new_pointer(*start);
1656 PSParallelCompact::adjust_pointer(start);
1657 }
1658 start++;
1659 }
1660 } 1646 }
1661 #endif // SERIALGC 1647 #endif // SERIALGC
1662 1648
1663 // This klass is alive but the implementor link is not followed/updated. 1649 // This klass is alive but the implementor link is not followed/updated.
1664 // Subklass and sibling links are handled by Klass::follow_weak_klass_links 1650 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
1685 } 1671 }
1686 } 1672 }
1687 Klass::follow_weak_klass_links(is_alive, keep_alive); 1673 Klass::follow_weak_klass_links(is_alive, keep_alive);
1688 } 1674 }
1689 1675
1690
1691 void instanceKlass::remove_unshareable_info() { 1676 void instanceKlass::remove_unshareable_info() {
1692 Klass::remove_unshareable_info(); 1677 Klass::remove_unshareable_info();
1693 init_implementor(); 1678 init_implementor();
1694 } 1679 }
1695 1680
1696
1697 static void clear_all_breakpoints(methodOop m) { 1681 static void clear_all_breakpoints(methodOop m) {
1698 m->clear_all_breakpoints(); 1682 m->clear_all_breakpoints();
1699 } 1683 }
1700
1701 1684
1702 void instanceKlass::release_C_heap_structures() { 1685 void instanceKlass::release_C_heap_structures() {
1703 // Deallocate oop map cache 1686 // Deallocate oop map cache
1704 if (_oop_map_cache != NULL) { 1687 if (_oop_map_cache != NULL) {
1705 delete _oop_map_cache; 1688 delete _oop_map_cache;
2045 st->print("a "); 2028 st->print("a ");
2046 name()->print_value_on(st); 2029 name()->print_value_on(st);
2047 obj->print_address_on(st); 2030 obj->print_address_on(st);
2048 } 2031 }
2049 2032
2050 #endif 2033 #endif // ndef PRODUCT
2051 2034
2052 const char* instanceKlass::internal_name() const { 2035 const char* instanceKlass::internal_name() const {
2053 return external_name(); 2036 return external_name();
2054 } 2037 }
2055 2038
2056
2057
2058 // Verification 2039 // Verification
2059 2040
2060 class VerifyFieldClosure: public OopClosure { 2041 class VerifyFieldClosure: public OopClosure {
2061 public: 2042 protected:
2062 void do_oop(oop* p) { 2043 template <class T> void do_oop_work(T* p) {
2063 guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap"); 2044 guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
2064 if (!(*p)->is_oop_or_null()) { 2045 oop obj = oopDesc::load_decode_heap_oop(p);
2065 tty->print_cr("Failed: %p -> %p",p,(address)*p); 2046 if (!obj->is_oop_or_null()) {
2047 tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
2066 Universe::print(); 2048 Universe::print();
2067 guarantee(false, "boom"); 2049 guarantee(false, "boom");
2068 } 2050 }
2069 } 2051 }
2052 public:
2053 virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); }
2054 virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
2070 }; 2055 };
2071
2072 2056
2073 void instanceKlass::oop_verify_on(oop obj, outputStream* st) { 2057 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
2074 Klass::oop_verify_on(obj, st); 2058 Klass::oop_verify_on(obj, st);
2075 VerifyFieldClosure blk; 2059 VerifyFieldClosure blk;
2076 oop_oop_iterate(obj, &blk); 2060 oop_oop_iterate(obj, &blk);
2108 OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); 2092 OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
2109 guarantee(map->offset() == offset && map->length() == extra, "just checking"); 2093 guarantee(map->offset() == offset && map->length() == extra, "just checking");
2110 } 2094 }
2111 } 2095 }
2112 2096
2113 #endif 2097 #endif // ndef PRODUCT
2114 2098
2115 2099 // JNIid class for jfieldIDs only
2116 /* JNIid class for jfieldIDs only */ 2100 // Note to reviewers:
2117 JNIid::JNIid(klassOop holder, int offset, JNIid* next) { 2101 // These JNI functions are just moved over to column 1 and not changed
2118 _holder = holder; 2102 // in the compressed oops workspace.
2119 _offset = offset; 2103 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
2120 _next = next; 2104 _holder = holder;
2121 debug_only(_is_static_field_id = false;) 2105 _offset = offset;
2122 } 2106 _next = next;
2123 2107 debug_only(_is_static_field_id = false;)
2124 2108 }
2125 JNIid* JNIid::find(int offset) { 2109
2126 JNIid* current = this; 2110
2127 while (current != NULL) { 2111 JNIid* JNIid::find(int offset) {
2128 if (current->offset() == offset) return current; 2112 JNIid* current = this;
2129 current = current->next(); 2113 while (current != NULL) {
2130 } 2114 if (current->offset() == offset) return current;
2131 return NULL; 2115 current = current->next();
2132 } 2116 }
2117 return NULL;
2118 }
2133 2119
2134 void JNIid::oops_do(OopClosure* f) { 2120 void JNIid::oops_do(OopClosure* f) {
2135 for (JNIid* cur = this; cur != NULL; cur = cur->next()) { 2121 for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2136 f->do_oop(cur->holder_addr()); 2122 f->do_oop(cur->holder_addr());
2137 } 2123 }
2138 } 2124 }
2139 2125
2140 void JNIid::deallocate(JNIid* current) { 2126 void JNIid::deallocate(JNIid* current) {
2141 while (current != NULL) { 2127 while (current != NULL) {
2142 JNIid* next = current->next(); 2128 JNIid* next = current->next();
2143 delete current; 2129 delete current;
2144 current = next; 2130 current = next;
2145 } 2131 }
2146 } 2132 }
2147 2133
2148 2134
2149 void JNIid::verify(klassOop holder) { 2135 void JNIid::verify(klassOop holder) {
2150 int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields(); 2136 int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
2151 int end_field_offset; 2137 int end_field_offset;
2152 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize); 2138 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
2153 2139
2154 JNIid* current = this; 2140 JNIid* current = this;
2155 while (current != NULL) { 2141 while (current != NULL) {
2156 guarantee(current->holder() == holder, "Invalid klass in JNIid"); 2142 guarantee(current->holder() == holder, "Invalid klass in JNIid");
2157 #ifdef ASSERT
2158 int o = current->offset();
2159 if (current->is_static_field_id()) {
2160 guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
2161 }
2162 #endif
2163 current = current->next();
2164 }
2165 }
2166
2167
2168 #ifdef ASSERT 2143 #ifdef ASSERT
2169 void instanceKlass::set_init_state(ClassState state) { 2144 int o = current->offset();
2170 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state) 2145 if (current->is_static_field_id()) {
2171 : (_init_state < state); 2146 guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
2172 assert(good_state || state == allocated, "illegal state transition"); 2147 }
2173 _init_state = state; 2148 #endif
2174 } 2149 current = current->next();
2150 }
2151 }
2152
2153
2154 #ifdef ASSERT
2155 void instanceKlass::set_init_state(ClassState state) {
2156 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
2157 : (_init_state < state);
2158 assert(good_state || state == allocated, "illegal state transition");
2159 _init_state = state;
2160 }
2175 #endif 2161 #endif
2176 2162
2177 2163
2178 // RedefineClasses() support for previous versions: 2164 // RedefineClasses() support for previous versions:
2179 2165
2180 // Add an information node that contains weak references to the 2166 // Add an information node that contains weak references to the
2181 // interesting parts of the previous version of the_class. 2167 // interesting parts of the previous version of the_class.
2182 void instanceKlass::add_previous_version(instanceKlassHandle ikh, 2168 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2183 BitMap * emcp_methods, int emcp_method_count) { 2169 BitMap* emcp_methods, int emcp_method_count) {
2184 assert(Thread::current()->is_VM_thread(), 2170 assert(Thread::current()->is_VM_thread(),
2185 "only VMThread can add previous versions"); 2171 "only VMThread can add previous versions");
2186 2172
2187 if (_previous_versions == NULL) { 2173 if (_previous_versions == NULL) {
2188 // This is the first previous version so make some space. 2174 // This is the first previous version so make some space.
2189 // Start with 2 elements under the assumption that the class 2175 // Start with 2 elements under the assumption that the class
2190 // won't be redefined much. 2176 // won't be redefined much.