comparison src/share/vm/gc_interface/collectedHeap.inline.hpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 52b4284cb496 ee019285a52c
children
comparison
equal deleted inserted replaced
20184:84105dcdb05b 20804:7848fc12602b
// Convenience wrapper: adapts a plain OopClosure to the heap's extended
// oop_iterate entry point via NoHeaderExtendedOopClosure. NOTE(review):
// presumably the "no header" variant visits only oop fields and skips the
// object header — confirm against NoHeaderExtendedOopClosure's definition.
239 inline void CollectedHeap::oop_iterate_no_header(OopClosure* cl) { 239 inline void CollectedHeap::oop_iterate_no_header(OopClosure* cl) {
240 NoHeaderExtendedOopClosure no_header_cl(cl); 240 NoHeaderExtendedOopClosure no_header_cl(cl);
241 oop_iterate(&no_header_cl); 241 oop_iterate(&no_header_cl);
242 } 242 }
243 243
244
// Align 'addr' up to 'alignment_in_bytes', filling the resulting gap
// [addr, new_addr) with a dummy (filler) object so the heap remains
// walkable. Returns:
//   - 'addr' unchanged if no extra alignment is required (alignment is
//     already covered by ObjectAlignmentInBytes, or addr is aligned),
//   - the aligned address on success, or
//   - NULL if the aligned address would not lie strictly below 'end'.
245 inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
246 HeapWord* end,
247 unsigned short alignment_in_bytes) {
// Requests no stricter than the default object alignment need no work.
248 if (alignment_in_bytes <= ObjectAlignmentInBytes) {
249 return addr;
250 }
251
252 assert(is_ptr_aligned(addr, HeapWordSize),
253 err_msg("Address " PTR_FORMAT " is not properly aligned.", p2i(addr)));
254 assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
255 err_msg("Alignment size %u is incorrect.", alignment_in_bytes));
256
// 'padding' is measured in HeapWords (pointer_delta of HeapWord*).
257 HeapWord* new_addr = (HeapWord*) align_pointer_up(addr, alignment_in_bytes);
258 size_t padding = pointer_delta(new_addr, addr);
259
260 if (padding == 0) {
261 return addr;
262 }
263
// The gap must be large enough to hold a filler object; if it is not,
// skip forward one full alignment unit (converted from bytes to words)
// so the padded gap can be filled. The assert encodes the assumption
// that one alignment unit is at least the minimum fill size.
264 if (padding < CollectedHeap::min_fill_size()) {
265 padding += alignment_in_bytes / HeapWordSize;
266 assert(padding >= CollectedHeap::min_fill_size(),
267 err_msg("alignment_in_bytes %u is expect to be larger "
268 "than the minimum object size", alignment_in_bytes));
269 new_addr = addr + padding;
270 }
271
272 assert(new_addr > addr, err_msg("Unexpected arithmetic overflow "
273 PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr)));
// Only usable if the aligned address still leaves room below 'end';
// on success plant the filler object over the skipped words so heap
// iteration does not trip over an unformatted gap.
274 if(new_addr < end) {
275 CollectedHeap::fill_with_object(addr, padding);
276 return new_addr;
277 } else {
278 return NULL;
279 }
280 }
281
244 #ifndef PRODUCT 282 #ifndef PRODUCT
245 283
246 inline bool 284 inline bool
247 CollectedHeap::promotion_should_fail(volatile size_t* count) { 285 CollectedHeap::promotion_should_fail(volatile size_t* count) {
248 // Access to count is not atomic; the value does not have to be exact. 286 // Access to count is not atomic; the value does not have to be exact.