comparison src/share/vm/memory/resourceArea.hpp @ 6882:716c64bda5ba

7199092: NMT: NMT needs to deal overlapped virtual memory ranges
Summary: Enhanced virtual memory tracking to track committed regions as well as reserved regions, so NMT can now generate a virtual memory map.
Reviewed-by: acorn, coleenp
author zgu
date Fri, 19 Oct 2012 21:40:07 -0400
parents 7b5885dadbdc
children f34d701e952e
comparison: 6879:8ebcedb7604d vs 6882:716c64bda5ba
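
Not taken from the changeset: a minimal standalone sketch of the idea in the summary above, with each reserved virtual memory range carrying the committed sub-ranges inside it, from which a virtual memory map can be printed. All names here (CommittedRange, ReservedRange, print_map) are illustrative assumptions, not NMT's actual API.

// Standalone illustrative sketch; not HotSpot/NMT code.
#include <cstdio>
#include <cstdint>
#include <cstddef>
#include <vector>

struct CommittedRange {
  uintptr_t base;
  size_t    size;
};

struct ReservedRange {
  uintptr_t base;
  size_t    size;
  std::vector<CommittedRange> committed;  // committed pieces inside the reservation
};

// Print one reserved range followed by its committed sub-ranges, roughly the
// shape of the "virtual memory map" the summary mentions.
void print_map(const ReservedRange& r) {
  std::printf("[0x%zx - 0x%zx] reserved %zu bytes\n",
              (size_t)r.base, (size_t)(r.base + r.size), r.size);
  for (const CommittedRange& c : r.committed) {
    std::printf("  [0x%zx - 0x%zx] committed %zu bytes\n",
                (size_t)c.base, (size_t)(c.base + c.size), c.size);
  }
}

int main() {
  ReservedRange heap = { 0x10000000, 0x4000000, {} };   // hypothetical 64 MB reservation
  heap.committed.push_back({ heap.base, 0x100000 });    // 1 MB committed at the start
  print_map(heap);
  return 0;
}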
@@ -125,19 +125,25 @@
   }
 
   void reset_to_mark() {
     if (UseMallocOnly) free_malloced_objects();
 
-    if( _chunk->next() )        // Delete later chunks
+    if( _chunk->next() ) {      // Delete later chunks
+      // reset arena size before delete chunks. Otherwise, the total
+      // arena size could exceed total chunk size
+      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
+      _area->set_size_in_bytes(size_in_bytes());
       _chunk->next_chop();
+    } else {
+      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
+    }
     _area->_chunk = _chunk;     // Roll back arena to saved chunk
     _area->_hwm = _hwm;
     _area->_max = _max;
 
     // clear out this chunk (to detect allocation bugs)
     if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
-    _area->set_size_in_bytes(size_in_bytes());
   }
 
   ~ResourceMark() {
     assert( _area->_nesting > 0, "must stack allocate RMs" );
     debug_only(_area->_nesting--;)
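
Not part of the patch: a standalone toy model (plain C++, not HotSpot code) of the accounting invariant the new asserts check. The arena's reported size has to be rolled back to the value saved at the mark before the chunks allocated after the mark are freed; otherwise the reported size would momentarily exceed the total size of the chunks that still exist. All names here (ToyArena, ToyMark) are illustrative assumptions.

// Standalone toy model of the size-accounting invariant; not HotSpot code.
#include <cassert>
#include <cstddef>
#include <vector>

struct ToyArena {
  std::vector<size_t> chunk_sizes;   // one entry per allocated chunk
  size_t accounted_bytes = 0;        // what size_in_bytes() would report

  void grow(size_t bytes) {          // models allocating a new chunk
    chunk_sizes.push_back(bytes);
    accounted_bytes += bytes;
  }
};

struct ToyMark {
  ToyArena* area;
  size_t saved_chunks;
  size_t saved_bytes;                // models the mark's saved size_in_bytes()

  explicit ToyMark(ToyArena* a)
      : area(a),
        saved_chunks(a->chunk_sizes.size()),
        saved_bytes(a->accounted_bytes) {}

  void reset_to_mark() {
    if (area->chunk_sizes.size() > saved_chunks) {
      // Roll the accounted size back first (mirrors set_size_in_bytes()
      // before next_chop() in the hunk above) ...
      assert(area->accounted_bytes > saved_bytes);
      area->accounted_bytes = saved_bytes;
      // ... then drop the chunks allocated after the mark.
      area->chunk_sizes.resize(saved_chunks);
    } else {
      assert(area->accounted_bytes == saved_bytes);
    }
  }
};

int main() {
  ToyArena arena;
  arena.grow(4096);                  // initial chunk
  ToyMark mark(&arena);
  arena.grow(8192);                  // allocation after the mark adds a chunk
  mark.reset_to_mark();              // accounted size and chunk list both return to the mark
  assert(arena.accounted_bytes == 4096 && arena.chunk_sizes.size() == 1);
  return 0;
}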
@@ -217,19 +223,25 @@
   }
 
   void reset_to_mark() {
     if (UseMallocOnly) free_malloced_objects();
 
-    if( _chunk->next() )        // Delete later chunks
+    if( _chunk->next() ) {      // Delete later chunks
+      // reset arena size before delete chunks. Otherwise, the total
+      // arena size could exceed total chunk size
+      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
+      _area->set_size_in_bytes(size_in_bytes());
       _chunk->next_chop();
+    } else {
+      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
+    }
     _area->_chunk = _chunk;     // Roll back arena to saved chunk
     _area->_hwm = _hwm;
     _area->_max = _max;
 
     // clear out this chunk (to detect allocation bugs)
     if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
-    _area->set_size_in_bytes(size_in_bytes());
   }
 
   ~DeoptResourceMark() {
     assert( _area->_nesting > 0, "must stack allocate RMs" );
     debug_only(_area->_nesting--;)