Mercurial > hg > truffle
annotate src/share/vm/interpreter/oopMapCache.cpp @ 20543:e7d0505c8a30
8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks. This is because they do not touch that memory at all, so the operating system does not actually commit these pages. The fix is to, if the initialization value of the data structures matches the default value of just committed memory (=0), do not do anything.
Reviewed-by: jwilhelm, brutisso
author | tschatzl |
---|---|
date | Fri, 10 Oct 2014 15:51:58 +0200 |
parents | c204e2044c29 |
children |
rev | line source |
---|---|
0 | 1 /* |
17937
78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
6725
diff
changeset
|
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1489
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1489
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1489
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "interpreter/oopMapCache.hpp" | |
27 #include "memory/allocation.inline.hpp" | |
28 #include "memory/resourceArea.hpp" | |
29 #include "oops/oop.inline.hpp" | |
30 #include "prims/jvmtiRedefineClassesTrace.hpp" | |
31 #include "runtime/handles.inline.hpp" | |
32 #include "runtime/signature.hpp" | |
0 | 33 |
17937
78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
6725
diff
changeset
|
34 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC |
78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
6725
diff
changeset
|
35 |
// A cache entry for the oop map of one (method, bci) pair. Entries live in
// the OopMapCache hash table; a mask too large to fit inline is allocated
// on the C heap (see allocate_bit_mask/deallocate_bit_mask).
class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 protected:
  // Initialization: computes and stores the oop map for (method, bci)
  void fill(methodHandle method, int bci);
  // fills the bit mask for native calls (oops are among the parameters only)
  void fill_for_native(methodHandle method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

 private:
  void allocate_bit_mask();   // allocates the bit mask on C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from C heap if necessary
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
#ifdef ASSERT
    // Cache entries own their mask on the C heap, unlike plain
    // InterpreterOopMaps which resource-allocate it (see resource_copy).
    _resource_allocate_bit_mask = false;
#endif
  }
};
64 | |
65 | |
// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;      // the cache entry to fill in
  int               _bci;        // the only bci we report results for
  int               _stack_top;  // expression stack depth at _bci; -1 until computed

  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
                                           CellTypeState* vars,
                                           CellTypeState* stack,
                                           int stack_top);
  virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry *entry);

  // Computes stack map for (method,bci) and initialize entry
  void compute_map(TRAPS);
  int  size();
};
91 | |
92 | |
93 OopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) { | |
94 _bci = bci; | |
95 _entry = entry; | |
96 _stack_top = -1; | |
97 } | |
98 | |
99 | |
// Runs abstract interpretation for this method and delivers the cell states
// at _bci to _entry via the fill_stackmap_for_opcodes callback. Methods with
// no code or no locals/stack trivially get an empty mask. CATCH makes any
// exception raised during map generation fatal here.
void OopMapForCacheEntry::compute_map(TRAPS) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    GenerateOopMap::compute_map(CATCH);
    result_for_basicblock(_bci);
  }
}
111 | |
112 | |
// GenerateOopMap callback; always false because we do not report results
// at GC points — result_for_basicblock() is called directly instead.
bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}
116 | |
117 | |
// GenerateOopMap callback; no stackmap storage needed for a single bci.
void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}
121 | |
122 | |
// GenerateOopMap callback; nothing to finalize.
void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}
126 | |
127 | |
// GenerateOopMap callback; initialized-variable info is not needed here.
void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}
131 | |
132 | |
133 void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs, | |
134 CellTypeState* vars, | |
135 CellTypeState* stack, | |
136 int stack_top) { | |
137 // Only interested in one specific bci | |
138 if (bcs->bci() == _bci) { | |
139 _entry->set_mask(vars, stack, stack_top); | |
140 _stack_top = stack_top; | |
141 } | |
142 } | |
143 | |
144 | |
145 int OopMapForCacheEntry::size() { | |
146 assert(_stack_top != -1, "compute_map must be called first"); | |
147 return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top; | |
148 } | |
149 | |
150 | |
151 // Implementation of InterpreterOopMap and OopMapCacheEntry | |
152 | |
153 class VerifyClosure : public OffsetClosure { | |
154 private: | |
155 OopMapCacheEntry* _entry; | |
156 bool _failed; | |
157 | |
158 public: | |
159 VerifyClosure(OopMapCacheEntry* entry) { _entry = entry; _failed = false; } | |
160 void offset_do(int offset) { if (!_entry->is_oop(offset)) _failed = true; } | |
161 bool failed() const { return _failed; } | |
162 }; | |
163 | |
// Starts out empty (see initialize()); a plain InterpreterOopMap
// resource-allocates its out-of-line bit mask (see resource_copy),
// unlike OopMapCacheEntry which uses the C heap.
InterpreterOopMap::InterpreterOopMap() {
  initialize();
#ifdef ASSERT
  _resource_allocate_bit_mask = true;
#endif
}
170 | |
InterpreterOopMap::~InterpreterOopMap() {
  // The expectation is that the bit mask was allocated
  // last in this resource area.  That would make the free of the
  // bit_mask effective (see how FREE_RESOURCE_ARRAY does a free).
  // If it was not allocated last, there is not a correctness problem
  // but the space for the bit_mask is not freed.
  assert(_resource_allocate_bit_mask, "Trying to free C heap space");
  if (mask_size() > small_mask_limit) {
    FREE_RESOURCE_ARRAY(uintptr_t, _bit_mask[0], mask_word_size());
  }
}
182 | |
20501
c204e2044c29
8038624: interpretedVFrame::expressions() must respect InterpreterOopMap for liveness
mgronlun
parents:
17937
diff
changeset
|
183 bool InterpreterOopMap::is_empty() const { |
0 | 184 bool result = _method == NULL; |
185 assert(_method != NULL || (_bci == 0 && | |
186 (_mask_size == 0 || _mask_size == USHRT_MAX) && | |
187 _bit_mask[0] == 0), "Should be completely empty"); | |
188 return result; | |
189 } | |
190 | |
191 void InterpreterOopMap::initialize() { | |
192 _method = NULL; | |
193 _mask_size = USHRT_MAX; // This value should cause a failure quickly | |
194 _bci = 0; | |
195 _expression_stack_size = 0; | |
196 for (int i = 0; i < N; i++) _bit_mask[i] = 0; | |
197 } | |
198 | |
// Walks the packed bit mask and calls oop_closure->offset_do(i) for every
// entry i whose oop bit is set. 'mask' is shifted left by bits_per_entry
// each iteration; when it shifts out to zero, the next mask word is loaded
// and 'mask' restarts at 1.
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}
215 | |
216 | |
217 #ifdef ENABLE_ZAP_DEAD_LOCALS | |
218 | |
// Like iterate_oop(), but classifies every entry: dead entries go to
// dead_closure, live oops to oop_closure, and remaining live (non-oop)
// values to value_closure. The dead bit takes precedence over the oop bit.
void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for dead values & oops, and for live values
    if ((value & (mask << dead_bit_number)) != 0)      dead_closure->offset_do(i);  // call this for all dead values or oops
    else if ((value & (mask << oop_bit_number)) != 0)  oop_closure->offset_do(i);   // call this for all live oops
    else                                               value_closure->offset_do(i); // call this for all live values
  }
}
237 | |
238 #endif | |
239 | |
240 | |
// Debug printing: lists the indices of all oop entries ("%d ") and, when
// dead-local zapping is enabled, dead entries ("%d+ "). Note the #ifdef'd
// 'else' deliberately attaches the is_oop test to the is_dead branch.
void InterpreterOopMap::print() const {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (is_dead(i)) tty->print("%d+ ", i);
    else
#endif
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}
255 | |
// Signature iterator that fills the oop bit mask for a native method:
// only object parameters get their oop bit set; all primitive kinds are
// ignored. The mask is zeroed in the constructor before generate() runs.
class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t * _mask;                             // the bit mask to be filled
  int         _size;                             // the mask size in bits

  // Sets the oop bit for parameter slot i (i is scaled to a bit offset).
  void set_one(int i) {
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_int()     { /* ignore */ }
  void pass_long()    { /* ignore */ }
  void pass_float()   { /* ignore */ }
  void pass_double()  { /* ignore */ }
  void pass_object()  { set_one(offset()); }

  MaskFillerForNative(methodHandle method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask   = mask;
    _size   = size;
    // initialize with 0 (round the bit count up to whole words)
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    NativeSignatureIterator::iterate();
  }
};
286 | |
// Debug check (called under assert): verifies that the packed bit mask
// agrees with the generator's cell states for both locals and expression
// stack. Returns false only if the closure-based containment check fails;
// per-slot mismatches fire asserts directly.
bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);

  for(int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)               ? true : false;
    bool v2 = vars[i].is_reference()  ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(i)              ? true : false;
    bool v4 = !vars[i].is_live()      ? true : false;
    assert(v3 == v4, "locals live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }

  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
  for(int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(max_locals + j) ? true : false;
    bool v4 = !stack[j].is_live()     ? true : false;
    assert(v3 == v4, "stack live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }
  if (TraceOopMapGeneration && Verbose) tty->cr();
  return true;
}
326 | |
// Allocates the bit mask on the C heap when it does not fit inline
// (mask_size() > small_mask_limit); _bit_mask[0] then stores the pointer
// to the out-of-line storage instead of mask bits.
void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}
334 | |
// Frees the out-of-line bit mask, if one was allocated on the C heap.
// The assert guards against accidentally freeing resource-area memory
// (which belongs to plain InterpreterOopMaps, not cache entries).
void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0], mtClass);
    debug_only(_bit_mask[0] = 0;)
  }
}
343 | |
344 | |
// Fills the mask for a native method from its signature alone: one entry
// per parameter slot, with oop bits set by MaskFillerForNative.
void OopMapCacheEntry::fill_for_native(methodHandle mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}
353 | |
354 | |
// (Re)computes this entry for (method, bci): flushes any previous contents,
// then either derives the mask from the native signature or runs the
// abstract interpreter via OopMapForCacheEntry. CATCH makes any exception
// raised during map generation fatal.
void OopMapCacheEntry::fill(methodHandle method, int bci) {
  HandleMark hm;
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(bci);
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    EXCEPTION_MARK;
    OopMapForCacheEntry gen(method, bci, this);
    gen.compute_map(CATCH);
  }
}
371 | |
372 | |
// Packs the generator's cell states for locals (vars) followed by the
// expression stack (stack) into the bit mask. 'mask' shifts by
// bits_per_entry per entry; each time it shifts out to zero the completed
// word is stored and accumulation restarts.
void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if ( cell->is_reference()) {
      value |= (mask << oop_bit_number );
    }

#ifdef ENABLE_ZAP_DEAD_LOCALS
    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
#endif
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");


}
422 | |
// Releases any out-of-line bit mask and resets the entry to the empty state.
void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}
427 | |
428 | |
429 // Implementation of OopMapCache | |
430 | |
431 #ifndef PRODUCT | |
432 | |
// Running total of bytes held by all OopMapCache instances; updated in the
// OopMapCache constructor/destructor (non-product builds only).
static long _total_memory_usage = 0;

long OopMapCache::memory_usage() {
  return _total_memory_usage;
}
438 | |
439 #endif | |
440 | |
// Copies a cache entry into this (resource-allocated) oop map: small masks
// are copied inline; large ones get a fresh resource-area buffer.
void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
    "Should not resource allocate the _bit_mask");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
      mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap is a recently created
    // and empty. It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
      Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance.  Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
      mask_word_size() * BytesPerWord);
  }
}
471 | |
20501
c204e2044c29
8038624: interpretedVFrame::expressions() must respect InterpreterOopMap for liveness
mgronlun
parents:
17937
diff
changeset
|
472 inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) const { |
0 | 473 // We use method->code_size() rather than method->identity_hash() below since |
474 // the mark may not be present if a pointer to the method is already reversed. | |
475 return ((unsigned int) bci) | |
476 ^ ((unsigned int) method->max_locals() << 2) | |
477 ^ ((unsigned int) method->code_size() << 4) | |
478 ^ ((unsigned int) method->size_of_parameters() << 6); | |
479 } | |
480 | |
481 | |
// Allocates the fixed-size entry table on the C heap and puts every entry
// into the empty state. NOT_PRODUCT bookkeeping feeds memory_usage().
OopMapCache::OopMapCache() :
  _mut(Mutex::leaf, "An OopMapCache lock", true)
{
  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
  // Cannot call flush for initialization, since flush
  // will check if memory should be deallocated
  for(int i = 0; i < _size; i++) _array[i].initialize();
  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
}
491 | |
492 | |
// Flushes every entry first so out-of-line bit masks are released before
// the entry array itself is freed.
OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array, mtClass);
}
501 | |
502 OopMapCacheEntry* OopMapCache::entry_at(int i) const { | |
503 return &_array[i % _size]; | |
504 } | |
505 | |
506 void OopMapCache::flush() { | |
507 for (int i = 0; i < _size; i++) _array[i].flush(); | |
508 } | |
509 | |
// Evicts entries whose method has been made obsolete by class redefinition,
// so the cache does not keep old Method* versions alive/pinned.
void OopMapCache::flush_obsolete_entries() {
  for (int i = 0; i < _size; i++)
    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d",
        _array[i].method()->name()->as_C_string(),
        _array[i].method()->signature()->as_C_string(), i));

      _array[i].flush();
    }
}
522 | |
// Returns (via entry_for) the oop map for (method, bci), computing and
// caching it if necessary. Probes up to _probe_depth slots from the hash
// position; on a full collision set the first probed slot is overwritten.
// Methods flagged should_not_be_cached() are computed one-shot and never
// inserted. Runs entirely under the cache mutex.
void OopMapCache::lookup(methodHandle method,
                         int bci,
                         InterpreterOopMap* entry_for) const {
  MutexLocker x(&_mut);

  OopMapCacheEntry* entry = NULL;
  int probe = hash_value_for(method, bci);

  // Search hashtable for match
  int i;
  for(i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->match(method, bci)) {
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    static int count = 0;
    ResourceMark rm;
    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
    method->print_value(); tty->cr();
  }

  // Entry is not in hashtable.
  // Compute entry and return it

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in Method*.cpp.
    compute_one_oop_map(method, bci, entry_for);
    return;
  }

  // First search for an empty slot
  for(i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->is_empty()) {
      entry->fill(method, bci);
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print_cr("*** collision in oopmap cache - flushing item ***");
  }

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  //entry_at(probe + _probe_depth - 1)->flush();
  //for(i = _probe_depth - 1; i > 0; i--) {
  //  // Coping entry[i] = entry[i-1];
  //  OopMapCacheEntry *to   = entry_at(probe + i);
  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
  //  to->copy(from);
  // }

  assert(method->is_method(), "gaga");

  entry = entry_at(probe + 0);
  entry->fill(method, bci);

  // Copy the newly cached entry to input parameter
  entry_for->resource_copy(entry);

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print("Done with ");
    method->print_value(); tty->cr();
  }
  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");

  return;
}
603 | |
604 void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) { | |
605 // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack | |
6197 | 606 OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass); |
0 | 607 tmp->initialize(); |
608 tmp->fill(method, bci); | |
609 entry->resource_copy(tmp); | |
6197 | 610 FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp, mtInternal); |
0 | 611 } |