/*
 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_oopMapCache.cpp.incl"

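// OopMapCacheEntry is the cache-resident form of an oop map: an
// InterpreterOopMap whose bit mask, when it does not fit in the inline words,
// is allocated on the C heap (see allocate_bit_mask/deallocate_bit_mask
// below). Clients receive resource-allocated copies via
// InterpreterOopMap::resource_copy(), which is why _resource_allocate_bit_mask
// is false here and true in InterpreterOopMap itself.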
class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 protected:
  // Initialization
  void fill(methodHandle method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(methodHandle method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

 private:
  void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from the C heap if necessary
  bool verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
#ifdef ASSERT
    _resource_allocate_bit_mask = false;
#endif
  }
};


// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const    { return false; }
  virtual bool possible_gc_point         (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog      (int nof_gc_points);
  virtual void fill_stackmap_epilog      ();
  virtual void fill_stackmap_for_opcodes (BytecodeStream *bcs,
                                          CellTypeState* vars,
                                          CellTypeState* stack,
                                          int stack_top);
  virtual void fill_init_vars            (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry *entry);

  // Computes the stack map for (method, bci) and initializes the entry
  void compute_map(TRAPS);
  int  size();
};


OopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  _bci       = bci;
  _entry     = entry;
  _stack_top = -1;
}


void OopMapForCacheEntry::compute_map(TRAPS) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    GenerateOopMap::compute_map(CATCH);
    result_for_basicblock(_bci);
  }
}

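// Note: GenerateOopMap::compute_map() abstractly interprets the whole method,
// but because report_results() returns false above, nothing is reported at
// the potential GC points. Instead, result_for_basicblock(_bci) triggers a
// single callback to fill_stackmap_for_opcodes() (below), which captures the
// frame state at the one bci of interest.
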
bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}


void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}


void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
                                                    CellTypeState* vars,
                                                    CellTypeState* stack,
                                                    int stack_top) {
  // Only interested in one specific bci
  if (bcs->bci() == _bci) {
    _entry->set_mask(vars, stack, stack_top);
    _stack_top = stack_top;
  }
}


int OopMapForCacheEntry::size() {
  assert(_stack_top != -1, "compute_map must be called first");
  return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top;
}


// Implementation of InterpreterOopMap and OopMapCacheEntry

class VerifyClosure : public OffsetClosure {
 private:
  OopMapCacheEntry* _entry;
  bool              _failed;

 public:
  VerifyClosure(OopMapCacheEntry* entry) { _entry = entry; _failed = false; }
  void offset_do(int offset)             { if (!_entry->is_oop(offset)) _failed = true; }
  bool failed() const                    { return _failed; }
};

InterpreterOopMap::InterpreterOopMap() {
  initialize();
#ifdef ASSERT
  _resource_allocate_bit_mask = true;
#endif
}

InterpreterOopMap::~InterpreterOopMap() {
  // The expectation is that the bit mask was allocated
  // last in this resource area. That would make the free of the
  // bit_mask effective (see how FREE_RESOURCE_ARRAY does a free).
  // If it was not allocated last, there is no correctness problem,
  // but the space for the bit_mask is not freed.
  assert(_resource_allocate_bit_mask, "Trying to free C heap space");
  if (mask_size() > small_mask_limit) {
    FREE_RESOURCE_ARRAY(uintptr_t, _bit_mask[0], mask_word_size());
  }
}

bool InterpreterOopMap::is_empty() {
  bool result = _method == NULL;
  assert(_method != NULL || (_bci == 0 &&
    (_mask_size == 0 || _mask_size == USHRT_MAX) &&
    _bit_mask[0] == 0), "Should be completely empty");
  return result;
}

void InterpreterOopMap::initialize() {
  _method                = NULL;
  _mask_size             = USHRT_MAX;  // This value should cause a failure quickly
  _bci                   = 0;
  _expression_stack_size = 0;
  for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}


void InterpreterOopMap::oop_iterate(OopClosure *blk) {
  if (method() != NULL) {
    blk->do_oop((oop*) &_method);
  }
}

void InterpreterOopMap::oop_iterate(OopClosure *blk, MemRegion mr) {
  if (method() != NULL && mr.contains(&_method)) {
    blk->do_oop((oop*) &_method);
  }
}


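// Bit mask layout, as used by the loop below: each entry occupies
// bits_per_entry consecutive bits, and within an entry the bit at offset
// oop_bit_number marks an oop. Entries are packed into uintptr_t words; the
// shifting mask overflows to zero at each word boundary, at which point the
// next word is fetched from bit_mask().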
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}

void InterpreterOopMap::verify() {
  // If we are doing mark sweep _method may not have a valid header
  // $$$ This used to happen only for m/s collections; we might want to
  // think of an appropriate generalization of this distinction.
  guarantee(Universe::heap()->is_gc_active() ||
            _method->is_oop_or_null(), "invalid oop in oopMapCache");
}

#ifdef ENABLE_ZAP_DEAD_LOCALS

void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for dead values & oops, and for live values
    if ((value & (mask << dead_bit_number)) != 0)     dead_closure->offset_do(i);   // call this for all dead values or oops
    else if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);    // call this for all live oops
    else                                              value_closure->offset_do(i);  // call this for all live values
  }
}

#endif


void InterpreterOopMap::print() {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (is_dead(i)) tty->print("%d+ ", i);
    else
#endif
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}

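// MaskFillerForNative fills the oop bits for a native method's parameters by
// walking the signature with NativeSignatureIterator: pass_object() marks a
// slot as an oop via set_one(), while primitive types are ignored and leave
// their entries clear.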
class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t * _mask;  // the bit mask to be filled
  int         _size;  // the mask size in bits

  void set_one(int i) {
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_int()    { /* ignore */ }
  void pass_long()   { /* ignore */ }
#ifdef _LP64
  void pass_float()  { /* ignore */ }
#endif
  void pass_double() { /* ignore */ }
  void pass_object() { set_one(offset()); }

  MaskFillerForNative(methodHandle method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask = mask;
    _size = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    NativeSignatureIterator::iterate();
  }
};

bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);

  for (int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)              ? true : false;
    bool v2 = vars[i].is_reference() ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(i)             ? true : false;
    bool v4 = !vars[i].is_live()     ? true : false;
    assert(v3 == v4, "locals live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }

  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
  for (int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(max_locals + j) ? true : false;
    bool v4 = !stack[j].is_live()     ? true : false;
    assert(v3 == v4, "stack live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }
  if (TraceOopMapGeneration && Verbose) tty->cr();
  return true;
}

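// Storage note: for small masks (mask_size() <= small_mask_limit) the bits
// live directly in the inline _bit_mask words and no heap allocation is
// needed; for larger masks, _bit_mask[0] instead holds a pointer to a C heap
// array. The two functions below manage that out-of-line case.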
void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size());
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}


void OopMapCacheEntry::fill_for_native(methodHandle mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}


void OopMapCacheEntry::fill(methodHandle method, int bci) {
  HandleMark hm;
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(bci);
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    EXCEPTION_MARK;
    OopMapForCacheEntry gen(method, bci, this);
    gen.compute_map(CATCH);
  }
#ifdef ASSERT
  verify();
#endif
}


void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if (cell->is_reference()) {
      value |= (mask << oop_bit_number);
    }

#ifdef ENABLE_ZAP_DEAD_LOCALS
    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
#endif
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}

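// Worked example (illustrative; assumes the header defines bits_per_entry == 2
// and oop_bit_number == 0, which is not shown in this file): for a frame with
// max_locals == 2 and stack_top == 1 where only local #1 holds an oop,
// set_mask produces three 2-bit entries, so only bit 1*2 + 0 == 2 of the
// first mask word is set.
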
void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}


// Implementation of OopMapCache

#ifndef PRODUCT

static long _total_memory_usage = 0;

long OopMapCache::memory_usage() {
  return _total_memory_usage;
}

#endif

void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
    "Should not resource allocate the _bit_mask");
  assert(from->method()->is_oop(), "MethodOop is bad");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
      mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap is recently created
    // and empty. It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
      Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance. Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
      mask_word_size() * BytesPerWord);
  }
}

inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}

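// The hash value is not reduced here: entry_at() applies "% _size" on every
// access, and lookup() probes the _probe_depth consecutive slots starting at
// the hash value (open addressing with linear probing).
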
OopMapCache::OopMapCache() :
  _mut(Mutex::leaf, "An OopMapCache lock", true)
{
  _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size);
  // Cannot call flush for initialization, since flush
  // will check if memory should be deallocated
  for (int i = 0; i < _size; i++) _array[i].initialize();
  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
}

OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
}

OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return &_array[i % _size];
}

void OopMapCache::flush() {
  for (int i = 0; i < _size; i++) _array[i].flush();
}

void OopMapCache::flush_obsolete_entries() {
  for (int i = 0; i < _size; i++)
    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't
      // want to pin it down so flush the entry.
      _array[i].flush();
    }
}

void OopMapCache::oop_iterate(OopClosure *blk) {
  for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk);
}

void OopMapCache::oop_iterate(OopClosure *blk, MemRegion mr) {
  for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk, mr);
}

void OopMapCache::verify() {
  for (int i = 0; i < _size; i++) _array[i].verify();
}

void OopMapCache::lookup(methodHandle method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  MutexLocker x(&_mut);

  OopMapCacheEntry* entry = NULL;
  int probe = hash_value_for(method, bci);

  // Search hashtable for match
  int i;
  for (i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->match(method, bci)) {
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    static int count = 0;
    ResourceMark rm;
    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
    method->print_value(); tty->cr();
  }

  // Entry is not in hashtable.
  // Compute entry and return it

  // First search for an empty slot
  for (i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->is_empty()) {
      entry->fill(method, bci);
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      if (method->is_old()) {
        // The caller of lookup() will receive a copy of the interesting
        // info via entry_for, but we don't keep an old redefined method in
        // the cache to avoid pinning down the method.
        entry->flush();
      }
      return;
    }
  }

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print_cr("*** collision in oopmap cache - flushing item ***");
  }

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  // entry_at(probe + _probe_depth - 1)->flush();
  // for (i = _probe_depth - 1; i > 0; i--) {
  //   // Copying entry[i] = entry[i-1];
  //   OopMapCacheEntry *to   = entry_at(probe + i);
  //   OopMapCacheEntry *from = entry_at(probe + i - 1);
  //   to->copy(from);
  // }

  assert(method->is_method(), "must be a method");

  entry = entry_at(probe + 0);
  entry->fill(method, bci);

  // Copy the newly cached entry to input parameter
  entry_for->resource_copy(entry);

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print("Done with ");
    method->print_value(); tty->cr();
  }
  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");

  if (method->is_old()) {
    // The caller of lookup() will receive a copy of the interesting
    // info via entry_for, but we don't keep an old redefined method in
    // the cache to avoid pinning down the method.
    entry->flush();
  }

  return;
}

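// Illustrative use of lookup() (a sketch; the surrounding setup is assumed,
// not taken from this file):
//
//   ResourceMark rm;               // entry_for's bit mask is resource-allocated
//   InterpreterOopMap entry_for;
//   cache->lookup(method, bci, &entry_for);
//   entry_for.iterate_oop(&my_offset_closure);  // visit each oop offset
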
void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1);
  tmp->initialize();
  tmp->fill(method, bci);
  entry->resource_copy(tmp);
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp);
}