Mercurial > hg > truffle
diff src/share/vm/interpreter/rewriter.hpp @ 6725:da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
field | value
---|---
author | coleenp
date | Sat, 01 Sep 2012 13:25:18 -0400
parents | 1d7922586cf6
children | f6b0eb4e44cf
line wrap: on
line diff
--- a/src/share/vm/interpreter/rewriter.hpp Fri Aug 31 16:39:35 2012 -0700 +++ b/src/share/vm/interpreter/rewriter.hpp Sat Sep 01 13:25:18 2012 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,59 +36,121 @@ private: instanceKlassHandle _klass; constantPoolHandle _pool; - objArrayHandle _methods; + Array<Method*>* _methods; intArray _cp_map; - intStack _cp_cache_map; + intStack _cp_cache_map; // for Methodref, Fieldref, + // InterfaceMethodref and InvokeDynamic + intArray _reference_map; // maps from cp index to resolved_refs index (or -1) + intStack _resolved_references_map; // for strings, methodHandle, methodType + intStack _invokedynamic_references_map; // for invokedynamic resolved refs intArray _method_handle_invokers; + int _resolved_reference_limit; - void init_cp_map(int length) { + void init_maps(int length) { _cp_map.initialize(length, -1); // Choose an initial value large enough that we don't get frequent // calls to grow(). _cp_cache_map.initialize(length / 2); + // Also cache resolved objects, in another different cache. 
+ _reference_map.initialize(length, -1); + _resolved_references_map.initialize(length / 2); + _invokedynamic_references_map.initialize(length / 2); + _resolved_reference_limit = -1; + DEBUG_ONLY(_cp_cache_index_limit = -1); } + + int _cp_cache_index_limit; + void record_map_limits() { +#ifdef ASSERT + // Record initial size of the two arrays generated for the CP cache: + _cp_cache_index_limit = _cp_cache_map.length(); +#endif //ASSERT + _resolved_reference_limit = _resolved_references_map.length(); + } + int cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; } bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; } - int maybe_add_cp_cache_entry(int i) { return has_cp_cache(i) ? _cp_map[i] : add_cp_cache_entry(i); } + int add_cp_cache_entry(int cp_index) { - assert((cp_index & _secondary_entry_tag) == 0, "bad tag"); + assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version"); assert(_cp_map[cp_index] == -1, "not twice on same cp_index"); + assert(_cp_cache_index_limit == -1, "do not add cache entries after first iteration"); int cache_index = _cp_cache_map.append(cp_index); _cp_map.at_put(cp_index, cache_index); assert(cp_entry_to_cp_cache(cp_index) == cache_index, ""); + assert(cp_cache_entry_pool_index(cache_index) == cp_index, ""); + return cache_index; + } + + // add a new CP cache entry beyond the normal cache (for invokedynamic only) + int add_invokedynamic_cp_cache_entry(int cp_index) { + assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version"); + assert(_cp_map[cp_index] == -1, "do not map from cp_index"); + assert(_cp_cache_index_limit >= 0, "add indy cache entries after first iteration"); + int cache_index = _cp_cache_map.append(cp_index); + assert(cache_index >= _cp_cache_index_limit, ""); + // do not update _cp_map, since the mapping is one-to-many + assert(cp_cache_entry_pool_index(cache_index) == cp_index, ""); return 
cache_index; } - int add_secondary_cp_cache_entry(int main_cpc_entry) { - assert(main_cpc_entry < _cp_cache_map.length(), "must be earlier CP cache entry"); - int cache_index = _cp_cache_map.append(main_cpc_entry | _secondary_entry_tag); - return cache_index; + + // fix duplicated code later + int cp_entry_to_resolved_references(int cp_index) const { + assert(has_entry_in_resolved_references(cp_index), "oob"); + return _reference_map[cp_index]; + } + bool has_entry_in_resolved_references(int cp_index) const { + return (uint)cp_index < (uint)_reference_map.length() && _reference_map[cp_index] >= 0; + } + + // add a new entry to the resolved_references map + int add_resolved_references_entry(int cp_index) { + assert(_reference_map[cp_index] == -1, "not twice on same cp_index"); + assert(_resolved_reference_limit == -1, "do not add CP refs after first iteration"); + int ref_index = _resolved_references_map.append(cp_index); + _reference_map.at_put(cp_index, ref_index); + assert(cp_entry_to_resolved_references(cp_index) == ref_index, ""); + return ref_index; + } + + // add a new entry to the resolved_references map (for invokedynamic only) + int add_invokedynamic_resolved_references_entry(int cp_index, int cache_index) { + assert(_resolved_reference_limit >= 0, "must add indy refs after first iteration"); + int ref_index = _resolved_references_map.append(cp_index); // many-to-one + assert(ref_index >= _resolved_reference_limit, ""); + _invokedynamic_references_map.at_put_grow(ref_index, cache_index, -1); + return ref_index; + } + + int resolved_references_entry_to_pool_index(int ref_index) { + int cp_index = _resolved_references_map[ref_index]; + return cp_index; + } + + // invokedynamic support - append the cpCache entry (encoded) in object map. + // The resolved_references_map should still be in ascending order + // The resolved_references has the invokedynamic call site objects appended after + // the objects that are resolved in the constant pool. 
+ int add_callsite_entry(int main_cpc_entry) { + int ref_index = _resolved_references_map.append(main_cpc_entry); + return ref_index; } // Access the contents of _cp_cache_map to determine CP cache layout. int cp_cache_entry_pool_index(int cache_index) { int cp_index = _cp_cache_map[cache_index]; - if ((cp_index & _secondary_entry_tag) != 0) - return -1; - else return cp_index; } - int cp_cache_secondary_entry_main_index(int cache_index) { - int cp_index = _cp_cache_map[cache_index]; - if ((cp_index & _secondary_entry_tag) == 0) - return -1; - else - return (cp_index - _secondary_entry_tag); - } // All the work goes in here: - Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS); + Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS); void compute_index_maps(); void make_constant_pool_cache(TRAPS); - void scan_method(methodOop m, bool reverse = false); + void scan_method(Method* m, bool reverse = false); void rewrite_Object_init(methodHandle m, TRAPS); void rewrite_member_reference(address bcp, int offset, bool reverse = false); - void maybe_rewrite_invokehandle(address opc, int cp_index, bool reverse = false); + void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse = false); void rewrite_invokedynamic(address bcp, int offset, bool reverse = false); void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false); // Revert bytecodes in case of an exception. 
@@ -98,17 +160,13 @@ public: // Driver routine: static void rewrite(instanceKlassHandle klass, TRAPS); - static void rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS); - - enum { - _secondary_entry_tag = nth_bit(30) - }; + static void rewrite(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS); // Second pass, not gated by is_rewritten flag static void relocate_and_link(instanceKlassHandle klass, TRAPS); // JSR292 version to call with its own methods. static void relocate_and_link(instanceKlassHandle klass, - objArrayHandle methods, TRAPS); + Array<Method*>* methods, TRAPS); };