annotate src/share/vm/memory/allocation.cpp @ 7666:31540ca73e81 (truffle)
"Remove ControlFlowException in SimpleLanguage."

author:   Thomas Wuerthinger <thomas.wuerthinger@oracle.com>
date:     Fri, 01 Feb 2013 19:53:52 +0100
parents:  59c790074993
children: 6f817ce50129 f75faf51e8c4
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

void* StackObj::operator new(size_t size)   { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)    { ShouldNotCallThis(); }
void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); }

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only, TRAPS) {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             Metaspace::NonClassType, CHECK_NULL);
}

bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metadata() const {
  // GC Verify checks use this in guarantees.
  // TODO: either replace them with is_metaspace_object() or remove them.
  // is_metaspace_object() is slower than this test.  This test doesn't
  // seem very useful for metaspace objects anymore though.
  return !Universe::heap()->is_in_reserved(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return Metaspace::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {"INTPTR_FORMAT"}", this);
}


void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

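// Illustrative usage sketch, not part of the original file: callers pick the
// allocation_type via placement-new syntax. 'MyInfo' is a hypothetical
// ResourceObj subclass; the RESOURCE_AREA form is only valid while a
// ResourceMark is active on the current thread.
//
//   MyInfo* heap_obj = new (ResourceObj::C_HEAP, mtInternal) MyInfo();        // freed with delete
//   MyInfo* res_obj  = new (ResourceObj::RESOURCE_AREA, mtInternal) MyInfo(); // freed when the ResourceMark unwinds
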
void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
    allocation_type type, MEMFLAGS flags) {
  // Should only be called with std::nothrow; use the other operator new() otherwise.
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}


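// Illustrative usage sketch, not part of the original file: the nothrow
// variant above lets a caller survive allocation failure instead of exiting
// the VM. 'MyInfo' is a hypothetical ResourceObj subclass.
//
//   MyInfo* obj = new (std::nothrow, ResourceObj::C_HEAP, mtInternal) MyInfo();
//   if (obj == NULL) {
//     // Allocation failed; back out and report the error gracefully.
//   }
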
void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
    assert(type <= allocation_mask, "incorrect allocation type");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type() == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

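// Illustrative sketch, not part of the original file: the debug encoding
// above packs the object's own address and its allocation_type into a single
// word. Because 'res' is at least 4-byte aligned, the low bits covered by
// allocation_mask are free to hold the type:
//
//   _allocation_t[0] = ~(address + type);              // stored encoding
//   address = ~(_allocation_t[0] | allocation_mask);   // decoded in the assert above
//   type    = (~_allocation_t[0]) & allocation_mask;   // decoded in get_allocation_type()
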
ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such a type). Keep it since it is a valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and type was set.
      assert(!allocated_on_stack(),
             err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                     this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    } else {
      // Operator new() was not called.
      // Assume that it is an embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resemble a valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
           err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    // Keep the current _allocation_t value.
    return *this;
}

ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
#endif // ASSERT


void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free   " INTPTR_FORMAT, p);
}

bool warn_new_operator = false; // see vm_main

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  _NOINLINE_ void* allocate(size_t bytes) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside the ThreadCritical lock, so os::malloc
    // has to be done outside ThreadCritical due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL)
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");

    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free the excess beyond n
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }

  // Accessors to the preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 }; // cleaning interval in ms

 public:
   ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
   void task() {
     ChunkPool::clean();
   }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk), but in order for the arena
  // allocations to come out aligned as expected, the size must be aligned
  // to the expected arena alignment.
  // We expect requested_size == sizeof(Chunk); if it is not the proper
  // size, we must align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c);  break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c);  break;
   default:                 os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

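// Illustrative arithmetic, not part of the original file: the rounding above
// aligns init_size up to the pointer size. With 8-byte pointers,
// round_size == 7, so for example:
//
//   init_size = 13:  (13 + 7) & ~7  ==  20 & ~7  ==  16
//   init_size = 16:  (16 + 7) & ~7  ==  23 & ~7  ==  16   (already aligned)
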
Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // Work around a rare race condition in which native memory
  // tracking could otherwise double-count the arena size.
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  NOT_PRODUCT(Atomic::dec(&_instance_count);)
}

void* Arena::operator new(size_t size) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = os::malloc(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags|otArena, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // Reset the size before chop to avoid a rare race condition
  // in which total arena memory could briefly exceed total chunk memory.
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls don't
// actually change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    _size_in_bytes = size;
    MemTracker::record_arena_size((address)this, size);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // Whilst have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL) {
    if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
    }
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max  = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}


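// Illustrative sketch, not part of the original file: grow() is the slow path
// behind the bump-pointer fast path (cf. internal_malloc_4() below). An arena
// allocation of x bytes is conceptually:
//
//   if (_hwm + x > _max) {       // current chunk exhausted
//     return grow(x);            // chain a new chunk and allocate from it
//   } else {
//     char* old = _hwm;          // bump-pointer fast path
//     _hwm += x;
//     return old;
//   }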

// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&            // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
    _hwm = c_old+corrected_new_size;         // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}


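// Illustrative sketch, not part of the original file: Arealloc() only avoids
// a copy when old_ptr is the most recent allocation (it ends exactly at _hwm)
// and the grown size still fits in the current chunk:
//
//   char* a = (char*) arena->Amalloc(16);
//   void* b = arena->Arealloc(a, 16, 32);   // a ends at _hwm: may extend in place
//   char* c = (char*) arena->Amalloc(8);
//   void* d = arena->Arealloc(b, 32, 64);   // b is no longer last: relocates + memcpy
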
// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size){
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

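// Illustrative usage sketch, not part of the original file: a resource-area
// array that can be reallocated takes a ReallocMark at creation and calls
// check() before each growth step. If a nested ResourceMark was pushed in
// between, growing would allocate in the inner scope and leave the array
// pointing at memory that is released too early; check() turns that into a
// fatal() instead of silent corruption.
//
//   ReallocMark nesting_check;               // hypothetical growable buffer
//   ...
//   nesting_check.check();                   // fatal() if nesting changed
//   buf = (char*) arena->Arealloc(buf, old_cap, new_cap);
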
#endif // Non-product