Mercurial > hg > truffle
comparison src/cpu/x86/vm/c1_Runtime1_x86.cpp @ 4762:069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
Summary: Moved sizeof(klassOopDesc), changed the return type to ByteSize and removed the _in_bytes suffix.
Reviewed-by: never, bdelsart, coleenp, jrose
author | stefank |
---|---|
date | Wed, 07 Dec 2011 11:35:03 +0100 |
parents | cec1757a0134 |
children | 22cee0ee8927 |
comparison
equal
deleted
inserted
replaced
4761:65149e74c706 | 4762:069ab3f976d3 |
---|---|
1009 __ push(rdi); | 1009 __ push(rdi); |
1010 __ push(rbx); | 1010 __ push(rbx); |
1011 | 1011 |
1012 if (id == fast_new_instance_init_check_id) { | 1012 if (id == fast_new_instance_init_check_id) { |
1013 // make sure the klass is initialized | 1013 // make sure the klass is initialized |
1014 __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized); | 1014 __ cmpl(Address(klass, instanceKlass::init_state_offset()), instanceKlass::fully_initialized); |
1015 __ jcc(Assembler::notEqual, slow_path); | 1015 __ jcc(Assembler::notEqual, slow_path); |
1016 } | 1016 } |
1017 | 1017 |
1018 #ifdef ASSERT | 1018 #ifdef ASSERT |
1019 // assert object can be fast path allocated | 1019 // assert object can be fast path allocated |
1020 { | 1020 { |
1021 Label ok, not_ok; | 1021 Label ok, not_ok; |
1022 __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); | 1022 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); |
1023 __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0) | 1023 __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0) |
1024 __ jcc(Assembler::lessEqual, not_ok); | 1024 __ jcc(Assembler::lessEqual, not_ok); |
1025 __ testl(obj_size, Klass::_lh_instance_slow_path_bit); | 1025 __ testl(obj_size, Klass::_lh_instance_slow_path_bit); |
1026 __ jcc(Assembler::zero, ok); | 1026 __ jcc(Assembler::zero, ok); |
1027 __ bind(not_ok); | 1027 __ bind(not_ok); |
1038 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi | 1038 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi |
1039 | 1039 |
1040 __ bind(retry_tlab); | 1040 __ bind(retry_tlab); |
1041 | 1041 |
1042 // get the instance size (size is positive so movl is fine for 64bit) | 1042 // get the instance size (size is positive so movl is fine for 64bit) |
1043 __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); | 1043 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); |
1044 | 1044 |
1045 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); | 1045 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); |
1046 | 1046 |
1047 __ initialize_object(obj, klass, obj_size, 0, t1, t2); | 1047 __ initialize_object(obj, klass, obj_size, 0, t1, t2); |
1048 __ verify_oop(obj); | 1048 __ verify_oop(obj); |
1050 __ pop(rdi); | 1050 __ pop(rdi); |
1051 __ ret(0); | 1051 __ ret(0); |
1052 | 1052 |
1053 __ bind(try_eden); | 1053 __ bind(try_eden); |
1054 // get the instance size (size is positive so movl is fine for 64bit) | 1054 // get the instance size (size is positive so movl is fine for 64bit) |
1055 __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); | 1055 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); |
1056 | 1056 |
1057 __ eden_allocate(obj, obj_size, 0, t1, slow_path); | 1057 __ eden_allocate(obj, obj_size, 0, t1, slow_path); |
1058 __ incr_allocated_bytes(thread, obj_size, 0); | 1058 __ incr_allocated_bytes(thread, obj_size, 0); |
1059 | 1059 |
1060 __ initialize_object(obj, klass, obj_size, 0, t1, t2); | 1060 __ initialize_object(obj, klass, obj_size, 0, t1, t2); |
1117 #ifdef ASSERT | 1117 #ifdef ASSERT |
1118 // assert object type is really an array of the proper kind | 1118 // assert object type is really an array of the proper kind |
1119 { | 1119 { |
1120 Label ok; | 1120 Label ok; |
1121 Register t0 = obj; | 1121 Register t0 = obj; |
1122 __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); | 1122 __ movl(t0, Address(klass, Klass::layout_helper_offset())); |
1123 __ sarl(t0, Klass::_lh_array_tag_shift); | 1123 __ sarl(t0, Klass::_lh_array_tag_shift); |
1124 int tag = ((id == new_type_array_id) | 1124 int tag = ((id == new_type_array_id) |
1125 ? Klass::_lh_array_tag_type_value | 1125 ? Klass::_lh_array_tag_type_value |
1126 : Klass::_lh_array_tag_obj_value); | 1126 : Klass::_lh_array_tag_obj_value); |
1127 __ cmpl(t0, tag); | 1127 __ cmpl(t0, tag); |
1151 | 1151 |
1152 __ bind(retry_tlab); | 1152 __ bind(retry_tlab); |
1153 | 1153 |
1154 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) | 1154 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) |
1155 // since size is positive movl does right thing on 64bit | 1155 // since size is positive movl does right thing on 64bit |
1156 __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); | 1156 __ movl(t1, Address(klass, Klass::layout_helper_offset())); |
1157 // since size is positive movl does right thing on 64bit | 1157 // since size is positive movl does right thing on 64bit |
1158 __ movl(arr_size, length); | 1158 __ movl(arr_size, length); |
1159 assert(t1 == rcx, "fixed register usage"); | 1159 assert(t1 == rcx, "fixed register usage"); |
1160 __ shlptr(arr_size /* by t1=rcx, mod 32 */); | 1160 __ shlptr(arr_size /* by t1=rcx, mod 32 */); |
1161 __ shrptr(t1, Klass::_lh_header_size_shift); | 1161 __ shrptr(t1, Klass::_lh_header_size_shift); |
1165 __ andptr(arr_size, ~MinObjAlignmentInBytesMask); | 1165 __ andptr(arr_size, ~MinObjAlignmentInBytesMask); |
1166 | 1166 |
1167 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size | 1167 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size |
1168 | 1168 |
1169 __ initialize_header(obj, klass, length, t1, t2); | 1169 __ initialize_header(obj, klass, length, t1, t2); |
1170 __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); | 1170 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); |
1171 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); | 1171 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); |
1172 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); | 1172 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); |
1173 __ andptr(t1, Klass::_lh_header_size_mask); | 1173 __ andptr(t1, Klass::_lh_header_size_mask); |
1174 __ subptr(arr_size, t1); // body length | 1174 __ subptr(arr_size, t1); // body length |
1175 __ addptr(t1, obj); // body start | 1175 __ addptr(t1, obj); // body start |
1178 __ ret(0); | 1178 __ ret(0); |
1179 | 1179 |
1180 __ bind(try_eden); | 1180 __ bind(try_eden); |
1181 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) | 1181 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) |
1182 // since size is positive movl does right thing on 64bit | 1182 // since size is positive movl does right thing on 64bit |
1183 __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); | 1183 __ movl(t1, Address(klass, Klass::layout_helper_offset())); |
1184 // since size is positive movl does right thing on 64bit | 1184 // since size is positive movl does right thing on 64bit |
1185 __ movl(arr_size, length); | 1185 __ movl(arr_size, length); |
1186 assert(t1 == rcx, "fixed register usage"); | 1186 assert(t1 == rcx, "fixed register usage"); |
1187 __ shlptr(arr_size /* by t1=rcx, mod 32 */); | 1187 __ shlptr(arr_size /* by t1=rcx, mod 32 */); |
1188 __ shrptr(t1, Klass::_lh_header_size_shift); | 1188 __ shrptr(t1, Klass::_lh_header_size_shift); |
1193 | 1193 |
1194 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size | 1194 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size |
1195 __ incr_allocated_bytes(thread, arr_size, 0); | 1195 __ incr_allocated_bytes(thread, arr_size, 0); |
1196 | 1196 |
1197 __ initialize_header(obj, klass, length, t1, t2); | 1197 __ initialize_header(obj, klass, length, t1, t2); |
1198 __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); | 1198 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); |
1199 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); | 1199 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); |
1200 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); | 1200 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); |
1201 __ andptr(t1, Klass::_lh_header_size_mask); | 1201 __ andptr(t1, Klass::_lh_header_size_mask); |
1202 __ subptr(arr_size, t1); // body length | 1202 __ subptr(arr_size, t1); // body length |
1203 __ addptr(t1, obj); // body start | 1203 __ addptr(t1, obj); // body start |
1265 | 1265 |
1266 // load the klass and check the has finalizer flag | 1266 // load the klass and check the has finalizer flag |
1267 Label register_finalizer; | 1267 Label register_finalizer; |
1268 Register t = rsi; | 1268 Register t = rsi; |
1269 __ load_klass(t, rax); | 1269 __ load_klass(t, rax); |
1270 __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); | 1270 __ movl(t, Address(t, Klass::access_flags_offset())); |
1271 __ testl(t, JVM_ACC_HAS_FINALIZER); | 1271 __ testl(t, JVM_ACC_HAS_FINALIZER); |
1272 __ jcc(Assembler::notZero, register_finalizer); | 1272 __ jcc(Assembler::notZero, register_finalizer); |
1273 __ ret(0); | 1273 __ ret(0); |
1274 | 1274 |
1275 __ bind(register_finalizer); | 1275 __ bind(register_finalizer); |