comparison src/cpu/x86/vm/c1_CodeStubs_x86.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents 09aad8452938
children d02120b7a34f
comparing 6724:36d1d483d5d6 with 6725:da91efe96a93
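
The hunks below implement this commit's central split for patching stubs: a patched Klass constant, formerly a permgen oop, is now native metadata, so load_klass_id emits mov_metadata and gets a metadata_type relocation, while loading the java.lang.Class mirror becomes a separate load_mirror_id case that still emits movoop with an oop_type relocation. A minimal self-contained C++ sketch of that id-to-relocation mapping, with illustrative stand-in names rather than real HotSpot types:

    #include <cassert>
    #include <cstdio>

    enum PatchID   { access_field_id, load_klass_id, load_mirror_id };
    enum RelocType { reloc_none, reloc_metadata, reloc_oop };

    // Mirrors the id -> relocation selection in the switch below: with class
    // metadata moved out of the Java heap, a patched Klass constant is tagged
    // as metadata, while the java.lang.Class mirror stays a tagged oop.
    static RelocType reloc_for(PatchID id) {
      switch (id) {
        case access_field_id: return reloc_none;      // no embedded constant to relocate
        case load_klass_id:   return reloc_metadata;  // native Metadata*, not a heap oop
        case load_mirror_id:  return reloc_oop;       // java.lang.Class instance in the heap
      }
      return reloc_none;
    }

    int main() {
      assert(reloc_for(load_klass_id)  == reloc_metadata);
      assert(reloc_for(load_mirror_id) == reloc_oop);
      printf("klass constants are metadata; mirror constants are oops\n");
      return 0;
    }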
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -282,11 +282,28 @@
   if (CommentedAssembly) {
     __ block_comment(" patch template");
   }
   if (_id == load_klass_id) {
     // produce a copy of the load klass instruction for use by the being initialized case
+#ifdef ASSERT
     address start = __ pc();
+#endif
+    Metadata* o = NULL;
+    __ mov_metadata(_obj, o);
+#ifdef ASSERT
+    for (int i = 0; i < _bytes_to_copy; i++) {
+      address ptr = (address)(_pc_start + i);
+      int a_byte = (*ptr) & 0xFF;
+      assert(a_byte == *start++, "should be the same code");
+    }
+#endif
+  } else if (_id == load_mirror_id) {
+    // produce a copy of the load mirror instruction for use by the being
+    // initialized case
+#ifdef ASSERT
+    address start = __ pc();
+#endif
     jobject o = NULL;
     __ movoop(_obj, o);
 #ifdef ASSERT
     for (int i = 0; i < _bytes_to_copy; i++) {
       address ptr = (address)(_pc_start + i);
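
The #ifdef ASSERT loop in the hunk above checks, byte by byte, that the instruction template just re-emitted into the stub matches the bytes originally emitted at the patch site (_pc_start), since the being-initialized fast path will execute that copy. A standalone sketch of the same check, with hypothetical helper and buffer names:

    #include <cassert>
    #include <cstring>

    // Hypothetical stand-in: compare a re-emitted instruction template against
    // the bytes at the original patch site, as the ASSERT loop above does.
    static void verify_copied_template(const unsigned char* pc_start,
                                       const unsigned char* start,
                                       int bytes_to_copy) {
      for (int i = 0; i < bytes_to_copy; i++) {
        int a_byte = pc_start[i] & 0xFF;   // byte at the original patch site
        assert(a_byte == start[i] && "should be the same code");
      }
    }

    int main() {
      // A 10-byte x86-64 "mov rax, imm64" with a zero immediate, like the
      // to-be-patched constant load.
      unsigned char site[] = { 0x48, 0xB8, 0, 0, 0, 0, 0, 0, 0, 0 };
      unsigned char copy[sizeof(site)];
      memcpy(copy, site, sizeof(site));
      verify_copied_template(site, copy, (int)sizeof(site));
      return 0;
    }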
@@ -304,11 +321,11 @@
     }
   }
 
   address end_of_patch = __ pc();
   int bytes_to_skip = 0;
-  if (_id == load_klass_id) {
+  if (_id == load_mirror_id) {
     int offset = __ offset();
     if (CommentedAssembly) {
       __ block_comment(" being_initialized check");
     }
     assert(_obj != noreg, "must be a valid register");
@@ -316,13 +333,13 @@
     Register tmp2 = rbx;
     __ push(tmp);
     __ push(tmp2);
     // Load without verification to keep code size small. We need it because
     // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
-    __ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
+    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
     __ get_thread(tmp);
-    __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset()));
+    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
     __ pop(tmp2);
     __ pop(tmp);
     __ jcc(Assembler::notEqual, call_patch);
 
     // access_field patches may execute the patched code before it's
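
The being_initialized check above now reads the Klass* straight out of the mirror with movptr (after this change the klass field of java.lang.Class holds a native pointer, so no oop verification or compressed-oop decoding applies) and compares the class's initializing thread against the current thread; only that thread may run the patched code before initialization completes. A plain C++ sketch of the logic, with hypothetical layouts standing in for the real offsets:

    #include <cassert>

    struct Thread {};                        // stand-in for the VM thread type
    struct Klass  { Thread* init_thread; };  // hypothetical field behind init_thread_offset()
    struct Mirror { Klass* klass; };         // java.lang.Class stand-in, cf. klass_offset_in_bytes()

    // movptr(tmp2, Address(_obj, klass_offset)); get_thread(tmp);
    // cmpptr(tmp, Address(tmp2, init_thread_offset)); jcc(notEqual, call_patch);
    static bool may_run_patched_code(const Mirror* obj, const Thread* current) {
      return obj->klass->init_thread == current;
    }

    int main() {
      Thread initializer, other;
      Klass  k = { &initializer };
      Mirror m = { &k };
      assert(may_run_patched_code(&m, &initializer));   // initializing thread: fast path
      assert(!may_run_patched_code(&m, &other));        // anyone else: jump to call_patch
      return 0;
    }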
@@ -355,13 +372,15 @@
   assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
 
   address entry = __ pc();
   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
   address target = NULL;
+  relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
     case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
-    case load_klass_id:   target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
+    case load_klass_id:   target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+    case load_mirror_id:  target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
 
   if (CommentedAssembly) {
@@ -375,14 +394,14 @@
   // Add enough nops so deoptimization can overwrite the jmp above with a call
   // and not destroy the world.
   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
     __ nop();
   }
-  if (_id == load_klass_id) {
+  if (_id == load_klass_id || _id == load_mirror_id) {
     CodeSection* cs = __ code_section();
     RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
-    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
+    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
   }
 }
 
 
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
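
On the nop padding at the top of the hunk above: a rel32 call on x86 is 5 bytes, and deoptimization may overwrite the jmp emitted at jmp_off with such a call, so the stub pads with nops until the 5 bytes starting at jmp_off are covered (the jmp itself may have been encoded in as few as 2 bytes). A tiny arithmetic sketch with hypothetical offsets:

    #include <cassert>

    // Mirrors: for (int j = __ offset(); j < jmp_off + 5; j++) __ nop();
    static int nops_needed(int jmp_off, int cur_off) {
      int nops = 0;
      for (int j = cur_off; j < jmp_off + 5; j++) nops++;
      return nops;
    }

    int main() {
      // If the jmp started at offset 40 and was encoded as a 2-byte short jmp,
      // emission is now at 42 and 3 nop bytes make room for a 5-byte call.
      assert(nops_needed(40, 42) == 3);
      // A jmp already encoded as 5 bytes needs no padding.
      assert(nops_needed(40, 45) == 0);
      return 0;
    }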
@@ -418,11 +437,11 @@
 
 void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
   //---------------slow case: call to native-----------------
   __ bind(_entry);
   // Figure out where the args should go
-  // This should really convert the IntrinsicID to the methodOop and signature
+  // This should really convert the IntrinsicID to the Method* and signature
   // but I don't know how to do that.
   //
   VMRegPair args[5];
   BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
   SharedRuntime::java_calling_convention(signature, args, 5, true);
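
For reference, the five-slot signature in this hunk is System.arraycopy's parameter list: (Object src, int srcPos, Object dest, int destPos, int length). A small stand-alone illustration of the correspondence (parameter names per the JDK; the rest is illustrative, not HotSpot code):

    #include <cstdio>

    enum BasicType { T_OBJECT, T_INT };

    int main() {
      // Same order as the BasicType signature[5] passed to java_calling_convention.
      const BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
      const char*     params[5]    = { "src", "srcPos", "dest", "destPos", "length" };
      for (int i = 0; i < 5; i++) {
        printf("%-7s : %s\n", params[i], signature[i] == T_OBJECT ? "T_OBJECT" : "T_INT");
      }
      return 0;
    }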