comparison src/share/vm/c1/c1_LIR.hpp @ 2002:ac637b7220d1

6985015: C1 needs to support compressed oops
Summary: This change implements compressed oops for C1 on x64 and sparc. The changes are mostly at the codegen level, with a few exceptions where the IR accesses uncompressed locations outside the heap. Compressed oops are now also enabled with tiered compilation.
Reviewed-by: twisti, kvn, never, phh
author iveresov
date Tue, 30 Nov 2010 23:23:40 -0800
parents f95d63e2154a
children 037c727f35fb
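For context, compressed oops store a 64-bit heap reference in memory as a 32-bit offset from a heap base, optionally scaled by the object alignment, which is why oop loads and stores need a distinct code path once UseCompressedOops is on. The sketch below is illustrative only and not HotSpot code: heap_base and oop_shift are made-up stand-ins for the VM's actual settings, and it just shows the encode/decode arithmetic the code generator has to emit around such moves.

    // Illustrative sketch only -- not HotSpot code. A 64-bit in-heap pointer is
    // stored as a 32-bit value and reconstructed as base + (narrow << shift).
    // heap_base and oop_shift are hypothetical constants for this example.
    #include <cstdint>
    #include <cassert>

    static const uint64_t heap_base = 0x0000000700000000ULL; // assumed heap start
    static const int      oop_shift = 3;                     // 8-byte object alignment

    inline uint32_t encode_oop(uint64_t oop) {
      if (oop == 0) return 0;                                // NULL stays NULL
      return (uint32_t)((oop - heap_base) >> oop_shift);
    }

    inline uint64_t decode_oop(uint32_t narrow) {
      if (narrow == 0) return 0;
      return heap_base + ((uint64_t)narrow << oop_shift);
    }

    int main() {
      uint64_t p = heap_base + 0x1234560;                    // some aligned in-heap address
      assert(decode_oop(encode_oop(p)) == p);                // round trip is lossless
      return 0;
    }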
@@ -983,10 +983,11 @@
 
 enum LIR_MoveKind {
   lir_move_normal,
   lir_move_volatile,
   lir_move_unaligned,
+  lir_move_wide,
   lir_move_max_flag
 };
 
 
 // --------------------------------------------------
@@ -1930,11 +1931,24 @@
   void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
   void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
-
+  void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
+    if (UseCompressedOops) {
+      append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
+    } else {
+      move(src, dst, info);
+    }
+  }
+  void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
+    if (UseCompressedOops) {
+      append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
+    } else {
+      move(src, dst, info);
+    }
+  }
   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
 
   void oop2reg (jobject o, LIR_Opr reg) { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o), reg)); }
   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
 