# HG changeset patch
# User iveresov
# Date 1396568275 25200
# Node ID 6df24530bf141fae396b3b43c1214c179348fcee
# Parent  876390ee9b6ffa1b8410efe7fd4a1d33b2d8f536
# Parent  56e7f5560e602c5db96d2e1c474f7400777338b7
Merge

diff -r 876390ee9b6f -r 6df24530bf14 make/linux/Makefile
--- a/make/linux/Makefile	Wed Apr 02 11:24:51 2014 -0700
+++ b/make/linux/Makefile	Thu Apr 03 16:37:55 2014 -0700
@@ -66,8 +66,8 @@
     FORCE_TIERED=1
   endif
 endif
-# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
-ifneq (,$(filter $(ARCH),ppc64 pp64le))
+# C1 is not ported on ppc64, so we cannot build a tiered VM:
+ifeq ($(ARCH),ppc64)
   FORCE_TIERED=0
 endif
 
diff -r 876390ee9b6f -r 6df24530bf14 make/linux/makefiles/defs.make
--- a/make/linux/makefiles/defs.make	Wed Apr 02 11:24:51 2014 -0700
+++ b/make/linux/makefiles/defs.make	Thu Apr 03 16:37:55 2014 -0700
@@ -33,6 +33,11 @@
 # ARCH can be set explicitly in spec.gmk
 ifndef ARCH
   ARCH := $(shell uname -m)
+  # Fold little endian PowerPC64 into big-endian (if ARCH is set in
+  # hotspot-spec.gmk, this will be done by the configure script).
+  ifeq ($(ARCH),ppc64le)
+    ARCH := ppc64
+  endif
 endif
 
 PATH_SEP ?= :
diff -r 876390ee9b6f -r 6df24530bf14 make/linux/makefiles/ppc64.make
--- a/make/linux/makefiles/ppc64.make	Wed Apr 02 11:24:51 2014 -0700
+++ b/make/linux/makefiles/ppc64.make	Thu Apr 03 16:37:55 2014 -0700
@@ -26,14 +26,26 @@
 # make c code know it is on a 64 bit platform.
 CFLAGS += -D_LP64=1
 
-# fixes `relocation truncated to fit' error for gcc 4.1.
-CFLAGS += -mminimal-toc
+ifeq ($(origin OPENJDK_TARGET_CPU_ENDIAN),undefined)
+  # This can happen during hotspot standalone build. Set endianness from
+  # uname. We assume build and target machines are the same.
+  OPENJDK_TARGET_CPU_ENDIAN:=$(if $(filter ppc64le,$(shell uname -m)),little,big)
+endif
 
-# finds use ppc64 instructions, but schedule for power5
-CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+ifeq ($(filter $(OPENJDK_TARGET_CPU_ENDIAN),big little),)
+  $(error OPENJDK_TARGET_CPU_ENDIAN value should be 'big' or 'little')
+endif
 
-# let linker find external 64 bit libs.
-LFLAGS_VM += -L/lib64
+ifeq ($(OPENJDK_TARGET_CPU_ENDIAN),big)
+  # fixes `relocation truncated to fit' error for gcc 4.1.
+  CFLAGS += -mminimal-toc
 
-# specify lib format.
-LFLAGS_VM += -Wl,-melf64ppc
+  # finds use ppc64 instructions, but schedule for power5
+  CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+else
+  # Little endian machine uses ELFv2 ABI.
+  CFLAGS += -DVM_LITTLE_ENDIAN -DABI_ELFv2
+
+  # Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
+  CFLAGS += -mcpu=power7 -mtune=power8 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+endif
diff -r 876390ee9b6f -r 6df24530bf14 src/cpu/ppc/vm/assembler_ppc.hpp
--- a/src/cpu/ppc/vm/assembler_ppc.hpp	Wed Apr 02 11:24:51 2014 -0700
+++ b/src/cpu/ppc/vm/assembler_ppc.hpp	Thu Apr 03 16:37:55 2014 -0700
@@ -1025,15 +1025,14 @@
   }
 
   static void set_imm(int* instr, short s) {
-    short* p = ((short *)instr) + 1;
-    *p = s;
+    // imm is always in the lower 16 bits of the instruction,
+    // so this is endian-neutral. Same for the get_imm below.
+    uint32_t w = *(uint32_t *)instr;
+    *instr = (int)((w & ~0x0000FFFF) | (s & 0x0000FFFF));
   }
 
   static int get_imm(address a, int instruction_number) {
-    short imm;
-    short *p =((short *)a)+2*instruction_number+1;
-    imm = *p;
-    return (int)imm;
+    return (short)((int *)a)[instruction_number];
   }
 
   static inline int hi16_signed( int x) { return (int)(int16_t)(x >> 16); }
diff -r 876390ee9b6f -r 6df24530bf14 src/cpu/ppc/vm/bytes_ppc.hpp
--- a/src/cpu/ppc/vm/bytes_ppc.hpp	Wed Apr 02 11:24:51 2014 -0700
+++ b/src/cpu/ppc/vm/bytes_ppc.hpp	Thu Apr 03 16:37:55 2014 -0700
@@ -35,6 +35,126 @@
 
   // Can I count on address always being a pointer to an unsigned char? Yes.
 
+#if defined(VM_LITTLE_ENDIAN)
+
+  // Returns true, if the byte ordering used by Java is different from the native byte ordering
+  // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
+  static inline bool is_Java_byte_ordering_different() { return true; }
+
+  // Forward declarations of the compiler-dependent implementation
+  static inline u2 swap_u2(u2 x);
+  static inline u4 swap_u4(u4 x);
+  static inline u8 swap_u8(u8 x);
+
+  static inline u2 get_native_u2(address p) {
+    return (intptr_t(p) & 1) == 0
+             ?  *(u2*)p
+             :  ( u2(p[1]) << 8 )
+              | ( u2(p[0])      );
+  }
+
+  static inline u4 get_native_u4(address p) {
+    switch (intptr_t(p) & 3) {
+    case 0:  return *(u4*)p;
+
+    case 2:  return ( u4( ((u2*)p)[1] ) << 16 )
+                  | ( u4( ((u2*)p)[0] )       );
+
+    default: return ( u4(p[3]) << 24 )
+                  | ( u4(p[2]) << 16 )
+                  | ( u4(p[1]) << 8  )
+                  |   u4(p[0]);
+    }
+  }
+
+  static inline u8 get_native_u8(address p) {
+    switch (intptr_t(p) & 7) {
+    case 0:  return *(u8*)p;
+
+    case 4:  return ( u8( ((u4*)p)[1] ) << 32 )
+                  | ( u8( ((u4*)p)[0] )       );
+
+    case 2:  return ( u8( ((u2*)p)[3] ) << 48 )
+                  | ( u8( ((u2*)p)[2] ) << 32 )
+                  | ( u8( ((u2*)p)[1] ) << 16 )
+                  | ( u8( ((u2*)p)[0] )       );
+
+    default: return ( u8(p[7]) << 56 )
+                  | ( u8(p[6]) << 48 )
+                  | ( u8(p[5]) << 40 )
+                  | ( u8(p[4]) << 32 )
+                  | ( u8(p[3]) << 24 )
+                  | ( u8(p[2]) << 16 )
+                  | ( u8(p[1]) << 8  )
+                  |   u8(p[0]);
+    }
+  }
+
+
+
+  static inline void put_native_u2(address p, u2 x) {
+    if ( (intptr_t(p) & 1) == 0 )  *(u2*)p = x;
+    else {
+      p[1] = x >> 8;
+      p[0] = x;
+    }
+  }
+
+  static inline void put_native_u4(address p, u4 x) {
+    switch ( intptr_t(p) & 3 ) {
+    case 0:  *(u4*)p = x;
+             break;
+
+    case 2:  ((u2*)p)[1] = x >> 16;
+             ((u2*)p)[0] = x;
+             break;
+
+    default: ((u1*)p)[3] = x >> 24;
+             ((u1*)p)[2] = x >> 16;
+             ((u1*)p)[1] = x >> 8;
+             ((u1*)p)[0] = x;
+             break;
+    }
+  }
+
+  static inline void put_native_u8(address p, u8 x) {
+    switch ( intptr_t(p) & 7 ) {
+    case 0:  *(u8*)p = x;
+             break;
+
+    case 4:  ((u4*)p)[1] = x >> 32;
+             ((u4*)p)[0] = x;
+             break;
+
+    case 2:  ((u2*)p)[3] = x >> 48;
+             ((u2*)p)[2] = x >> 32;
+             ((u2*)p)[1] = x >> 16;
+             ((u2*)p)[0] = x;
+             break;
+
+    default: ((u1*)p)[7] = x >> 56;
+             ((u1*)p)[6] = x >> 48;
+             ((u1*)p)[5] = x >> 40;
+             ((u1*)p)[4] = x >> 32;
+             ((u1*)p)[3] = x >> 24;
+             ((u1*)p)[2] = x >> 16;
+             ((u1*)p)[1] = x >> 8;
+             ((u1*)p)[0] = x;
+    }
+  }
+
+  // Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
+  // (no byte-order reversal is needed since Power CPUs are big-endian oriented).
+  static inline u2   get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
+  static inline u4   get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
+  static inline u8   get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }
+
+  static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, swap_u2(x)); }
+  static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, swap_u4(x)); }
+  static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, swap_u8(x)); }
+
+#else // !defined(VM_LITTLE_ENDIAN)
+
   // Returns true, if the byte ordering used by Java is different from the nativ byte ordering
   // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
   static inline bool is_Java_byte_ordering_different() { return false; }
@@ -150,6 +270,12 @@
   static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, x); }
   static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, x); }
   static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, x); }
+
+#endif // VM_LITTLE_ENDIAN
 };
 
+#if defined(TARGET_OS_ARCH_linux_ppc)
+#include "bytes_linux_ppc.inline.hpp"
+#endif
+
 #endif // CPU_PPC_VM_BYTES_PPC_HPP
diff -r 876390ee9b6f -r 6df24530bf14 src/os/linux/vm/os_linux.cpp
--- a/src/os/linux/vm/os_linux.cpp	Wed Apr 02 11:24:51 2014 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Thu Apr 03 16:37:55 2014 -0700
@@ -1963,7 +1963,11 @@
     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
+#if defined(VM_LITTLE_ENDIAN)
+    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"},
+#else
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
+#endif
     {EM_ARM,         EM_ARM,     ELFCLASS32,   ELFDATA2LSB, (char*)"ARM"},
     {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
     {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
diff -r 876390ee9b6f -r 6df24530bf14 src/os_cpu/linux_ppc/vm/bytes_linux_ppc.inline.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/bytes_linux_ppc.inline.hpp	Thu Apr 03 16:37:55 2014 -0700
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2014 Google Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+#define OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+
+#if defined(VM_LITTLE_ENDIAN)
+#include <byteswap.h>
+
+// Efficient swapping of data bytes from Java byte
+// ordering to native byte ordering and vice versa.
+inline u2 Bytes::swap_u2(u2 x) { return bswap_16(x); }
+inline u4 Bytes::swap_u4(u4 x) { return bswap_32(x); }
+inline u8 Bytes::swap_u8(u8 x) { return bswap_64(x); }
+#endif // VM_LITTLE_ENDIAN
+
+#endif // OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
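
Below is a small standalone sketch, not part of the changeset, showing why the rewritten set_imm/get_imm in assembler_ppc.hpp above are endian-neutral: a PPC D-form instruction keeps its 16-bit immediate in the low-order bits of the 32-bit instruction word, so masking the word as a value (rather than poking a short at a fixed byte offset) behaves the same on big- and little-endian hosts. The main() function, the sample opcode constant, and the free-standing function names are only illustrative assumptions for the demo.

// Illustrative sketch only -- mirrors the endian-neutral logic of the patch.
#include <cstdint>
#include <cstdio>

// Replace the 16-bit immediate field of a 32-bit instruction word.
// The immediate occupies the low-order bits of the value, so this works
// identically regardless of the host's byte order.
static void set_imm(int* instr, short s) {
  uint32_t w = *(uint32_t*)instr;
  *instr = (int)((w & ~0x0000FFFF) | (s & 0x0000FFFF));
}

// Read the immediate back, sign-extended, from the n-th instruction word.
static int get_imm(const int* a, int instruction_number) {
  return (short)a[instruction_number];
}

int main() {
  int code[1] = { 0x38600000 };                 // addi r3, r0, 0 ("li r3, 0")
  set_imm(code, -5);                            // patch the immediate to -5
  std::printf("imm = %d\n", get_imm(code, 0));  // prints: imm = -5
  return 0;
}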