# HG changeset patch
# User Christos Kotselidis
# Date 1385042694 -3600
# Node ID f9f4503a4ab52c2e22adc37c7eb485396b45c144
# Parent 790ebab62d23b1901713b94faf0b115192b2304a
# Parent df7fa4734c448fed4a732fadffef245d6f75d4f9
Merge

diff -r 790ebab62d23 -r f9f4503a4ab5 .hgtags
--- a/.hgtags	Thu Nov 21 15:04:26 2013 +0100
+++ b/.hgtags	Thu Nov 21 15:04:54 2013 +0100
@@ -383,3 +383,15 @@
 58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
 6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
 562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
+f6962730bbde82f279a0ae3a1c14bc5e58096c6e jdk8-b111
+4a845c7a463844cead9e1e1641d6bcfb8a77f1c7 hs25-b54
+0ed9a90f45e1b392c671005f9ee22ce1acf02984 jdk8-b112
+23b8db5ea31d3079f1326afde4cd5c67b1dac49c hs25-b55
+4589b398ab03aba6a5da8c06ff53603488d1b8f4 jdk8-b113
+82a9cdbf683e374a76f2009352de53e16bed5a91 hs25-b56
+7fd913010dbbf75260688fd2fa8964763fa49a09 jdk8-b114
+3b32d287da89a47a45d16f6d9ba5bd3cd9bf4b3e hs25-b57
+9ebaac78a8a0061fb9597e07f806498cb626cdeb jdk8-b115
+e510dfdec6dd701410f3398ed86ebcdff0cca63a hs25-b58
+52b076e6ffae247c1c7d8b7aba995195be2b6fc2 jdk8-b116
+c78d517c7ea47501b456e707afd4b78e7b5b202e hs25-b59
diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/os/bsd/ps_core.c
--- a/agent/src/os/bsd/ps_core.c	Thu Nov 21 15:04:26 2013 +0100
+++ b/agent/src/os/bsd/ps_core.c	Thu Nov 21 15:04:54 2013 +0100
@@ -44,6 +44,7 @@
 // close all file descriptors
 static void close_files(struct ps_prochandle* ph) {
   lib_info* lib = NULL;
+
   // close core file descriptor
   if (ph->core->core_fd >= 0)
     close(ph->core->core_fd);
@@ -149,8 +150,7 @@

 // Return the map_info for the given virtual address. We keep a sorted
 // array of pointers in ph->map_array, so we can binary search.
-static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
-{
+static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
   int mid, lo = 0, hi = ph->core->num_maps - 1;
   map_info *mp;

@@ -230,9 +230,9 @@
   size_t _used;    // for setting space top on read

   // 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with
-  // the C type matching the C++ bool type on any given platform. For
-  // Hotspot on BSD we assume the corresponding C type is char but
-  // licensees on BSD versions may need to adjust the type of these fields.
+  // the C type matching the C++ bool type on any given platform.
+  // We assume the corresponding C type is char but licensees
+  // may need to adjust the type of these fields.
   char _read_only;   // read only space?
   char _allow_exec;  // executable code in space?

@@ -286,10 +286,12 @@
 #define USE_SHARED_SPACES_SYM "_UseSharedSpaces"
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "__ZN9Arguments17SharedArchivePathE"
+#define LIBJVM_NAME "/libjvm.dylib"
 #else
 #define USE_SHARED_SPACES_SYM "UseSharedSpaces"
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
+#define LIBJVM_NAME "/libjvm.so"
 #endif // __APPLE__

 static bool init_classsharing_workaround(struct ps_prochandle* ph) {
@@ -300,12 +302,7 @@
     // we are iterating over shared objects from the core dump. look for
     // libjvm.so.
const char *jvm_name = 0; -#ifdef __APPLE__ - if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0) -#else - if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) -#endif // __APPLE__ - { + if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) { char classes_jsa[PATH_MAX]; struct FileMapHeader header; int fd = -1; @@ -399,8 +396,8 @@ } } return true; - } - lib = lib->next; + } + lib = lib->next; } return true; } @@ -432,8 +429,8 @@ // allocate map_array map_info** array; if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) { - print_debug("can't allocate memory for map array\n"); - return false; + print_debug("can't allocate memory for map array\n"); + return false; } // add maps to array @@ -450,7 +447,7 @@ ph->core->map_array = array; // sort the map_info array by base virtual address. qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*), - core_cmp_mapping); + core_cmp_mapping); // print map if (is_debug()) { @@ -458,7 +455,7 @@ print_debug("---- sorted virtual address map ----\n"); for (j = 0; j < ph->core->num_maps; j++) { print_debug("base = 0x%lx\tsize = %d\n", ph->core->map_array[j]->vaddr, - ph->core->map_array[j]->memsz); + ph->core->map_array[j]->memsz); } } @@ -1091,9 +1088,9 @@ notep->n_type, notep->n_descsz); if (notep->n_type == NT_PRSTATUS) { - if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) { - return false; - } + if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) { + return false; + } } p = descdata + ROUNDUP(notep->n_descsz, 4); } @@ -1121,7 +1118,7 @@ * contains a set of saved /proc structures), and PT_LOAD (which * represents a memory mapping from the process's address space). * - * Difference b/w Solaris PT_NOTE and BSD PT_NOTE: + * Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE: * * In Solaris there are two PT_NOTE segments the first PT_NOTE (if present) * contains /proc structs in the pre-2.6 unstructured /proc format. the last @@ -1167,32 +1164,61 @@ // read segments of a shared object static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) { - int i = 0; - ELF_PHDR* phbuf; - ELF_PHDR* lib_php = NULL; + int i = 0; + ELF_PHDR* phbuf; + ELF_PHDR* lib_php = NULL; + + int page_size=sysconf(_SC_PAGE_SIZE); - if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) - return false; + if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) { + return false; + } + + // we want to process only PT_LOAD segments that are not writable. + // i.e., text segments. The read/write/exec (data) segments would + // have been already added from core file segments. + for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) { + if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) { + + uintptr_t target_vaddr = lib_php->p_vaddr + lib_base; + map_info *existing_map = core_lookup(ph, target_vaddr); - // we want to process only PT_LOAD segments that are not writable. - // i.e., text segments. The read/write/exec (data) segments would - // have been already added from core file segments. 
- for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) { - if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) { - if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL) - goto err; + if (existing_map == NULL){ + if (add_map_info(ph, lib_fd, lib_php->p_offset, + target_vaddr, lib_php->p_filesz) == NULL) { + goto err; + } + } else { + if ((existing_map->memsz != page_size) && + (existing_map->fd != lib_fd) && + (existing_map->memsz != lib_php->p_filesz)){ + + print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)", + target_vaddr, lib_php->p_filesz, lib_php->p_flags); + goto err; + } + + /* replace PT_LOAD segment with library segment */ + print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n", + existing_map->memsz, lib_php->p_filesz); + + existing_map->fd = lib_fd; + existing_map->offset = lib_php->p_offset; + existing_map->memsz = lib_php->p_filesz; } - lib_php++; - } + } + + lib_php++; + } - free(phbuf); - return true; + free(phbuf); + return true; err: - free(phbuf); - return false; + free(phbuf); + return false; } -// process segments from interpreter (ld-elf.so.1) +// process segments from interpreter (ld.so or ld-linux.so or ld-elf.so) static bool read_interp_segments(struct ps_prochandle* ph) { ELF_EHDR interp_ehdr; @@ -1303,32 +1329,34 @@ debug_base = dyn.d_un.d_ptr; // at debug_base we have struct r_debug. This has first link map in r_map field if (ps_pread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET, - &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) { + &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) { print_debug("can't read first link map address\n"); return false; } // read ld_base address from struct r_debug - // XXX: There is no r_ldbase member on BSD - /* +#if 0 // There is no r_ldbase member on BSD if (ps_pread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr, sizeof(uintptr_t)) != PS_OK) { print_debug("can't read ld base address\n"); return false; } ph->core->ld_base_addr = ld_base_addr; - */ +#else ph->core->ld_base_addr = 0; +#endif print_debug("interpreter base address is 0x%lx\n", ld_base_addr); - // now read segments from interp (i.e ld-elf.so.1) - if (read_interp_segments(ph) != true) + // now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so) + if (read_interp_segments(ph) != true) { return false; + } // after adding interpreter (ld.so) mappings sort again - if (sort_map_array(ph) != true) + if (sort_map_array(ph) != true) { return false; + } print_debug("first link map is at 0x%lx\n", first_link_map_addr); @@ -1380,8 +1408,9 @@ add_lib_info_fd(ph, lib_name, lib_fd, lib_base); // Map info is added for the library (lib_name) so // we need to re-sort it before calling the p_pdread. 
- if (sort_map_array(ph) != true) + if (sort_map_array(ph) != true) { return false; + } } else { print_debug("can't read ELF header for shared object %s\n", lib_name); close(lib_fd); @@ -1392,7 +1421,7 @@ // read next link_map address if (ps_pread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET, - &link_map_addr, sizeof(uintptr_t)) != PS_OK) { + &link_map_addr, sizeof(uintptr_t)) != PS_OK) { print_debug("can't read next link in link_map\n"); return false; } @@ -1408,7 +1437,7 @@ struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle)); if (ph == NULL) { - print_debug("cant allocate ps_prochandle\n"); + print_debug("can't allocate ps_prochandle\n"); return NULL; } @@ -1444,38 +1473,45 @@ } if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) { - print_debug("executable file is not a valid ELF ET_EXEC file\n"); - goto err; + print_debug("executable file is not a valid ELF ET_EXEC file\n"); + goto err; } // process core file segments - if (read_core_segments(ph, &core_ehdr) != true) - goto err; + if (read_core_segments(ph, &core_ehdr) != true) { + goto err; + } // process exec file segments - if (read_exec_segments(ph, &exec_ehdr) != true) - goto err; + if (read_exec_segments(ph, &exec_ehdr) != true) { + goto err; + } // exec file is also treated like a shared object for symbol search if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd, - (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) - goto err; + (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) { + goto err; + } // allocate and sort maps into map_array, we need to do this // here because read_shared_lib_info needs to read from debuggee // address space - if (sort_map_array(ph) != true) + if (sort_map_array(ph) != true) { goto err; + } - if (read_shared_lib_info(ph) != true) + if (read_shared_lib_info(ph) != true) { goto err; + } // sort again because we have added more mappings from shared objects - if (sort_map_array(ph) != true) + if (sort_map_array(ph) != true) { goto err; + } - if (init_classsharing_workaround(ph) != true) + if (init_classsharing_workaround(ph) != true) { goto err; + } print_debug("Leave Pgrab_core\n"); return ph; diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/os/bsd/ps_proc.c --- a/agent/src/os/bsd/ps_proc.c Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/os/bsd/ps_proc.c Thu Nov 21 15:04:54 2013 +0100 @@ -131,7 +131,7 @@ static bool ptrace_continue(pid_t pid, int signal) { // pass the signal to the process so we don't swallow it - if (ptrace(PTRACE_CONT, pid, NULL, signal) < 0) { + if (ptrace(PT_CONTINUE, pid, NULL, signal) < 0) { print_debug("ptrace(PTRACE_CONT, ..) failed for %d\n", pid); return false; } @@ -434,7 +434,6 @@ // attach to the process. 
One and only one exposed stuff struct ps_prochandle* Pgrab(pid_t pid) { struct ps_prochandle* ph = NULL; - thread_info* thr = NULL; if ( (ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle))) == NULL) { print_debug("can't allocate memory for ps_prochandle\n"); diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/os/linux/ps_core.c --- a/agent/src/os/linux/ps_core.c Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/os/linux/ps_core.c Thu Nov 21 15:04:54 2013 +0100 @@ -41,155 +41,158 @@ // ps_prochandle cleanup helper functions // close all file descriptors -static void close_elf_files(struct ps_prochandle* ph) { - lib_info* lib = NULL; +static void close_files(struct ps_prochandle* ph) { + lib_info* lib = NULL; - // close core file descriptor - if (ph->core->core_fd >= 0) - close(ph->core->core_fd); + // close core file descriptor + if (ph->core->core_fd >= 0) + close(ph->core->core_fd); - // close exec file descriptor - if (ph->core->exec_fd >= 0) - close(ph->core->exec_fd); + // close exec file descriptor + if (ph->core->exec_fd >= 0) + close(ph->core->exec_fd); - // close interp file descriptor - if (ph->core->interp_fd >= 0) - close(ph->core->interp_fd); + // close interp file descriptor + if (ph->core->interp_fd >= 0) + close(ph->core->interp_fd); - // close class share archive file - if (ph->core->classes_jsa_fd >= 0) - close(ph->core->classes_jsa_fd); + // close class share archive file + if (ph->core->classes_jsa_fd >= 0) + close(ph->core->classes_jsa_fd); - // close all library file descriptors - lib = ph->libs; - while (lib) { - int fd = lib->fd; - if (fd >= 0 && fd != ph->core->exec_fd) close(fd); - lib = lib->next; - } + // close all library file descriptors + lib = ph->libs; + while (lib) { + int fd = lib->fd; + if (fd >= 0 && fd != ph->core->exec_fd) { + close(fd); + } + lib = lib->next; + } } // clean all map_info stuff static void destroy_map_info(struct ps_prochandle* ph) { map_info* map = ph->core->maps; while (map) { - map_info* next = map->next; - free(map); - map = next; + map_info* next = map->next; + free(map); + map = next; } if (ph->core->map_array) { - free(ph->core->map_array); + free(ph->core->map_array); } // Part of the class sharing workaround map = ph->core->class_share_maps; while (map) { - map_info* next = map->next; - free(map); - map = next; + map_info* next = map->next; + free(map); + map = next; } } // ps_prochandle operations static void core_release(struct ps_prochandle* ph) { - if (ph->core) { - close_elf_files(ph); - destroy_map_info(ph); - free(ph->core); - } + if (ph->core) { + close_files(ph); + destroy_map_info(ph); + free(ph->core); + } } static map_info* allocate_init_map(int fd, off_t offset, uintptr_t vaddr, size_t memsz) { - map_info* map; - if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) { - print_debug("can't allocate memory for map_info\n"); - return NULL; - } + map_info* map; + if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) { + print_debug("can't allocate memory for map_info\n"); + return NULL; + } - // initialize map - map->fd = fd; - map->offset = offset; - map->vaddr = vaddr; - map->memsz = memsz; - return map; + // initialize map + map->fd = fd; + map->offset = offset; + map->vaddr = vaddr; + map->memsz = memsz; + return map; } // add map info with given fd, offset, vaddr and memsz static map_info* add_map_info(struct ps_prochandle* ph, int fd, off_t offset, uintptr_t vaddr, size_t memsz) { - map_info* map; - if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) { - return NULL; - } + 
map_info* map; + if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) { + return NULL; + } - // add this to map list - map->next = ph->core->maps; - ph->core->maps = map; - ph->core->num_maps++; + // add this to map list + map->next = ph->core->maps; + ph->core->maps = map; + ph->core->num_maps++; - return map; + return map; } // Part of the class sharing workaround -static void add_class_share_map_info(struct ps_prochandle* ph, off_t offset, +static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset, uintptr_t vaddr, size_t memsz) { - map_info* map; - if ((map = allocate_init_map(ph->core->classes_jsa_fd, - offset, vaddr, memsz)) == NULL) { - return; - } + map_info* map; + if ((map = allocate_init_map(ph->core->classes_jsa_fd, + offset, vaddr, memsz)) == NULL) { + return NULL; + } - map->next = ph->core->class_share_maps; - ph->core->class_share_maps = map; + map->next = ph->core->class_share_maps; + ph->core->class_share_maps = map; + return map; } // Return the map_info for the given virtual address. We keep a sorted // array of pointers in ph->map_array, so we can binary search. -static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) -{ - int mid, lo = 0, hi = ph->core->num_maps - 1; - map_info *mp; +static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) { + int mid, lo = 0, hi = ph->core->num_maps - 1; + map_info *mp; - while (hi - lo > 1) { - mid = (lo + hi) / 2; - if (addr >= ph->core->map_array[mid]->vaddr) - lo = mid; - else - hi = mid; - } + while (hi - lo > 1) { + mid = (lo + hi) / 2; + if (addr >= ph->core->map_array[mid]->vaddr) { + lo = mid; + } else { + hi = mid; + } + } - if (addr < ph->core->map_array[hi]->vaddr) - mp = ph->core->map_array[lo]; - else - mp = ph->core->map_array[hi]; + if (addr < ph->core->map_array[hi]->vaddr) { + mp = ph->core->map_array[lo]; + } else { + mp = ph->core->map_array[hi]; + } - if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) - return (mp); + if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) { + return (mp); + } - // Part of the class sharing workaround - // Unfortunately, we have no way of detecting -Xshare state. - // Check out the share maps atlast, if we don't find anywhere. - // This is done this way so to avoid reading share pages - // ahead of other normal maps. For eg. with -Xshare:off we don't - // want to prefer class sharing data to data from core. - mp = ph->core->class_share_maps; - if (mp) { - print_debug("can't locate map_info at 0x%lx, trying class share maps\n", - addr); - } - while (mp) { - if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) { - print_debug("located map_info at 0x%lx from class share maps\n", - addr); - return (mp); - } - mp = mp->next; - } + // Part of the class sharing workaround + // Unfortunately, we have no way of detecting -Xshare state. + // Check out the share maps atlast, if we don't find anywhere. + // This is done this way so to avoid reading share pages + // ahead of other normal maps. For eg. with -Xshare:off we don't + // want to prefer class sharing data to data from core. 
+ mp = ph->core->class_share_maps; + if (mp) { + print_debug("can't locate map_info at 0x%lx, trying class share maps\n", addr); + } + while (mp) { + if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) { + print_debug("located map_info at 0x%lx from class share maps\n", addr); + return (mp); + } + mp = mp->next; + } - print_debug("can't locate map_info at 0x%lx\n", addr); - return (NULL); + print_debug("can't locate map_info at 0x%lx\n", addr); + return (NULL); } //--------------------------------------------------------------- @@ -226,9 +229,9 @@ size_t _used; // for setting space top on read // 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with - // the C type matching the C++ bool type on any given platform. For - // Hotspot on Linux we assume the corresponding C type is char but - // licensees on Linux versions may need to adjust the type of these fields. + // the C type matching the C++ bool type on any given platform. + // We assume the corresponding C type is char but licensees + // may need to adjust the type of these fields. char _read_only; // read only space? char _allow_exec; // executable code in space? @@ -238,154 +241,159 @@ }; static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) { - jboolean i; - if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) { - *pvalue = i; - return true; - } else { - return false; - } + jboolean i; + if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) { + *pvalue = i; + return true; + } else { + return false; + } } static bool read_pointer(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* pvalue) { - uintptr_t uip; - if (ps_pdread(ph, (psaddr_t) addr, &uip, sizeof(uip)) == PS_OK) { - *pvalue = uip; - return true; - } else { - return false; - } + uintptr_t uip; + if (ps_pdread(ph, (psaddr_t) addr, (char *)&uip, sizeof(uip)) == PS_OK) { + *pvalue = uip; + return true; + } else { + return false; + } } // used to read strings from debuggee static bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, size_t size) { - size_t i = 0; - char c = ' '; + size_t i = 0; + char c = ' '; - while (c != '\0') { - if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK) - return false; - if (i < size - 1) - buf[i] = c; - else // smaller buffer - return false; - i++; addr++; - } + while (c != '\0') { + if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK) { + return false; + } + if (i < size - 1) { + buf[i] = c; + } else { + // smaller buffer + return false; + } + i++; addr++; + } - buf[i] = '\0'; - return true; + buf[i] = '\0'; + return true; } #define USE_SHARED_SPACES_SYM "UseSharedSpaces" // mangled name of Arguments::SharedArchivePath #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE" +#define LIBJVM_NAME "/libjvm.so" static bool init_classsharing_workaround(struct ps_prochandle* ph) { - lib_info* lib = ph->libs; - while (lib != NULL) { - // we are iterating over shared objects from the core dump. look for - // libjvm.so. - const char *jvm_name = 0; - if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) { - char classes_jsa[PATH_MAX]; - struct FileMapHeader header; - size_t n = 0; - int fd = -1, m = 0; - uintptr_t base = 0, useSharedSpacesAddr = 0; - uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0; - jboolean useSharedSpaces = 0; - map_info* mi = 0; + lib_info* lib = ph->libs; + while (lib != NULL) { + // we are iterating over shared objects from the core dump. look for + // libjvm.so. 
+ const char *jvm_name = 0; + if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) { + char classes_jsa[PATH_MAX]; + struct FileMapHeader header; + int fd = -1; + int m = 0; + size_t n = 0; + uintptr_t base = 0, useSharedSpacesAddr = 0; + uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0; + jboolean useSharedSpaces = 0; + map_info* mi = 0; - memset(classes_jsa, 0, sizeof(classes_jsa)); - jvm_name = lib->name; - useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM); - if (useSharedSpacesAddr == 0) { - print_debug("can't lookup 'UseSharedSpaces' flag\n"); - return false; - } + memset(classes_jsa, 0, sizeof(classes_jsa)); + jvm_name = lib->name; + useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM); + if (useSharedSpacesAddr == 0) { + print_debug("can't lookup 'UseSharedSpaces' flag\n"); + return false; + } - // Hotspot vm types are not exported to build this library. So - // using equivalent type jboolean to read the value of - // UseSharedSpaces which is same as hotspot type "bool". - if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) { - print_debug("can't read the value of 'UseSharedSpaces' flag\n"); - return false; - } + // Hotspot vm types are not exported to build this library. So + // using equivalent type jboolean to read the value of + // UseSharedSpaces which is same as hotspot type "bool". + if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) { + print_debug("can't read the value of 'UseSharedSpaces' flag\n"); + return false; + } - if ((int)useSharedSpaces == 0) { - print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n"); - return true; - } + if ((int)useSharedSpaces == 0) { + print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n"); + return true; + } - sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM); - if (sharedArchivePathAddrAddr == 0) { - print_debug("can't lookup shared archive path symbol\n"); - return false; - } + sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM); + if (sharedArchivePathAddrAddr == 0) { + print_debug("can't lookup shared archive path symbol\n"); + return false; + } - if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) { - print_debug("can't read shared archive path pointer\n"); - return false; - } + if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) { + print_debug("can't read shared archive path pointer\n"); + return false; + } - if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) { - print_debug("can't read shared archive path value\n"); - return false; - } + if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) { + print_debug("can't read shared archive path value\n"); + return false; + } - print_debug("looking for %s\n", classes_jsa); - // open the class sharing archive file - fd = pathmap_open(classes_jsa); - if (fd < 0) { - print_debug("can't open %s!\n", classes_jsa); - ph->core->classes_jsa_fd = -1; - return false; - } else { - print_debug("opened %s\n", classes_jsa); - } + print_debug("looking for %s\n", classes_jsa); + // open the class sharing archive file + fd = pathmap_open(classes_jsa); + if (fd < 0) { + print_debug("can't open %s!\n", classes_jsa); + ph->core->classes_jsa_fd = -1; + return false; + } else { + print_debug("opened %s\n", classes_jsa); + } - // read FileMapHeader from the file - memset(&header, 0, sizeof(struct FileMapHeader)); - if 
((n = read(fd, &header, sizeof(struct FileMapHeader))) - != sizeof(struct FileMapHeader)) { - print_debug("can't read shared archive file map header from %s\n", classes_jsa); - close(fd); - return false; - } + // read FileMapHeader from the file + memset(&header, 0, sizeof(struct FileMapHeader)); + if ((n = read(fd, &header, sizeof(struct FileMapHeader))) + != sizeof(struct FileMapHeader)) { + print_debug("can't read shared archive file map header from %s\n", classes_jsa); + close(fd); + return false; + } - // check file magic - if (header._magic != 0xf00baba2) { - print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n", - classes_jsa, header._magic); - close(fd); - return false; - } + // check file magic + if (header._magic != 0xf00baba2) { + print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n", + classes_jsa, header._magic); + close(fd); + return false; + } - // check version - if (header._version != CURRENT_ARCHIVE_VERSION) { - print_debug("%s has wrong shared archive file version %d, expecting %d\n", - classes_jsa, header._version, CURRENT_ARCHIVE_VERSION); - close(fd); - return false; - } + // check version + if (header._version != CURRENT_ARCHIVE_VERSION) { + print_debug("%s has wrong shared archive file version %d, expecting %d\n", + classes_jsa, header._version, CURRENT_ARCHIVE_VERSION); + close(fd); + return false; + } - ph->core->classes_jsa_fd = fd; - // add read-only maps from classes.jsa to the list of maps - for (m = 0; m < NUM_SHARED_MAPS; m++) { - if (header._space[m]._read_only) { - base = (uintptr_t) header._space[m]._base; - // no need to worry about the fractional pages at-the-end. - // possible fractional pages are handled by core_read_data. - add_class_share_map_info(ph, (off_t) header._space[m]._file_offset, - base, (size_t) header._space[m]._used); - print_debug("added a share archive map at 0x%lx\n", base); - } - } - return true; + ph->core->classes_jsa_fd = fd; + // add read-only maps from classes.jsa to the list of maps + for (m = 0; m < NUM_SHARED_MAPS; m++) { + if (header._space[m]._read_only) { + base = (uintptr_t) header._space[m]._base; + // no need to worry about the fractional pages at-the-end. + // possible fractional pages are handled by core_read_data. + add_class_share_map_info(ph, (off_t) header._space[m]._file_offset, + base, (size_t) header._space[m]._used); + print_debug("added a share archive map at 0x%lx\n", base); + } } - lib = lib->next; + return true; } - return true; + lib = lib->next; + } + return true; } @@ -396,54 +404,58 @@ // callback for sorting the array of map_info pointers. static int core_cmp_mapping(const void *lhsp, const void *rhsp) { - const map_info *lhs = *((const map_info **)lhsp); - const map_info *rhs = *((const map_info **)rhsp); + const map_info *lhs = *((const map_info **)lhsp); + const map_info *rhs = *((const map_info **)rhsp); - if (lhs->vaddr == rhs->vaddr) - return (0); + if (lhs->vaddr == rhs->vaddr) { + return (0); + } - return (lhs->vaddr < rhs->vaddr ? -1 : 1); + return (lhs->vaddr < rhs->vaddr ? -1 : 1); } // we sort map_info by starting virtual address so that we can do // binary search to read from an address. 
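/*
 * Aside: a minimal, self-contained sketch of the sort-then-binary-search
 * scheme the comment above describes. The mini_map type and the
 * mini_cmp/mini_lookup names are hypothetical stand-ins, not the agent's
 * real API; the real code keeps map_info pointers in ph->core->map_array,
 * orders them with core_cmp_mapping (shown above), and walks them in
 * core_lookup with the same lo/hi loop used below.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { uintptr_t vaddr; size_t memsz; } mini_map;

// qsort callback: order mappings by starting virtual address
static int mini_cmp(const void *lhsp, const void *rhsp) {
  const mini_map *lhs = *(const mini_map *const *)lhsp;
  const mini_map *rhs = *(const mini_map *const *)rhsp;
  if (lhs->vaddr == rhs->vaddr) return 0;
  return (lhs->vaddr < rhs->vaddr) ? -1 : 1;
}

// binary search a sorted array of mini_map pointers; returns NULL if
// addr falls in no mapping. Callers sort once up front:
//   qsort(arr, n, sizeof(mini_map*), mini_cmp);
static mini_map* mini_lookup(mini_map **arr, int n, uintptr_t addr) {
  int lo = 0, hi = n - 1;
  mini_map *mp;
  if (n <= 0) return NULL;
  while (hi - lo > 1) {
    int mid = (lo + hi) / 2;
    if (addr >= arr[mid]->vaddr) { lo = mid; } else { hi = mid; }
  }
  mp = (addr < arr[hi]->vaddr) ? arr[lo] : arr[hi];
  return (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) ? mp : NULL;
}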
static bool sort_map_array(struct ps_prochandle* ph) { - size_t num_maps = ph->core->num_maps; - map_info* map = ph->core->maps; - int i = 0; + size_t num_maps = ph->core->num_maps; + map_info* map = ph->core->maps; + int i = 0; - // allocate map_array - map_info** array; - if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) { - print_debug("can't allocate memory for map array\n"); - return false; - } + // allocate map_array + map_info** array; + if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) { + print_debug("can't allocate memory for map array\n"); + return false; + } - // add maps to array - while (map) { - array[i] = map; - i++; - map = map->next; - } + // add maps to array + while (map) { + array[i] = map; + i++; + map = map->next; + } - // sort is called twice. If this is second time, clear map array - if (ph->core->map_array) free(ph->core->map_array); - ph->core->map_array = array; - // sort the map_info array by base virtual address. - qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*), - core_cmp_mapping); + // sort is called twice. If this is second time, clear map array + if (ph->core->map_array) { + free(ph->core->map_array); + } + + ph->core->map_array = array; + // sort the map_info array by base virtual address. + qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*), + core_cmp_mapping); - // print map - if (is_debug()) { - int j = 0; - print_debug("---- sorted virtual address map ----\n"); - for (j = 0; j < ph->core->num_maps; j++) { - print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr, - ph->core->map_array[j]->memsz); - } - } + // print map + if (is_debug()) { + int j = 0; + print_debug("---- sorted virtual address map ----\n"); + for (j = 0; j < ph->core->num_maps; j++) { + print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr, + ph->core->map_array[j]->memsz); + } + } - return true; + return true; } #ifndef MIN @@ -460,16 +472,18 @@ off_t off; int fd; - if (mp == NULL) + if (mp == NULL) { break; /* No mapping for this address */ + } fd = mp->fd; mapoff = addr - mp->vaddr; len = MIN(resid, mp->memsz - mapoff); off = mp->offset + mapoff; - if ((len = pread(fd, buf, len, off)) <= 0) + if ((len = pread(fd, buf, len, off)) <= 0) { break; + } resid -= len; addr += len; @@ -625,8 +639,9 @@ notep->n_type, notep->n_descsz); if (notep->n_type == NT_PRSTATUS) { - if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) - return false; + if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) { + return false; + } } p = descdata + ROUNDUP(notep->n_descsz, 4); } @@ -654,7 +669,7 @@ * contains a set of saved /proc structures), and PT_LOAD (which * represents a memory mapping from the process's address space). * - * Difference b/w Solaris PT_NOTE and Linux PT_NOTE: + * Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE: * * In Solaris there are two PT_NOTE segments the first PT_NOTE (if present) * contains /proc structs in the pre-2.6 unstructured /proc format. 
the last @@ -674,7 +689,9 @@ for (core_php = phbuf, i = 0; i < core_ehdr->e_phnum; i++) { switch (core_php->p_type) { case PT_NOTE: - if (core_handle_note(ph, core_php) != true) goto err; + if (core_handle_note(ph, core_php) != true) { + goto err; + } break; case PT_LOAD: { @@ -702,7 +719,7 @@ ELF_PHDR* phbuf; ELF_PHDR* lib_php = NULL; - int page_size=sysconf(_SC_PAGE_SIZE); + int page_size = sysconf(_SC_PAGE_SIZE); if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) { return false; @@ -719,26 +736,29 @@ if (existing_map == NULL){ if (add_map_info(ph, lib_fd, lib_php->p_offset, - target_vaddr, lib_php->p_filesz) == NULL) { + target_vaddr, lib_php->p_memsz) == NULL) { goto err; } } else { + // Coredump stores value of p_memsz elf field + // rounded up to page boundary. + if ((existing_map->memsz != page_size) && (existing_map->fd != lib_fd) && - (existing_map->memsz != lib_php->p_filesz)){ + (ROUNDUP(existing_map->memsz, page_size) != ROUNDUP(lib_php->p_memsz, page_size))) { - print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)", - target_vaddr, lib_php->p_filesz, lib_php->p_flags); + print_debug("address conflict @ 0x%lx (existing map size = %ld, size = %ld, flags = %d)\n", + target_vaddr, existing_map->memsz, lib_php->p_memsz, lib_php->p_flags); goto err; } /* replace PT_LOAD segment with library segment */ print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n", - existing_map->memsz, lib_php->p_filesz); + existing_map->memsz, ROUNDUP(lib_php->p_memsz, page_size)); existing_map->fd = lib_fd; existing_map->offset = lib_php->p_offset; - existing_map->memsz = lib_php->p_filesz; + existing_map->memsz = ROUNDUP(lib_php->p_memsz, page_size); } } @@ -832,60 +852,62 @@ // read shared library info from runtime linker's data structures. // This work is done by librtlb_db in Solaris static bool read_shared_lib_info(struct ps_prochandle* ph) { - uintptr_t addr = ph->core->dynamic_addr; - uintptr_t debug_base; - uintptr_t first_link_map_addr; - uintptr_t ld_base_addr; - uintptr_t link_map_addr; - uintptr_t lib_base_diff; - uintptr_t lib_base; - uintptr_t lib_name_addr; - char lib_name[BUF_SIZE]; - ELF_DYN dyn; - ELF_EHDR elf_ehdr; - int lib_fd; + uintptr_t addr = ph->core->dynamic_addr; + uintptr_t debug_base; + uintptr_t first_link_map_addr; + uintptr_t ld_base_addr; + uintptr_t link_map_addr; + uintptr_t lib_base_diff; + uintptr_t lib_base; + uintptr_t lib_name_addr; + char lib_name[BUF_SIZE]; + ELF_DYN dyn; + ELF_EHDR elf_ehdr; + int lib_fd; - // _DYNAMIC has information of the form - // [tag] [data] [tag] [data] ..... - // Both tag and data are pointer sized. - // We look for dynamic info with DT_DEBUG. This has shared object info. - // refer to struct r_debug in link.h + // _DYNAMIC has information of the form + // [tag] [data] [tag] [data] ..... + // Both tag and data are pointer sized. + // We look for dynamic info with DT_DEBUG. This has shared object info. 
+ // refer to struct r_debug in link.h + + dyn.d_tag = DT_NULL; + while (dyn.d_tag != DT_DEBUG) { + if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) { + print_debug("can't read debug info from _DYNAMIC\n"); + return false; + } + addr += sizeof(ELF_DYN); + } - dyn.d_tag = DT_NULL; - while (dyn.d_tag != DT_DEBUG) { - if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) { - print_debug("can't read debug info from _DYNAMIC\n"); - return false; - } - addr += sizeof(ELF_DYN); - } - - // we have got Dyn entry with DT_DEBUG - debug_base = dyn.d_un.d_ptr; - // at debug_base we have struct r_debug. This has first link map in r_map field - if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET, + // we have got Dyn entry with DT_DEBUG + debug_base = dyn.d_un.d_ptr; + // at debug_base we have struct r_debug. This has first link map in r_map field + if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET, &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) { - print_debug("can't read first link map address\n"); - return false; - } + print_debug("can't read first link map address\n"); + return false; + } - // read ld_base address from struct r_debug - if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr, + // read ld_base address from struct r_debug + if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr, sizeof(uintptr_t)) != PS_OK) { - print_debug("can't read ld base address\n"); - return false; - } - ph->core->ld_base_addr = ld_base_addr; + print_debug("can't read ld base address\n"); + return false; + } + ph->core->ld_base_addr = ld_base_addr; + + print_debug("interpreter base address is 0x%lx\n", ld_base_addr); - print_debug("interpreter base address is 0x%lx\n", ld_base_addr); - - // now read segments from interp (i.e ld.so or ld-linux.so) - if (read_interp_segments(ph) != true) + // now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so) + if (read_interp_segments(ph) != true) { return false; + } - // after adding interpreter (ld.so) mappings sort again - if (sort_map_array(ph) != true) - return false; + // after adding interpreter (ld.so) mappings sort again + if (sort_map_array(ph) != true) { + return false; + } print_debug("first link map is at 0x%lx\n", first_link_map_addr); @@ -950,95 +972,102 @@ } } - // read next link_map address - if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET, - &link_map_addr, sizeof(uintptr_t)) != PS_OK) { - print_debug("can't read next link in link_map\n"); - return false; - } - } + // read next link_map address + if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET, + &link_map_addr, sizeof(uintptr_t)) != PS_OK) { + print_debug("can't read next link in link_map\n"); + return false; + } + } - return true; + return true; } // the one and only one exposed stuff from this file struct ps_prochandle* Pgrab_core(const char* exec_file, const char* core_file) { - ELF_EHDR core_ehdr; - ELF_EHDR exec_ehdr; - ELF_EHDR lib_ehdr; + ELF_EHDR core_ehdr; + ELF_EHDR exec_ehdr; + ELF_EHDR lib_ehdr; - struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle)); - if (ph == NULL) { - print_debug("can't allocate ps_prochandle\n"); - return NULL; - } + struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle)); + if (ph == NULL) { + print_debug("can't allocate ps_prochandle\n"); + return NULL; + } - if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) { - free(ph); - 
print_debug("can't allocate ps_prochandle\n"); - return NULL; - } + if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) { + free(ph); + print_debug("can't allocate ps_prochandle\n"); + return NULL; + } - // initialize ph - ph->ops = &core_ops; - ph->core->core_fd = -1; - ph->core->exec_fd = -1; - ph->core->interp_fd = -1; + // initialize ph + ph->ops = &core_ops; + ph->core->core_fd = -1; + ph->core->exec_fd = -1; + ph->core->interp_fd = -1; - // open the core file - if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) { - print_debug("can't open core file\n"); - goto err; - } + // open the core file + if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) { + print_debug("can't open core file\n"); + goto err; + } - // read core file ELF header - if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) { - print_debug("core file is not a valid ELF ET_CORE file\n"); - goto err; - } + // read core file ELF header + if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) { + print_debug("core file is not a valid ELF ET_CORE file\n"); + goto err; + } + + if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) { + print_debug("can't open executable file\n"); + goto err; + } - if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) { - print_debug("can't open executable file\n"); - goto err; - } + if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) { + print_debug("executable file is not a valid ELF ET_EXEC file\n"); + goto err; + } + + // process core file segments + if (read_core_segments(ph, &core_ehdr) != true) { + goto err; + } - if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) { - print_debug("executable file is not a valid ELF ET_EXEC file\n"); - goto err; - } + // process exec file segments + if (read_exec_segments(ph, &exec_ehdr) != true) { + goto err; + } - // process core file segments - if (read_core_segments(ph, &core_ehdr) != true) - goto err; - - // process exec file segments - if (read_exec_segments(ph, &exec_ehdr) != true) - goto err; + // exec file is also treated like a shared object for symbol search + if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd, + (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) { + goto err; + } - // exec file is also treated like a shared object for symbol search - if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd, - (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) - goto err; + // allocate and sort maps into map_array, we need to do this + // here because read_shared_lib_info needs to read from debuggee + // address space + if (sort_map_array(ph) != true) { + goto err; + } - // allocate and sort maps into map_array, we need to do this - // here because read_shared_lib_info needs to read from debuggee - // address space - if (sort_map_array(ph) != true) - goto err; + if (read_shared_lib_info(ph) != true) { + goto err; + } - if (read_shared_lib_info(ph) != true) - goto err; + // sort again because we have added more mappings from shared objects + if (sort_map_array(ph) != true) { + goto err; + } - // sort again because we have added more mappings from shared objects - if (sort_map_array(ph) != true) - goto err; + if (init_classsharing_workaround(ph) != true) { + goto err; + } - if (init_classsharing_workaround(ph) != true) - goto err; - - return ph; + return ph; err: - Prelease(ph); - return NULL; + 
Prelease(ph); + return NULL; } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java --- a/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java Thu Nov 21 15:04:54 2013 +0100 @@ -67,6 +67,13 @@ String libname = "hsdis"; String arch = System.getProperty("os.arch"); if (os.lastIndexOf("Windows", 0) != -1) { + if (arch.equals("x86")) { + libname += "-i386"; + } else if (arch.equals("amd64")) { + libname += "-amd64"; + } else { + libname += "-" + arch; + } path.append(sep + "bin" + sep); libname += ".dll"; } else if (os.lastIndexOf("SunOS", 0) != -1) { diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.gc_implementation.g1; + +import java.util.Iterator; +import java.util.Observable; +import java.util.Observer; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.runtime.VM; +import sun.jvm.hotspot.runtime.VMObject; +import sun.jvm.hotspot.runtime.VMObjectFactory; +import sun.jvm.hotspot.types.AddressField; +import sun.jvm.hotspot.types.CIntegerField; +import sun.jvm.hotspot.types.Type; +import sun.jvm.hotspot.types.TypeDataBase; + +// Mirror class for G1HeapRegionTable. It's essentially an index -> HeapRegion map. 
+ +public class G1HeapRegionTable extends VMObject { + // HeapRegion** _base; + static private AddressField baseField; + // uint _length; + static private CIntegerField lengthField; + // HeapRegion** _biased_base + static private AddressField biasedBaseField; + // size_t _bias + static private CIntegerField biasField; + // uint _shift_by + static private CIntegerField shiftByField; + + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + static private synchronized void initialize(TypeDataBase db) { + Type type = db.lookupType("G1HeapRegionTable"); + + baseField = type.getAddressField("_base"); + lengthField = type.getCIntegerField("_length"); + biasedBaseField = type.getAddressField("_biased_base"); + biasField = type.getCIntegerField("_bias"); + shiftByField = type.getCIntegerField("_shift_by"); + } + + private HeapRegion at(long index) { + Address arrayAddr = baseField.getValue(addr); + // Offset of &_base[index] + long offset = index * VM.getVM().getAddressSize(); + Address regionAddr = arrayAddr.getAddressAt(offset); + return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class, + regionAddr); + } + + public long length() { + return lengthField.getValue(addr); + } + + public long bias() { + return biasField.getValue(addr); + } + + public long shiftBy() { + return shiftByField.getValue(addr); + } + + private class HeapRegionIterator implements Iterator { + private long index; + private long length; + + @Override + public boolean hasNext() { return index < length; } + + @Override + public HeapRegion next() { return at(index++); } + + @Override + public void remove() { /* not supported */ } + + HeapRegionIterator(Address addr) { + index = 0; + length = length(); + } + } + + public Iterator heapRegionIterator() { + return new HeapRegionIterator(addr); + } + + public G1HeapRegionTable(Address addr) { + super(addr); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java --- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,13 +37,11 @@ import sun.jvm.hotspot.types.Type; import sun.jvm.hotspot.types.TypeDataBase; -// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map. +// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable. 
public class HeapRegionSeq extends VMObject { - // HeapRegion** _regions; - static private AddressField regionsField; - // uint _length; - static private CIntegerField lengthField; + // G1HeapRegionTable _regions + static private long regionsFieldOffset; static { VM.registerVMInitializedObserver(new Observer() { @@ -56,44 +54,21 @@ static private synchronized void initialize(TypeDataBase db) { Type type = db.lookupType("HeapRegionSeq"); - regionsField = type.getAddressField("_regions"); - lengthField = type.getCIntegerField("_length"); + regionsFieldOffset = type.getField("_regions").getOffset(); } - private HeapRegion at(long index) { - Address arrayAddr = regionsField.getValue(addr); - // Offset of &_region[index] - long offset = index * VM.getVM().getAddressSize(); - Address regionAddr = arrayAddr.getAddressAt(offset); - return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class, - regionAddr); + private G1HeapRegionTable regions() { + Address regionsAddr = addr.addOffsetTo(regionsFieldOffset); + return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class, + regionsAddr); } public long length() { - return lengthField.getValue(addr); - } - - private class HeapRegionIterator implements Iterator { - private long index; - private long length; - - @Override - public boolean hasNext() { return index < length; } - - @Override - public HeapRegion next() { return at(index++); } - - @Override - public void remove() { /* not supported */ } - - HeapRegionIterator(Address addr) { - index = 0; - length = length(); - } + return regions().length(); } public Iterator heapRegionIterator() { - return new HeapRegionIterator(addr); + return regions().heapRegionIterator(); } public HeapRegionSeq(Address addr) { diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainCacheEntry.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainCacheEntry.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package sun.jvm.hotspot.memory; + +import java.util.*; +import sun.jvm.hotspot.debugger.*; +import sun.jvm.hotspot.oops.*; +import sun.jvm.hotspot.runtime.*; +import sun.jvm.hotspot.types.*; + +public class ProtectionDomainCacheEntry extends VMObject { + private static sun.jvm.hotspot.types.OopField protectionDomainField; + + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + private static synchronized void initialize(TypeDataBase db) { + Type type = db.lookupType("ProtectionDomainCacheEntry"); + protectionDomainField = type.getOopField("_literal"); + } + + public ProtectionDomainCacheEntry(Address addr) { + super(addr); + } + + public Oop protectionDomain() { + return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr)); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java --- a/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ public class ProtectionDomainEntry extends VMObject { private static AddressField nextField; - private static sun.jvm.hotspot.types.OopField protectionDomainField; + private static AddressField pdCacheField; static { VM.registerVMInitializedObserver(new Observer() { @@ -46,7 +46,7 @@ Type type = db.lookupType("ProtectionDomainEntry"); nextField = type.getAddressField("_next"); - protectionDomainField = type.getOopField("_protection_domain"); + pdCacheField = type.getAddressField("_pd_cache"); } public ProtectionDomainEntry(Address addr) { @@ -54,10 +54,12 @@ } public ProtectionDomainEntry next() { - return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, addr); + return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, nextField.getValue(addr)); } public Oop protectionDomain() { - return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr)); + ProtectionDomainCacheEntry pd_cache = (ProtectionDomainCacheEntry) + VMObjectFactory.newObject(ProtectionDomainCacheEntry.class, pdCacheField.getValue(addr)); + return pd_cache.protectionDomain(); } } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java --- a/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java Thu Nov 21 15:04:54 2013 +0100 @@ -44,12 +44,10 @@ private static synchronized void initialize(TypeDataBase db) { Type type = db.lookupType("SymbolTable"); theTableField = type.getAddressField("_the_table"); - symbolTableSize = db.lookupIntConstant("SymbolTable::symbol_table_size").intValue(); } // Fields private static AddressField theTableField; - private static int symbolTableSize; // Accessors public static SymbolTable getTheTable() { @@ -57,10 +55,6 @@ return (SymbolTable) VMObjectFactory.newObject(SymbolTable.class, tmp); } - public static int getSymbolTableSize() { - return 
symbolTableSize; - } - public SymbolTable(Address addr) { super(addr); } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java --- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java Thu Nov 21 15:04:54 2013 +0100 @@ -51,6 +51,7 @@ private static int HAS_GENERIC_SIGNATURE; private static int HAS_METHOD_ANNOTATIONS; private static int HAS_PARAMETER_ANNOTATIONS; + private static int HAS_METHOD_PARAMETERS; private static int HAS_DEFAULT_ANNOTATIONS; private static int HAS_TYPE_ANNOTATIONS; @@ -70,6 +71,7 @@ HAS_GENERIC_SIGNATURE = db.lookupIntConstant("ConstMethod::_has_generic_signature").intValue(); HAS_METHOD_ANNOTATIONS = db.lookupIntConstant("ConstMethod::_has_method_annotations").intValue(); HAS_PARAMETER_ANNOTATIONS = db.lookupIntConstant("ConstMethod::_has_parameter_annotations").intValue(); + HAS_METHOD_PARAMETERS = db.lookupIntConstant("ConstMethod::_has_method_parameters").intValue(); HAS_DEFAULT_ANNOTATIONS = db.lookupIntConstant("ConstMethod::_has_default_annotations").intValue(); HAS_TYPE_ANNOTATIONS = db.lookupIntConstant("ConstMethod::_has_type_annotations").intValue(); @@ -85,6 +87,9 @@ // start of byte code bytecodeOffset = type.getSize(); + type = db.lookupType("MethodParametersElement"); + methodParametersElementSize = type.getSize(); + type = db.lookupType("CheckedExceptionElement"); checkedExceptionElementSize = type.getSize(); @@ -113,7 +118,7 @@ // start of bytecode private static long bytecodeOffset; - + private static long methodParametersElementSize; private static long checkedExceptionElementSize; private static long localVariableTableElementSize; private static long exceptionTableElementSize; @@ -387,6 +392,10 @@ return ret; } + private boolean hasMethodParameters() { + return (getFlags() & HAS_METHOD_PARAMETERS) != 0; + } + private boolean hasGenericSignature() { return (getFlags() & HAS_GENERIC_SIGNATURE) != 0; } @@ -442,11 +451,41 @@ return offsetOfLastU2Element(); } - private long offsetOfCheckedExceptionsLength() { + private long offsetOfMethodParametersLength() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(hasMethodParameters(), "should only be called if table is present"); + } return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort : offsetOfLastU2Element(); } + private int getMethodParametersLength() { + if (hasMethodParameters()) + return (int) getAddress().getCIntegerAt(offsetOfMethodParametersLength(), 2, true); + else + return 0; + } + + // Offset of start of checked exceptions + private long offsetOfMethodParameters() { + long offset = offsetOfMethodParametersLength(); + long length = getMethodParametersLength(); + if (Assert.ASSERTS_ENABLED) { + Assert.that(length > 0, "should only be called if method parameter information is present"); + } + offset -= length * methodParametersElementSize; + return offset; + } + + private long offsetOfCheckedExceptionsLength() { + if (hasMethodParameters()) + return offsetOfMethodParameters() - sizeofShort; + else { + return hasGenericSignature() ? 
offsetOfLastU2Element() - sizeofShort : + offsetOfLastU2Element(); + } + } + private int getCheckedExceptionsLength() { if (hasCheckedExceptions()) { return (int) getAddress().getCIntegerAt(offsetOfCheckedExceptionsLength(), 2, true); @@ -496,6 +535,8 @@ return offsetOfExceptionTable() - sizeofShort; } else if (hasCheckedExceptions()) { return offsetOfCheckedExceptions() - sizeofShort; + } else if (hasMethodParameters()) { + return offsetOfMethodParameters() - sizeofShort; } else { return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort : offsetOfLastU2Element(); @@ -526,6 +567,8 @@ } if (hasCheckedExceptions()) { return offsetOfCheckedExceptions() - sizeofShort; + } else if (hasMethodParameters()) { + return offsetOfMethodParameters() - sizeofShort; } else { return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort : offsetOfLastU2Element(); diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java Thu Nov 21 15:04:54 2013 +0100 @@ -51,8 +51,7 @@ public static void main(String[] args) { ClassLoaderStats cls = new ClassLoaderStats(); - cls.start(args); - cls.stop(); + cls.execute(args); } private static class ClassData { diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java Thu Nov 21 15:04:54 2013 +0100 @@ -54,8 +54,7 @@ public static void main(String[] args) { FinalizerInfo finfo = new FinalizerInfo(); - finfo.start(args); - finfo.stop(); + finfo.execute(args); } public void run() { diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java Thu Nov 21 15:04:54 2013 +0100 @@ -54,7 +54,6 @@ public static void main(String[] args) { FlagDumper fd = new FlagDumper(); - fd.start(args); - fd.stop(); + fd.execute(args); } } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java Thu Nov 21 15:04:54 2013 +0100 @@ -80,8 +80,7 @@ } HeapDumper dumper = new HeapDumper(file); - dumper.start(args); - dumper.stop(); + dumper.execute(args); } } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Thu Nov 21 15:04:54 2013 +0100 @@ -46,8 +46,7 @@ public static void main(String[] args) { HeapSummary hs = new HeapSummary(); - hs.start(args); - hs.stop(); + hs.execute(args); } public void run() { diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java Thu Nov 21 15:04:54 2013 +0100 @@ -134,8 +134,7 @@ } JInfo jinfo = new JInfo(mode); - 
jinfo.start(args); - jinfo.stop(); + jinfo.execute(args); } private void printVMFlags() { diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java Thu Nov 21 15:04:54 2013 +0100 @@ -136,7 +136,9 @@ mode = MODE_HEAP_GRAPH_GXL; } else { System.err.println("unknown heap format:" + format); - return; + + // Exit with error status + System.exit(1); } } else { copyArgs = false; @@ -153,8 +155,7 @@ } JMap jmap = new JMap(mode); - jmap.start(args); - jmap.stop(); + jmap.execute(args); } public boolean writeHeapHprofBin(String fileName) { diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java Thu Nov 21 15:04:54 2013 +0100 @@ -64,7 +64,6 @@ public static void main(String[] args) { JSnap js = new JSnap(); - js.start(args); - js.stop(); + js.execute(args); } } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java Thu Nov 21 15:04:54 2013 +0100 @@ -89,8 +89,7 @@ } JStack jstack = new JStack(mixedMode, concurrentLocks); - jstack.start(args); - jstack.stop(); + jstack.execute(args); } private boolean mixedMode; diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java Thu Nov 21 15:04:54 2013 +0100 @@ -61,7 +61,6 @@ public static void main(String[] args) { ObjectHistogram oh = new ObjectHistogram(); - oh.start(args); - oh.stop(); + oh.execute(args); } } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java Thu Nov 21 15:04:54 2013 +0100 @@ -69,7 +69,6 @@ public static void main(String[] args) throws Exception { PMap t = new PMap(); - t.start(args); - t.stop(); + t.execute(args); } } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java Thu Nov 21 15:04:54 2013 +0100 @@ -182,8 +182,7 @@ public static void main(String[] args) throws Exception { PStack t = new PStack(); - t.start(args); - t.stop(); + t.execute(args); } // -- Internals only below this point diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java Thu Nov 21 15:04:54 2013 +0100 @@ -137,8 +137,7 @@ public static void main(String[] args) { StackTrace st = new StackTrace(); - st.start(args); - st.stop(); + st.execute(args); } private boolean verbose; diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java --- 
a/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java Thu Nov 21 15:04:54 2013 +0100 @@ -58,7 +58,6 @@ public static void main(String[] args) { SysPropsDumper pd = new SysPropsDumper(); - pd.start(args); - pd.stop(); + pd.execute(args); } } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java Thu Nov 21 15:04:54 2013 +0100 @@ -26,6 +26,7 @@ import java.io.PrintStream; import java.util.Hashtable; + import sun.jvm.hotspot.*; import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.debugger.*; @@ -105,26 +106,44 @@ public static void main(String[] args) { <derived class> obj = new <derived class>(); - obj.start(args); + obj.execute(args); } */ - protected void stop() { + protected void execute(String[] args) { + int returnStatus = 1; + + try { + returnStatus = start(args); + } finally { + stop(); + } + + // Exit with 0 or 1 + System.exit(returnStatus); + } + + public void stop() { if (agent != null) { agent.detach(); } } - protected void start(String[] args) { + private int start(String[] args) { + if ((args.length < 1) || (args.length > 2)) { usage(); - return; + return 1; } // Attempt to handle -h or -help or some invalid flag - if (args[0].startsWith("-")) { + if (args[0].startsWith("-h")) { usage(); + return 0; + } else if (args[0].startsWith("-")) { + usage(); + return 1; } PrintStream err = System.err; @@ -154,6 +173,7 @@ default: usage(); + return 1; } agent = new HotSpotAgent(); @@ -191,15 +211,16 @@ break; } if (e.getMessage() != null) { - err.print(e.getMessage()); + err.println(e.getMessage()); e.printStackTrace(); } err.println(); - return; + return 1; } err.println("Debugger attached successfully."); startInternal(); + return 0; } // When using an existing JVMDebugger.
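The Tool.execute() refactoring above lets each SA tool's main() collapse to a single call: execute() runs start(args), always detaches the agent via stop() in a finally block, and exits the VM with status 0 or 1. A minimal sketch of a tool written against the new pattern follows; the class name PrintVMVersion and its body are illustrative only, not part of this changeset.

// Illustrative sketch, not part of this changeset: a minimal SA tool
// using the new Tool.execute() convention introduced above.
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.tools.Tool;

public class PrintVMVersion extends Tool {
    public void run() {
        // Called by start(args) once the agent has attached to the target VM.
        System.out.println("VM release: " + VM.getVM().getVMRelease());
    }

    public static void main(String[] args) {
        PrintVMVersion tool = new PrintVMVersion();
        // execute() invokes start(args), guarantees stop() runs in a finally
        // block, and calls System.exit() with 0 on success or 1 on failure.
        tool.execute(args);
    }
}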
diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Thu Nov 21 15:04:54 2013 +0100 @@ -177,7 +177,6 @@ public static void main(String[] args) { ClassDump cd = new ClassDump(); - cd.start(args); - cd.stop(); + cd.execute(args); } } diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java Thu Nov 21 15:04:54 2013 +0100 @@ -42,8 +42,7 @@ public static void main(String[] args) { JSDB jsdb = new JSDB(); - jsdb.start(args); - jsdb.stop(); + jsdb.execute(args); } public void run() { diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java Thu Nov 21 15:04:54 2013 +0100 @@ -40,8 +40,7 @@ public class SOQL extends Tool { public static void main(String[] args) { SOQL soql = new SOQL(); - soql.start(args); - soql.stop(); + soql.execute(args); } public SOQL() { diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java Thu Nov 21 15:04:54 2013 +0100 @@ -59,6 +59,7 @@ public boolean doObj(Oop oop) { try { + writeHeapRecordPrologue(); if (oop instanceof TypeArray) { writePrimitiveArray((TypeArray)oop); } else if (oop instanceof ObjArray) { @@ -97,6 +98,7 @@ // not-a-Java-visible oop writeInternalObject(oop); } + writeHeapRecordEpilogue(); } catch (IOException exp) { throw new RuntimeException(exp); } @@ -416,6 +418,12 @@ protected void writeHeapFooter() throws IOException { } + protected void writeHeapRecordPrologue() throws IOException { + } + + protected void writeHeapRecordEpilogue() throws IOException { + } + // HeapVisitor, OopVisitor methods can't throw any non-runtime // exception. But, derived class write methods (which are called // from visitor callbacks) may throw IOException. Hence, we throw diff -r 790ebab62d23 -r f9f4503a4ab5 agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Thu Nov 21 15:04:26 2013 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Thu Nov 21 15:04:54 2013 +0100 @@ -44,7 +44,7 @@ * WARNING: This format is still under development, and is subject to * change without notice. * - * header "JAVA PROFILE 1.0.1" (0-terminated) + * header "JAVA PROFILE 1.0.1" or "JAVA PROFILE 1.0.2" (0-terminated) * u4 size of identifiers. Identifiers are used to represent * UTF8 strings, objects, stack traces, etc. They usually * have the same size as host pointers. For example, on @@ -292,11 +292,34 @@ * 0x00000002: cpu sampling on/off * u2 stack trace depth * + * + * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally + * be generated as a sequence of heap dump segments. This sequence is + * terminated by an end record. 
The additional tags allowed by format + * "JAVA PROFILE 1.0.2" are: + * + * HPROF_HEAP_DUMP_SEGMENT denotes a heap dump segment + * + * [heap dump sub-records]* + * The same sub-record types allowed by HPROF_HEAP_DUMP + * + * HPROF_HEAP_DUMP_END denotes the end of a heap dump + * */ public class HeapHprofBinWriter extends AbstractHeapGraphWriter { + + // The heap size threshold used to determine if segmented format + // ("JAVA PROFILE 1.0.2") should be used. + private static final long HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD = 2L * 0x40000000; + + // The approximate size of a heap segment. Used to calculate when to create + // a new segment. + private static final long HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE = 1L * 0x40000000; + // hprof binary file header - private static final String HPROF_HEADER = "JAVA PROFILE 1.0.1"; + private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1"; + private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2"; // constants in enum HprofTag private static final int HPROF_UTF8 = 0x01; @@ -312,6 +335,10 @@ private static final int HPROF_CPU_SAMPLES = 0x0D; private static final int HPROF_CONTROL_SETTINGS = 0x0E; + // 1.0.2 record types + private static final int HPROF_HEAP_DUMP_SEGMENT = 0x1C; + private static final int HPROF_HEAP_DUMP_END = 0x2C; + // Heap dump constants // constants in enum HprofGcTag private static final int HPROF_GC_ROOT_UNKNOWN = 0xFF; @@ -352,11 +379,9 @@ private static final int JVM_SIGNATURE_ARRAY = '['; private static final int JVM_SIGNATURE_CLASS = 'L'; - public synchronized void write(String fileName) throws IOException { // open file stream and create buffered data output stream - FileOutputStream fos = new FileOutputStream(fileName); - FileChannel chn = fos.getChannel(); + fos = new FileOutputStream(fileName); out = new DataOutputStream(new BufferedOutputStream(fos)); VM vm = VM.getVM(); @@ -385,6 +410,9 @@ FLOAT_SIZE = objectHeap.getFloatSize(); DOUBLE_SIZE = objectHeap.getDoubleSize(); + // Check whether we should dump the heap as segments + useSegmentedHeapDump = vm.getUniverse().heap().used() > HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD; + // hprof bin format header writeFileHeader(); @@ -394,38 +422,87 @@ // hprof UTF-8 symbols section writeSymbols(); + // HPROF_LOAD_CLASS records for all classes writeClasses(); - // write heap data now - out.writeByte((byte)HPROF_HEAP_DUMP); - out.writeInt(0); // relative timestamp - - // remember position of dump length, we will fixup - // length later - hprof format requires length. - out.flush(); - long dumpStart = chn.position(); - - // write dummy length of 0 and we'll fix it later. - out.writeInt(0); - // write CLASS_DUMP records writeClassDumpRecords(); // this will write heap data into the buffer stream super.write(); + // flush buffer stream. + out.flush(); + + // Fill in final length + fillInHeapRecordLength(); + + if (useSegmentedHeapDump) { + // Write heap segment-end record + out.writeByte((byte) HPROF_HEAP_DUMP_END); + out.writeInt(0); + out.writeInt(0); + } + // flush buffer stream and throw it. out.flush(); out = null; + // close the file stream + fos.close(); + } + + @Override + protected void writeHeapRecordPrologue() throws IOException { + if (currentSegmentStart == 0) { + // write heap data header, depending on heap size use segmented heap + // format + out.writeByte((byte) (useSegmentedHeapDump ?
HPROF_HEAP_DUMP_SEGMENT + : HPROF_HEAP_DUMP)); + out.writeInt(0); + + // remember position of dump length, we will fix up + // length later - hprof format requires length. + out.flush(); + currentSegmentStart = fos.getChannel().position(); + + // write dummy length of 0 and we'll fix it later. + out.writeInt(0); + } + } + + @Override + protected void writeHeapRecordEpilogue() throws IOException { + if (useSegmentedHeapDump) { + out.flush(); + if ((fos.getChannel().position() - currentSegmentStart - 4) >= HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE) { + fillInHeapRecordLength(); + currentSegmentStart = 0; + } + } + } + + private void fillInHeapRecordLength() throws IOException { + // now get current position to calculate length - long dumpEnd = chn.position(); + long dumpEnd = fos.getChannel().position(); + // calculate length of heap data - int dumpLen = (int) (dumpEnd - dumpStart - 4); + long dumpLenLong = (dumpEnd - currentSegmentStart - 4L); + + // Check length boundary, overflow could happen but is _very_ unlikely + if (dumpLenLong >= (4L * 0x40000000)) { + throw new RuntimeException("Heap segment size overflow."); + } + + // Save the current position + long currentPosition = fos.getChannel().position(); // seek the position to write length - chn.position(dumpStart); + fos.getChannel().position(currentSegmentStart); + + int dumpLen = (int) dumpLenLong; // write length as integer fos.write((dumpLen >>> 24) & 0xFF); @@ -433,8 +510,8 @@ fos.write((dumpLen >>> 8) & 0xFF); fos.write((dumpLen >>> 0) & 0xFF); - // close the file stream - fos.close(); + // Reset to the previously saved position + fos.getChannel().position(currentPosition); } private void writeClassDumpRecords() throws IOException { @@ -443,7 +520,9 @@ sysDict.allClassesDo(new SystemDictionary.ClassVisitor() { public void visit(Klass k) { try { + writeHeapRecordPrologue(); writeClassDumpRecord(k); + writeHeapRecordEpilogue(); } catch (IOException e) { throw new RuntimeException(e); } @@ -884,7 +963,12 @@ // writes hprof binary file header private void writeFileHeader() throws IOException { // version string - out.writeBytes(HPROF_HEADER); + if (useSegmentedHeapDump) { + out.writeBytes(HPROF_HEADER_1_0_2); + } + else { + out.writeBytes(HPROF_HEADER_1_0_1); + } out.writeByte((byte)'\0'); // write identifier size. we use pointers as identifiers.
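As the prologue/epilogue pair above shows, every top-level hprof record, including the new HPROF_HEAP_DUMP_SEGMENT (0x1C) and HPROF_HEAP_DUMP_END (0x2C) records, is framed as a u1 tag, a u4 timestamp, and a u4 body length that the writer patches in after the fact. A reader-side sketch of walking that framing follows; it assumes the header string, identifier size, and the two timestamp words have already been consumed, and that record bodies stay under 2 GB as the overflow check above enforces.

// Sketch of a reader walking the top-level record framing of a
// "JAVA PROFILE 1.0.2" dump; assumes the header string, the u4
// identifier size and the two u4 timestamp words were already read.
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

final class HprofRecordSkimmer {
    private static final int HPROF_HEAP_DUMP_SEGMENT = 0x1C;
    private static final int HPROF_HEAP_DUMP_END = 0x2C;

    static void skim(DataInputStream in) throws IOException {
        while (true) {
            final int tag;
            try {
                tag = in.readUnsignedByte(); // u1 record tag
            } catch (EOFException endOfDump) {
                return; // normal end of the record stream
            }
            in.readInt();                    // u4 timestamp delta (ignored here)
            int bodyLength = in.readInt();   // u4 body length; < 2 GB per the writer
            if (tag == HPROF_HEAP_DUMP_END) {
                // Terminates a segmented heap dump; its body length is 0.
                continue;
            }
            // HPROF_HEAP_DUMP_SEGMENT bodies hold the same sub-records as
            // HPROF_HEAP_DUMP; this sketch simply skips every record body.
            int skipped = 0;
            while (skipped < bodyLength) {
                int n = in.skipBytes(bodyLength - skipped);
                if (n <= 0) {
                    throw new EOFException("truncated record body");
                }
                skipped += n;
            }
        }
    }
}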
@@ -976,6 +1060,7 @@ private static final int EMPTY_FRAME_DEPTH = -1; private DataOutputStream out; + private FileOutputStream fos; private Debugger dbg; private ObjectHeap objectHeap; private SymbolTable symTbl; @@ -983,6 +1068,10 @@ // oopSize of the debuggee private int OBJ_ID_SIZE; + // Added for hprof file format 1.0.2 support + private boolean useSegmentedHeapDump; + private long currentSegmentStart; + private long BOOLEAN_BASE_OFFSET; private long BYTE_BASE_OFFSET; private long CHAR_BASE_OFFSET; @@ -1005,6 +1094,7 @@ private static class ClassData { int instSize; List fields; + ClassData(int instSize, List fields) { this.instSize = instSize; this.fields = fields; diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.asm.hsail/src/com/oracle/graal/asm/hsail/HSAILAssembler.java --- a/graal/com.oracle.graal.asm.hsail/src/com/oracle/graal/asm/hsail/HSAILAssembler.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.asm.hsail/src/com/oracle/graal/asm/hsail/HSAILAssembler.java Thu Nov 21 15:04:54 2013 +0100 @@ -23,6 +23,8 @@ package com.oracle.graal.asm.hsail; +import java.lang.reflect.*; + import com.oracle.graal.api.code.*; import static com.oracle.graal.api.code.MemoryBarriers.*; @@ -75,23 +77,35 @@ } /** - * An Object is only moved into a register when it is a class constant (which is not really a - * constant because it can be moved by GC). Because we can't patch the HSAIL once it is - * finalized, we handle changes due to GC movement by dereferencing a global reference that is - * created by JNI since these JNI global references do not move. + * Moves an Object into a register. + * + * Because Object references become stale after garbage collection (GC), the technique used here + * is to load a JNI global reference to that Object into the register. These JNI global + * references get updated by the GC whenever the GC moves an Object. + * + * @param a the destination register + * @param obj the Object being moved */ public final void mov(Register a, Object obj) { String regName = "$d" + a.encoding(); - if (obj instanceof Class) { - Class clazz = (Class) obj; - long refHandle = OkraUtil.getRefHandle(clazz); - String className = clazz.getName(); - emitString("mov_b64 " + regName + ", 0x" + Long.toHexString(refHandle) + "; // handle for " + className); - emitString("ld_global_u64 " + regName + ", [" + regName + "];"); - } else if (obj == null) { + // For a null object simply move 0x0 into the destination register. + if (obj == null) { emitString("mov_b64 " + regName + ", 0x0; // null object"); } else { - throw GraalInternalError.shouldNotReachHere("mov from object not a class"); + // Get a JNI reference handle to the object. + long refHandle = OkraUtil.getRefHandle(obj); + // Get the class name of the object for emitting a comment. + Class clazz = obj.getClass(); + String className = clazz.getName(); + String comment = "// handle for object of type " + className; + // If the object is an array note the array length in the comment. + if (className.startsWith("[")) { + comment += ", length " + Array.getLength(obj); + } + // First move the reference handle into a register. + emitString("mov_b64 " + regName + ", 0x" + Long.toHexString(refHandle) + "; " + comment); + // Next load the Object addressed by this reference handle into the destination reg. + emitString("ld_global_u64 " + regName + ", [" + regName + "];"); } } @@ -247,9 +261,22 @@ return prefix; } + /** + * Emits a compare instruction.
+ * + * @param src0 - the first source register + * @param src1 - the second source register + * @param condition - the compare condition, i.e., eq, ne, lt, gt + * @param unordered - flag specifying if this is an unordered compare. This only applies to + * float compares. + * @param isUnsignedCompare - flag specifying if this is a compare of unsigned values. + */ public void emitCompare(Value src0, Value src1, String condition, boolean unordered, boolean isUnsignedCompare) { + // Formulate the prefix of the instruction. String prefix = "cmp_" + condition + (unordered ? "u" : "") + "_b1_" + (isUnsignedCompare ? getArgTypeForceUnsigned(src1) : getArgType(src1)); + // Generate a comment for debugging purposes. String comment = (isConstant(src1) && (src1.getKind() == Kind.Object) && (asConstant(src1).asObject() == null) ? " // null test " : ""); + // Emit the instruction. emitString(prefix + " $c0, " + mapRegOrConstToString(src0) + ", " + mapRegOrConstToString(src1) + ";" + comment); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.compiler.hsail.test/src/com/oracle/graal/compiler/hsail/test/IntLookupSwitchTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.hsail.test/src/com/oracle/graal/compiler/hsail/test/IntLookupSwitchTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.graal.compiler.hsail.test; + +import org.junit.Test; +import com.oracle.graal.compiler.hsail.test.infra.*; + +/** + * Tests a switch statement with integer keys. This test exercises the LOOKUPSWITCH Java bytecode + * instruction. + * + * The HSAIL code generated for this example is a series of cascading compare and branch + * instructions for each case of the switch. + * + * These HSAIL instructions have the following form: + * + * + * //Check whether the key matches the key constant of the case. Store the result of the compare (0 + * or 1) in the control register c0. + * + * cmp_eq $c0 <key register>, <case key constant> + * + * //Branch to the corresponding label of that case if there's a match. + * + * cbr $c0 <label of the case> + */ +public class IntLookupSwitchTest extends GraalKernelTester { + + static final int num = 20; + // Output array storing the results of the operations. + @Result protected int[] outArray = new int[num]; + + /** + * The static "kernel" method we will be testing. This method writes to an output array based on + * switching on an element of an input array. By convention the gid is the last parameter.
+ * + * Note: Because the key constants used in the cases of the switch are sparsely distributed, the + * Java source compiler compiles this example into the LOOKUPSWITCH bytecode instruction. So + * this is really a test to see whether the HSAIL backend is appropriately handling the + * LOOKUPSWITCH bytecode. + * + * @param out the output array + * @param ina the input array + * @param gid the parameter used to index into the input and output arrays + */ + public static void run(int[] out, int[] ina, int gid) { + switch (ina[gid]) { + case 0: + out[gid] = ina[gid]; + break; + case 1: + case 2: + break; + case 5: + out[gid] = ina[gid] * ina[gid]; + break; + case 10: + out[gid] = -ina[gid]; + break; + case 15: + out[gid] = ina[gid] - ina[gid]; + break; + case 19: + out[gid] = ina[gid] + ina[gid]; + break; + default: + out[gid] = 9; + break; + } + out[gid] += ina[gid]; + } + + /** + * Tests the HSAIL code generated for this unit test by comparing the result of executing this + * code with the result of executing a sequential Java version of this unit test. + */ + @Test + public void test() { + super.testGeneratedHsail(); + } + + /** + * Initializes the input and output arrays passed to the run routine. + * + * @param in the input array + */ + void setupArrays(int[] in) { + for (int i = 0; i < num; i++) { + in[i] = i < num / 2 ? i : -i; + outArray[i] = 0; + } + } + + /** + * Dispatches the HSAIL kernel for this test case. + */ + @Override + public void runTest() { + int[] inArray = new int[num]; + setupArrays(inArray); + dispatchMethodKernel(num, outArray, inArray); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.compiler.hsail.test/src/com/oracle/graal/compiler/hsail/test/IntTableSwitchTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.hsail.test/src/com/oracle/graal/compiler/hsail/test/IntTableSwitchTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.graal.compiler.hsail.test; + +import org.junit.Test; +import com.oracle.graal.compiler.hsail.test.infra.*; + +/** + * Tests a switch statement with integer keys. This test exercises the TABLESWITCH Java bytecode + * instruction. + * + * The HSAIL code generated for this example is a series of cascading compare and branch + * instructions for each case of the switch. + * + * These instructions have the following form: + * + * + * //Check whether the key matches the key constant of the case.
Store the result of the compare (0 + * or 1) in the control register c0. + * + * cmp_eq $c0 <key register>, <case key constant> + * + * //Branch to the corresponding label of that case if there's a match. + * + * cbr $c0 <label of the case> + */ +public class IntTableSwitchTest extends GraalKernelTester { + + static final int num = 20; + // Output array storing the results of the operations. + @Result protected int[] outArray = new int[num]; + + /** + * The static "kernel" method we will be testing. This method writes to an output array based on + * switching on an element of an input array. + * + * Note: Because the key constants used in the cases of the switch are in consecutive order, the + * Java source compiler compiles this example into the TABLESWITCH bytecode instruction. So this + * is really a test to see whether the HSAIL backend is appropriately handling the TABLESWITCH + * bytecode. + * + * @param out the output array + * @param ina the input array + * @param gid the parameter used to index into the input and output arrays + */ + public static void run(int[] out, int[] ina, int gid) { + switch (ina[gid]) { + case 0: + out[gid] = ina[gid]; + break; + case 1: + out[gid] = ina[gid] * ina[gid]; + break; + case 2: + out[gid] = -ina[gid]; + break; + case 3: + out[gid] = ina[gid] - ina[gid]; + break; + case 4: + out[gid] = ina[gid] + ina[gid]; + break; + case 5: + case 6: + out[gid] = ina[gid] * ina[gid]; + break; + case 7: + out[gid] = -ina[gid] * 7; + break; + case 8: + break; + case 9: + int i = ina[gid] * 5; + out[gid] = ina[gid] - ina[gid] + i; + break; + case 10: + out[gid] = ina[gid] + ina[gid]; + break; + default: + out[gid] = 9; + break; + } + out[gid] += ina[gid]; + } + + /** + * Tests the HSAIL code generated for this unit test by comparing the result of executing this + * code with the result of executing a sequential Java version of this unit test. + */ + @Test + public void test() { + super.testGeneratedHsail(); + } + + /** + * Initializes the input and output arrays passed to the run routine. + * + * @param in the input array + */ + void setupArrays(int[] in) { + for (int i = 0; i < num; i++) { + in[i] = i < num / 2 ? i : -i; + outArray[i] = 0; + } + } + + /** + * Dispatches the HSAIL kernel for this test case. + */ + @Override + public void runTest() { + int[] inArray = new int[num]; + setupArrays(inArray); + dispatchMethodKernel(num, outArray, inArray); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.compiler.hsail.test/src/com/oracle/graal/compiler/hsail/test/StringSwitchTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.hsail.test/src/com/oracle/graal/compiler/hsail/test/StringSwitchTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.graal.compiler.hsail.test; + +import static org.junit.Assume.*; + +import org.junit.Test; +import com.oracle.graal.compiler.hsail.test.infra.*; + +/** + * Tests a switch statement with String literal keys. + * + * Note: In Java bytecode, this example reduces to a LOOKUPSWITCH over int keys because the Java + * source compiler generates a call to String.hashCode() to convert to int values. + * + * The HSAIL code generated for this example is a series of cascading compare and branch + * instructions for each case of the switch. + * + * These instructions have the following form: + * + * + * //Check whether the key matches the key constant of the case. Store the result of the compare (0 + * or 1) in the control register c0. + * + * cmp_eq $c0 <key register>, <case key constant> + * + * //Branch to the corresponding label of that case if there's a match. + * + * cbr $c0 <label of the case> + */ +public class StringSwitchTest extends GraalKernelTester { + + static final int num = 40; + // Output array storing the results of the operations. + @Result protected int[] outArray = new int[num]; + + // Array of Strings + String[] names = {"0-42L", "0-43-", "Mazda", "Nissan", "Chevrolet", "Porsche", "Ford Focus", "Volvo", "Cadillac", "BMW", "Indy Car", "Police Car", "Lexus", "Datsun", "Saab", "Volkswagen", + "Honda Civic", "Jeeo Wrangler", "Toyota", "Mustang", "Chrysler", "Subaru"}; + + /** + * The static "kernel" method we will be testing. This method performs a switch statement over a + * String literal key. + * + * @param out the output array + * @param ina the input array of String literal keys + * @param gid the parameter used to index into the input and output arrays + */ + public static void run(int[] out, String[] ina, int gid) { + switch (ina[gid]) { + case "Mazda": + out[gid] = 1; + break; + case "Nissan": + out[gid] = 2; + break; + case "Chevrolet": + out[gid] = 3; + break; + case "Porsche": + out[gid] = 4; + break; + case "Jeep Wrangler": + out[gid] = 5; + break; + case "Toyota": + out[gid] = 6; + break; + case "0-42L": + out[gid] = 890; + break; + case "0-43-": + out[gid] = 995; + break; + case "Chrysler": + out[gid] = 7; + break; + case "Mitsubishi": + out[gid] = 8; + break; + case "Ford Focus": + out[gid] = 9; + break; + case "Volvo": + out[gid] = 10; + break; + case "Subaru": + out[gid] = 11; + break; + case "BMW": + out[gid] = 12; + break; + case "Indy Car": + out[gid] = 13; + break; + case "Police Car": + out[gid] = 14; + break; + } + } + + /** + * Tests the HSAIL code generated for this unit test by comparing the result of executing this + * code with the result of executing a sequential Java version of this unit test. + */ + @Test + public void test() { + // This test is only run if inlining is enabled since it requires method call support. + assumeTrue(aggressiveInliningEnabled() || canHandleHSAILMethodCalls()); + super.testGeneratedHsail(); + } + + /** + * Initializes the input and output arrays passed to the run routine. + * + * @param in the input array + */ + void setupArrays(String[] in) { + for (int i = 0; i < num; i++) { + // fill the input array with Strings.
+ in[i] = names[i % names.length]; + outArray[i] = 0; + } + } + + /** + * Dispatches the HSAIL kernel for this test case. + */ + @Override + public void runTest() { + String[] inArray = new String[num]; + setupArrays(inArray); + dispatchMethodKernel(num, outArray, inArray); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILLIRGenerator.java --- a/graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILLIRGenerator.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILLIRGenerator.java Thu Nov 21 15:04:54 2013 +0100 @@ -47,12 +47,14 @@ import com.oracle.graal.lir.hsail.HSAILControlFlow.FloatCompareBranchOp; import com.oracle.graal.lir.hsail.HSAILControlFlow.FloatCondMoveOp; import com.oracle.graal.lir.hsail.HSAILControlFlow.ReturnOp; +import com.oracle.graal.lir.hsail.HSAILControlFlow.SwitchOp; import com.oracle.graal.lir.hsail.HSAILMove.LeaOp; import com.oracle.graal.lir.hsail.HSAILMove.MembarOp; import com.oracle.graal.lir.hsail.HSAILMove.MoveFromRegOp; import com.oracle.graal.lir.hsail.HSAILMove.MoveToRegOp; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.calc.*; +import com.oracle.graal.nodes.extended.*; import com.oracle.graal.phases.util.*; /** @@ -688,9 +690,61 @@ append(new ReturnOp(input)); } + /** + * This routine handles the LIR code generation for switch nodes by calling + * emitSequentialSwitch. + * + * This routine overrides LIRGenerator.emitSwitch(), which calls emitSequentialSwitch or + * emitTableSwitch based on a heuristic. + * + * The recommended approach in HSAIL for generating performant code for switch statements is to + * emit a series of cascading compares and branches. Thus this routine always calls + * emitSequentialSwitch, which implements this approach. + * + * Note: Only IntegerSwitchNodes are currently supported. The IntegerSwitchNode is the node that + * Graal generates for any switch construct appearing in Java bytecode. + * + * @param x the SwitchNode + */ + @Override + public void emitSwitch(SwitchNode x) { + // get the key of the switch. + Variable key = load(operand(x.value())); + // set the default target. + LabelRef defaultTarget = x.defaultSuccessor() == null ? null : getLIRBlock(x.defaultSuccessor()); + // emit a sequential switch for the specified key and default target. + emitSequentialSwitch(x, key, defaultTarget); + } + + /** + * Generates the LIR instruction for a switch construct that is meant to be assembled into a + * series of cascading compare and branch instructions. This is currently the recommended way of + * generating performant HSAIL code for switch constructs. + * + * In Java bytecode the keys for switch statements are always ints. + * + * The x86 backend also adds support for handling keys of type long or Object but these two + * special cases are for handling the TypeSwitchNode, which is a node that the JVM produces for + * handling operations related to method dispatch. We haven't yet added support for the + * TypeSwitchNode, so for the time being we have added a check to ensure that the keys are of + * type int. This also allows us to flag any test cases/execution paths that may trigger the + * creation of a TypeSwitchNode, which we don't support yet. + * + * + * @param keyConstants array of key constants used for the case statements. + * @param keyTargets array of branch targets for each of the cases.
+ * @param defaultTarget the branch target for the default case. + * @param key the key that is compared against the key constants in the case statements. + */ @Override protected void emitSequentialSwitch(Constant[] keyConstants, LabelRef[] keyTargets, LabelRef defaultTarget, Value key) { - throw GraalInternalError.unimplemented(); + if (key.getKind() == Kind.Int) { + // Append the LIR instruction for generating compare and branch instructions. + append(new SwitchOp(keyConstants, keyTargets, defaultTarget, key)); + } else { + // Throw an exception if the keys aren't ints. + throw GraalInternalError.unimplemented("Switch statements are only supported for keys of type int"); + } } @Override diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MemoryScheduleTest.java --- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MemoryScheduleTest.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MemoryScheduleTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -22,6 +22,7 @@ */ package com.oracle.graal.compiler.test; +import static com.oracle.graal.phases.GraalOptions.*; import static org.junit.Assert.*; import java.util.*; @@ -37,6 +38,8 @@ import com.oracle.graal.nodes.cfg.*; import com.oracle.graal.nodes.extended.*; import com.oracle.graal.nodes.util.*; +import com.oracle.graal.options.*; +import com.oracle.graal.options.OptionValue.OverrideScope; import com.oracle.graal.phases.*; import com.oracle.graal.phases.common.*; import com.oracle.graal.phases.schedule.*; @@ -408,6 +411,30 @@ } } + /** + * read should move inside the loop (out of loop is disabled). + */ + public static int testBlockSchedule2Snippet(int value) { + int res = 0; + + container.a = value; + for (int i = 0; i < 100; i++) { + if (i == 10) { + return container.a; + } + res += i; + } + return res; + } + + @Test + public void testBlockSchedule2() { + SchedulePhase schedule = getFinalSchedule("testBlockSchedule2Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, SchedulingStrategy.LATEST); + assertReadWithinStartBlock(schedule, false); + assertReadWithinReturnBlock(schedule, false); + assertReadAndWriteInSameBlock(schedule, false); + } + /* * read of field a should be in first block, read of field b in loop begin block */ @@ -547,43 +574,51 @@ } private SchedulePhase getFinalSchedule(final String snippet, final TestMode mode, final MemoryScheduling memsched) { + return getFinalSchedule(snippet, mode, memsched, SchedulingStrategy.LATEST_OUT_OF_LOOPS); + } + + private SchedulePhase getFinalSchedule(final String snippet, final TestMode mode, final MemoryScheduling memsched, final SchedulingStrategy schedulingStrategy) { final StructuredGraph graph = parse(snippet); return Debug.scope("FloatingReadTest", graph, new Callable() { @Override public SchedulePhase call() throws Exception { - Assumptions assumptions = new Assumptions(false); - HighTierContext context = new HighTierContext(getProviders(), assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL); - new CanonicalizerPhase(true).apply(graph, context); - if (mode == TestMode.INLINED_WITHOUT_FRAMESTATES) { - new InliningPhase(new CanonicalizerPhase(true)).apply(graph, context); - } - new LoweringPhase(new CanonicalizerPhase(true)).apply(graph, context); - if (mode == TestMode.WITHOUT_FRAMESTATES || mode == TestMode.INLINED_WITHOUT_FRAMESTATES) { - for (Node node : graph.getNodes()) { - if (node instanceof StateSplit) { - 
FrameState stateAfter = ((StateSplit) node).stateAfter(); - if (stateAfter != null) { - ((StateSplit) node).setStateAfter(null); - GraphUtil.killWithUnusedFloatingInputs(stateAfter); + + try (OverrideScope s = OptionValue.override(OptScheduleOutOfLoops, schedulingStrategy == SchedulingStrategy.LATEST_OUT_OF_LOOPS)) { + Assumptions assumptions = new Assumptions(false); + HighTierContext context = new HighTierContext(getProviders(), assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL); + CanonicalizerPhase canonicalizer = new CanonicalizerPhase(true); + canonicalizer.apply(graph, context); + if (mode == TestMode.INLINED_WITHOUT_FRAMESTATES) { + new InliningPhase(canonicalizer).apply(graph, context); + } + new LoweringPhase(canonicalizer).apply(graph, context); + if (mode == TestMode.WITHOUT_FRAMESTATES || mode == TestMode.INLINED_WITHOUT_FRAMESTATES) { + for (Node node : graph.getNodes()) { + if (node instanceof StateSplit) { + FrameState stateAfter = ((StateSplit) node).stateAfter(); + if (stateAfter != null) { + ((StateSplit) node).setStateAfter(null); + GraphUtil.killWithUnusedFloatingInputs(stateAfter); + } } } } - } - Debug.dump(graph, "after removal of framestates"); + Debug.dump(graph, "after removal of framestates"); - new FloatingReadPhase().apply(graph); - new RemoveValueProxyPhase().apply(graph); + new FloatingReadPhase().apply(graph); + new RemoveValueProxyPhase().apply(graph); - MidTierContext midContext = new MidTierContext(getProviders(), assumptions, getCodeCache().getTarget(), OptimisticOptimizations.ALL); - new GuardLoweringPhase().apply(graph, midContext); - new LoweringPhase(new CanonicalizerPhase(true)).apply(graph, midContext); - new LoweringPhase(new CanonicalizerPhase(true)).apply(graph, midContext); + MidTierContext midContext = new MidTierContext(getProviders(), assumptions, getCodeCache().getTarget(), OptimisticOptimizations.ALL); + new GuardLoweringPhase().apply(graph, midContext); + new LoweringPhase(canonicalizer).apply(graph, midContext); + new LoweringPhase(canonicalizer).apply(graph, midContext); - SchedulePhase schedule = new SchedulePhase(SchedulingStrategy.LATEST_OUT_OF_LOOPS, memsched); - schedule.apply(graph); - assertEquals(1, graph.getNodes().filter(StartNode.class).count()); - return schedule; + SchedulePhase schedule = new SchedulePhase(schedulingStrategy, memsched); + schedule.apply(graph); + assertEquals(1, graph.getNodes().filter(StartNode.class).count()); + return schedule; + } } }); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/MethodFilter.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/MethodFilter.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/MethodFilter.java Thu Nov 21 15:04:54 2013 +0100 @@ -32,6 +32,7 @@ * parameters. The syntax for the source pattern that is passed to the constructor is as follows: * *
+ * SourcePatterns = SourcePattern ["," SourcePatterns] .
  * SourcePattern = [ Class "." ] method [ "(" [ Parameter { ";" Parameter } ] ")" ] .
  * Parameter = Class | "int" | "long" | "float" | "double" | "short" | "char" | "boolean" .
  * Class = { package "." } class .
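The new SourcePatterns production above makes ',' separate alternative patterns, as the hunk below spells out. A hypothetical sketch of how a comma-separated specification decomposes into per-pattern filters follows; the split-on-comma loop is the editor's assumption about how a caller consumes a multi-pattern specification, and only the single-pattern String constructor is implied by this javadoc.

// Illustrative sketch of the SourcePatterns production above; the
// split-on-comma loop is an assumption about how a caller consumes a
// multi-pattern specification, not code from this changeset.
String spec = "arraycopy,toString"; // a SourcePatterns with two alternatives
for (String sourcePattern : spec.split(",")) {
    // One MethodFilter per SourcePattern; a method matches the overall
    // specification if any one of these filters matches it.
    MethodFilter filter = new MethodFilter(sourcePattern);
}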
@@ -80,6 +81,14 @@
  * 
  * Matches all methods named "visit" in classes in the package
  * "com.oracle.graal.compiler.graph".
+ * <li>
+ *
+ * <pre>
+ * arraycopy,toString
+ * </pre>
+ *
+ * Matches all methods named "arraycopy" or "toString", meaning that ',' acts as an <i>or</i>
+ * operator.</li>
* </ul> */ public class MethodFilter { diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Thu Nov 21 15:04:54 2013 +0100 @@ -1874,6 +1874,7 @@ printLir("After register number assignment", true); EdgeMoveOptimizer.optimize(ir); ControlFlowOptimizer.optimize(ir); + NullCheckOptimizer.optimize(ir, target.implicitNullCheckLimit); printLir("After control flow optimization", false); } }); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.debug/src/com/oracle/graal/debug/Debug.java --- a/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/Debug.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/Debug.java Thu Nov 21 15:04:54 2013 +0100 @@ -353,8 +353,20 @@ return null; } + /** + * Creates a {@linkplain DebugMetric metric} that is enabled iff debugging is + * {@linkplain #isEnabled() enabled} or the system property whose name is formed by adding + * {@value #ENABLE_METRIC_PROPERTY_NAME_PREFIX} to {@code name} is + * {@linkplain Boolean#getBoolean(String) true}. If the latter condition is true, then the + * returned metric is {@linkplain DebugMetric#isConditional() unconditional}, otherwise it is + * conditional. + * <p>

+ * A disabled metric has virtually no overhead. + */ public static DebugMetric metric(String name) { - if (ENABLED) { + if (Boolean.getBoolean(ENABLE_METRIC_PROPERTY_NAME_PREFIX + name)) { + return new MetricImpl(name, false); + } else if (ENABLED) { return new MetricImpl(name, true); } else { return VOID_METRIC; @@ -438,10 +450,36 @@ public boolean isConditional() { return false; } + + public long getCurrentValue() { + return 0L; + } }; + /** + * @see #timer(String) + */ + public static final String ENABLE_TIMER_PROPERTY_NAME_PREFIX = "graal.debug.timer."; + + /** + * @see #metric(String) + */ + public static final String ENABLE_METRIC_PROPERTY_NAME_PREFIX = "graal.debug.metric."; + + /** + * Creates a {@linkplain DebugTimer timer} that is enabled iff debugging is + * {@linkplain #isEnabled() enabled} or the system property whose name is formed by adding + * {@value #ENABLE_TIMER_PROPERTY_NAME_PREFIX} to {@code name} is + * {@linkplain Boolean#getBoolean(String) true}. If the latter condition is true, then the + * returned timer is {@linkplain DebugMetric#isConditional() unconditional}, otherwise it is + * conditional. + * <p>

+ * A disabled timer has virtually no overhead. + */ public static DebugTimer timer(String name) { - if (ENABLED) { + if (Boolean.getBoolean(ENABLE_TIMER_PROPERTY_NAME_PREFIX + name)) { + return new TimerImpl(name, false); + } else if (ENABLED) { return new TimerImpl(name, true); } else { return VOID_TIMER; @@ -461,5 +499,13 @@ public boolean isConditional() { return false; } + + public long getCurrentValue() { + return 0L; + } + + public TimeUnit getTimeUnit() { + return null; + } }; } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.debug/src/com/oracle/graal/debug/DebugMetric.java --- a/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/DebugMetric.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/DebugMetric.java Thu Nov 21 15:04:54 2013 +0100 @@ -50,4 +50,9 @@ * enabled}. */ boolean isConditional(); + + /** + * Gets the current value of this metric. + */ + long getCurrentValue(); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.debug/src/com/oracle/graal/debug/DebugTimer.java --- a/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/DebugTimer.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/DebugTimer.java Thu Nov 21 15:04:54 2013 +0100 @@ -22,6 +22,8 @@ */ package com.oracle.graal.debug; +import java.util.concurrent.*; + import com.oracle.graal.debug.internal.*; /** @@ -56,4 +58,14 @@ * enabled}. */ boolean isConditional(); + + /** + * Gets the current value of this timer. + */ + long getCurrentValue(); + + /** + * Gets the time unit of this timer. + */ + TimeUnit getTimeUnit(); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/DebugValue.java --- a/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/DebugValue.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/DebugValue.java Thu Nov 21 15:04:54 2013 +0100 @@ -38,7 +38,7 @@ this.conditional = conditional; } - protected long getCurrentValue() { + public long getCurrentValue() { ensureInitialized(); return DebugScope.getInstance().getCurrentValue(index); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/TimerImpl.java --- a/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/TimerImpl.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/TimerImpl.java Thu Nov 21 15:04:54 2013 +0100 @@ -23,6 +23,7 @@ package com.oracle.graal.debug.internal; import java.lang.management.*; +import java.util.concurrent.*; import com.oracle.graal.debug.*; @@ -87,6 +88,10 @@ return valueToString(value); } + public TimeUnit getTimeUnit() { + return TimeUnit.NANOSECONDS; + } + private abstract class AbstractTimer implements TimerCloseable { private final AbstractTimer parent; diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeList.java --- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeList.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeList.java Thu Nov 21 15:04:54 2013 +0100 @@ -26,7 +26,7 @@ import com.oracle.graal.graph.iterators.*; -public abstract class NodeList<T extends Node> extends AbstractList<T> implements NodeIterable<T> { +public abstract class NodeList<T extends Node> extends AbstractList<T> implements NodeIterable<T>, RandomAccess { protected static final Node[] EMPTY_NODE_ARRAY = new Node[0]; diff -r
790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackendFactory.java --- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackendFactory.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackendFactory.java Thu Nov 21 15:04:54 2013 +0100 @@ -52,12 +52,13 @@ assert host == null; TargetDescription target = createTarget(runtime.getConfig()); + HotSpotRegistersProvider registers = createRegisters(); HotSpotMetaAccessProvider metaAccess = createMetaAccess(runtime); HotSpotCodeCacheProvider codeCache = createCodeCache(runtime, target); HotSpotConstantReflectionProvider constantReflection = createConstantReflection(runtime); Value[] nativeABICallerSaveRegisters = createNativeABICallerSaveRegisters(runtime.getConfig(), codeCache.getRegisterConfig()); HotSpotHostForeignCallsProvider foreignCalls = createForeignCalls(runtime, metaAccess, codeCache, nativeABICallerSaveRegisters); - HotSpotHostLoweringProvider lowerer = createLowerer(runtime, metaAccess, foreignCalls); + HotSpotLoweringProvider lowerer = createLowerer(runtime, metaAccess, foreignCalls, registers); // Replacements cannot have speculative optimizations since they have // to be valid for the entire run of the VM. Assumptions assumptions = new Assumptions(false); @@ -65,7 +66,6 @@ Replacements replacements = createReplacements(runtime, assumptions, p); HotSpotDisassemblerProvider disassembler = createDisassembler(runtime); HotSpotSuitesProvider suites = createSuites(runtime); - HotSpotRegisters registers = createRegisters(); HotSpotProviders providers = new HotSpotProviders(metaAccess, codeCache, constantReflection, foreignCalls, lowerer, replacements, disassembler, suites, registers); return createBackend(runtime, providers); @@ -75,7 +75,7 @@ return new AMD64HotSpotBackend(runtime, providers); } - protected HotSpotRegisters createRegisters() { + protected HotSpotRegistersProvider createRegisters() { return new HotSpotRegisters(AMD64.r15, AMD64.r12, AMD64.rsp); } @@ -108,8 +108,8 @@ return new HotSpotSuitesProvider(runtime); } - protected AMD64HotSpotLoweringProvider createLowerer(HotSpotGraalRuntime runtime, HotSpotMetaAccessProvider metaAccess, HotSpotForeignCallsProvider foreignCalls) { - return new AMD64HotSpotLoweringProvider(runtime, metaAccess, foreignCalls); + protected AMD64HotSpotLoweringProvider createLowerer(HotSpotGraalRuntime runtime, HotSpotMetaAccessProvider metaAccess, HotSpotForeignCallsProvider foreignCalls, HotSpotRegistersProvider registers) { + return new AMD64HotSpotLoweringProvider(runtime, metaAccess, foreignCalls, registers); } protected Value[] createNativeABICallerSaveRegisters(HotSpotVMConfig config, RegisterConfig regConfig) { diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java --- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java Thu Nov 21 15:04:54 2013 +0100 @@ -76,7 +76,7 @@ } @Override - protected HotSpotProviders getProviders() { + public HotSpotProviders getProviders() { return (HotSpotProviders) super.getProviders(); } diff -r 790ebab62d23 -r f9f4503a4ab5 
graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLoweringProvider.java --- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLoweringProvider.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLoweringProvider.java Thu Nov 21 15:04:54 2013 +0100 @@ -31,12 +31,12 @@ import com.oracle.graal.nodes.spi.*; import com.oracle.graal.replacements.amd64.*; -public class AMD64HotSpotLoweringProvider extends HotSpotHostLoweringProvider { +public class AMD64HotSpotLoweringProvider extends HotSpotLoweringProvider { private AMD64ConvertSnippets.Templates convertSnippets; - public AMD64HotSpotLoweringProvider(HotSpotGraalRuntime runtime, MetaAccessProvider metaAccess, ForeignCallsProvider foreignCalls) { - super(runtime, metaAccess, foreignCalls); + public AMD64HotSpotLoweringProvider(HotSpotGraalRuntime runtime, MetaAccessProvider metaAccess, ForeignCallsProvider foreignCalls, HotSpotRegistersProvider registers) { + super(runtime, metaAccess, foreignCalls, registers); } @Override diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotBackend.java --- a/graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotBackend.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotBackend.java Thu Nov 21 15:04:54 2013 +0100 @@ -64,6 +64,14 @@ return true; } + @Override + public void completeInitialization() { + final HotSpotProviders providers = getProviders(); + HotSpotVMConfig config = getRuntime().getConfig(); + final HotSpotLoweringProvider lowerer = (HotSpotLoweringProvider) providers.getLowerer(); + lowerer.initialize(providers, config); + } + /** * Use the HSAIL register set when the compilation target is HSAIL. */ diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotBackendFactory.java --- a/graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotBackendFactory.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotBackendFactory.java Thu Nov 21 15:04:54 2013 +0100 @@ -38,11 +38,12 @@ public HSAILHotSpotBackend createBackend(HotSpotGraalRuntime runtime, HotSpotBackend hostBackend) { HotSpotProviders host = hostBackend.getProviders(); + HotSpotRegisters registers = new HotSpotRegisters(Register.None, Register.None, Register.None); HotSpotMetaAccessProvider metaAccess = host.getMetaAccess(); HSAILHotSpotCodeCacheProvider codeCache = new HSAILHotSpotCodeCacheProvider(runtime, createTarget()); ConstantReflectionProvider constantReflection = host.getConstantReflection(); HotSpotForeignCallsProvider foreignCalls = new HSAILHotSpotForeignCallsProvider(runtime, metaAccess, codeCache); - LoweringProvider lowerer = new HSAILHotSpotLoweringProvider(host.getLowerer()); + LoweringProvider lowerer = new HSAILHotSpotLoweringProvider(runtime, metaAccess, foreignCalls, registers); // Replacements cannot have speculative optimizations since they have // to be valid for the entire run of the VM. 
Assumptions assumptions = new Assumptions(false); @@ -50,7 +51,6 @@ Replacements replacements = new HSAILHotSpotReplacementsImpl(p, assumptions, host.getReplacements()); HotSpotDisassemblerProvider disassembler = host.getDisassembler(); HotSpotSuitesProvider suites = host.getSuites(); - HotSpotRegisters registers = new HotSpotRegisters(Register.None, Register.None, Register.None); HotSpotProviders providers = new HotSpotProviders(metaAccess, codeCache, constantReflection, foreignCalls, lowerer, replacements, disassembler, suites, registers); return new HSAILHotSpotBackend(runtime, providers); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotLoweringProvider.java --- a/graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotLoweringProvider.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotLoweringProvider.java Thu Nov 21 15:04:54 2013 +0100 @@ -22,30 +22,27 @@ */ package com.oracle.graal.hotspot.hsail; +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; import com.oracle.graal.graph.*; -import com.oracle.graal.nodes.*; +import com.oracle.graal.hotspot.*; +import com.oracle.graal.hotspot.meta.*; import com.oracle.graal.nodes.calc.*; -import com.oracle.graal.nodes.extended.*; import com.oracle.graal.nodes.spi.*; -public class HSAILHotSpotLoweringProvider implements LoweringProvider { +public class HSAILHotSpotLoweringProvider extends HotSpotLoweringProvider { - private LoweringProvider host; - - public HSAILHotSpotLoweringProvider(LoweringProvider host) { - this.host = host; + public HSAILHotSpotLoweringProvider(HotSpotGraalRuntime runtime, MetaAccessProvider metaAccess, ForeignCallsProvider foreignCalls, HotSpotRegistersProvider registers) { + super(runtime, metaAccess, foreignCalls, registers); } + @Override public void lower(Node n, LoweringTool tool) { if (n instanceof ConvertNode) { - // TODO return; } else { - host.lower(n, tool); + super.lower(n, tool); } } - public ValueNode reconstructArrayIndex(LocationNode location) { - throw GraalInternalError.unimplemented(); - } } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotReplacementsImpl.java --- a/graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotReplacementsImpl.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.hsail/src/com/oracle/graal/hotspot/hsail/HSAILHotSpotReplacementsImpl.java Thu Nov 21 15:04:54 2013 +0100 @@ -62,8 +62,8 @@ @Override public StructuredGraph getSnippet(ResolvedJavaMethod method) { - // TODO must work in cooperation with HSAILHotSpotLoweringProvider - return null; + // Must work in cooperation with HSAILHotSpotLoweringProvider + return host.getSnippet(method); } @Override diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackendFactory.java --- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackendFactory.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackendFactory.java Thu Nov 21 15:04:54 2013 +0100 @@ -50,12 +50,13 @@ assert host == null; TargetDescription target = createTarget(); + HotSpotRegistersProvider registers = new HotSpotRegisters(Register.None, Register.None, Register.None); // FIXME 
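
The HSAILHotSpotLoweringProvider rewrite above replaces delegation (wrapping a host LoweringProvider) with inheritance from the shared base class. A sketch of the same shape under simplified, made-up names:

class BaseLowererSketch {
    public void lower(Object node) {
        System.out.println("shared HotSpot lowering handles " + node);
    }
}

final class ConvertNodeSketch {
}

public final class HsailLowererSketch extends BaseLowererSketch {
    @Override
    public void lower(Object node) {
        if (node instanceof ConvertNodeSketch) {
            return; // conversions stay unlowered on HSAIL, as in the hunk above
        }
        super.lower(node); // everything else falls through to the base class
    }

    public static void main(String[] args) {
        HsailLowererSketch lowerer = new HsailLowererSketch();
        lowerer.lower(new ConvertNodeSketch()); // handled locally, prints nothing
        lowerer.lower("SomeOtherNode");         // delegated to the shared base
    }
}

Compared with the old wrapper, the subclass no longer needs to stub out methods such as reconstructArrayIndex; it simply inherits them.
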
HotSpotMetaAccessProvider metaAccess = new HotSpotMetaAccessProvider(runtime); HotSpotCodeCacheProvider codeCache = new SPARCHotSpotCodeCacheProvider(runtime, target); HotSpotConstantReflectionProvider constantReflection = new HotSpotConstantReflectionProvider(runtime); Value[] nativeABICallerSaveRegisters = createNativeABICallerSaveRegisters(runtime.getConfig(), codeCache.getRegisterConfig()); HotSpotForeignCallsProvider foreignCalls = new SPARCHotSpotForeignCallsProvider(runtime, metaAccess, codeCache, nativeABICallerSaveRegisters); - LoweringProvider lowerer = new SPARCHotSpotLoweringProvider(runtime, metaAccess, foreignCalls); + LoweringProvider lowerer = new SPARCHotSpotLoweringProvider(runtime, metaAccess, foreignCalls, registers); // Replacements cannot have speculative optimizations since they have // to be valid for the entire run of the VM. Assumptions assumptions = new Assumptions(false); @@ -63,7 +64,6 @@ HotSpotReplacementsImpl replacements = new HotSpotReplacementsImpl(p, runtime.getConfig(), assumptions); HotSpotDisassemblerProvider disassembler = new HotSpotDisassemblerProvider(runtime); HotSpotSuitesProvider suites = new HotSpotSuitesProvider(runtime); - HotSpotRegisters registers = new HotSpotRegisters(Register.None, Register.None, Register.None); // FIXME HotSpotProviders providers = new HotSpotProviders(metaAccess, codeCache, constantReflection, foreignCalls, lowerer, replacements, disassembler, suites, registers); return new SPARCHotSpotBackend(runtime, providers); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java --- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java Thu Nov 21 15:04:54 2013 +0100 @@ -57,7 +57,7 @@ } @Override - protected HotSpotProviders getProviders() { + public HotSpotProviders getProviders() { return (HotSpotProviders) super.getProviders(); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLoweringProvider.java --- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLoweringProvider.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLoweringProvider.java Thu Nov 21 15:04:54 2013 +0100 @@ -30,10 +30,10 @@ import com.oracle.graal.nodes.calc.*; import com.oracle.graal.nodes.spi.*; -public class SPARCHotSpotLoweringProvider extends HotSpotHostLoweringProvider { +public class SPARCHotSpotLoweringProvider extends HotSpotLoweringProvider { - public SPARCHotSpotLoweringProvider(HotSpotGraalRuntime runtime, MetaAccessProvider metaAccess, ForeignCallsProvider foreignCalls) { - super(runtime, metaAccess, foreignCalls); + public SPARCHotSpotLoweringProvider(HotSpotGraalRuntime runtime, MetaAccessProvider metaAccess, ForeignCallsProvider foreignCalls, HotSpotRegistersProvider registers) { + super(runtime, metaAccess, foreignCalls, registers); } @Override diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/CompilationTask.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/CompilationTask.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/CompilationTask.java Thu Nov 21 15:04:54 2013 +0100 @@ 
-25,6 +25,7 @@ import static com.oracle.graal.api.code.CodeUtil.*; import static com.oracle.graal.nodes.StructuredGraph.*; import static com.oracle.graal.phases.GraalOptions.*; +import static com.oracle.graal.phases.common.InliningUtil.*; import java.lang.reflect.*; import java.util.concurrent.*; @@ -33,15 +34,15 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.code.CallingConvention.Type; import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.CompilerThreadFactory.CompilerThread; import com.oracle.graal.compiler.*; -import com.oracle.graal.compiler.CompilerThreadFactory.CompilerThread; import com.oracle.graal.debug.*; import com.oracle.graal.debug.internal.*; +import com.oracle.graal.hotspot.bridge.*; import com.oracle.graal.hotspot.meta.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.spi.*; import com.oracle.graal.phases.*; -import com.oracle.graal.phases.common.*; import com.oracle.graal.phases.tiers.*; public final class CompilationTask implements Runnable { @@ -119,6 +120,11 @@ * no code must be outside this try/finally because it could happen otherwise that * clearQueuedForCompilation() is not executed */ + + HotSpotVMConfig config = backend.getRuntime().getConfig(); + long previousInlinedBytecodes = InlinedBytecodes.getCurrentValue(); + long previousCompilationTime = CompilationTime.getCurrentValue(); + HotSpotInstalledCode installedCode = null; try (TimerCloseable a = CompilationTime.start()) { if (!tryToChangeStatus(CompilationStatus.Queued, CompilationStatus.Running) || method.hasCompiledCode()) { return; @@ -155,7 +161,7 @@ // Compiling method substitution - must clone the graph graph = graph.copy(); } - InliningUtil.InlinedBytecodes.add(method.getCodeSize()); + InlinedBytecodes.add(method.getCodeSize()); CallingConvention cc = getCallingConvention(providers.getCodeCache(), Type.JavaCallee, graph.method(), false); Suites suites = providers.getSuites().getDefaultSuites(); return GraalCompiler.compileGraph(graph, cc, method, providers, backend, backend.getTarget(), graphCache, plan, optimisticOpts, method.getSpeculationLog(), suites, @@ -173,7 +179,7 @@ } try (TimerCloseable b = CodeInstallationTime.start()) { - installMethod(result); + installedCode = installMethod(result); } stats.finish(method); } catch (BailoutException bailout) { @@ -194,6 +200,14 @@ System.exit(-1); } } finally { + if (config.ciTime && installedCode != null) { + long processedBytes = InlinedBytecodes.getCurrentValue() - previousInlinedBytecodes; + long time = CompilationTime.getCurrentValue() - previousCompilationTime; + TimeUnit timeUnit = CompilationTime.getTimeUnit(); + VMToCompiler vm2c = backend.getRuntime().getVMToCompiler(); + vm2c.notifyCompilationDone(id, method, entryBCI != INVOCATION_ENTRY_BCI, (int) processedBytes, time, timeUnit, installedCode); + } + assert method.isQueuedForCompilation(); method.clearQueuedForCompilation(); } @@ -214,12 +228,12 @@ MetaUtil.format("%H::%n(%p)", method), isOSR ? 
"@ " + entryBCI + " " : "", method.getCodeSize())); } - private void installMethod(final CompilationResult compResult) { + private HotSpotInstalledCode installMethod(final CompilationResult compResult) { final HotSpotCodeCacheProvider codeCache = backend.getProviders().getCodeCache(); - Debug.scope("CodeInstall", new Object[]{new DebugDumpScope(String.valueOf(id), true), codeCache, method}, new Runnable() { + return Debug.scope("CodeInstall", new Object[]{new DebugDumpScope(String.valueOf(id), true), codeCache, method}, new Callable() { @Override - public void run() { + public HotSpotInstalledCode call() { HotSpotInstalledCode installedCode = codeCache.installMethod(method, entryBCI, compResult); if (Debug.isDumpEnabled()) { Debug.dump(new Object[]{compResult, installedCode}, "After code installation"); @@ -227,6 +241,7 @@ if (Debug.isLogEnabled()) { Debug.log("%s", backend.getProviders().getDisassembler().disassemble(installedCode)); } + return installedCode; } }); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotHostBackend.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotHostBackend.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotHostBackend.java Thu Nov 21 15:04:54 2013 +0100 @@ -45,7 +45,7 @@ final HotSpotProviders providers = getProviders(); HotSpotVMConfig config = getRuntime().getConfig(); HotSpotHostForeignCallsProvider foreignCalls = (HotSpotHostForeignCallsProvider) providers.getForeignCalls(); - final HotSpotHostLoweringProvider lowerer = (HotSpotHostLoweringProvider) providers.getLowerer(); + final HotSpotLoweringProvider lowerer = (HotSpotLoweringProvider) providers.getLowerer(); foreignCalls.initialize(providers, config); lowerer.initialize(providers, config); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotLIRGenerator.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotLIRGenerator.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotLIRGenerator.java Thu Nov 21 15:04:54 2013 +0100 @@ -25,6 +25,7 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.gen.*; +import com.oracle.graal.hotspot.meta.*; import com.oracle.graal.hotspot.nodes.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.spi.*; @@ -55,4 +56,6 @@ * Gets a stack slot for a lock at a given lock nesting depth. */ StackSlot getLockSlot(int lockDepth); + + HotSpotProviders getProviders(); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotOptions.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotOptions.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotOptions.java Thu Nov 21 15:04:54 2013 +0100 @@ -27,13 +27,17 @@ import static java.nio.file.Files.*; import java.io.*; +import java.lang.reflect.*; import java.nio.charset.*; import java.nio.file.*; import java.util.*; import com.oracle.graal.debug.*; +import com.oracle.graal.graph.*; import com.oracle.graal.hotspot.logging.*; +import com.oracle.graal.java.*; import com.oracle.graal.options.*; +import com.oracle.graal.phases.common.*; /** * Called from {@code graalCompiler.cpp} to parse any Graal specific options. 
Such options are @@ -184,13 +188,47 @@ } /** + * Sets the relevant system property such that a {@link DebugTimer} or {@link DebugMetric} + * associated with a field in a class will be unconditionally enabled when it is created. + *
    + * This method verifies that the named field exists and is of an expected type. However, it does + * not verify that the timer or metric created has the same name as the field. + * + * @param c the class in which the field is declared + * @param name the name of the field + */ + private static void unconditionallyEnableTimerOrMetric(Class c, String name) { + try { + Field field = c.getDeclaredField(name); + String propertyName; + if (DebugTimer.class.isAssignableFrom(field.getType())) { + propertyName = Debug.ENABLE_TIMER_PROPERTY_NAME_PREFIX + name; + } else { + assert DebugMetric.class.isAssignableFrom(field.getType()); + propertyName = Debug.ENABLE_METRIC_PROPERTY_NAME_PREFIX + name; + } + String previous = System.setProperty(propertyName, "true"); + if (previous != null) { + Logger.info("Overrode value \"" + previous + "\" of system property \"" + propertyName + "\" with \"true\""); + } + } catch (Exception e) { + throw new GraalInternalError(e); + } + } + + /** * Called from VM code once all Graal command line options have been processed by * {@link #setOption(String)}. * * @param ciTime the value of the CITime HotSpot VM option */ public static void finalizeOptions(boolean ciTime) { - if (areDebugScopePatternsEnabled() || ciTime) { + if (ciTime) { + unconditionallyEnableTimerOrMetric(GraphBuilderPhase.class, "BytecodesParsed"); + unconditionallyEnableTimerOrMetric(InliningUtil.class, "InlinedBytecodes"); + unconditionallyEnableTimerOrMetric(CompilationTask.class, "CompilationTime"); + } + if (areDebugScopePatternsEnabled()) { assert !Debug.Initialization.isDebugInitialized(); System.setProperty(Debug.Initialization.INITIALIZER_PROPERTY_NAME, "true"); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java Thu Nov 21 15:04:54 2013 +0100 @@ -648,6 +648,7 @@ @HotSpotVMFlag(name = "CodeEntryAlignment") @Stable public int codeEntryAlignment; @HotSpotVMFlag(name = "VerifyOops") @Stable public boolean verifyOops; @HotSpotVMFlag(name = "CITime") @Stable public boolean ciTime; + @HotSpotVMFlag(name = "CITimeEach", optional = true) @Stable public boolean ciTimeEach; @HotSpotVMFlag(name = "CompileThreshold") @Stable public long compileThreshold; @HotSpotVMFlag(name = "CompileTheWorld") @Stable public boolean compileTheWorld; @HotSpotVMFlag(name = "CompileTheWorldStartAt") @Stable public int compileTheWorldStartAt; @@ -737,6 +738,16 @@ @HotSpotVMConstant(name = "GRAAL_COUNTERS_SIZE", optional = true) @Stable public int graalCountersSize; + @HotSpotVMField(name = "CompilerStatistics::_standard", get = HotSpotVMField.Type.OFFSET) @Stable public long compilerStatisticsStandardOffset; + @HotSpotVMField(name = "CompilerStatistics::_osr", get = HotSpotVMField.Type.OFFSET) @Stable public long compilerStatisticsOsrOffset; + @HotSpotVMField(name = "CompilerStatistics::_nmethods_size", get = HotSpotVMField.Type.OFFSET) @Stable public long compilerStatisticsNmethodsSizeOffset; + @HotSpotVMField(name = "CompilerStatistics::_nmethods_code_size", get = HotSpotVMField.Type.OFFSET) @Stable public long compilerStatisticsNmethodsCodeSizeOffset; + @HotSpotVMField(name = "CompilerStatistics::Data::_bytes", get = HotSpotVMField.Type.OFFSET) @Stable public long compilerStatisticsDataBytesOffset; + @HotSpotVMField(name =
"CompilerStatistics::Data::_time", get = HotSpotVMField.Type.OFFSET) @Stable public long compilerStatisticsDataTimeOffset; + @HotSpotVMField(name = "CompilerStatistics::Data::_count", get = HotSpotVMField.Type.OFFSET) @Stable public long compilerStatisticsDataCountOffset; + @HotSpotVMField(name = "elapsedTimer::_counter", get = HotSpotVMField.Type.OFFSET) @Stable public long elapsedTimerCounterOffset; + @Stable public long elapsedTimerFrequency; + /** * This field is used to pass exception objects into and out of the runtime system during * exception handling for compiled code. diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/MetricRateInPhase.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/MetricRateInPhase.java Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.hotspot.bridge; - -import java.io.*; -import java.util.concurrent.*; - -import com.oracle.graal.debug.*; -import com.oracle.graal.debug.internal.*; - -/** - * The rate of accumulation for a metric within an execution phase. 
- */ -final class MetricRateInPhase { - - private final String phase; - private final MetricRateInPhase previous; - private final long time; - private final long value; - private final TimeUnit timeUnit; - - public static MetricRateInPhase snapshot(String phase, MetricRateInPhase previous, DebugMetric metric, DebugTimer timer, TimeUnit timeUnit) { - return new MetricRateInPhase(phase, previous, metric, timer, timeUnit); - } - - private MetricRateInPhase(String phase, MetricRateInPhase previous, DebugMetric metric, DebugTimer timer, TimeUnit timeUnit) { - this.phase = phase; - this.previous = previous; - this.time = VMToCompilerImpl.collectTotal((DebugValue) timer); - this.value = VMToCompilerImpl.collectTotal((DebugValue) metric); - this.timeUnit = timeUnit; - } - - public int rate() { - long t = time; - long v = value; - if (previous != null) { - t -= previous.time; - v -= previous.value; - } - - t = timeUnit.convert(t, TimeUnit.NANOSECONDS); - if (t == 0) { - t = 1; - } - return (int) (v / t); - } - - public void printAll(String label, PrintStream stream) { - MetricRateInPhase rs = this; - while (rs != null) { - stream.println(label + "@" + rs.phase + ": " + rs.rate()); - rs = rs.previous; - } - } -} diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompiler.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompiler.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompiler.java Thu Nov 21 15:04:54 2013 +0100 @@ -24,6 +24,7 @@ package com.oracle.graal.hotspot.bridge; import java.io.*; +import java.util.concurrent.*; import com.oracle.graal.api.meta.*; import com.oracle.graal.hotspot.debug.*; @@ -38,16 +39,25 @@ * Compiles a method to machine code. This method is called from the VM * (VMToCompiler::compileMethod). */ - void compileMethod(long metaspaceMethod, HotSpotResolvedObjectType holder, int entryBCI, boolean blocking) throws Throwable; + void compileMethod(long metaspaceMethod, HotSpotResolvedObjectType holder, int entryBCI, boolean blocking); /** - * Compiles a method to machine code. + * Notifies this object of statistics for a completed compilation. 
+ * + * @param id the identifier of the compilation + * @param method the method compiled + * @param osr specifies if the compilation was for on-stack-replacement + * @param processedBytecodes the number of bytecodes processed during the compilation, including + * the bytecodes of all inlined methods + * @param time the amount of time spent compiling {@code method} + * @param timeUnit the units of {@code time} + * @param installedCode the nmethod installed as a result of the compilation */ - void compileMethod(HotSpotResolvedJavaMethod method, int entryBCI, boolean blocking) throws Throwable; + void notifyCompilationDone(int id, HotSpotResolvedJavaMethod method, boolean osr, int processedBytecodes, long time, TimeUnit timeUnit, HotSpotInstalledCode installedCode); - void shutdownCompiler() throws Throwable; + void shutdownCompiler() throws Exception; - void startCompiler(boolean bootstrapEnabled) throws Throwable; + void startCompiler(boolean bootstrapEnabled, long statsAddress) throws Throwable; void bootstrap() throws Throwable; diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompilerImpl.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompilerImpl.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompilerImpl.java Thu Nov 21 15:04:54 2013 +0100 @@ -25,13 +25,12 @@ import static com.oracle.graal.compiler.GraalDebugConfig.*; import static com.oracle.graal.graph.UnsafeAccess.*; -import static com.oracle.graal.hotspot.CompilationTask.*; import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*; -import static com.oracle.graal.java.GraphBuilderPhase.*; -import static com.oracle.graal.phases.common.InliningUtil.*; +import static java.util.concurrent.TimeUnit.*; import java.io.*; import java.lang.reflect.*; +import java.security.*; import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.*; @@ -102,6 +101,8 @@ private long compilerStartTime; + private long compilerStatistics; + public VMToCompilerImpl(HotSpotGraalRuntime runtime) { this.runtime = runtime; @@ -122,7 +123,7 @@ assert unsafe.getObject(mirror, offset) == type; } - public void startCompiler(boolean bootstrapEnabled) throws Throwable { + public void startCompiler(boolean bootstrapEnabled, long compilerStatisticsAddress) throws Throwable { FastNodeClassRegistry.initialize(); @@ -149,6 +150,8 @@ } } + compilerStatistics = compilerStatisticsAddress; + TTY.initialize(log); if (Log.getValue() == null && Meter.getValue() == null && Time.getValue() == null && Dump.getValue() == null) { @@ -157,12 +160,6 @@ } } - if (config.ciTime) { - BytecodesParsed.setConditional(false); - InlinedBytecodes.setConditional(false); - CompilationTime.setConditional(false); - } - if (Debug.isEnabled()) { DebugEnvironment.initialize(log); @@ -256,10 +253,6 @@ */ protected void phaseTransition(String phase) { CompilationStatistics.clear(phase); - if (runtime.getConfig().ciTime) { - parsedBytecodesPerSecond = MetricRateInPhase.snapshot(phase, parsedBytecodesPerSecond, BytecodesParsed, CompilationTime, TimeUnit.SECONDS); - inlinedBytecodesPerSecond = MetricRateInPhase.snapshot(phase, inlinedBytecodesPerSecond, InlinedBytecodes, CompilationTime, TimeUnit.SECONDS); - } } /** @@ -325,6 +318,7 @@ if (ResetDebugValuesAfterBootstrap.getValue()) { printDebugValues("bootstrap", true); + resetCompilerStatistics(); } phaseTransition("bootstrap"); @@ -343,9 +337,6 @@ System.exit(0); } -
private MetricRateInPhase parsedBytecodesPerSecond; - private MetricRateInPhase inlinedBytecodesPerSecond; - private void enqueue(Method m) throws Throwable { JavaMethod javaMethod = runtime.getHostProviders().getMetaAccess().lookupJavaMethod(m); assert !Modifier.isAbstract(((HotSpotResolvedJavaMethod) javaMethod).getModifiers()) && !Modifier.isNative(((HotSpotResolvedJavaMethod) javaMethod).getModifiers()) : javaMethod; @@ -362,11 +353,18 @@ } } - public void shutdownCompiler() throws Throwable { + public void shutdownCompiler() throws Exception { try { assert !CompilationTask.withinEnqueue.get(); CompilationTask.withinEnqueue.set(Boolean.TRUE); - shutdownCompileQueue(compileQueue); + // We have to use a privileged action here because shutting down the compiler might be + // called from user code which very likely contains unprivileged frames. + AccessController.doPrivileged(new PrivilegedExceptionAction() { + public Void run() throws Exception { + shutdownCompileQueue(compileQueue); + return null; + } + }); } finally { CompilationTask.withinEnqueue.set(Boolean.FALSE); } @@ -374,11 +372,6 @@ printDebugValues(ResetDebugValuesAfterBootstrap.getValue() ? "application" : null, false); phaseTransition("final"); - if (runtime.getConfig().ciTime) { - parsedBytecodesPerSecond.printAll("ParsedBytecodesPerSecond", System.out); - inlinedBytecodesPerSecond.printAll("InlinedBytecodesPerSecond", System.out); - } - SnippetCounter.printGroups(TTY.out().out()); BenchmarkCounters.shutdown(runtime.getCompilerToVM(), compilerStartTime); } @@ -550,15 +543,22 @@ } @Override - public void compileMethod(long metaspaceMethod, final HotSpotResolvedObjectType holder, final int entryBCI, boolean blocking) throws Throwable { - HotSpotResolvedJavaMethod method = holder.createMethod(metaspaceMethod); - compileMethod(method, entryBCI, blocking); + public void compileMethod(long metaspaceMethod, final HotSpotResolvedObjectType holder, final int entryBCI, final boolean blocking) { + final HotSpotResolvedJavaMethod method = holder.createMethod(metaspaceMethod); + // We have to use a privileged action here because compilations are enqueued from user code + // which very likely contains unprivileged frames. + AccessController.doPrivileged(new PrivilegedAction() { + public Void run() { + compileMethod(method, entryBCI, blocking); + return null; + } + }); } /** * Compiles a method to machine code. */ - public void compileMethod(final HotSpotResolvedJavaMethod method, final int entryBCI, boolean blocking) throws Throwable { + public void compileMethod(final HotSpotResolvedJavaMethod method, final int entryBCI, final boolean blocking) { boolean osrCompilation = entryBCI != StructuredGraph.INVOCATION_ENTRY_BCI; if (osrCompilation && bootstrapRunning) { // no OSR compilations during bootstrap - the compiler is just too slow at this point, @@ -600,6 +600,64 @@ } } + private TimeUnit elapsedTimerTimeUnit; + + private TimeUnit getElapsedTimerTimeUnit() { + if (elapsedTimerTimeUnit == null) { + long freq = runtime.getConfig().elapsedTimerFrequency; + for (TimeUnit tu : TimeUnit.values()) { + if (tu.toSeconds(freq) == 1) { + elapsedTimerTimeUnit = tu; + break; + } + } + assert elapsedTimerTimeUnit != null; + } + return elapsedTimerTimeUnit; + } + + public synchronized void notifyCompilationDone(int id, HotSpotResolvedJavaMethod method, boolean osr, int processedBytecodes, long time, TimeUnit timeUnit, HotSpotInstalledCode installedCode) { + HotSpotVMConfig config = runtime.getConfig(); + long dataAddress = compilerStatistics + (osr ? 
config.compilerStatisticsOsrOffset : config.compilerStatisticsStandardOffset); + + long timeAddress = dataAddress + config.compilerStatisticsDataTimeOffset + config.elapsedTimerCounterOffset; + long previousElapsedTime = unsafe.getLong(timeAddress); + long elapsedTime = getElapsedTimerTimeUnit().convert(time, timeUnit); + unsafe.putLong(timeAddress, previousElapsedTime + elapsedTime); + + long bytesAddress = dataAddress + config.compilerStatisticsDataBytesOffset; + int currentBytes = unsafe.getInt(bytesAddress); + unsafe.putInt(bytesAddress, currentBytes + processedBytecodes); + + long countAddress = dataAddress + config.compilerStatisticsDataCountOffset; + int currentCount = unsafe.getInt(countAddress); + unsafe.putInt(countAddress, currentCount + 1); + + long nmethodsSizeAddress = compilerStatistics + config.compilerStatisticsNmethodsSizeOffset; + int currentSize = unsafe.getInt(nmethodsSizeAddress); + unsafe.putInt(nmethodsSizeAddress, currentSize + installedCode.getSize()); + + long nmethodsCodeSizeAddress = compilerStatistics + config.compilerStatisticsNmethodsCodeSizeOffset; + int currentCodeSize = unsafe.getInt(nmethodsCodeSizeAddress); + unsafe.putInt(nmethodsCodeSizeAddress, currentCodeSize + (int) installedCode.getCodeSize()); + + if (config.ciTimeEach) { + TTY.println(String.format("%-6d {%s: %d ms, %d bytes}", id, osr ? "osr" : "standard", MILLISECONDS.convert(time, timeUnit), processedBytecodes)); + } + } + + private static void resetCompilerStatisticsData(HotSpotVMConfig config, long dataAddress) { + unsafe.putInt(dataAddress + config.compilerStatisticsDataBytesOffset, 0); + unsafe.putInt(dataAddress + config.compilerStatisticsDataCountOffset, 0); + unsafe.putLong(dataAddress + config.compilerStatisticsDataTimeOffset + config.elapsedTimerCounterOffset, 0L); + } + + private void resetCompilerStatistics() { + HotSpotVMConfig config = runtime.getConfig(); + resetCompilerStatisticsData(config, compilerStatistics + config.compilerStatisticsStandardOffset); + resetCompilerStatisticsData(config, compilerStatistics + config.compilerStatisticsOsrOffset); + } + @Override public JavaMethod createUnresolvedJavaMethod(String name, String signature, JavaType holder) { return new HotSpotMethodUnresolved(name, signature, holder); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotHostLoweringProvider.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotHostLoweringProvider.java Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,725 +0,0 @@ -/* - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
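
notifyCompilationDone and resetCompilerStatisticsData above update a native CompilerStatistics struct through Unsafe, doing read-modify-write at field offsets that the VM publishes via HotSpotVMConfig. A sketch of the same access pattern with a heap ByteBuffer standing in for the native block; the offsets below are invented for illustration and do not correspond to the real struct layout:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public final class StatsAccumulatorSketch {

    // Invented layout: int _bytes, int _count, then a long tick counter.
    static final int BYTES_OFFSET = 0;
    static final int COUNT_OFFSET = 4;
    static final int TIME_OFFSET = 8;

    static void recordCompilation(ByteBuffer stats, int processedBytes, long elapsedTicks) {
        stats.putInt(BYTES_OFFSET, stats.getInt(BYTES_OFFSET) + processedBytes);
        stats.putInt(COUNT_OFFSET, stats.getInt(COUNT_OFFSET) + 1);
        stats.putLong(TIME_OFFSET, stats.getLong(TIME_OFFSET) + elapsedTicks);
    }

    public static void main(String[] args) {
        ByteBuffer stats = ByteBuffer.allocate(16).order(ByteOrder.nativeOrder());
        recordCompilation(stats, 1200, 5_000L);
        recordCompilation(stats, 800, 3_000L);
        System.out.printf("bytes=%d count=%d ticks=%d%n",
                stats.getInt(BYTES_OFFSET), stats.getInt(COUNT_OFFSET), stats.getLong(TIME_OFFSET));
    }
}
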
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.hotspot.meta; - -import static com.oracle.graal.api.code.MemoryBarriers.*; -import static com.oracle.graal.api.meta.DeoptimizationAction.*; -import static com.oracle.graal.api.meta.DeoptimizationReason.*; -import static com.oracle.graal.api.meta.LocationIdentity.*; -import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*; -import static com.oracle.graal.hotspot.meta.HotSpotHostForeignCallsProvider.*; -import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; -import static com.oracle.graal.hotspot.replacements.NewObjectSnippets.*; -import static com.oracle.graal.nodes.java.ArrayLengthNode.*; -import static com.oracle.graal.phases.GraalOptions.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.asm.*; -import com.oracle.graal.graph.*; -import com.oracle.graal.hotspot.*; -import com.oracle.graal.hotspot.debug.*; -import com.oracle.graal.hotspot.nodes.*; -import com.oracle.graal.hotspot.replacements.*; -import com.oracle.graal.java.*; -import com.oracle.graal.nodes.*; -import com.oracle.graal.nodes.HeapAccess.BarrierType; -import com.oracle.graal.nodes.calc.*; -import com.oracle.graal.nodes.debug.*; -import com.oracle.graal.nodes.extended.*; -import com.oracle.graal.nodes.java.*; -import com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind; -import com.oracle.graal.nodes.spi.*; -import com.oracle.graal.nodes.type.*; -import com.oracle.graal.nodes.virtual.*; -import com.oracle.graal.replacements.*; - -/** - * HotSpot implementation of {@link LoweringProvider}. 
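
Before the long deletion that follows: a lowering provider's job is to replace high-level IR nodes (array length, field and array accesses, monitors, allocation) with explicit reads, writes and guards. A toy sketch of the idea, with trivial classes standing in for the Graal node types and an invented header offset:

abstract class NodeSketch {
}

final class ArrayLengthSketch extends NodeSketch {
    final String array;

    ArrayLengthSketch(String array) {
        this.array = array;
    }
}

final class ReadSketch extends NodeSketch {
    final String base;
    final int offset;

    ReadSketch(String base, int offset) {
        this.base = base;
        this.offset = offset;
    }

    @Override
    public String toString() {
        return "read [" + base + " + " + offset + "]";
    }
}

public final class LoweringSketch {

    static final int ARRAY_LENGTH_OFFSET = 12; // invented, for illustration only

    // Lowering: the abstract length query becomes a concrete memory read,
    // just as the deleted provider rewrites ArrayLengthNode into a ReadNode.
    static NodeSketch lower(NodeSketch n) {
        if (n instanceof ArrayLengthSketch) {
            ArrayLengthSketch a = (ArrayLengthSketch) n;
            return new ReadSketch(a.array, ARRAY_LENGTH_OFFSET);
        }
        return n; // unrecognized nodes are left untouched
    }

    public static void main(String[] args) {
        System.out.println(lower(new ArrayLengthSketch("a"))); // read [a + 12]
    }
}
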
- */ -public class HotSpotHostLoweringProvider implements LoweringProvider { - - protected final HotSpotGraalRuntime runtime; - protected final MetaAccessProvider metaAccess; - protected final ForeignCallsProvider foreignCalls; - - private CheckCastDynamicSnippets.Templates checkcastDynamicSnippets; - private InstanceOfSnippets.Templates instanceofSnippets; - private NewObjectSnippets.Templates newObjectSnippets; - private MonitorSnippets.Templates monitorSnippets; - protected WriteBarrierSnippets.Templates writeBarrierSnippets; - private BoxingSnippets.Templates boxingSnippets; - private LoadExceptionObjectSnippets.Templates exceptionObjectSnippets; - private UnsafeLoadSnippets.Templates unsafeLoadSnippets; - - public HotSpotHostLoweringProvider(HotSpotGraalRuntime runtime, MetaAccessProvider metaAccess, ForeignCallsProvider foreignCalls) { - this.runtime = runtime; - this.metaAccess = metaAccess; - this.foreignCalls = foreignCalls; - } - - public void initialize(HotSpotProviders providers, HotSpotVMConfig config) { - TargetDescription target = providers.getCodeCache().getTarget(); - checkcastDynamicSnippets = new CheckCastDynamicSnippets.Templates(providers, target); - instanceofSnippets = new InstanceOfSnippets.Templates(providers, target); - newObjectSnippets = new NewObjectSnippets.Templates(providers, target); - monitorSnippets = new MonitorSnippets.Templates(providers, target, config.useFastLocking); - writeBarrierSnippets = new WriteBarrierSnippets.Templates(providers, target); - boxingSnippets = new BoxingSnippets.Templates(providers, target); - exceptionObjectSnippets = new LoadExceptionObjectSnippets.Templates(providers, target); - unsafeLoadSnippets = new UnsafeLoadSnippets.Templates(providers, target); - providers.getReplacements().registerSnippetTemplateCache(new UnsafeArrayCopySnippets.Templates(providers, target)); - } - - @Override - public void lower(Node n, LoweringTool tool) { - HotSpotVMConfig config = runtime.getConfig(); - StructuredGraph graph = (StructuredGraph) n.graph(); - - Kind wordKind = runtime.getTarget().wordKind; - if (n instanceof ArrayLengthNode) { - ArrayLengthNode arrayLengthNode = (ArrayLengthNode) n; - ValueNode array = arrayLengthNode.array(); - ReadNode arrayLengthRead = graph.add(new ReadNode(array, ConstantLocationNode.create(FINAL_LOCATION, Kind.Int, config.arrayLengthOffset, graph), StampFactory.positiveInt(), - BarrierType.NONE, false)); - tool.createNullCheckGuard(arrayLengthRead, array); - graph.replaceFixedWithFixed(arrayLengthNode, arrayLengthRead); - } else if (n instanceof Invoke) { - Invoke invoke = (Invoke) n; - if (invoke.callTarget() instanceof MethodCallTargetNode) { - - MethodCallTargetNode callTarget = (MethodCallTargetNode) invoke.callTarget(); - NodeInputList parameters = callTarget.arguments(); - ValueNode receiver = parameters.size() <= 0 ? null : parameters.get(0); - GuardingNode receiverNullCheck = null; - if (!callTarget.isStatic() && receiver.stamp() instanceof ObjectStamp && !ObjectStamp.isObjectNonNull(receiver)) { - receiverNullCheck = tool.createNullCheckGuard(invoke, receiver); - } - JavaType[] signature = MetaUtil.signatureToTypes(callTarget.targetMethod().getSignature(), callTarget.isStatic() ? 
null : callTarget.targetMethod().getDeclaringClass()); - - LoweredCallTargetNode loweredCallTarget = null; - if (callTarget.invokeKind() == InvokeKind.Virtual && InlineVTableStubs.getValue() && (AlwaysInlineVTableStubs.getValue() || invoke.isPolymorphic())) { - - HotSpotResolvedJavaMethod hsMethod = (HotSpotResolvedJavaMethod) callTarget.targetMethod(); - if (!hsMethod.getDeclaringClass().isInterface()) { - if (hsMethod.isInVirtualMethodTable()) { - int vtableEntryOffset = hsMethod.vtableEntryOffset(); - assert vtableEntryOffset > 0; - FloatingReadNode hub = createReadHub(graph, wordKind, receiver, receiverNullCheck); - - ReadNode metaspaceMethod = createReadVirtualMethod(graph, wordKind, hub, hsMethod); - // We use LocationNode.ANY_LOCATION for the reads that access the - // compiled code entry as HotSpot does not guarantee they are final - // values. - ReadNode compiledEntry = graph.add(new ReadNode(metaspaceMethod, ConstantLocationNode.create(ANY_LOCATION, wordKind, config.methodCompiledEntryOffset, graph), - StampFactory.forKind(wordKind), BarrierType.NONE, false)); - - loweredCallTarget = graph.add(new HotSpotIndirectCallTargetNode(metaspaceMethod, compiledEntry, parameters, invoke.asNode().stamp(), signature, callTarget.targetMethod(), - CallingConvention.Type.JavaCall)); - - graph.addBeforeFixed(invoke.asNode(), metaspaceMethod); - graph.addAfterFixed(metaspaceMethod, compiledEntry); - } - } - } - - if (loweredCallTarget == null) { - loweredCallTarget = graph.add(new HotSpotDirectCallTargetNode(parameters, invoke.asNode().stamp(), signature, callTarget.targetMethod(), CallingConvention.Type.JavaCall, - callTarget.invokeKind())); - } - callTarget.replaceAndDelete(loweredCallTarget); - } - } else if (n instanceof LoadFieldNode) { - LoadFieldNode loadField = (LoadFieldNode) n; - HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) loadField.field(); - ValueNode object = loadField.isStatic() ? ConstantNode.forObject(field.getDeclaringClass().mirror(), metaAccess, graph) : loadField.object(); - assert loadField.kind() != Kind.Illegal; - BarrierType barrierType = getFieldLoadBarrierType(field); - ReadNode memoryRead = graph.add(new ReadNode(object, createFieldLocation(graph, field, false), loadField.stamp(), barrierType, (loadField.kind() == Kind.Object))); - graph.replaceFixedWithFixed(loadField, memoryRead); - tool.createNullCheckGuard(memoryRead, object); - - if (loadField.isVolatile()) { - MembarNode preMembar = graph.add(new MembarNode(JMM_PRE_VOLATILE_READ)); - graph.addBeforeFixed(memoryRead, preMembar); - MembarNode postMembar = graph.add(new MembarNode(JMM_POST_VOLATILE_READ)); - graph.addAfterFixed(memoryRead, postMembar); - } - } else if (n instanceof StoreFieldNode) { - StoreFieldNode storeField = (StoreFieldNode) n; - HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) storeField.field(); - ValueNode object = storeField.isStatic() ? 
ConstantNode.forObject(field.getDeclaringClass().mirror(), metaAccess, graph) : storeField.object(); - BarrierType barrierType = getFieldStoreBarrierType(storeField); - WriteNode memoryWrite = graph.add(new WriteNode(object, storeField.value(), createFieldLocation(graph, field, false), barrierType, storeField.field().getKind() == Kind.Object)); - tool.createNullCheckGuard(memoryWrite, object); - memoryWrite.setStateAfter(storeField.stateAfter()); - graph.replaceFixedWithFixed(storeField, memoryWrite); - FixedWithNextNode last = memoryWrite; - FixedWithNextNode first = memoryWrite; - - if (storeField.isVolatile()) { - MembarNode preMembar = graph.add(new MembarNode(JMM_PRE_VOLATILE_WRITE)); - graph.addBeforeFixed(first, preMembar); - MembarNode postMembar = graph.add(new MembarNode(JMM_POST_VOLATILE_WRITE)); - graph.addAfterFixed(last, postMembar); - } - } else if (n instanceof CompareAndSwapNode) { - // Separate out GC barrier semantics - CompareAndSwapNode cas = (CompareAndSwapNode) n; - LocationNode location = IndexedLocationNode.create(cas.getLocationIdentity(), cas.expected().kind(), cas.displacement(), cas.offset(), graph, 1); - LoweredCompareAndSwapNode atomicNode = graph.add(new LoweredCompareAndSwapNode(cas.object(), location, cas.expected(), cas.newValue(), getCompareAndSwapBarrier(cas), - cas.expected().kind() == Kind.Object)); - atomicNode.setStateAfter(cas.stateAfter()); - graph.replaceFixedWithFixed(cas, atomicNode); - } else if (n instanceof LoadIndexedNode) { - LoadIndexedNode loadIndexed = (LoadIndexedNode) n; - GuardingNode boundsCheck = createBoundsCheck(loadIndexed, tool); - Kind elementKind = loadIndexed.elementKind(); - LocationNode arrayLocation = createArrayLocation(graph, elementKind, loadIndexed.index(), false); - ReadNode memoryRead = graph.add(new ReadNode(loadIndexed.array(), arrayLocation, loadIndexed.stamp(), BarrierType.NONE, elementKind == Kind.Object)); - memoryRead.setGuard(boundsCheck); - graph.replaceFixedWithFixed(loadIndexed, memoryRead); - } else if (n instanceof StoreIndexedNode) { - StoreIndexedNode storeIndexed = (StoreIndexedNode) n; - GuardingNode boundsCheck = createBoundsCheck(storeIndexed, tool); - Kind elementKind = storeIndexed.elementKind(); - LocationNode arrayLocation = createArrayLocation(graph, elementKind, storeIndexed.index(), false); - ValueNode value = storeIndexed.value(); - ValueNode array = storeIndexed.array(); - - CheckCastNode checkcastNode = null; - CheckCastDynamicNode checkcastDynamicNode = null; - if (elementKind == Kind.Object && !ObjectStamp.isObjectAlwaysNull(value)) { - // Store check! - ResolvedJavaType arrayType = ObjectStamp.typeOrNull(array); - if (arrayType != null && ObjectStamp.isExactType(array)) { - ResolvedJavaType elementType = arrayType.getComponentType(); - if (!MetaUtil.isJavaLangObject(elementType)) { - checkcastNode = graph.add(new CheckCastNode(elementType, value, null, true)); - graph.addBeforeFixed(storeIndexed, checkcastNode); - value = checkcastNode; - } - } else { - FloatingReadNode arrayClass = createReadHub(graph, wordKind, array, boundsCheck); - LocationNode location = ConstantLocationNode.create(FINAL_LOCATION, wordKind, config.arrayClassElementOffset, graph); - /* - * Anchor the read of the element klass to the cfg, because it is only valid - * when arrayClass is an object class, which might not be the case in other - * parts of the compiled method. 
- */ - FloatingReadNode arrayElementKlass = graph.unique(new FloatingReadNode(arrayClass, location, null, StampFactory.forKind(wordKind), BeginNode.prevBegin(storeIndexed))); - checkcastDynamicNode = graph.add(new CheckCastDynamicNode(arrayElementKlass, value, true)); - graph.addBeforeFixed(storeIndexed, checkcastDynamicNode); - value = checkcastDynamicNode; - } - } - BarrierType barrierType = getArrayStoreBarrierType(storeIndexed); - WriteNode memoryWrite = graph.add(new WriteNode(array, value, arrayLocation, barrierType, elementKind == Kind.Object)); - memoryWrite.setGuard(boundsCheck); - memoryWrite.setStateAfter(storeIndexed.stateAfter()); - graph.replaceFixedWithFixed(storeIndexed, memoryWrite); - - // Lower the associated checkcast node. - if (checkcastNode != null) { - checkcastNode.lower(tool); - } else if (checkcastDynamicNode != null) { - checkcastDynamicSnippets.lower(checkcastDynamicNode); - } - } else if (n instanceof UnsafeLoadNode) { - UnsafeLoadNode load = (UnsafeLoadNode) n; - if (load.getGuardingCondition() != null) { - boolean compressible = (!load.object().isNullConstant() && load.accessKind() == Kind.Object); - ConditionAnchorNode valueAnchorNode = graph.add(new ConditionAnchorNode(load.getGuardingCondition())); - LocationNode location = createLocation(load); - ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp(), valueAnchorNode, BarrierType.NONE, compressible)); - load.replaceAtUsages(memoryRead); - graph.replaceFixedWithFixed(load, valueAnchorNode); - graph.addAfterFixed(valueAnchorNode, memoryRead); - } else if (graph.getGuardsStage().ordinal() > StructuredGraph.GuardsStage.FLOATING_GUARDS.ordinal()) { - assert load.kind() != Kind.Illegal; - boolean compressible = (!load.object().isNullConstant() && load.accessKind() == Kind.Object); - if (addReadBarrier(load)) { - unsafeLoadSnippets.lower(load, tool); - } else { - LocationNode location = createLocation(load); - ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp(), BarrierType.NONE, compressible)); - // An unsafe read must not float outside its block otherwise - // it may float above an explicit null check on its object. 
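
The comment this hunk preserves states the key invariant: the read is pinned below the null check that guards it, because a read that floated above its guard would dereference a possibly-null object before the check runs. The same invariant at the source level:

public final class GuardedReadSketch {

    static int lengthOrDefault(int[] data) {
        if (data == null) {
            return -1; // the explicit null check: the guard
        }
        // This load is only safe because the guard above dominates it; a
        // scheduler that hoisted it past the check would crash on null input.
        return data.length;
    }

    public static void main(String[] args) {
        System.out.println(lengthOrDefault(null));            // -1
        System.out.println(lengthOrDefault(new int[]{1, 2})); // 2
    }
}
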
- memoryRead.setGuard(AbstractBeginNode.prevBegin(load)); - graph.replaceFixedWithFixed(load, memoryRead); - } - } - } else if (n instanceof UnsafeStoreNode) { - UnsafeStoreNode store = (UnsafeStoreNode) n; - LocationNode location = createLocation(store); - ValueNode object = store.object(); - BarrierType barrierType = getUnsafeStoreBarrierType(store); - WriteNode write = graph.add(new WriteNode(object, store.value(), location, barrierType, store.value().kind() == Kind.Object)); - write.setStateAfter(store.stateAfter()); - graph.replaceFixedWithFixed(store, write); - } else if (n instanceof LoadHubNode) { - LoadHubNode loadHub = (LoadHubNode) n; - assert loadHub.kind() == wordKind; - ValueNode object = loadHub.object(); - GuardingNode guard = loadHub.getGuard(); - FloatingReadNode hub = createReadHub(graph, wordKind, object, guard); - graph.replaceFloating(loadHub, hub); - } else if (n instanceof LoadMethodNode) { - LoadMethodNode loadMethodNode = (LoadMethodNode) n; - ResolvedJavaMethod method = loadMethodNode.getMethod(); - ReadNode metaspaceMethod = createReadVirtualMethod(graph, wordKind, loadMethodNode.getHub(), method); - graph.replaceFixed(loadMethodNode, metaspaceMethod); - } else if (n instanceof StoreHubNode) { - StoreHubNode storeHub = (StoreHubNode) n; - WriteNode hub = createWriteHub(graph, wordKind, storeHub.getObject(), storeHub.getValue()); - graph.replaceFixed(storeHub, hub); - } else if (n instanceof CommitAllocationNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { - CommitAllocationNode commit = (CommitAllocationNode) n; - ValueNode[] allocations = new ValueNode[commit.getVirtualObjects().size()]; - BitSet omittedValues = new BitSet(); - int valuePos = 0; - for (int objIndex = 0; objIndex < commit.getVirtualObjects().size(); objIndex++) { - VirtualObjectNode virtual = commit.getVirtualObjects().get(objIndex); - int entryCount = virtual.entryCount(); - FixedWithNextNode newObject; - if (virtual instanceof VirtualInstanceNode) { - newObject = graph.add(new NewInstanceNode(virtual.type(), true)); - graph.addBeforeFixed(commit, newObject); - allocations[objIndex] = newObject; - for (int i = 0; i < entryCount; i++) { - ValueNode value = commit.getValues().get(valuePos); - if (value instanceof VirtualObjectNode) { - value = allocations[commit.getVirtualObjects().indexOf(value)]; - } - if (value == null) { - omittedValues.set(valuePos); - } else if (!(value.isConstant() && value.asConstant().isDefaultForKind())) { - // Constant.illegal is always the defaultForKind, so it is skipped - VirtualInstanceNode virtualInstance = (VirtualInstanceNode) virtual; - Kind accessKind; - HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) virtualInstance.field(i); - if (value.kind().getStackKind() != field.getKind().getStackKind()) { - assert value.kind() == Kind.Long || value.kind() == Kind.Double; - accessKind = value.kind(); - } else { - accessKind = field.getKind(); - } - ConstantLocationNode location = ConstantLocationNode.create(INIT_LOCATION, accessKind, field.offset(), graph); - BarrierType barrierType = (virtualInstance.field(i).getKind() == Kind.Object && !useDeferredInitBarriers()) ? 
BarrierType.IMPRECISE : BarrierType.NONE; - WriteNode write = new WriteNode(newObject, value, location, barrierType, virtualInstance.field(i).getKind() == Kind.Object); - graph.addAfterFixed(newObject, graph.add(write)); - } - valuePos++; - } - } else { - ResolvedJavaType element = ((VirtualArrayNode) virtual).componentType(); - newObject = graph.add(new NewArrayNode(element, ConstantNode.forInt(entryCount, graph), true)); - graph.addBeforeFixed(commit, newObject); - allocations[objIndex] = newObject; - for (int i = 0; i < entryCount; i++) { - ValueNode value = commit.getValues().get(valuePos); - if (value instanceof VirtualObjectNode) { - value = allocations[commit.getVirtualObjects().indexOf(value)]; - } - if (value == null) { - omittedValues.set(valuePos); - } else if (!(value.isConstant() && value.asConstant().isDefaultForKind())) { - // Constant.illegal is always the defaultForKind, so it is skipped - Kind componentKind = element.getKind(); - Kind accessKind; - Kind valueKind = value.kind(); - if (valueKind.getStackKind() != componentKind.getStackKind()) { - // Given how Truffle uses unsafe, it can happen that - // valueKind is Kind.Int - // assert valueKind == Kind.Long || valueKind == Kind.Double; - accessKind = valueKind; - } else { - accessKind = componentKind; - } - - int scale = getScalingFactor(componentKind); - ConstantLocationNode location = ConstantLocationNode.create(INIT_LOCATION, accessKind, getArrayBaseOffset(componentKind) + i * scale, graph); - BarrierType barrierType = (componentKind == Kind.Object && !useDeferredInitBarriers()) ? BarrierType.IMPRECISE : BarrierType.NONE; - WriteNode write = new WriteNode(newObject, value, location, barrierType, componentKind == Kind.Object); - graph.addAfterFixed(newObject, graph.add(write)); - } - valuePos++; - } - } - } - valuePos = 0; - - for (int objIndex = 0; objIndex < commit.getVirtualObjects().size(); objIndex++) { - VirtualObjectNode virtual = commit.getVirtualObjects().get(objIndex); - int entryCount = virtual.entryCount(); - ValueNode newObject = allocations[objIndex]; - if (virtual instanceof VirtualInstanceNode) { - for (int i = 0; i < entryCount; i++) { - if (omittedValues.get(valuePos)) { - ValueNode value = commit.getValues().get(valuePos); - assert value instanceof VirtualObjectNode; - ValueNode allocValue = allocations[commit.getVirtualObjects().indexOf(value)]; - if (!(allocValue.isConstant() && allocValue.asConstant().isDefaultForKind())) { - VirtualInstanceNode virtualInstance = (VirtualInstanceNode) virtual; - assert virtualInstance.field(i).getKind() == Kind.Object; - WriteNode write = new WriteNode(newObject, allocValue, createFieldLocation(graph, (HotSpotResolvedJavaField) virtualInstance.field(i), true), - BarrierType.IMPRECISE, true); - graph.addBeforeFixed(commit, graph.add(write)); - } - } - valuePos++; - } - } else { - ResolvedJavaType element = ((VirtualArrayNode) virtual).componentType(); - for (int i = 0; i < entryCount; i++) { - if (omittedValues.get(valuePos)) { - ValueNode value = commit.getValues().get(valuePos); - assert value instanceof VirtualObjectNode; - ValueNode allocValue = allocations[commit.getVirtualObjects().indexOf(value)]; - if (!(allocValue.isConstant() && allocValue.asConstant().isDefaultForKind())) { - assert allocValue.kind() == Kind.Object; - WriteNode write = new WriteNode(newObject, allocValue, createArrayLocation(graph, element.getKind(), ConstantNode.forInt(i, graph), true), BarrierType.PRECISE, - true); - graph.addBeforeFixed(commit, graph.add(write)); - } - } - valuePos++; 
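
The materialization code above runs in two passes: the first loop allocates every virtual object and records as omitted any field or array entry whose value is another virtual object; the second loop patches those omitted slots once all targets exist. The same shape in plain Java, for two mutually referencing objects:

public final class TwoPassMaterializeSketch {

    static final class Cell {
        Cell next; // may refer to a sibling that does not exist yet
        int id;
    }

    public static void main(String[] args) {
        // Pass 1: allocate everything, leaving cross-references unset.
        Cell a = new Cell();
        Cell b = new Cell();
        a.id = 1;
        b.id = 2;

        // Pass 2: fill in the omitted values now that both targets exist.
        a.next = b;
        b.next = a;

        System.out.println(a.next.id + " " + b.next.id); // prints: 2 1
    }
}
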
- } - } - } - - for (int objIndex = 0; objIndex < commit.getVirtualObjects().size(); objIndex++) { - FixedValueAnchorNode anchor = graph.add(new FixedValueAnchorNode(allocations[objIndex])); - allocations[objIndex] = anchor; - graph.addBeforeFixed(commit, anchor); - } - for (int objIndex = 0; objIndex < commit.getVirtualObjects().size(); objIndex++) { - for (int lockDepth : commit.getLocks().get(objIndex)) { - MonitorEnterNode enter = graph.add(new MonitorEnterNode(allocations[objIndex], lockDepth)); - graph.addBeforeFixed(commit, enter); - enter.lower(tool); - } - } - for (Node usage : commit.usages().snapshot()) { - AllocatedObjectNode addObject = (AllocatedObjectNode) usage; - int index = commit.getVirtualObjects().indexOf(addObject.getVirtualObject()); - graph.replaceFloating(addObject, allocations[index]); - } - graph.removeFixed(commit); - } - } else if (n instanceof OSRStartNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { - OSRStartNode osrStart = (OSRStartNode) n; - StartNode newStart = graph.add(new StartNode()); - LocalNode buffer = graph.unique(new LocalNode(0, StampFactory.forKind(wordKind))); - ForeignCallNode migrationEnd = graph.add(new ForeignCallNode(foreignCalls, OSR_MIGRATION_END, buffer)); - migrationEnd.setStateAfter(osrStart.stateAfter()); - - newStart.setNext(migrationEnd); - FixedNode next = osrStart.next(); - osrStart.setNext(null); - migrationEnd.setNext(next); - graph.setStart(newStart); - - // mirroring the calculations in c1_GraphBuilder.cpp (setup_osr_entry_block) - int localsOffset = (graph.method().getMaxLocals() - 1) * 8; - for (OSRLocalNode osrLocal : graph.getNodes(OSRLocalNode.class)) { - int size = FrameStateBuilder.stackSlots(osrLocal.kind()); - int offset = localsOffset - (osrLocal.index() + size - 1) * 8; - IndexedLocationNode location = IndexedLocationNode.create(ANY_LOCATION, osrLocal.kind(), offset, ConstantNode.forLong(0, graph), graph, 1); - ReadNode load = graph.add(new ReadNode(buffer, location, osrLocal.stamp(), BarrierType.NONE, false)); - osrLocal.replaceAndDelete(load); - graph.addBeforeFixed(migrationEnd, load); - } - osrStart.replaceAtUsages(newStart); - osrStart.safeDelete(); - } - } else if (n instanceof DynamicCounterNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { - BenchmarkCounters.lower((DynamicCounterNode) n, runtime.getHostProviders().getRegisters(), runtime.getConfig(), wordKind); - } - } else if (n instanceof CheckCastDynamicNode) { - checkcastDynamicSnippets.lower((CheckCastDynamicNode) n); - } else if (n instanceof InstanceOfNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { - instanceofSnippets.lower((InstanceOfNode) n, tool); - } - } else if (n instanceof InstanceOfDynamicNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { - instanceofSnippets.lower((InstanceOfDynamicNode) n, tool); - } - } else if (n instanceof NewInstanceNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { - newObjectSnippets.lower((NewInstanceNode) n); - } - } else if (n instanceof NewArrayNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { - newObjectSnippets.lower((NewArrayNode) n); - } - } else if (n instanceof DynamicNewArrayNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { - newObjectSnippets.lower((DynamicNewArrayNode) n); - } - } else if (n instanceof MonitorEnterNode) { - if (graph.getGuardsStage() == 
StructuredGraph.GuardsStage.FIXED_DEOPTS) { - monitorSnippets.lower((MonitorEnterNode) n, tool); - } - } else if (n instanceof MonitorExitNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { - monitorSnippets.lower((MonitorExitNode) n, tool); - } - } else if (n instanceof G1PreWriteBarrier) { - writeBarrierSnippets.lower((G1PreWriteBarrier) n, tool); - } else if (n instanceof G1PostWriteBarrier) { - writeBarrierSnippets.lower((G1PostWriteBarrier) n, tool); - } else if (n instanceof G1ReferentFieldReadBarrier) { - writeBarrierSnippets.lower((G1ReferentFieldReadBarrier) n, tool); - } else if (n instanceof SerialWriteBarrier) { - writeBarrierSnippets.lower((SerialWriteBarrier) n, tool); - } else if (n instanceof SerialArrayRangeWriteBarrier) { - writeBarrierSnippets.lower((SerialArrayRangeWriteBarrier) n, tool); - } else if (n instanceof G1ArrayRangePreWriteBarrier) { - writeBarrierSnippets.lower((G1ArrayRangePreWriteBarrier) n, tool); - } else if (n instanceof G1ArrayRangePostWriteBarrier) { - writeBarrierSnippets.lower((G1ArrayRangePostWriteBarrier) n, tool); - } else if (n instanceof NewMultiArrayNode) { - if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { - newObjectSnippets.lower((NewMultiArrayNode) n); - } - } else if (n instanceof LoadExceptionObjectNode) { - exceptionObjectSnippets.lower((LoadExceptionObjectNode) n); - } else if (n instanceof IntegerDivNode || n instanceof IntegerRemNode || n instanceof UnsignedDivNode || n instanceof UnsignedRemNode) { - // Nothing to do for division nodes. The HotSpot signal handler catches divisions by - // zero and the MIN_VALUE / -1 cases. - } else if (n instanceof BoxNode) { - boxingSnippets.lower((BoxNode) n, tool); - } else if (n instanceof UnboxNode) { - boxingSnippets.lower((UnboxNode) n, tool); - } else { - assert false : "Node implementing Lowerable not handled: " + n; - throw GraalInternalError.shouldNotReachHere(); - } - } - - private static LocationNode createLocation(UnsafeAccessNode access) { - ValueNode offset = access.offset(); - if (offset.isConstant()) { - long offsetValue = offset.asConstant().asLong(); - return ConstantLocationNode.create(access.getLocationIdentity(), access.accessKind(), offsetValue, access.graph()); - } - - long displacement = 0; - int indexScaling = 1; - if (offset instanceof IntegerAddNode) { - IntegerAddNode integerAddNode = (IntegerAddNode) offset; - if (integerAddNode.y() instanceof ConstantNode) { - displacement = integerAddNode.y().asConstant().asLong(); - offset = integerAddNode.x(); - } - } - - if (offset instanceof LeftShiftNode) { - LeftShiftNode leftShiftNode = (LeftShiftNode) offset; - if (leftShiftNode.y() instanceof ConstantNode) { - long shift = leftShiftNode.y().asConstant().asLong(); - if (shift >= 1 && shift <= 3) { - if (shift == 1) { - indexScaling = 2; - } else if (shift == 2) { - indexScaling = 4; - } else { - indexScaling = 8; - } - offset = leftShiftNode.x(); - } - } - } - - return IndexedLocationNode.create(access.getLocationIdentity(), access.accessKind(), displacement, offset, access.graph(), indexScaling); - } - - private static boolean addReadBarrier(UnsafeLoadNode load) { - if (useG1GC() && load.graph().getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS && load.object().kind() == Kind.Object && load.accessKind() == Kind.Object && - !ObjectStamp.isObjectAlwaysNull(load.object())) { - ResolvedJavaType type = ObjectStamp.typeOrNull(load.object()); - if (type != null && !type.isArray()) { - return true; - } - } - return 
false; - } - - private static ReadNode createReadVirtualMethod(StructuredGraph graph, Kind wordKind, ValueNode hub, ResolvedJavaMethod method) { - HotSpotResolvedJavaMethod hsMethod = (HotSpotResolvedJavaMethod) method; - assert !hsMethod.getDeclaringClass().isInterface(); - assert hsMethod.isInVirtualMethodTable(); - - int vtableEntryOffset = hsMethod.vtableEntryOffset(); - assert vtableEntryOffset > 0; - // We use LocationNode.ANY_LOCATION for the reads that access the vtable - // entry as HotSpot does not guarantee that this is a final value. - ReadNode metaspaceMethod = graph.add(new ReadNode(hub, ConstantLocationNode.create(ANY_LOCATION, wordKind, vtableEntryOffset, graph), StampFactory.forKind(wordKind), BarrierType.NONE, false)); - return metaspaceMethod; - } - - private FloatingReadNode createReadHub(StructuredGraph graph, Kind wordKind, ValueNode object, GuardingNode guard) { - HotSpotVMConfig config = runtime.getConfig(); - LocationNode location = ConstantLocationNode.create(FINAL_LOCATION, wordKind, config.hubOffset, graph); - assert !object.isConstant() || object.asConstant().isNull(); - return graph.unique(new FloatingReadNode(object, location, null, StampFactory.forKind(wordKind), guard, BarrierType.NONE, config.useCompressedClassPointers)); - } - - private WriteNode createWriteHub(StructuredGraph graph, Kind wordKind, ValueNode object, ValueNode value) { - HotSpotVMConfig config = runtime.getConfig(); - LocationNode location = ConstantLocationNode.create(HUB_LOCATION, wordKind, config.hubOffset, graph); - assert !object.isConstant() || object.asConstant().isNull(); - return graph.add(new WriteNode(object, value, location, BarrierType.NONE, config.useCompressedClassPointers)); - } - - private static BarrierType getFieldLoadBarrierType(HotSpotResolvedJavaField loadField) { - BarrierType barrierType = BarrierType.NONE; - if (config().useG1GC && loadField.getKind() == Kind.Object && loadField.getDeclaringClass().mirror() == java.lang.ref.Reference.class && loadField.getName().equals("referent")) { - barrierType = BarrierType.PRECISE; - } - return barrierType; - } - - private static BarrierType getFieldStoreBarrierType(StoreFieldNode storeField) { - BarrierType barrierType = BarrierType.NONE; - if (storeField.field().getKind() == Kind.Object) { - barrierType = BarrierType.IMPRECISE; - } - return barrierType; - } - - private static BarrierType getArrayStoreBarrierType(StoreIndexedNode store) { - BarrierType barrierType = BarrierType.NONE; - if (store.elementKind() == Kind.Object) { - barrierType = BarrierType.PRECISE; - } - return barrierType; - } - - private static BarrierType getUnsafeStoreBarrierType(UnsafeStoreNode store) { - BarrierType barrierType = BarrierType.NONE; - if (store.value().kind() == Kind.Object) { - ResolvedJavaType type = ObjectStamp.typeOrNull(store.object()); - if (type != null && !type.isArray()) { - barrierType = BarrierType.IMPRECISE; - } else { - barrierType = BarrierType.PRECISE; - } - } - return barrierType; - } - - private static BarrierType getCompareAndSwapBarrier(CompareAndSwapNode cas) { - BarrierType barrierType = BarrierType.NONE; - if (cas.expected().kind() == Kind.Object) { - ResolvedJavaType type = ObjectStamp.typeOrNull(cas.object()); - if (type != null && !type.isArray()) { - barrierType = BarrierType.IMPRECISE; - } else { - barrierType = BarrierType.PRECISE; - } - } - return barrierType; - } - - protected static ConstantLocationNode createFieldLocation(StructuredGraph graph, HotSpotResolvedJavaField field, boolean initialization) { - 
LocationIdentity loc = initialization ? INIT_LOCATION : field; - return ConstantLocationNode.create(loc, field.getKind(), field.offset(), graph); - } - - public int getScalingFactor(Kind kind) { - if (useCompressedOops() && kind == Kind.Object) { - return this.runtime.getTarget().arch.getSizeInBytes(Kind.Int); - } else { - return this.runtime.getTarget().arch.getSizeInBytes(kind); - } - } - - protected IndexedLocationNode createArrayLocation(Graph graph, Kind elementKind, ValueNode index, boolean initialization) { - LocationIdentity loc = initialization ? INIT_LOCATION : NamedLocationIdentity.getArrayLocation(elementKind); - int scale = getScalingFactor(elementKind); - return IndexedLocationNode.create(loc, elementKind, getArrayBaseOffset(elementKind), index, graph, scale); - } - - @Override - public ValueNode reconstructArrayIndex(LocationNode location) { - Kind elementKind = location.getValueKind(); - assert location.getLocationIdentity().equals(NamedLocationIdentity.getArrayLocation(elementKind)); - - long base; - ValueNode index; - int scale = getScalingFactor(elementKind); - - if (location instanceof ConstantLocationNode) { - base = ((ConstantLocationNode) location).getDisplacement(); - index = null; - } else if (location instanceof IndexedLocationNode) { - IndexedLocationNode indexedLocation = (IndexedLocationNode) location; - assert indexedLocation.getIndexScaling() == scale; - base = indexedLocation.getDisplacement(); - index = indexedLocation.getIndex(); - } else { - throw GraalInternalError.shouldNotReachHere(); - } - - base -= getArrayBaseOffset(elementKind); - assert base >= 0 && base % scale == 0; - - base /= scale; - assert NumUtil.isInt(base); - - StructuredGraph graph = location.graph(); - if (index == null) { - return ConstantNode.forInt((int) base, graph); - } else { - if (base == 0) { - return index; - } else { - return IntegerArithmeticNode.add(graph, ConstantNode.forInt((int) base, graph), index); - } - } - } - - private GuardingNode createBoundsCheck(AccessIndexedNode n, LoweringTool tool) { - StructuredGraph g = n.graph(); - ValueNode array = n.array(); - ValueNode arrayLength = readArrayLength(n.graph(), array, tool.getConstantReflection()); - if (arrayLength == null) { - Stamp stamp = StampFactory.positiveInt(); - ReadNode readArrayLength = g.add(new ReadNode(array, ConstantLocationNode.create(FINAL_LOCATION, Kind.Int, runtime.getConfig().arrayLengthOffset, g), stamp, BarrierType.NONE, false)); - g.addBeforeFixed(n, readArrayLength); - tool.createNullCheckGuard(readArrayLength, array); - arrayLength = readArrayLength; - } - - return tool.createGuard(g.unique(new IntegerBelowThanNode(n.index(), arrayLength)), BoundsCheckException, InvalidateReprofile); - } - -} diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotLoweringProvider.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotLoweringProvider.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,727 @@ +/* + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.hotspot.meta; + +import static com.oracle.graal.api.code.MemoryBarriers.*; +import static com.oracle.graal.api.meta.DeoptimizationAction.*; +import static com.oracle.graal.api.meta.DeoptimizationReason.*; +import static com.oracle.graal.api.meta.LocationIdentity.*; +import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*; +import static com.oracle.graal.hotspot.meta.HotSpotHostForeignCallsProvider.*; +import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.replacements.NewObjectSnippets.*; +import static com.oracle.graal.nodes.java.ArrayLengthNode.*; +import static com.oracle.graal.phases.GraalOptions.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.asm.*; +import com.oracle.graal.graph.*; +import com.oracle.graal.hotspot.*; +import com.oracle.graal.hotspot.debug.*; +import com.oracle.graal.hotspot.nodes.*; +import com.oracle.graal.hotspot.replacements.*; +import com.oracle.graal.java.*; +import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.HeapAccess.BarrierType; +import com.oracle.graal.nodes.calc.*; +import com.oracle.graal.nodes.debug.*; +import com.oracle.graal.nodes.extended.*; +import com.oracle.graal.nodes.java.*; +import com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind; +import com.oracle.graal.nodes.spi.*; +import com.oracle.graal.nodes.type.*; +import com.oracle.graal.nodes.virtual.*; +import com.oracle.graal.replacements.*; + +/** + * HotSpot implementation of {@link LoweringProvider}. 
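+ * It rewrites high-level nodes (field and array accesses, invokes, allocations, monitors, unsafe accesses and GC barriers) into explicit reads, writes and snippet instantiations, keyed off the graph's current guards stage.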
+ */ +public class HotSpotLoweringProvider implements LoweringProvider { + + protected final HotSpotGraalRuntime runtime; + protected final MetaAccessProvider metaAccess; + protected final ForeignCallsProvider foreignCalls; + protected final HotSpotRegistersProvider registers; + + protected CheckCastDynamicSnippets.Templates checkcastDynamicSnippets; + protected InstanceOfSnippets.Templates instanceofSnippets; + protected NewObjectSnippets.Templates newObjectSnippets; + protected MonitorSnippets.Templates monitorSnippets; + protected WriteBarrierSnippets.Templates writeBarrierSnippets; + protected BoxingSnippets.Templates boxingSnippets; + protected LoadExceptionObjectSnippets.Templates exceptionObjectSnippets; + protected UnsafeLoadSnippets.Templates unsafeLoadSnippets; + + public HotSpotLoweringProvider(HotSpotGraalRuntime runtime, MetaAccessProvider metaAccess, ForeignCallsProvider foreignCalls, HotSpotRegistersProvider registers) { + this.runtime = runtime; + this.metaAccess = metaAccess; + this.foreignCalls = foreignCalls; + this.registers = registers; + } + + public void initialize(HotSpotProviders providers, HotSpotVMConfig config) { + TargetDescription target = providers.getCodeCache().getTarget(); + checkcastDynamicSnippets = new CheckCastDynamicSnippets.Templates(providers, target); + instanceofSnippets = new InstanceOfSnippets.Templates(providers, target); + newObjectSnippets = new NewObjectSnippets.Templates(providers, target); + monitorSnippets = new MonitorSnippets.Templates(providers, target, config.useFastLocking); + writeBarrierSnippets = new WriteBarrierSnippets.Templates(providers, target); + boxingSnippets = new BoxingSnippets.Templates(providers, target); + exceptionObjectSnippets = new LoadExceptionObjectSnippets.Templates(providers, target); + unsafeLoadSnippets = new UnsafeLoadSnippets.Templates(providers, target); + providers.getReplacements().registerSnippetTemplateCache(new UnsafeArrayCopySnippets.Templates(providers, target)); + } + + @Override + public void lower(Node n, LoweringTool tool) { + HotSpotVMConfig config = runtime.getConfig(); + StructuredGraph graph = (StructuredGraph) n.graph(); + + Kind wordKind = runtime.getTarget().wordKind; + if (n instanceof ArrayLengthNode) { + ArrayLengthNode arrayLengthNode = (ArrayLengthNode) n; + ValueNode array = arrayLengthNode.array(); + ReadNode arrayLengthRead = graph.add(new ReadNode(array, ConstantLocationNode.create(FINAL_LOCATION, Kind.Int, config.arrayLengthOffset, graph), StampFactory.positiveInt(), + BarrierType.NONE, false)); + tool.createNullCheckGuard(arrayLengthRead, array); + graph.replaceFixedWithFixed(arrayLengthNode, arrayLengthRead); + } else if (n instanceof Invoke) { + Invoke invoke = (Invoke) n; + if (invoke.callTarget() instanceof MethodCallTargetNode) { + + MethodCallTargetNode callTarget = (MethodCallTargetNode) invoke.callTarget(); + NodeInputList<ValueNode> parameters = callTarget.arguments(); + ValueNode receiver = parameters.size() <= 0 ? null : parameters.get(0); + GuardingNode receiverNullCheck = null; + if (!callTarget.isStatic() && receiver.stamp() instanceof ObjectStamp && !ObjectStamp.isObjectNonNull(receiver)) { + receiverNullCheck = tool.createNullCheckGuard(invoke, receiver); + } + JavaType[] signature = MetaUtil.signatureToTypes(callTarget.targetMethod().getSignature(), callTarget.isStatic() ?
null : callTarget.targetMethod().getDeclaringClass()); + + LoweredCallTargetNode loweredCallTarget = null; + if (callTarget.invokeKind() == InvokeKind.Virtual && InlineVTableStubs.getValue() && (AlwaysInlineVTableStubs.getValue() || invoke.isPolymorphic())) { + + HotSpotResolvedJavaMethod hsMethod = (HotSpotResolvedJavaMethod) callTarget.targetMethod(); + if (!hsMethod.getDeclaringClass().isInterface()) { + if (hsMethod.isInVirtualMethodTable()) { + int vtableEntryOffset = hsMethod.vtableEntryOffset(); + assert vtableEntryOffset > 0; + FloatingReadNode hub = createReadHub(graph, wordKind, receiver, receiverNullCheck); + + ReadNode metaspaceMethod = createReadVirtualMethod(graph, wordKind, hub, hsMethod); + // We use LocationNode.ANY_LOCATION for the reads that access the + // compiled code entry as HotSpot does not guarantee they are final + // values. + ReadNode compiledEntry = graph.add(new ReadNode(metaspaceMethod, ConstantLocationNode.create(ANY_LOCATION, wordKind, config.methodCompiledEntryOffset, graph), + StampFactory.forKind(wordKind), BarrierType.NONE, false)); + + loweredCallTarget = graph.add(new HotSpotIndirectCallTargetNode(metaspaceMethod, compiledEntry, parameters, invoke.asNode().stamp(), signature, callTarget.targetMethod(), + CallingConvention.Type.JavaCall)); + + graph.addBeforeFixed(invoke.asNode(), metaspaceMethod); + graph.addAfterFixed(metaspaceMethod, compiledEntry); + } + } + } + + if (loweredCallTarget == null) { + loweredCallTarget = graph.add(new HotSpotDirectCallTargetNode(parameters, invoke.asNode().stamp(), signature, callTarget.targetMethod(), CallingConvention.Type.JavaCall, + callTarget.invokeKind())); + } + callTarget.replaceAndDelete(loweredCallTarget); + } + } else if (n instanceof LoadFieldNode) { + LoadFieldNode loadField = (LoadFieldNode) n; + HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) loadField.field(); + ValueNode object = loadField.isStatic() ? ConstantNode.forObject(field.getDeclaringClass().mirror(), metaAccess, graph) : loadField.object(); + assert loadField.kind() != Kind.Illegal; + BarrierType barrierType = getFieldLoadBarrierType(field); + ReadNode memoryRead = graph.add(new ReadNode(object, createFieldLocation(graph, field, false), loadField.stamp(), barrierType, (loadField.kind() == Kind.Object))); + graph.replaceFixedWithFixed(loadField, memoryRead); + tool.createNullCheckGuard(memoryRead, object); + + if (loadField.isVolatile()) { + MembarNode preMembar = graph.add(new MembarNode(JMM_PRE_VOLATILE_READ)); + graph.addBeforeFixed(memoryRead, preMembar); + MembarNode postMembar = graph.add(new MembarNode(JMM_POST_VOLATILE_READ)); + graph.addAfterFixed(memoryRead, postMembar); + } + } else if (n instanceof StoreFieldNode) { + StoreFieldNode storeField = (StoreFieldNode) n; + HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) storeField.field(); + ValueNode object = storeField.isStatic() ? 
ConstantNode.forObject(field.getDeclaringClass().mirror(), metaAccess, graph) : storeField.object(); + BarrierType barrierType = getFieldStoreBarrierType(storeField); + WriteNode memoryWrite = graph.add(new WriteNode(object, storeField.value(), createFieldLocation(graph, field, false), barrierType, storeField.field().getKind() == Kind.Object)); + tool.createNullCheckGuard(memoryWrite, object); + memoryWrite.setStateAfter(storeField.stateAfter()); + graph.replaceFixedWithFixed(storeField, memoryWrite); + FixedWithNextNode last = memoryWrite; + FixedWithNextNode first = memoryWrite; + + if (storeField.isVolatile()) { + MembarNode preMembar = graph.add(new MembarNode(JMM_PRE_VOLATILE_WRITE)); + graph.addBeforeFixed(first, preMembar); + MembarNode postMembar = graph.add(new MembarNode(JMM_POST_VOLATILE_WRITE)); + graph.addAfterFixed(last, postMembar); + } + } else if (n instanceof CompareAndSwapNode) { + // Separate out GC barrier semantics + CompareAndSwapNode cas = (CompareAndSwapNode) n; + LocationNode location = IndexedLocationNode.create(cas.getLocationIdentity(), cas.expected().kind(), cas.displacement(), cas.offset(), graph, 1); + LoweredCompareAndSwapNode atomicNode = graph.add(new LoweredCompareAndSwapNode(cas.object(), location, cas.expected(), cas.newValue(), getCompareAndSwapBarrier(cas), + cas.expected().kind() == Kind.Object)); + atomicNode.setStateAfter(cas.stateAfter()); + graph.replaceFixedWithFixed(cas, atomicNode); + } else if (n instanceof LoadIndexedNode) { + LoadIndexedNode loadIndexed = (LoadIndexedNode) n; + GuardingNode boundsCheck = createBoundsCheck(loadIndexed, tool); + Kind elementKind = loadIndexed.elementKind(); + LocationNode arrayLocation = createArrayLocation(graph, elementKind, loadIndexed.index(), false); + ReadNode memoryRead = graph.add(new ReadNode(loadIndexed.array(), arrayLocation, loadIndexed.stamp(), BarrierType.NONE, elementKind == Kind.Object)); + memoryRead.setGuard(boundsCheck); + graph.replaceFixedWithFixed(loadIndexed, memoryRead); + } else if (n instanceof StoreIndexedNode) { + StoreIndexedNode storeIndexed = (StoreIndexedNode) n; + GuardingNode boundsCheck = createBoundsCheck(storeIndexed, tool); + Kind elementKind = storeIndexed.elementKind(); + LocationNode arrayLocation = createArrayLocation(graph, elementKind, storeIndexed.index(), false); + ValueNode value = storeIndexed.value(); + ValueNode array = storeIndexed.array(); + + CheckCastNode checkcastNode = null; + CheckCastDynamicNode checkcastDynamicNode = null; + if (elementKind == Kind.Object && !ObjectStamp.isObjectAlwaysNull(value)) { + // Store check! + ResolvedJavaType arrayType = ObjectStamp.typeOrNull(array); + if (arrayType != null && ObjectStamp.isExactType(array)) { + ResolvedJavaType elementType = arrayType.getComponentType(); + if (!MetaUtil.isJavaLangObject(elementType)) { + checkcastNode = graph.add(new CheckCastNode(elementType, value, null, true)); + graph.addBeforeFixed(storeIndexed, checkcastNode); + value = checkcastNode; + } + } else { + FloatingReadNode arrayClass = createReadHub(graph, wordKind, array, boundsCheck); + LocationNode location = ConstantLocationNode.create(FINAL_LOCATION, wordKind, config.arrayClassElementOffset, graph); + /* + * Anchor the read of the element klass to the cfg, because it is only valid + * when arrayClass is an object class, which might not be the case in other + * parts of the compiled method. 
+ */ + FloatingReadNode arrayElementKlass = graph.unique(new FloatingReadNode(arrayClass, location, null, StampFactory.forKind(wordKind), BeginNode.prevBegin(storeIndexed))); + checkcastDynamicNode = graph.add(new CheckCastDynamicNode(arrayElementKlass, value, true)); + graph.addBeforeFixed(storeIndexed, checkcastDynamicNode); + value = checkcastDynamicNode; + } + } + BarrierType barrierType = getArrayStoreBarrierType(storeIndexed); + WriteNode memoryWrite = graph.add(new WriteNode(array, value, arrayLocation, barrierType, elementKind == Kind.Object)); + memoryWrite.setGuard(boundsCheck); + memoryWrite.setStateAfter(storeIndexed.stateAfter()); + graph.replaceFixedWithFixed(storeIndexed, memoryWrite); + + // Lower the associated checkcast node. + if (checkcastNode != null) { + checkcastNode.lower(tool); + } else if (checkcastDynamicNode != null) { + checkcastDynamicSnippets.lower(checkcastDynamicNode); + } + } else if (n instanceof UnsafeLoadNode) { + UnsafeLoadNode load = (UnsafeLoadNode) n; + if (load.getGuardingCondition() != null) { + boolean compressible = (!load.object().isNullConstant() && load.accessKind() == Kind.Object); + ConditionAnchorNode valueAnchorNode = graph.add(new ConditionAnchorNode(load.getGuardingCondition())); + LocationNode location = createLocation(load); + ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp(), valueAnchorNode, BarrierType.NONE, compressible)); + load.replaceAtUsages(memoryRead); + graph.replaceFixedWithFixed(load, valueAnchorNode); + graph.addAfterFixed(valueAnchorNode, memoryRead); + } else if (graph.getGuardsStage().ordinal() > StructuredGraph.GuardsStage.FLOATING_GUARDS.ordinal()) { + assert load.kind() != Kind.Illegal; + boolean compressible = (!load.object().isNullConstant() && load.accessKind() == Kind.Object); + if (addReadBarrier(load)) { + unsafeLoadSnippets.lower(load, tool); + } else { + LocationNode location = createLocation(load); + ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp(), BarrierType.NONE, compressible)); + // An unsafe read must not float outside its block otherwise + // it may float above an explicit null check on its object. 
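+ // Pinning the read to the nearest preceding begin node keeps it ordered after any guards already emitted in this block.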
+ memoryRead.setGuard(AbstractBeginNode.prevBegin(load)); + graph.replaceFixedWithFixed(load, memoryRead); + } + } + } else if (n instanceof UnsafeStoreNode) { + UnsafeStoreNode store = (UnsafeStoreNode) n; + LocationNode location = createLocation(store); + ValueNode object = store.object(); + BarrierType barrierType = getUnsafeStoreBarrierType(store); + WriteNode write = graph.add(new WriteNode(object, store.value(), location, barrierType, store.value().kind() == Kind.Object)); + write.setStateAfter(store.stateAfter()); + graph.replaceFixedWithFixed(store, write); + } else if (n instanceof LoadHubNode) { + LoadHubNode loadHub = (LoadHubNode) n; + assert loadHub.kind() == wordKind; + ValueNode object = loadHub.object(); + GuardingNode guard = loadHub.getGuard(); + FloatingReadNode hub = createReadHub(graph, wordKind, object, guard); + graph.replaceFloating(loadHub, hub); + } else if (n instanceof LoadMethodNode) { + LoadMethodNode loadMethodNode = (LoadMethodNode) n; + ResolvedJavaMethod method = loadMethodNode.getMethod(); + ReadNode metaspaceMethod = createReadVirtualMethod(graph, wordKind, loadMethodNode.getHub(), method); + graph.replaceFixed(loadMethodNode, metaspaceMethod); + } else if (n instanceof StoreHubNode) { + StoreHubNode storeHub = (StoreHubNode) n; + WriteNode hub = createWriteHub(graph, wordKind, storeHub.getObject(), storeHub.getValue()); + graph.replaceFixed(storeHub, hub); + } else if (n instanceof CommitAllocationNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { + CommitAllocationNode commit = (CommitAllocationNode) n; + ValueNode[] allocations = new ValueNode[commit.getVirtualObjects().size()]; + BitSet omittedValues = new BitSet(); + int valuePos = 0; + for (int objIndex = 0; objIndex < commit.getVirtualObjects().size(); objIndex++) { + VirtualObjectNode virtual = commit.getVirtualObjects().get(objIndex); + int entryCount = virtual.entryCount(); + FixedWithNextNode newObject; + if (virtual instanceof VirtualInstanceNode) { + newObject = graph.add(new NewInstanceNode(virtual.type(), true)); + graph.addBeforeFixed(commit, newObject); + allocations[objIndex] = newObject; + for (int i = 0; i < entryCount; i++) { + ValueNode value = commit.getValues().get(valuePos); + if (value instanceof VirtualObjectNode) { + value = allocations[commit.getVirtualObjects().indexOf(value)]; + } + if (value == null) { + omittedValues.set(valuePos); + } else if (!(value.isConstant() && value.asConstant().isDefaultForKind())) { + // Constant.illegal is always the defaultForKind, so it is skipped + VirtualInstanceNode virtualInstance = (VirtualInstanceNode) virtual; + Kind accessKind; + HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) virtualInstance.field(i); + if (value.kind().getStackKind() != field.getKind().getStackKind()) { + assert value.kind() == Kind.Long || value.kind() == Kind.Double; + accessKind = value.kind(); + } else { + accessKind = field.getKind(); + } + ConstantLocationNode location = ConstantLocationNode.create(INIT_LOCATION, accessKind, field.offset(), graph); + BarrierType barrierType = (virtualInstance.field(i).getKind() == Kind.Object && !useDeferredInitBarriers()) ? 
BarrierType.IMPRECISE : BarrierType.NONE; + WriteNode write = new WriteNode(newObject, value, location, barrierType, virtualInstance.field(i).getKind() == Kind.Object); + graph.addAfterFixed(newObject, graph.add(write)); + } + valuePos++; + } + } else { + ResolvedJavaType element = ((VirtualArrayNode) virtual).componentType(); + newObject = graph.add(new NewArrayNode(element, ConstantNode.forInt(entryCount, graph), true)); + graph.addBeforeFixed(commit, newObject); + allocations[objIndex] = newObject; + for (int i = 0; i < entryCount; i++) { + ValueNode value = commit.getValues().get(valuePos); + if (value instanceof VirtualObjectNode) { + value = allocations[commit.getVirtualObjects().indexOf(value)]; + } + if (value == null) { + omittedValues.set(valuePos); + } else if (!(value.isConstant() && value.asConstant().isDefaultForKind())) { + // Constant.illegal is always the defaultForKind, so it is skipped + Kind componentKind = element.getKind(); + Kind accessKind; + Kind valueKind = value.kind(); + if (valueKind.getStackKind() != componentKind.getStackKind()) { + // Given how Truffle uses unsafe, it can happen that + // valueKind is Kind.Int + // assert valueKind == Kind.Long || valueKind == Kind.Double; + accessKind = valueKind; + } else { + accessKind = componentKind; + } + + int scale = getScalingFactor(componentKind); + ConstantLocationNode location = ConstantLocationNode.create(INIT_LOCATION, accessKind, getArrayBaseOffset(componentKind) + i * scale, graph); + BarrierType barrierType = (componentKind == Kind.Object && !useDeferredInitBarriers()) ? BarrierType.IMPRECISE : BarrierType.NONE; + WriteNode write = new WriteNode(newObject, value, location, barrierType, componentKind == Kind.Object); + graph.addAfterFixed(newObject, graph.add(write)); + } + valuePos++; + } + } + } + valuePos = 0; + + for (int objIndex = 0; objIndex < commit.getVirtualObjects().size(); objIndex++) { + VirtualObjectNode virtual = commit.getVirtualObjects().get(objIndex); + int entryCount = virtual.entryCount(); + ValueNode newObject = allocations[objIndex]; + if (virtual instanceof VirtualInstanceNode) { + for (int i = 0; i < entryCount; i++) { + if (omittedValues.get(valuePos)) { + ValueNode value = commit.getValues().get(valuePos); + assert value instanceof VirtualObjectNode; + ValueNode allocValue = allocations[commit.getVirtualObjects().indexOf(value)]; + if (!(allocValue.isConstant() && allocValue.asConstant().isDefaultForKind())) { + VirtualInstanceNode virtualInstance = (VirtualInstanceNode) virtual; + assert virtualInstance.field(i).getKind() == Kind.Object; + WriteNode write = new WriteNode(newObject, allocValue, createFieldLocation(graph, (HotSpotResolvedJavaField) virtualInstance.field(i), true), + BarrierType.IMPRECISE, true); + graph.addBeforeFixed(commit, graph.add(write)); + } + } + valuePos++; + } + } else { + ResolvedJavaType element = ((VirtualArrayNode) virtual).componentType(); + for (int i = 0; i < entryCount; i++) { + if (omittedValues.get(valuePos)) { + ValueNode value = commit.getValues().get(valuePos); + assert value instanceof VirtualObjectNode; + ValueNode allocValue = allocations[commit.getVirtualObjects().indexOf(value)]; + if (!(allocValue.isConstant() && allocValue.asConstant().isDefaultForKind())) { + assert allocValue.kind() == Kind.Object; + WriteNode write = new WriteNode(newObject, allocValue, createArrayLocation(graph, element.getKind(), ConstantNode.forInt(i, graph), true), BarrierType.PRECISE, + true); + graph.addBeforeFixed(commit, graph.add(write)); + } + } + valuePos++; 
+ } + } + } + + for (int objIndex = 0; objIndex < commit.getVirtualObjects().size(); objIndex++) { + FixedValueAnchorNode anchor = graph.add(new FixedValueAnchorNode(allocations[objIndex])); + allocations[objIndex] = anchor; + graph.addBeforeFixed(commit, anchor); + } + for (int objIndex = 0; objIndex < commit.getVirtualObjects().size(); objIndex++) { + for (int lockDepth : commit.getLocks().get(objIndex)) { + MonitorEnterNode enter = graph.add(new MonitorEnterNode(allocations[objIndex], lockDepth)); + graph.addBeforeFixed(commit, enter); + enter.lower(tool); + } + } + for (Node usage : commit.usages().snapshot()) { + AllocatedObjectNode addObject = (AllocatedObjectNode) usage; + int index = commit.getVirtualObjects().indexOf(addObject.getVirtualObject()); + graph.replaceFloating(addObject, allocations[index]); + } + graph.removeFixed(commit); + } + } else if (n instanceof OSRStartNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { + OSRStartNode osrStart = (OSRStartNode) n; + StartNode newStart = graph.add(new StartNode()); + LocalNode buffer = graph.unique(new LocalNode(0, StampFactory.forKind(wordKind))); + ForeignCallNode migrationEnd = graph.add(new ForeignCallNode(foreignCalls, OSR_MIGRATION_END, buffer)); + migrationEnd.setStateAfter(osrStart.stateAfter()); + + newStart.setNext(migrationEnd); + FixedNode next = osrStart.next(); + osrStart.setNext(null); + migrationEnd.setNext(next); + graph.setStart(newStart); + + // mirroring the calculations in c1_GraphBuilder.cpp (setup_osr_entry_block) + int localsOffset = (graph.method().getMaxLocals() - 1) * 8; + for (OSRLocalNode osrLocal : graph.getNodes(OSRLocalNode.class)) { + int size = FrameStateBuilder.stackSlots(osrLocal.kind()); + int offset = localsOffset - (osrLocal.index() + size - 1) * 8; + IndexedLocationNode location = IndexedLocationNode.create(ANY_LOCATION, osrLocal.kind(), offset, ConstantNode.forLong(0, graph), graph, 1); + ReadNode load = graph.add(new ReadNode(buffer, location, osrLocal.stamp(), BarrierType.NONE, false)); + osrLocal.replaceAndDelete(load); + graph.addBeforeFixed(migrationEnd, load); + } + osrStart.replaceAtUsages(newStart); + osrStart.safeDelete(); + } + } else if (n instanceof DynamicCounterNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { + BenchmarkCounters.lower((DynamicCounterNode) n, registers, runtime.getConfig(), wordKind); + } + } else if (n instanceof CheckCastDynamicNode) { + checkcastDynamicSnippets.lower((CheckCastDynamicNode) n); + } else if (n instanceof InstanceOfNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { + instanceofSnippets.lower((InstanceOfNode) n, tool); + } + } else if (n instanceof InstanceOfDynamicNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { + instanceofSnippets.lower((InstanceOfDynamicNode) n, tool); + } + } else if (n instanceof NewInstanceNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { + newObjectSnippets.lower((NewInstanceNode) n, registers); + } + } else if (n instanceof NewArrayNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { + newObjectSnippets.lower((NewArrayNode) n, registers); + } + } else if (n instanceof DynamicNewArrayNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { + newObjectSnippets.lower((DynamicNewArrayNode) n, registers); + } + } else if (n instanceof MonitorEnterNode) { + if (graph.getGuardsStage() == 
StructuredGraph.GuardsStage.FIXED_DEOPTS) { + monitorSnippets.lower((MonitorEnterNode) n, registers); + } + } else if (n instanceof MonitorExitNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) { + monitorSnippets.lower((MonitorExitNode) n, tool); + } + } else if (n instanceof G1PreWriteBarrier) { + writeBarrierSnippets.lower((G1PreWriteBarrier) n, registers); + } else if (n instanceof G1PostWriteBarrier) { + writeBarrierSnippets.lower((G1PostWriteBarrier) n, registers); + } else if (n instanceof G1ReferentFieldReadBarrier) { + writeBarrierSnippets.lower((G1ReferentFieldReadBarrier) n, registers); + } else if (n instanceof SerialWriteBarrier) { + writeBarrierSnippets.lower((SerialWriteBarrier) n); + } else if (n instanceof SerialArrayRangeWriteBarrier) { + writeBarrierSnippets.lower((SerialArrayRangeWriteBarrier) n); + } else if (n instanceof G1ArrayRangePreWriteBarrier) { + writeBarrierSnippets.lower((G1ArrayRangePreWriteBarrier) n, registers); + } else if (n instanceof G1ArrayRangePostWriteBarrier) { + writeBarrierSnippets.lower((G1ArrayRangePostWriteBarrier) n, registers); + } else if (n instanceof NewMultiArrayNode) { + if (graph.getGuardsStage() == StructuredGraph.GuardsStage.AFTER_FSA) { + newObjectSnippets.lower((NewMultiArrayNode) n); + } + } else if (n instanceof LoadExceptionObjectNode) { + exceptionObjectSnippets.lower((LoadExceptionObjectNode) n, registers); + } else if (n instanceof IntegerDivNode || n instanceof IntegerRemNode || n instanceof UnsignedDivNode || n instanceof UnsignedRemNode) { + // Nothing to do for division nodes. The HotSpot signal handler catches divisions by + // zero and the MIN_VALUE / -1 cases. + } else if (n instanceof BoxNode) { + boxingSnippets.lower((BoxNode) n, tool); + } else if (n instanceof UnboxNode) { + boxingSnippets.lower((UnboxNode) n, tool); + } else { + assert false : "Node implementing Lowerable not handled: " + n; + throw GraalInternalError.shouldNotReachHere(); + } + } + + private static LocationNode createLocation(UnsafeAccessNode access) { + ValueNode offset = access.offset(); + if (offset.isConstant()) { + long offsetValue = offset.asConstant().asLong(); + return ConstantLocationNode.create(access.getLocationIdentity(), access.accessKind(), offsetValue, access.graph()); + } + + long displacement = 0; + int indexScaling = 1; + if (offset instanceof IntegerAddNode) { + IntegerAddNode integerAddNode = (IntegerAddNode) offset; + if (integerAddNode.y() instanceof ConstantNode) { + displacement = integerAddNode.y().asConstant().asLong(); + offset = integerAddNode.x(); + } + } + + if (offset instanceof LeftShiftNode) { + LeftShiftNode leftShiftNode = (LeftShiftNode) offset; + if (leftShiftNode.y() instanceof ConstantNode) { + long shift = leftShiftNode.y().asConstant().asLong(); + if (shift >= 1 && shift <= 3) { + if (shift == 1) { + indexScaling = 2; + } else if (shift == 2) { + indexScaling = 4; + } else { + indexScaling = 8; + } + offset = leftShiftNode.x(); + } + } + } + + return IndexedLocationNode.create(access.getLocationIdentity(), access.accessKind(), displacement, offset, access.graph(), indexScaling); + } + + private static boolean addReadBarrier(UnsafeLoadNode load) { + if (useG1GC() && load.graph().getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS && load.object().kind() == Kind.Object && load.accessKind() == Kind.Object && + !ObjectStamp.isObjectAlwaysNull(load.object())) { + ResolvedJavaType type = ObjectStamp.typeOrNull(load.object()); + if (type != null && !type.isArray()) { + 
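+ // A non-array object here may be a java.lang.ref.Reference whose referent field is being read, so the load is routed through the G1 read-barrier snippet.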
return true; + } + } + return false; + } + + private static ReadNode createReadVirtualMethod(StructuredGraph graph, Kind wordKind, ValueNode hub, ResolvedJavaMethod method) { + HotSpotResolvedJavaMethod hsMethod = (HotSpotResolvedJavaMethod) method; + assert !hsMethod.getDeclaringClass().isInterface(); + assert hsMethod.isInVirtualMethodTable(); + + int vtableEntryOffset = hsMethod.vtableEntryOffset(); + assert vtableEntryOffset > 0; + // We use LocationNode.ANY_LOCATION for the reads that access the vtable + // entry as HotSpot does not guarantee that this is a final value. + ReadNode metaspaceMethod = graph.add(new ReadNode(hub, ConstantLocationNode.create(ANY_LOCATION, wordKind, vtableEntryOffset, graph), StampFactory.forKind(wordKind), BarrierType.NONE, false)); + return metaspaceMethod; + } + + private FloatingReadNode createReadHub(StructuredGraph graph, Kind wordKind, ValueNode object, GuardingNode guard) { + HotSpotVMConfig config = runtime.getConfig(); + LocationNode location = ConstantLocationNode.create(FINAL_LOCATION, wordKind, config.hubOffset, graph); + assert !object.isConstant() || object.asConstant().isNull(); + return graph.unique(new FloatingReadNode(object, location, null, StampFactory.forKind(wordKind), guard, BarrierType.NONE, config.useCompressedClassPointers)); + } + + private WriteNode createWriteHub(StructuredGraph graph, Kind wordKind, ValueNode object, ValueNode value) { + HotSpotVMConfig config = runtime.getConfig(); + LocationNode location = ConstantLocationNode.create(HUB_LOCATION, wordKind, config.hubOffset, graph); + assert !object.isConstant() || object.asConstant().isNull(); + return graph.add(new WriteNode(object, value, location, BarrierType.NONE, config.useCompressedClassPointers)); + } + + private static BarrierType getFieldLoadBarrierType(HotSpotResolvedJavaField loadField) { + BarrierType barrierType = BarrierType.NONE; + if (config().useG1GC && loadField.getKind() == Kind.Object && loadField.getDeclaringClass().mirror() == java.lang.ref.Reference.class && loadField.getName().equals("referent")) { + barrierType = BarrierType.PRECISE; + } + return barrierType; + } + + private static BarrierType getFieldStoreBarrierType(StoreFieldNode storeField) { + BarrierType barrierType = BarrierType.NONE; + if (storeField.field().getKind() == Kind.Object) { + barrierType = BarrierType.IMPRECISE; + } + return barrierType; + } + + private static BarrierType getArrayStoreBarrierType(StoreIndexedNode store) { + BarrierType barrierType = BarrierType.NONE; + if (store.elementKind() == Kind.Object) { + barrierType = BarrierType.PRECISE; + } + return barrierType; + } + + private static BarrierType getUnsafeStoreBarrierType(UnsafeStoreNode store) { + BarrierType barrierType = BarrierType.NONE; + if (store.value().kind() == Kind.Object) { + ResolvedJavaType type = ObjectStamp.typeOrNull(store.object()); + if (type != null && !type.isArray()) { + barrierType = BarrierType.IMPRECISE; + } else { + barrierType = BarrierType.PRECISE; + } + } + return barrierType; + } + + private static BarrierType getCompareAndSwapBarrier(CompareAndSwapNode cas) { + BarrierType barrierType = BarrierType.NONE; + if (cas.expected().kind() == Kind.Object) { + ResolvedJavaType type = ObjectStamp.typeOrNull(cas.object()); + if (type != null && !type.isArray()) { + barrierType = BarrierType.IMPRECISE; + } else { + barrierType = BarrierType.PRECISE; + } + } + return barrierType; + } + + protected static ConstantLocationNode createFieldLocation(StructuredGraph graph, HotSpotResolvedJavaField field, 
boolean initialization) { + LocationIdentity loc = initialization ? INIT_LOCATION : field; + return ConstantLocationNode.create(loc, field.getKind(), field.offset(), graph); + } + + public int getScalingFactor(Kind kind) { + if (useCompressedOops() && kind == Kind.Object) { + return this.runtime.getTarget().arch.getSizeInBytes(Kind.Int); + } else { + return this.runtime.getTarget().arch.getSizeInBytes(kind); + } + } + + protected IndexedLocationNode createArrayLocation(Graph graph, Kind elementKind, ValueNode index, boolean initialization) { + LocationIdentity loc = initialization ? INIT_LOCATION : NamedLocationIdentity.getArrayLocation(elementKind); + int scale = getScalingFactor(elementKind); + return IndexedLocationNode.create(loc, elementKind, getArrayBaseOffset(elementKind), index, graph, scale); + } + + @Override + public ValueNode reconstructArrayIndex(LocationNode location) { + Kind elementKind = location.getValueKind(); + assert location.getLocationIdentity().equals(NamedLocationIdentity.getArrayLocation(elementKind)); + + long base; + ValueNode index; + int scale = getScalingFactor(elementKind); + + if (location instanceof ConstantLocationNode) { + base = ((ConstantLocationNode) location).getDisplacement(); + index = null; + } else if (location instanceof IndexedLocationNode) { + IndexedLocationNode indexedLocation = (IndexedLocationNode) location; + assert indexedLocation.getIndexScaling() == scale; + base = indexedLocation.getDisplacement(); + index = indexedLocation.getIndex(); + } else { + throw GraalInternalError.shouldNotReachHere(); + } + + base -= getArrayBaseOffset(elementKind); + assert base >= 0 && base % scale == 0; + + base /= scale; + assert NumUtil.isInt(base); + + StructuredGraph graph = location.graph(); + if (index == null) { + return ConstantNode.forInt((int) base, graph); + } else { + if (base == 0) { + return index; + } else { + return IntegerArithmeticNode.add(graph, ConstantNode.forInt((int) base, graph), index); + } + } + } + + private GuardingNode createBoundsCheck(AccessIndexedNode n, LoweringTool tool) { + StructuredGraph g = n.graph(); + ValueNode array = n.array(); + ValueNode arrayLength = readArrayLength(n.graph(), array, tool.getConstantReflection()); + if (arrayLength == null) { + Stamp stamp = StampFactory.positiveInt(); + ReadNode readArrayLength = g.add(new ReadNode(array, ConstantLocationNode.create(FINAL_LOCATION, Kind.Int, runtime.getConfig().arrayLengthOffset, g), stamp, BarrierType.NONE, false)); + g.addBeforeFixed(n, readArrayLength); + tool.createNullCheckGuard(readArrayLength, array); + arrayLength = readArrayLength; + } + + return tool.createGuard(g.unique(new IntegerBelowThanNode(n.index(), arrayLength)), BoundsCheckException, InvalidateReprofile); + } + +} diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/CurrentJavaThreadNode.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/CurrentJavaThreadNode.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/CurrentJavaThreadNode.java Thu Nov 21 15:04:54 2013 +0100 @@ -26,6 +26,7 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.graph.*; +import com.oracle.graal.hotspot.*; import com.oracle.graal.nodes.calc.*; import com.oracle.graal.nodes.spi.*; import com.oracle.graal.word.*; @@ -41,7 +42,7 @@ @Override public void generate(LIRGeneratorTool gen) { - Register rawThread = runtime().getHostProviders().getRegisters().getThreadRegister(); + Register 
rawThread = ((HotSpotLIRGenerator) gen).getProviders().getRegisters().getThreadRegister(); gen.setResult(this, rawThread.asValue(this.kind())); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotReplacementsUtil.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotReplacementsUtil.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotReplacementsUtil.java Thu Nov 21 15:04:54 2013 +0100 @@ -202,16 +202,6 @@ } @Fold - public static Register threadRegister() { - return runtime().getHostProviders().getRegisters().getThreadRegister(); - } - - @Fold - public static Register stackPointerRegister() { - return runtime().getHostProviders().getRegisters().getStackPointerRegister(); - } - - @Fold public static int wordSize() { return runtime().getTarget().wordSize; } @@ -469,23 +459,19 @@ @NodeIntrinsic(ForeignCallNode.class) private static native Object verifyOopStub(@ConstantNodeParameter ForeignCallDescriptor descriptor, Object object); - /** - * Gets the value of the stack pointer register as a Word. - */ - public static Word stackPointer() { - return registerAsWord(stackPointerRegister(), true, false); + public static Word loadWordFromObject(Object object, int offset) { + assert offset != hubOffset() : "Use loadHubIntrinsic instead"; + return loadWordFromObjectIntrinsic(object, offset, getWordKind(), LocationIdentity.ANY_LOCATION); } /** - * Gets the value of the thread register as a Word. + * Reads the value of a given register. + * + * @param register a register which must not be available to the register allocator + * @return the value of {@code register} as a word */ - public static Word thread() { - return registerAsWord(threadRegister(), true, false); - } - - public static Word loadWordFromObject(Object object, int offset) { - assert offset != hubOffset() : "Use loadHubIntrinsic instead"; - return loadWordFromObjectIntrinsic(object, offset, getWordKind(), LocationIdentity.ANY_LOCATION); + public static Word registerAsWord(@ConstantNodeParameter Register register) { + return registerAsWord(register, true, false); } @NodeIntrinsic(value = ReadRegisterNode.class, setStampFromReturnType = true) diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/LoadExceptionObjectSnippets.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/LoadExceptionObjectSnippets.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/LoadExceptionObjectSnippets.java Thu Nov 21 15:04:54 2013 +0100 @@ -34,6 +34,7 @@ import com.oracle.graal.nodes.java.*; import com.oracle.graal.nodes.type.*; import com.oracle.graal.replacements.*; +import com.oracle.graal.replacements.Snippet.*; import com.oracle.graal.replacements.SnippetTemplate.AbstractTemplates; import com.oracle.graal.replacements.SnippetTemplate.Arguments; import com.oracle.graal.replacements.SnippetTemplate.SnippetInfo; @@ -51,8 +52,8 @@ private static final boolean USE_C_RUNTIME = Boolean.getBoolean("graal.loadExceptionObject.useCRuntime"); @Snippet - public static Object loadException() { - Word thread = thread(); + public static Object loadException(@ConstantParameter Register threadRegister) { + Word thread = registerAsWord(threadRegister); Object exception = readExceptionOop(thread); writeExceptionOop(thread, null); writeExceptionPc(thread, 
Word.zero()); @@ -67,10 +68,9 @@ super(providers, target); } - public void lower(LoadExceptionObjectNode loadExceptionObject) { + public void lower(LoadExceptionObjectNode loadExceptionObject, HotSpotRegistersProvider registers) { if (USE_C_RUNTIME) { StructuredGraph graph = loadExceptionObject.graph(); - HotSpotRegistersProvider registers = ((HotSpotProviders) providers).getRegisters(); ReadRegisterNode thread = graph.add(new ReadRegisterNode(registers.getThreadRegister(), true, false)); graph.addBeforeFixed(loadExceptionObject, thread); ForeignCallNode loadExceptionC = graph.add(new ForeignCallNode(providers.getForeignCalls(), LOAD_AND_CLEAR_EXCEPTION, thread)); @@ -78,6 +78,7 @@ graph.replaceFixedWithFixed(loadExceptionObject, loadExceptionC); } else { Arguments args = new Arguments(loadException, loadExceptionObject.graph().getGuardsStage()); + args.addConst("threadRegister", registers.getThreadRegister()); template(args).instantiate(providers.getMetaAccess(), loadExceptionObject, DEFAULT_REPLACER, args); } } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/MonitorSnippets.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/MonitorSnippets.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/MonitorSnippets.java Thu Nov 21 15:04:54 2013 +0100 @@ -38,6 +38,7 @@ import com.oracle.graal.graph.Node.ConstantNodeParameter; import com.oracle.graal.graph.Node.NodeIntrinsic; import com.oracle.graal.graph.iterators.*; +import com.oracle.graal.hotspot.meta.*; import com.oracle.graal.hotspot.nodes.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.debug.*; @@ -96,7 +97,8 @@ public static final boolean CHECK_BALANCED_MONITORS = Boolean.getBoolean("graal.monitors.checkBalanced"); @Snippet - public static void monitorenter(Object object, @ConstantParameter int lockDepth, @ConstantParameter boolean trace) { + public static void monitorenter(Object object, @ConstantParameter int lockDepth, @ConstantParameter Register threadRegister, @ConstantParameter Register stackPointerRegister, + @ConstantParameter boolean trace) { verifyOop(object); if (object == null) { @@ -130,7 +132,7 @@ // whether the bias owner and the epoch are both still current. 
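+ // The check below composes the prototype mark word with the current thread and xors it against the object's mark (age bits masked off); a zero result means the object is already biased to this thread in the current epoch.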
Word hub = loadHubIntrinsic(object, getWordKind(), anchorNode); final Word prototypeMarkWord = hub.readWord(prototypeMarkWordOffset(), PROTOTYPE_MARK_WORD_LOCATION); - final Word thread = thread(); + final Word thread = registerAsWord(threadRegister); final Word tmp = prototypeMarkWord.or(thread).xor(mark).and(~ageMaskInPlace()); trace(trace, "prototypeMarkWord: 0x%016lx\n", prototypeMarkWord); trace(trace, " thread: 0x%016lx\n", thread); @@ -253,7 +255,7 @@ // assuming both the stack pointer and page_size have their least // significant 2 bits cleared and page_size is a power of 2 final Word alignedMask = Word.unsigned(wordSize() - 1); - final Word stackPointer = stackPointer(); + final Word stackPointer = registerAsWord(stackPointerRegister); if (probability(VERY_SLOW_PATH_PROBABILITY, currentMark.subtract(stackPointer).and(alignedMask.subtract(pageSize())).notEqual(0))) { // Most likely not a recursive lock, go into a slow runtime call traceObject(trace, "+lock{stub:failed-cas}", object, true); @@ -420,7 +422,7 @@ this.useFastLocking = useFastLocking; } - public void lower(MonitorEnterNode monitorenterNode, @SuppressWarnings("unused") LoweringTool tool) { + public void lower(MonitorEnterNode monitorenterNode, HotSpotRegistersProvider registers) { StructuredGraph graph = monitorenterNode.graph(); checkBalancedMonitors(graph); FrameState stateAfter = monitorenterNode.stateAfter(); @@ -434,6 +436,8 @@ args.add("object", monitorenterNode.object()); args.addConst("lockDepth", monitorenterNode.getLockDepth()); boolean tracingEnabledForMethod = stateAfter != null && (isTracingEnabledForMethod(stateAfter.method()) || isTracingEnabledForMethod(graph.method())); + args.addConst("threadRegister", registers.getThreadRegister()); + args.addConst("stackPointerRegister", registers.getStackPointerRegister()); args.addConst("trace", isTracingEnabledForType(monitorenterNode.object()) || tracingEnabledForMethod); Map<Node, Node> nodes = template(args).instantiate(providers.getMetaAccess(), monitorenterNode, DEFAULT_REPLACER, args); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/NewObjectSnippets.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/NewObjectSnippets.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/NewObjectSnippets.java Thu Nov 21 15:04:54 2013 +0100 @@ -113,9 +113,10 @@ } @Snippet - public static Object allocateInstance(@ConstantParameter int size, Word hub, Word prototypeMarkWord, @ConstantParameter boolean fillContents, @ConstantParameter String typeContext) { + public static Object allocateInstance(@ConstantParameter int size, Word hub, Word prototypeMarkWord, @ConstantParameter boolean fillContents, @ConstantParameter Register threadRegister, + @ConstantParameter String typeContext) { Object result; - Word thread = thread(); + Word thread = registerAsWord(threadRegister); Word top = readTlabTop(thread); Word end = readTlabEnd(thread); Word newTop = top.add(size); @@ -137,19 +138,20 @@ @Snippet public static Object allocateArray(Word hub, int length, Word prototypeMarkWord, @ConstantParameter int headerSize, @ConstantParameter int log2ElementSize, - @ConstantParameter boolean fillContents, @ConstantParameter String typeContext) { + @ConstantParameter boolean fillContents, @ConstantParameter Register threadRegister, @ConstantParameter String typeContext) { if (!belowThan(length, MAX_ARRAY_FAST_PATH_ALLOCATION_LENGTH)) { // This handles both
negative array sizes and very large array sizes DeoptimizeNode.deopt(DeoptimizationAction.None, DeoptimizationReason.RuntimeConstraint); } - return allocateArrayImpl(hub, length, prototypeMarkWord, headerSize, log2ElementSize, fillContents, typeContext); + return allocateArrayImpl(hub, length, prototypeMarkWord, headerSize, log2ElementSize, fillContents, threadRegister, typeContext); } - private static Object allocateArrayImpl(Word hub, int length, Word prototypeMarkWord, int headerSize, int log2ElementSize, boolean fillContents, String typeContext) { + private static Object allocateArrayImpl(Word hub, int length, Word prototypeMarkWord, int headerSize, int log2ElementSize, boolean fillContents, @ConstantParameter Register threadRegister, + String typeContext) { Object result; int alignment = wordSize(); int allocationSize = computeArrayAllocationSize(length, alignment, headerSize, log2ElementSize); - Word thread = thread(); + Word thread = registerAsWord(threadRegister); Word top = readTlabTop(thread); Word end = readTlabEnd(thread); Word newTop = top.add(allocationSize); @@ -171,7 +173,7 @@ public static native Object dynamicNewArrayStub(@ConstantNodeParameter ForeignCallDescriptor descriptor, Class<?> elementType, int length); @Snippet - public static Object allocateArrayDynamic(Class<?> elementType, int length, @ConstantParameter boolean fillContents) { + public static Object allocateArrayDynamic(Class<?> elementType, int length, @ConstantParameter boolean fillContents, @ConstantParameter Register threadRegister) { Word hub = loadWordFromObject(elementType, arrayKlassOffset()); if (hub.equal(Word.zero()) || !belowThan(length, MAX_ARRAY_FAST_PATH_ALLOCATION_LENGTH)) { return dynamicNewArrayStub(DYNAMIC_NEW_ARRAY, elementType, length); @@ -195,7 +197,7 @@ int log2ElementSize = (layoutHelper >> layoutHelperLog2ElementSizeShift()) & layoutHelperLog2ElementSizeMask(); Word prototypeMarkWord = hub.readWord(prototypeMarkWordOffset(), PROTOTYPE_MARK_WORD_LOCATION); - return allocateArrayImpl(hub, length, prototypeMarkWord, headerSize, log2ElementSize, fillContents, "dynamic type"); + return allocateArrayImpl(hub, length, prototypeMarkWord, headerSize, log2ElementSize, fillContents, threadRegister, "dynamic type"); } /** @@ -288,7 +290,7 @@ /** * Lowers a {@link NewInstanceNode}. */ - public void lower(NewInstanceNode newInstanceNode) { + public void lower(NewInstanceNode newInstanceNode, HotSpotRegistersProvider registers) { StructuredGraph graph = newInstanceNode.graph(); HotSpotResolvedObjectType type = (HotSpotResolvedObjectType) newInstanceNode.instanceClass(); assert !type.isArray(); @@ -300,6 +302,7 @@ args.add("hub", hub); args.add("prototypeMarkWord", type.prototypeMarkWord()); args.addConst("fillContents", newInstanceNode.fillContents()); + args.addConst("threadRegister", registers.getThreadRegister()); args.addConst("typeContext", ProfileAllocations.getValue() ? toJavaName(type, false) : ""); SnippetTemplate template = template(args); @@ -310,14 +313,14 @@ /** * Lowers a {@link NewArrayNode}.
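+ * The fast path bump-allocates the array from the current thread's TLAB (see {@code allocateArrayImpl} above); negative or very large lengths deoptimize with a runtime-constraint reason instead.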
*/ - public void lower(NewArrayNode newArrayNode) { + public void lower(NewArrayNode newArrayNode, HotSpotRegistersProvider registers) { StructuredGraph graph = newArrayNode.graph(); ResolvedJavaType elementType = newArrayNode.elementType(); HotSpotResolvedObjectType arrayType = (HotSpotResolvedObjectType) elementType.getArrayClass(); Kind elementKind = elementType.getKind(); ConstantNode hub = ConstantNode.forConstant(arrayType.klass(), providers.getMetaAccess(), graph); final int headerSize = HotSpotGraalRuntime.getArrayBaseOffset(elementKind); - HotSpotHostLoweringProvider lowerer = (HotSpotHostLoweringProvider) providers.getLowerer(); + HotSpotLoweringProvider lowerer = (HotSpotLoweringProvider) providers.getLowerer(); int log2ElementSize = CodeUtil.log2(lowerer.getScalingFactor(elementKind)); Arguments args = new Arguments(allocateArray, graph.getGuardsStage()); @@ -327,6 +330,7 @@ args.addConst("headerSize", headerSize); args.addConst("log2ElementSize", log2ElementSize); args.addConst("fillContents", newArrayNode.fillContents()); + args.addConst("threadRegister", registers.getThreadRegister()); args.addConst("typeContext", ProfileAllocations.getValue() ? toJavaName(arrayType, false) : ""); SnippetTemplate template = template(args); @@ -334,11 +338,12 @@ template.instantiate(providers.getMetaAccess(), newArrayNode, DEFAULT_REPLACER, args); } - public void lower(DynamicNewArrayNode newArrayNode) { + public void lower(DynamicNewArrayNode newArrayNode, HotSpotRegistersProvider registers) { Arguments args = new Arguments(allocateArrayDynamic, newArrayNode.graph().getGuardsStage()); args.add("elementType", newArrayNode.getElementType()); args.add("length", newArrayNode.length()); args.addConst("fillContents", newArrayNode.fillContents()); + args.addConst("threadRegister", registers.getThreadRegister()); SnippetTemplate template = template(args); template.instantiate(providers.getMetaAccess(), newArrayNode, DEFAULT_REPLACER, args); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/WriteBarrierSnippets.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/WriteBarrierSnippets.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/WriteBarrierSnippets.java Thu Nov 21 15:04:54 2013 +0100 @@ -31,11 +31,11 @@ import com.oracle.graal.api.meta.*; import com.oracle.graal.graph.Node.ConstantNodeParameter; import com.oracle.graal.graph.Node.NodeIntrinsic; +import com.oracle.graal.hotspot.meta.*; import com.oracle.graal.hotspot.nodes.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.HeapAccess.BarrierType; import com.oracle.graal.nodes.extended.*; -import com.oracle.graal.nodes.spi.*; import com.oracle.graal.phases.*; import com.oracle.graal.phases.util.*; import com.oracle.graal.replacements.*; @@ -106,11 +106,11 @@ @Snippet public static void g1PreWriteBarrier(Object object, Object expectedObject, Object location, @ConstantParameter boolean doLoad, @ConstantParameter boolean nullCheck, - @ConstantParameter boolean trace) { + @ConstantParameter Register threadRegister, @ConstantParameter boolean trace) { if (nullCheck && object == null) { DeoptimizeNode.deopt(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.NullCheckException); } - Word thread = thread(); + Word thread = registerAsWord(threadRegister); Object fixedObject = FixedValueAnchorNode.getObject(object); verifyOop(fixedObject); Object fixedExpectedObject = 
FixedValueAnchorNode.getObject(expectedObject); @@ -161,8 +161,9 @@ } @Snippet - public static void g1PostWriteBarrier(Object object, Object value, Object location, @ConstantParameter boolean usePrecise, @ConstantParameter boolean trace) { - Word thread = thread(); + public static void g1PostWriteBarrier(Object object, Object value, Object location, @ConstantParameter boolean usePrecise, @ConstantParameter Register threadRegister, + @ConstantParameter boolean trace) { + Word thread = registerAsWord(threadRegister); Object fixedObject = FixedValueAnchorNode.getObject(object); Object fixedValue = FixedValueAnchorNode.getObject(value); verifyOop(fixedObject); @@ -233,8 +234,8 @@ } @Snippet - public static void g1ArrayRangePreWriteBarrier(Object object, int startIndex, int length) { - Word thread = thread(); + public static void g1ArrayRangePreWriteBarrier(Object object, int startIndex, int length, @ConstantParameter Register threadRegister) { + Word thread = registerAsWord(threadRegister); byte markingValue = thread.readByte(g1SATBQueueMarkingOffset()); // If the concurrent marker is not enabled or the vector length is zero, return. if (markingValue == (byte) 0 || length == 0) { @@ -267,12 +268,12 @@ } @Snippet - public static void g1ArrayRangePostWriteBarrier(Object object, int startIndex, int length) { + public static void g1ArrayRangePostWriteBarrier(Object object, int startIndex, int length, @ConstantParameter Register threadRegister) { if (length == 0) { return; } Object dest = FixedValueAnchorNode.getObject(object); - Word thread = thread(); + Word thread = registerAsWord(threadRegister); Word bufferAddress = thread.readWord(g1CardQueueBufferOffset()); Word indexAddress = thread.add(g1CardQueueIndexOffset()); long indexValue = thread.readWord(g1CardQueueIndexOffset()).rawValue(); @@ -332,7 +333,7 @@ super(providers, target); } - public void lower(SerialWriteBarrier writeBarrier, @SuppressWarnings("unused") LoweringTool tool) { + public void lower(SerialWriteBarrier writeBarrier) { if (writeBarrier.alwaysNull()) { writeBarrier.graph().removeFixed(writeBarrier); return; @@ -344,7 +345,7 @@ template(args).instantiate(providers.getMetaAccess(), writeBarrier, DEFAULT_REPLACER, args); } - public void lower(SerialArrayRangeWriteBarrier arrayRangeWriteBarrier, @SuppressWarnings("unused") LoweringTool tool) { + public void lower(SerialArrayRangeWriteBarrier arrayRangeWriteBarrier) { Arguments args = new Arguments(serialArrayRangeWriteBarrier, arrayRangeWriteBarrier.graph().getGuardsStage()); args.add("object", arrayRangeWriteBarrier.getObject()); args.add("startIndex", arrayRangeWriteBarrier.getStartIndex()); @@ -352,29 +353,31 @@ template(args).instantiate(providers.getMetaAccess(), arrayRangeWriteBarrier, DEFAULT_REPLACER, args); } - public void lower(G1PreWriteBarrier writeBarrierPre, @SuppressWarnings("unused") LoweringTool tool) { + public void lower(G1PreWriteBarrier writeBarrierPre, HotSpotRegistersProvider registers) { Arguments args = new Arguments(g1PreWriteBarrier, writeBarrierPre.graph().getGuardsStage()); args.add("object", writeBarrierPre.getObject()); args.add("expectedObject", writeBarrierPre.getExpectedObject()); args.add("location", writeBarrierPre.getLocation()); args.addConst("doLoad", writeBarrierPre.doLoad()); args.addConst("nullCheck", writeBarrierPre.getNullCheck()); + args.addConst("threadRegister", registers.getThreadRegister()); args.addConst("trace", traceBarrier()); template(args).instantiate(providers.getMetaAccess(), writeBarrierPre, DEFAULT_REPLACER, args); } - 
public void lower(G1ReferentFieldReadBarrier readBarrier, @SuppressWarnings("unused") LoweringTool tool) { + public void lower(G1ReferentFieldReadBarrier readBarrier, HotSpotRegistersProvider registers) { Arguments args = new Arguments(g1ReferentReadBarrier, readBarrier.graph().getGuardsStage()); args.add("object", readBarrier.getObject()); args.add("expectedObject", readBarrier.getExpectedObject()); args.add("location", readBarrier.getLocation()); args.addConst("doLoad", readBarrier.doLoad()); args.addConst("nullCheck", false); + args.addConst("threadRegister", registers.getThreadRegister()); args.addConst("trace", traceBarrier()); template(args).instantiate(providers.getMetaAccess(), readBarrier, DEFAULT_REPLACER, args); } - public void lower(G1PostWriteBarrier writeBarrierPost, @SuppressWarnings("unused") LoweringTool tool) { + public void lower(G1PostWriteBarrier writeBarrierPost, HotSpotRegistersProvider registers) { if (writeBarrierPost.alwaysNull()) { writeBarrierPost.graph().removeFixed(writeBarrierPost); return; @@ -384,23 +387,26 @@ args.add("value", writeBarrierPost.getValue()); args.add("location", writeBarrierPost.getLocation()); args.addConst("usePrecise", writeBarrierPost.usePrecise()); + args.addConst("threadRegister", registers.getThreadRegister()); args.addConst("trace", traceBarrier()); template(args).instantiate(providers.getMetaAccess(), writeBarrierPost, DEFAULT_REPLACER, args); } - public void lower(G1ArrayRangePreWriteBarrier arrayRangeWriteBarrier, @SuppressWarnings("unused") LoweringTool tool) { + public void lower(G1ArrayRangePreWriteBarrier arrayRangeWriteBarrier, HotSpotRegistersProvider registers) { Arguments args = new Arguments(g1ArrayRangePreWriteBarrier, arrayRangeWriteBarrier.graph().getGuardsStage()); args.add("object", arrayRangeWriteBarrier.getObject()); args.add("startIndex", arrayRangeWriteBarrier.getStartIndex()); args.add("length", arrayRangeWriteBarrier.getLength()); + args.addConst("threadRegister", registers.getThreadRegister()); template(args).instantiate(providers.getMetaAccess(), arrayRangeWriteBarrier, DEFAULT_REPLACER, args); } - public void lower(G1ArrayRangePostWriteBarrier arrayRangeWriteBarrier, @SuppressWarnings("unused") LoweringTool tool) { + public void lower(G1ArrayRangePostWriteBarrier arrayRangeWriteBarrier, HotSpotRegistersProvider registers) { Arguments args = new Arguments(g1ArrayRangePostWriteBarrier, arrayRangeWriteBarrier.graph().getGuardsStage()); args.add("object", arrayRangeWriteBarrier.getObject()); args.add("startIndex", arrayRangeWriteBarrier.getStartIndex()); args.add("length", arrayRangeWriteBarrier.getLength()); + args.addConst("threadRegister", registers.getThreadRegister()); template(args).instantiate(providers.getMetaAccess(), arrayRangeWriteBarrier, DEFAULT_REPLACER, args); } } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ExceptionHandlerStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ExceptionHandlerStub.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ExceptionHandlerStub.java Thu Nov 21 15:04:54 2013 +0100 @@ -35,7 +35,7 @@ import com.oracle.graal.hotspot.meta.*; import com.oracle.graal.hotspot.nodes.*; import com.oracle.graal.replacements.*; -import com.oracle.graal.replacements.Snippet.Fold; +import com.oracle.graal.replacements.Snippet.*; import com.oracle.graal.word.*; /** @@ -62,12 +62,19 @@ return false; } + @Override + protected Object 
getConstantParameterValue(int index, String name) { + assert index == 2; + return providers.getRegisters().getThreadRegister(); + } + @Snippet - private static void exceptionHandler(Object exception, Word exceptionPc) { - checkNoExceptionInThread(assertionsEnabled()); + private static void exceptionHandler(Object exception, Word exceptionPc, @ConstantParameter Register threadRegister) { + Word thread = registerAsWord(threadRegister); + checkNoExceptionInThread(thread, assertionsEnabled()); checkExceptionNotNull(assertionsEnabled(), exception); - writeExceptionOop(thread(), exception); - writeExceptionPc(thread(), exceptionPc); + writeExceptionOop(thread, exception); + writeExceptionPc(thread, exceptionPc); if (logging()) { printf("handling exception %p (", Word.fromObject(exception).rawValue()); decipher(Word.fromObject(exception).rawValue()); @@ -79,7 +86,7 @@ // patch throwing pc into return address so that deoptimization finds the right debug info patchReturnAddress(exceptionPc); - Word handlerPc = exceptionHandlerForPc(EXCEPTION_HANDLER_FOR_PC, thread()); + Word handlerPc = exceptionHandlerForPc(EXCEPTION_HANDLER_FOR_PC, thread); if (logging()) { printf("handler for exception %p at %p is at %p (", Word.fromObject(exception).rawValue(), exceptionPc.rawValue(), handlerPc.rawValue()); @@ -91,16 +98,16 @@ patchReturnAddress(handlerPc); } - static void checkNoExceptionInThread(boolean enabled) { + static void checkNoExceptionInThread(Word thread, boolean enabled) { if (enabled) { - Object currentException = readExceptionOop(thread()); + Object currentException = readExceptionOop(thread); if (currentException != null) { fatal("exception object in thread must be null, not %p", Word.fromObject(currentException).rawValue()); } if (cAssertionsEnabled()) { // This thread-local is only cleared in DEBUG builds of the VM // (see OptoRuntime::generate_exception_blob) - Word currentExceptionPc = readExceptionPc(thread()); + Word currentExceptionPc = readExceptionPc(thread); if (currentExceptionPc.notEqual(Word.zero())) { fatal("exception PC in thread must be zero, not %p", currentExceptionPc.rawValue()); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ForeignCallStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ForeignCallStub.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ForeignCallStub.java Thu Nov 21 15:04:54 2013 +0100 @@ -54,8 +54,8 @@ * deoptimization while the call is in progress. And since these are foreign/runtime calls on slow * paths, we don't want to force the register allocator to spill around the call. As such, this stub * saves and restores all allocatable registers. It also - * {@linkplain StubUtil#handlePendingException(boolean) handles} any exceptions raised during the - * foreign call. + * {@linkplain StubUtil#handlePendingException(Word, boolean) handles} any exceptions raised during + * the foreign call. */ public class ForeignCallStub extends Stub { @@ -226,9 +226,9 @@ LocalNode[] locals = createLocals(builder, args); List invokes = new ArrayList<>(3); - ReadRegisterNode thread = prependThread || isObjectResult ? 
builder.append(new ReadRegisterNode(providers.getRegisters().getThreadRegister(), true, false)) : null; + ReadRegisterNode thread = builder.append(new ReadRegisterNode(providers.getRegisters().getThreadRegister(), true, false)); ValueNode result = createTargetCall(builder, locals, thread); - invokes.add(createInvoke(builder, StubUtil.class, "handlePendingException", ConstantNode.forBoolean(isObjectResult, builder.graph))); + invokes.add(createInvoke(builder, StubUtil.class, "handlePendingException", thread, ConstantNode.forBoolean(isObjectResult, builder.graph))); if (isObjectResult) { InvokeNode object = createInvoke(builder, HotSpotReplacementsUtil.class, "getAndClearObjectResult", thread); result = createInvoke(builder, StubUtil.class, "verifyObject", object); @@ -276,7 +276,7 @@ return locals; } - private InvokeNode createInvoke(GraphBuilder builder, Class declaringClass, String name, ValueNode... hpeArgs) { + private InvokeNode createInvoke(GraphBuilder builder, Class declaringClass, String name, ValueNode... args) { ResolvedJavaMethod method = null; for (Method m : declaringClass.getDeclaredMethods()) { if (Modifier.isStatic(m.getModifiers()) && m.getName().equals(name)) { @@ -285,12 +285,25 @@ } } assert method != null : "did not find method in " + declaringClass + " named " + name; - JavaType returnType = method.getSignature().getReturnType(null); - MethodCallTargetNode callTarget = builder.graph.add(new MethodCallTargetNode(InvokeKind.Static, method, hpeArgs, returnType)); + Signature signature = method.getSignature(); + JavaType returnType = signature.getReturnType(null); + assert checkArgs(method, args); + MethodCallTargetNode callTarget = builder.graph.add(new MethodCallTargetNode(InvokeKind.Static, method, args, returnType)); InvokeNode invoke = builder.append(new InvokeNode(callTarget, FrameState.UNKNOWN_BCI)); return invoke; } + private boolean checkArgs(ResolvedJavaMethod method, ValueNode... 
args) { + Signature signature = method.getSignature(); + assert signature.getParameterCount(false) == args.length : target + ": wrong number of arguments to " + method; + for (int i = 0; i != args.length; i++) { + Kind expected = signature.getParameterKind(i).getStackKind(); + Kind actual = args[i].stamp().kind(); + assert expected == actual : target + ": wrong kind of value for argument " + i + " of calls to " + method + " [" + actual + " != " + expected + "]"; + } + return true; + } + private StubForeignCallNode createTargetCall(GraphBuilder builder, LocalNode[] locals, ReadRegisterNode thread) { if (prependThread) { ValueNode[] targetArguments = new ValueNode[1 + locals.length]; diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewArrayStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewArrayStub.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewArrayStub.java Thu Nov 21 15:04:54 2013 +0100 @@ -70,6 +70,7 @@ args.add("hub", null); args.add("length", null); args.addConst("intArrayHub", intArrayHub); + args.addConst("threadRegister", providers.getRegisters().getThreadRegister()); return args; } @@ -87,7 +88,7 @@ * @param intArrayHub the hub for {@code int[].class} */ @Snippet - private static Object newArray(Word hub, int length, @ConstantParameter Word intArrayHub) { + private static Object newArray(Word hub, int length, @ConstantParameter Word intArrayHub, @ConstantParameter Register threadRegister) { int layoutHelper = hub.readInt(layoutHelperOffset(), LocationIdentity.FINAL_LOCATION); int log2ElementSize = (layoutHelper >> layoutHelperLog2ElementSizeShift()) & layoutHelperLog2ElementSizeMask(); int headerSize = (layoutHelper >> layoutHelperHeaderSizeShift()) & layoutHelperHeaderSizeMask(); @@ -101,8 +102,9 @@ } // check that array length is small enough for fast path. 
+ Word thread = registerAsWord(threadRegister); if (length <= MAX_ARRAY_FAST_PATH_ALLOCATION_LENGTH) { - Word memory = refillAllocate(intArrayHub, sizeInBytes, logging()); + Word memory = refillAllocate(thread, intArrayHub, sizeInBytes, logging()); if (memory.notEqual(0)) { if (logging()) { printf("newArray: allocated new array at %p\n", memory.rawValue()); @@ -114,9 +116,9 @@ printf("newArray: calling new_array_c\n"); } - newArrayC(NEW_ARRAY_C, thread(), hub, length); - handlePendingException(true); - return verifyObject(getAndClearObjectResult(thread())); + newArrayC(NEW_ARRAY_C, thread, hub, length); + handlePendingException(thread, true); + return verifyObject(getAndClearObjectResult(thread)); } public static final ForeignCallDescriptor NEW_ARRAY_C = descriptorFor(NewArrayStub.class, "newArrayC"); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java Thu Nov 21 15:04:54 2013 +0100 @@ -70,11 +70,11 @@ Arguments args = new Arguments(stub, GuardsStage.FLOATING_GUARDS); args.add("hub", null); args.addConst("intArrayHub", intArrayHub); + args.addConst("threadRegister", providers.getRegisters().getThreadRegister()); return args; } - private static Word allocate(int size) { - Word thread = thread(); + private static Word allocate(Word thread, int size) { Word top = readTlabTop(thread); Word end = readTlabEnd(thread); Word newTop = top.add(size); @@ -102,11 +102,12 @@ * @param intArrayHub the hub for {@code int[].class} */ @Snippet - private static Object newInstance(Word hub, @ConstantParameter Word intArrayHub) { + private static Object newInstance(Word hub, @ConstantParameter Word intArrayHub, @ConstantParameter Register threadRegister) { int sizeInBytes = hub.readInt(klassInstanceSizeOffset(), LocationIdentity.FINAL_LOCATION); + Word thread = registerAsWord(threadRegister); if (!forceSlowPath() && inlineContiguousAllocationSupported()) { if (hub.readByte(klassStateOffset(), CLASS_STATE_LOCATION) == klassStateFullyInitialized()) { - Word memory = refillAllocate(intArrayHub, sizeInBytes, logging()); + Word memory = refillAllocate(thread, intArrayHub, sizeInBytes, logging()); if (memory.notEqual(0)) { Word prototypeMarkWord = hub.readWord(prototypeMarkWordOffset(), PROTOTYPE_MARK_WORD_LOCATION); initializeObjectHeader(memory, prototypeMarkWord, hub); @@ -122,9 +123,9 @@ printf("newInstance: calling new_instance_c\n"); } - newInstanceC(NEW_INSTANCE_C, thread(), hub); - handlePendingException(true); - return verifyObject(getAndClearObjectResult(thread())); + newInstanceC(NEW_INSTANCE_C, thread, hub); + handlePendingException(thread, true); + return verifyObject(getAndClearObjectResult(thread)); } /** @@ -133,10 +134,11 @@ * @param intArrayHub the hub for {@code int[].class} * @param sizeInBytes the size of the allocation * @param log specifies if logging is enabled + * * @return the newly allocated, uninitialized chunk of memory, or {@link Word#zero()} if the * operation was unsuccessful */ - static Word refillAllocate(Word intArrayHub, int sizeInBytes, boolean log) { + static Word refillAllocate(Word thread, Word intArrayHub, int sizeInBytes, boolean log) { // If G1 is enabled, the "eden" allocation space is not the same always // and therefore we have to go to slowpath to allocate a new TLAB. 
if (useG1GC()) { @@ -148,7 +150,6 @@ Word intArrayMarkWord = Word.unsigned(tlabIntArrayMarkWord()); int alignmentReserveInBytes = tlabAlignmentReserveInHeapWords() * wordSize(); - Word thread = thread(); Word top = readTlabTop(thread); Word end = readTlabEnd(thread); @@ -206,7 +207,7 @@ end = top.add(tlabRefillSizeInBytes.subtract(alignmentReserveInBytes)); initializeTlab(thread, top, end); - return NewInstanceStub.allocate(sizeInBytes); + return NewInstanceStub.allocate(thread, sizeInBytes); } else { return Word.zero(); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/SnippetStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/SnippetStub.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/SnippetStub.java Thu Nov 21 15:04:54 2013 +0100 @@ -26,6 +26,7 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; +import com.oracle.graal.graph.*; import com.oracle.graal.hotspot.*; import com.oracle.graal.hotspot.meta.*; import com.oracle.graal.nodes.*; @@ -82,11 +83,21 @@ protected Arguments makeArguments(SnippetInfo stub) { Arguments args = new Arguments(stub, GuardsStage.FLOATING_GUARDS); for (int i = 0; i < stub.getParameterCount(); i++) { - args.add(stub.getParameterName(i), null); + String name = stub.getParameterName(i); + if (stub.isConstantParameter(i)) { + args.addConst(name, getConstantParameterValue(i, name)); + } else { + assert !stub.isVarargsParameter(i); + args.add(name, null); + } } return args; } + protected Object getConstantParameterValue(int index, String name) { + throw new GraalInternalError("%s must override getConstantParameterValue() to provide a value for parameter %d%s", getClass().getName(), index, name == null ? 
"" : " (" + name + ")"); + } + @Override protected Object debugScopeContext() { return getInstalledCodeOwner(); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/StubUtil.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/StubUtil.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/StubUtil.java Thu Nov 21 15:04:54 2013 +0100 @@ -73,10 +73,10 @@ return new ForeignCallDescriptor(name, found.getReturnType(), cCallTypes); } - public static void handlePendingException(boolean isObjectResult) { - if (clearPendingException(thread())) { + public static void handlePendingException(Word thread, boolean isObjectResult) { + if (clearPendingException(thread)) { if (isObjectResult) { - getAndClearObjectResult(thread()); + getAndClearObjectResult(thread); } DeoptimizeCallerNode.deopt(InvalidateReprofile, RuntimeConstraint); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UnwindExceptionToCallerStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UnwindExceptionToCallerStub.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UnwindExceptionToCallerStub.java Thu Nov 21 15:04:54 2013 +0100 @@ -36,7 +36,7 @@ import com.oracle.graal.hotspot.nodes.*; import com.oracle.graal.nodes.*; import com.oracle.graal.replacements.*; -import com.oracle.graal.replacements.Snippet.Fold; +import com.oracle.graal.replacements.Snippet.*; import com.oracle.graal.word.*; /** @@ -58,8 +58,14 @@ return false; } + @Override + protected Object getConstantParameterValue(int index, String name) { + assert index == 2; + return providers.getRegisters().getThreadRegister(); + } + @Snippet - private static void unwindExceptionToCaller(Object exception, Word returnAddress) { + private static void unwindExceptionToCaller(Object exception, Word returnAddress, @ConstantParameter Register threadRegister) { Pointer exceptionOop = Word.fromObject(exception); if (logging()) { printf("unwinding exception %p (", exceptionOop.rawValue()); @@ -68,10 +74,11 @@ decipher(returnAddress.rawValue()); printf(")\n"); } - checkNoExceptionInThread(assertionsEnabled()); + Word thread = registerAsWord(threadRegister); + checkNoExceptionInThread(thread, assertionsEnabled()); checkExceptionNotNull(assertionsEnabled(), exception); - Word handlerInCallerPc = exceptionHandlerForReturnAddress(EXCEPTION_HANDLER_FOR_RETURN_ADDRESS, thread(), returnAddress); + Word handlerInCallerPc = exceptionHandlerForReturnAddress(EXCEPTION_HANDLER_FOR_RETURN_ADDRESS, thread, returnAddress); if (logging()) { printf("handler for exception %p at return address %p is at %p (", exceptionOop.rawValue(), returnAddress.rawValue(), handlerInCallerPc.rawValue()); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java --- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java Thu Nov 21 15:04:54 2013 +0100 @@ -34,7 +34,9 @@ import com.oracle.graal.asm.amd64.*; import com.oracle.graal.graph.*; import com.oracle.graal.lir.*; +import com.oracle.graal.lir.StandardOp.ImplicitNullCheck; import com.oracle.graal.lir.StandardOp.MoveOp; +import com.oracle.graal.lir.StandardOp.NullCheck; import com.oracle.graal.lir.asm.*; public 
class AMD64Move { @@ -93,7 +95,7 @@ } } - public abstract static class MemOp extends AMD64LIRInstruction { + public abstract static class MemOp extends AMD64LIRInstruction implements ImplicitNullCheck { protected final Kind kind; @Use({COMPOSITE}) protected AMD64AddressValue address; @@ -114,6 +116,14 @@ } emitMemAccess(masm); } + + public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) { + if (state == null && value.equals(address.base) && address.index == Value.ILLEGAL && address.displacement >= 0 && address.displacement < implicitNullCheckLimit) { + state = nullCheckState; + return true; + } + return false; + } } public static class LoadOp extends MemOp { @@ -295,7 +305,7 @@ } } - public static class NullCheckOp extends AMD64LIRInstruction { + public static class NullCheckOp extends AMD64LIRInstruction implements NullCheck { @Use({REG}) protected AllocatableValue input; @State protected LIRFrameState state; @@ -310,6 +320,14 @@ tasm.recordImplicitException(masm.codeBuffer.position(), state); masm.nullCheck(asRegister(input)); } + + public Value getCheckedValue() { + return input; + } + + public LIRFrameState getState() { + return state; + } } @Opcode("CAS") diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.lir.hsail/src/com/oracle/graal/lir/hsail/HSAILControlFlow.java --- a/graal/com.oracle.graal.lir.hsail/src/com/oracle/graal/lir/hsail/HSAILControlFlow.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.lir.hsail/src/com/oracle/graal/lir/hsail/HSAILControlFlow.java Thu Nov 21 15:04:54 2013 +0100 @@ -28,6 +28,7 @@ import com.oracle.graal.asm.hsail.*; import com.oracle.graal.graph.*; import com.oracle.graal.lir.*; +import com.oracle.graal.lir.StandardOp.FallThroughOp; import com.oracle.graal.lir.asm.*; import com.oracle.graal.nodes.calc.*; @@ -36,6 +37,96 @@ */ public class HSAILControlFlow { + /** + * This class represents the LIR instruction that the HSAIL backend generates for a switch + * construct in Java. + * + * The HSAIL backend compiles switch statements into a series of cascading compare and branch + * instructions because this is currently the recommended way to generate optimally + * performing HSAIL code. Thus the execution path for both the TABLESWITCH and LOOKUPSWITCH + * bytecodes goes through this op. + */ + public static class SwitchOp extends HSAILLIRInstruction implements FallThroughOp { + /** + * The array of key constants used for the cases of this switch statement. + */ + @Use({CONST}) protected Constant[] keyConstants; + /** + * The branch target labels that correspond to each case. + */ + private final LabelRef[] keyTargets; + private LabelRef defaultTarget; + /** + * The key of the switch. This will be compared with each of the keyConstants. + */ + @Alive({REG}) protected Value key; + + /** + * Constructor. Called from the HSAILLIRGenerator.emitSequentialSwitch routine. + * + * @param keyConstants + * @param keyTargets + * @param defaultTarget + * @param key + */ + public SwitchOp(Constant[] keyConstants, LabelRef[] keyTargets, LabelRef defaultTarget, Value key) { + assert keyConstants.length == keyTargets.length; + this.keyConstants = keyConstants; + this.keyTargets = keyTargets; + this.defaultTarget = defaultTarget; + this.key = key; + } + + /** + * Get the default target for this switch op. + */ + @Override + public LabelRef fallThroughTarget() { + return defaultTarget; + } + + /** + * Set the default target.
+ * + * @param target the default target + */ + @Override + public void setFallThroughTarget(LabelRef target) { + defaultTarget = target; + + } + + /** + * Generates the code for this switch op. + * + * The keys for switch statements in Java bytecode are of type int. However, Graal also + * generates a TypeSwitchNode (for method dispatch) which triggers the invocation of these + * routines with keys of type Long or Object. Currently we only support the + * IntegerSwitchNode so we throw an exception if the key isn't of type int. + * + * @param tasm the TargetMethodAssembler + * @param masm the HSAIL assembler + */ + @Override + public void emitCode(TargetMethodAssembler tasm, HSAILAssembler masm) { + if (key.getKind() == Kind.Int) { + for (int i = 0; i < keyConstants.length; i++) { + // Generate cascading compare and branches for each case. + masm.emitCompare(key, keyConstants[i], "eq", false, false); + masm.cbr(masm.nameOf(keyTargets[i].label())); + } + // Generate a jump for the default target if there is one. + if (defaultTarget != null) { + masm.jmp(defaultTarget.label()); + } + + } else { + // Throw an exception if the key isn't of type int. + throw new GraalInternalError("Switch statements are only supported for int keys"); + } + } + } + public static class ReturnOp extends HSAILLIRInstruction { @Use({REG, ILLEGAL}) protected Value x; diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java --- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java Thu Nov 21 15:04:54 2013 +0100 @@ -32,7 +32,9 @@ import com.oracle.graal.asm.sparc.*; import com.oracle.graal.graph.*; import com.oracle.graal.lir.*; +import com.oracle.graal.lir.StandardOp.ImplicitNullCheck; import com.oracle.graal.lir.StandardOp.MoveOp; +import com.oracle.graal.lir.StandardOp.NullCheck; import com.oracle.graal.lir.asm.*; public class SPARCMove { @@ -91,7 +93,7 @@ } } - public abstract static class MemOp extends SPARCLIRInstruction { + public abstract static class MemOp extends SPARCLIRInstruction implements ImplicitNullCheck { protected final Kind kind; @Use({COMPOSITE}) protected SPARCAddressValue address; @@ -112,6 +114,14 @@ } emitMemAccess(masm); } + + public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) { + if (state == null && value.equals(address.base) && address.index == Value.ILLEGAL && address.displacement >= 0 && address.displacement < implicitNullCheckLimit) { + state = nullCheckState; + return true; + } + return false; + } } public static class LoadOp extends MemOp { @@ -193,7 +203,7 @@ } } - public static class NullCheckOp extends SPARCLIRInstruction { + public static class NullCheckOp extends SPARCLIRInstruction implements NullCheck { @Use({REG}) protected AllocatableValue input; @State protected LIRFrameState state; @@ -208,6 +218,14 @@ tasm.recordImplicitException(masm.codeBuffer.position(), state); new Ldx(new SPARCAddress(asRegister(input), 0), r0).emit(masm); } + + public Value getCheckedValue() { + return input; + } + + public LIRFrameState getState() { + return state; + } } @Opcode("CAS") diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/NullCheckOptimizer.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/NullCheckOptimizer.java Thu Nov 21 15:04:54 2013 +0100
@@ -0,0 +1,66 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir; + +import java.util.*; + +import com.oracle.graal.lir.StandardOp.ImplicitNullCheck; +import com.oracle.graal.lir.StandardOp.NullCheck; +import com.oracle.graal.nodes.cfg.*; + +public final class NullCheckOptimizer { + + public static void optimize(LIR ir, int implicitNullCheckLimit) { + List blocks = ir.codeEmittingOrder(); + NullCheckOptimizer.foldNullChecks(ir, blocks, implicitNullCheckLimit); + } + + private NullCheckOptimizer() { + } + + private static void foldNullChecks(LIR ir, List blocks, int implicitNullCheckLimit) { + for (Block block : blocks) { + List list = ir.lir(block); + + if (!list.isEmpty()) { + + LIRInstruction lastInstruction = list.get(0); + for (int i = 0; i < list.size(); i++) { + LIRInstruction instruction = list.get(i); + + if (instruction instanceof ImplicitNullCheck && lastInstruction instanceof NullCheck) { + NullCheck nullCheck = (NullCheck) lastInstruction; + ImplicitNullCheck implicitNullCheck = (ImplicitNullCheck) instruction; + if (implicitNullCheck.makeNullCheckFor(nullCheck.getCheckedValue(), nullCheck.getState(), implicitNullCheckLimit)) { + list.remove(i - 1); + if (i < list.size()) { + instruction = list.get(i); + } + } + } + lastInstruction = instruction; + } + } + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/StandardOp.java --- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/StandardOp.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/StandardOp.java Thu Nov 21 15:04:54 2013 +0100 @@ -51,6 +51,16 @@ void setFallThroughTarget(LabelRef target); } + public interface NullCheck { + Value getCheckedValue(); + + LIRFrameState getState(); + } + + public interface ImplicitNullCheck { + boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit); + } + /** * LIR operation that defines the position of a label. The first operation of every block must * implement this interface. 
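The new NullCheckOptimizer pairs the two StandardOp interfaces introduced above: when an explicit NullCheck is immediately followed by a memory access implementing ImplicitNullCheck on the same base value, the explicit check can be deleted and the access carries the frame state instead, so the hardware trap raised by the access performs the null test for free. Below is a minimal, self-contained sketch of that peephole under illustrative assumptions: Op, ExplicitNullCheck and MemAccess are stand-in types, not Graal classes, and the real optimizer additionally requires address.index == Value.ILLEGAL and walks each block in code-emitting order.

import java.util.ArrayList;
import java.util.List;

public class NullCheckFoldingSketch {

    interface Op {
    }

    /** Stand-in for StandardOp.NullCheck: an explicit check of some base value. */
    static final class ExplicitNullCheck implements Op {
        final String checkedValue;

        ExplicitNullCheck(String checkedValue) {
            this.checkedValue = checkedValue;
        }
    }

    /** Stand-in for StandardOp.ImplicitNullCheck: a memory access that traps on a null base. */
    static final class MemAccess implements Op {
        final String base;
        final int displacement;
        boolean hasState; // true once the null-check frame state has been attached

        MemAccess(String base, int displacement) {
            this.base = base;
            this.displacement = displacement;
        }

        // Mirrors makeNullCheckFor: absorb the check only if this access dereferences
        // the same base at a small non-negative offset and carries no state yet.
        boolean makeNullCheckFor(String value, int implicitNullCheckLimit) {
            if (!hasState && base.equals(value) && displacement >= 0 && displacement < implicitNullCheckLimit) {
                hasState = true;
                return true;
            }
            return false;
        }
    }

    // Same shape as NullCheckOptimizer.foldNullChecks: whenever an explicit check
    // immediately precedes a foldable access, delete the explicit check.
    static void fold(List<Op> ops, int implicitNullCheckLimit) {
        for (int i = 1; i < ops.size(); i++) {
            if (ops.get(i - 1) instanceof ExplicitNullCheck && ops.get(i) instanceof MemAccess) {
                ExplicitNullCheck check = (ExplicitNullCheck) ops.get(i - 1);
                MemAccess access = (MemAccess) ops.get(i);
                if (access.makeNullCheckFor(check.checkedValue, implicitNullCheckLimit)) {
                    ops.remove(i - 1); // the trap on the access now performs the check
                    i--;
                }
            }
        }
    }

    public static void main(String[] args) {
        List<Op> block = new ArrayList<>();
        block.add(new ExplicitNullCheck("obj"));
        block.add(new MemAccess("obj", 8)); // e.g. a field load from obj
        fold(block, 4096);
        System.out.println(block.size() + " op(s) remain"); // prints: 1 op(s) remain
    }
}

Running main removes the explicit check and leaves only the access, mirroring what the optimizer does over the LIR of each block.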
diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/BinaryNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/BinaryNode.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/BinaryNode.java Thu Nov 21 15:04:54 2013 +0100 @@ -94,7 +94,7 @@ return IntegerArithmeticNode.add(graph, x, y); case Float: case Double: - return x.graph().unique(new FloatAddNode(x.kind(), x, y, false)); + return graph.unique(new FloatAddNode(x.kind(), x, y, false)); default: throw GraalInternalError.shouldNotReachHere(); } @@ -111,7 +111,7 @@ return IntegerArithmeticNode.sub(graph, x, y); case Float: case Double: - return x.graph().unique(new FloatSubNode(x.kind(), x, y, false)); + return graph.unique(new FloatSubNode(x.kind(), x, y, false)); default: throw GraalInternalError.shouldNotReachHere(); } @@ -128,7 +128,7 @@ return IntegerArithmeticNode.mul(graph, x, y); case Float: case Double: - return x.graph().unique(new FloatMulNode(x.kind(), x, y, false)); + return graph.unique(new FloatMulNode(x.kind(), x, y, false)); default: throw GraalInternalError.shouldNotReachHere(); } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java Thu Nov 21 15:04:54 2013 +0100 @@ -226,9 +226,9 @@ public static final OptionValue CanOmitFrame = new OptionValue<>(true); @Option(help = "") - public static final OptionValue MemoryAwareScheduling = new OptionValue<>(true); + public static final OptionValue MemoryAwareScheduling = new OptionValue<>(false); @Option(help = "") - public static final OptionValue NewMemoryAwareScheduling = new OptionValue<>(false); + public static final OptionValue NewMemoryAwareScheduling = new OptionValue<>(true); // Translating tableswitch instructions @Option(help = "") diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/graph/ReentrantBlockIterator.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/graph/ReentrantBlockIterator.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/graph/ReentrantBlockIterator.java Thu Nov 21 15:04:54 2013 +0100 @@ -76,7 +76,7 @@ apply(closure, start, closure.getInitialState(), null); } - private static IdentityHashMap apply(BlockIteratorClosure closure, Block start, StateT initialState, Set boundary) { + public static IdentityHashMap apply(BlockIteratorClosure closure, Block start, StateT initialState, Set boundary) { Deque blockQueue = new ArrayDeque<>(); /* * States are stored on EndNodes before merges, and on BeginNodes after ControlSplitNodes. 
@@ -173,6 +173,7 @@ mergedStates.add(states.get(end)); } state = closure.merge(current, mergedStates); + states.put(merge, state); } } } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java Thu Nov 21 15:04:54 2013 +0100 @@ -175,104 +175,164 @@ } } - private class NewMemoryScheduleClosure extends BlockIteratorClosure> { + private class KillSet implements Iterable { + private final Set set; + + public KillSet() { + this.set = new HashSet<>(); + } + + public KillSet(KillSet other) { + this.set = new HashSet<>(other.set); + } + + public void add(LocationIdentity locationIdentity) { + set.add(locationIdentity); + } + + public void addAll(KillSet other) { + set.addAll(other.set); + } + + public Iterator iterator() { + return set.iterator(); + } + + public boolean isKilled(LocationIdentity locationIdentity) { + return set.contains(locationIdentity); + } + + } + + private class NewMemoryScheduleClosure extends BlockIteratorClosure { + private Node excludeNode; + private Block upperBoundBlock; + + public NewMemoryScheduleClosure(Node excludeNode, Block upperBoundBlock) { + this.excludeNode = excludeNode; + this.upperBoundBlock = upperBoundBlock; + } + + public NewMemoryScheduleClosure() { + this(null, null); + } @Override - protected Map getInitialState() { - return cloneState(blockToKillMap.get(getCFG().getStartBlock())); + protected KillSet getInitialState() { + return cloneState(blockToKillSet.get(getCFG().getStartBlock())); + } + + @Override + protected KillSet processBlock(Block block, KillSet currentState) { + assert block != null; + currentState.addAll(computeKillSet(block, block == upperBoundBlock ? 
excludeNode : null)); + return currentState; } @Override - protected Map processBlock(Block block, Map currentState) { + protected KillSet merge(Block merge, List states) { + assert merge.getBeginNode() instanceof MergeNode; - if (block.getBeginNode() instanceof MergeNode) { - MergeNode mergeNode = (MergeNode) block.getBeginNode(); - for (PhiNode phi : mergeNode.usages().filter(PhiNode.class)) { - if (phi.type() == PhiType.Memory) { - LocationIdentity identity = phi.getIdentity(); - locationKilledBy(identity, phi, currentState); - } - } - } - currentState.putAll(blockToKillMapInit.get(block)); - - for (Node node : block.getNodes()) { - if (node instanceof MemoryCheckpoint.Single) { - LocationIdentity identity = ((MemoryCheckpoint.Single) node).getLocationIdentity(); - locationKilledBy(identity, node, currentState); - } else if (node instanceof MemoryCheckpoint.Multi) { - for (LocationIdentity identity : ((MemoryCheckpoint.Multi) node).getLocationIdentities()) { - locationKilledBy(identity, node, currentState); - } - } - assert MemoryCheckpoint.TypeAssertion.correctType(node); + KillSet initKillSet = new KillSet(); + for (KillSet state : states) { + initKillSet.addAll(state); } - blockToKillMap.put(block, currentState); - return cloneState(currentState); + return initKillSet; + } + + @Override + protected KillSet cloneState(KillSet state) { + return new KillSet(state); } - private void locationKilledBy(LocationIdentity identity, Node checkpoint, Map state) { - state.put(identity, checkpoint); - if (identity == ANY_LOCATION) { - for (LocationIdentity locid : state.keySet()) { - state.put(locid, checkpoint); + @Override + protected List processLoop(Loop loop, KillSet state) { + LoopInfo info = ReentrantBlockIterator.processLoop(this, loop, cloneState(state)); + + assert loop.header.getBeginNode() instanceof LoopBeginNode; + KillSet headerState = merge(loop.header, info.endStates); + + // second iteration, for propagating information to loop exits + info = ReentrantBlockIterator.processLoop(this, loop, cloneState(headerState)); + + return info.exitStates; + } + } + + /** + * gather all kill locations by iterating through the nodes assigned to a block. + * + * assumptions: {@link MemoryCheckpoint MemoryCheckPoints} are {@link FixedNode FixedNodes}. + * + * @param block block to analyze + * @param excludeNode if null, compute normal set of kill locations. if != null, don't add kills + * until we reach excludeNode.
+ * @return all killed locations + */ + private KillSet computeKillSet(Block block, Node excludeNode) { + // cache is only valid if we don't potentially exclude kills from the set + if (excludeNode == null) { + KillSet cachedSet = blockToKillSet.get(block); + if (cachedSet != null) { + return cachedSet; + } + } + + // add locations to excludedLocations until we reach the excluded node + boolean foundExcludeNode = excludeNode == null; + + KillSet set = new KillSet(); + KillSet excludedLocations = new KillSet(); + if (block.getBeginNode() instanceof MergeNode) { + MergeNode mergeNode = (MergeNode) block.getBeginNode(); + for (PhiNode phi : mergeNode.usages().filter(PhiNode.class)) { + if (phi.type() == PhiType.Memory) { + if (foundExcludeNode) { + set.add(phi.getIdentity()); + } else { + excludedLocations.add(phi.getIdentity()); + foundExcludeNode = phi == excludeNode; + } } } } - @Override - protected Map merge(Block merge, List> states) { - assert merge.getBeginNode() instanceof MergeNode; - MergeNode mergeNode = (MergeNode) merge.getBeginNode(); + AbstractBeginNode startNode = cfg.getStartBlock().getBeginNode(); + assert startNode instanceof StartNode; - Map initKillMap = new HashMap<>(); - for (Map state : states) { - for (LocationIdentity locid : state.keySet()) { - if (initKillMap.containsKey(locid)) { - if (!initKillMap.get(locid).equals(state.get(locid))) { - initKillMap.put(locid, mergeNode); - } - } else { - initKillMap.put(locid, state.get(locid)); - } + KillSet accm = foundExcludeNode ? set : excludedLocations; + for (Node node : block.getNodes()) { + if (!foundExcludeNode && node == excludeNode) { + foundExcludeNode = true; + } + if (node == startNode) { + continue; + } + if (node instanceof MemoryCheckpoint.Single) { + LocationIdentity identity = ((MemoryCheckpoint.Single) node).getLocationIdentity(); + accm.add(identity); + } else if (node instanceof MemoryCheckpoint.Multi) { + for (LocationIdentity identity : ((MemoryCheckpoint.Multi) node).getLocationIdentities()) { + accm.add(identity); } } + assert MemoryCheckpoint.TypeAssertion.correctType(node); - mergeToKillMap.set(mergeNode, cloneState(initKillMap)); - return initKillMap; + if (foundExcludeNode) { + accm = set; + } } - @Override - protected Map cloneState(Map state) { - return new HashMap<>(state); - } - - @Override - protected List> processLoop(Loop loop, Map state) { - LoopInfo> info = ReentrantBlockIterator.processLoop(this, loop, cloneState(state)); - - assert loop.header.getBeginNode() instanceof LoopBeginNode; - Map headerState = merge(loop.header, info.endStates); - // second iteration, for computing information at loop exits - info = ReentrantBlockIterator.processLoop(this, loop, cloneState(headerState)); + // merge it for the cache entry + excludedLocations.addAll(set); + blockToKillSet.put(block, excludedLocations); - int i = 0; - for (Block exit : loop.exits) { - Map exitState = info.exitStates.get(i++); + return set; + } - Node begin = exit.getBeginNode(); - assert begin instanceof LoopExitNode; - for (Node usage : begin.usages()) { - if (usage instanceof ProxyNode && ((ProxyNode) usage).type() == PhiType.Memory) { - ProxyNode proxy = (ProxyNode) usage; - LocationIdentity identity = proxy.getIdentity(); - locationKilledBy(identity, proxy, exitState); - } - } - } - return info.exitStates; - } + private KillSet computeKillSet(Block block) { + return computeKillSet(block, null); } private ControlFlowGraph cfg; @@ -282,9 +342,7 @@ * Map from blocks to the nodes in each block. 
*/ private BlockMap> blockToNodesMap; - private BlockMap> blockToKillMapInit; - private BlockMap> blockToKillMap; - private NodeMap> mergeToKillMap; + private BlockMap blockToKillSet; private final Map> phantomUsages = new IdentityHashMap<>(); private final Map> phantomInputs = new IdentityHashMap<>(); private final SchedulingStrategy selectedStrategy; @@ -323,8 +381,7 @@ assignBlockToNodes(graph, SchedulingStrategy.EARLIEST); sortNodesWithinBlocks(graph, SchedulingStrategy.EARLIEST); - MemoryScheduleClosure closure = new MemoryScheduleClosure(); - ReentrantBlockIterator.apply(closure, getCFG().getStartBlock()); + ReentrantBlockIterator.apply(new MemoryScheduleClosure(), getCFG().getStartBlock()); cfg.clearNodeToBlock(); blockToNodesMap = new BlockMap<>(cfg); @@ -333,31 +390,7 @@ sortNodesWithinBlocks(graph, selectedStrategy); printSchedule("after sorting nodes within blocks"); } else if (memsched == MemoryScheduling.OPTIMAL && selectedStrategy != SchedulingStrategy.EARLIEST && graph.getNodes(FloatingReadNode.class).isNotEmpty()) { - mergeToKillMap = graph.createNodeMap(); - - blockToKillMapInit = new BlockMap<>(cfg); - blockToKillMap = new BlockMap<>(cfg); - for (Block b : cfg.getBlocks()) { - blockToKillMapInit.put(b, new HashMap()); - blockToKillMap.put(b, new HashMap()); - } - - // initialize killMaps with lastLocationAccess - for (FloatingReadNode n : graph.getNodes(FloatingReadNode.class)) { - if (n.location().getLocationIdentity() == FINAL_LOCATION) { - continue; - } - Node first = n.getLastLocationAccess(); - assert first != null; - - Map killMap = blockToKillMapInit.get(forKillLocation(first)); - killMap.put(n.location().getLocationIdentity(), first); - } - - // distribute and compute killMaps for all blocks - NewMemoryScheduleClosure closure = new NewMemoryScheduleClosure(); - ReentrantBlockIterator.apply(closure, getCFG().getStartBlock()); - printSchedule("after computing killMaps"); + blockToKillSet = new BlockMap<>(cfg); assignBlockToNodes(graph, selectedStrategy); printSchedule("after assign nodes to blocks"); @@ -370,46 +403,43 @@ } } - private Block forKillLocation(Node n) { + private Block blockForFixedNode(Node n) { Block b = cfg.getNodeToBlock().get(n); assert b != null : "all lastAccess locations should have a block assignment from CFG"; return b; } private void printSchedule(String desc) { - Debug.printf("=== %s / %s / %s (%s) ===\n", getCFG().getStartBlock().getBeginNode().graph(), selectedStrategy, memsched, desc); - for (Block b : getCFG().getBlocks()) { - Debug.printf("==== b: %s (loopDepth: %s). ", b, b.getLoopDepth()); - Debug.printf("dom: %s. ", b.getDominator()); - Debug.printf("post-dom: %s. ", b.getPostdominator()); - Debug.printf("preds: %s. ", b.getPredecessors()); - Debug.printf("succs: %s ====\n", b.getSuccessors()); - BlockMap> killMaps = blockToKillMap; - if (killMaps != null) { - if (b.getBeginNode() instanceof MergeNode) { - MergeNode merge = (MergeNode) b.getBeginNode(); - Debug.printf("M merge kills: \n"); - for (LocationIdentity locId : mergeToKillMap.get(merge).keySet()) { - Debug.printf("M %s killed by %s\n", locId, mergeToKillMap.get(merge).get(locId)); + if (Debug.isEnabled()) { + Debug.printf("=== %s / %s / %s (%s) ===\n", getCFG().getStartBlock().getBeginNode().graph(), selectedStrategy, memsched, desc); + for (Block b : getCFG().getBlocks()) { + Debug.printf("==== b: %s (loopDepth: %s). ", b, b.getLoopDepth()); + Debug.printf("dom: %s. ", b.getDominator()); + Debug.printf("post-dom: %s. ", b.getPostdominator()); + Debug.printf("preds: %s. 
", b.getPredecessors()); + Debug.printf("succs: %s ====\n", b.getSuccessors()); + BlockMap killSets = blockToKillSet; + if (killSets != null) { + Debug.printf("X block kills: \n"); + if (killSets.get(b) != null) { + for (LocationIdentity locId : killSets.get(b)) { + Debug.printf("X %s killed by %s\n", locId, "dunno anymore"); + } } } - Debug.printf("X block kills: \n"); - for (LocationIdentity locId : killMaps.get(b).keySet()) { - Debug.printf("X %s killed by %s\n", locId, killMaps.get(b).get(locId)); + + if (blockToNodesMap.get(b) != null) { + for (Node n : nodesFor(b)) { + printNode(n); + } + } else { + for (Node n : b.getNodes()) { + printNode(n); + } } } - - if (blockToNodesMap.get(b) != null) { - for (Node n : nodesFor(b)) { - printNode(n); - } - } else { - for (Node n : b.getNodes()) { - printNode(n); - } - } + Debug.printf("\n\n"); } - Debug.printf("\n\n"); } private static void printNode(Node n) { @@ -487,22 +517,42 @@ } Block earliestBlock = earliestBlock(node); - Block block; + Block block = null; + Block latest = null; switch (strategy) { case EARLIEST: block = earliestBlock; break; case LATEST: case LATEST_OUT_OF_LOOPS: - if (memsched == MemoryScheduling.OPTIMAL && node instanceof FloatingReadNode && ((FloatingReadNode) node).location().getLocationIdentity() != FINAL_LOCATION) { - block = optimalBlock((FloatingReadNode) node, strategy); + boolean scheduleRead = memsched == MemoryScheduling.OPTIMAL && node instanceof FloatingReadNode && ((FloatingReadNode) node).location().getLocationIdentity() != FINAL_LOCATION; + if (scheduleRead) { + FloatingReadNode read = (FloatingReadNode) node; + block = optimalBlock(read, strategy); + Debug.printf("schedule for %s: %s\n", read, block); + assert earliestBlock.dominates(block) : String.format("%s (%s) cannot be scheduled before earliest schedule (%s). location: %s", read, block, earliestBlock, + read.getLocationIdentity()); } else { block = latestBlock(node, strategy); - if (block == null) { - block = earliestBlock; - } else if (strategy == SchedulingStrategy.LATEST_OUT_OF_LOOPS && !(node instanceof VirtualObjectNode)) { - // schedule at the latest position possible in the outermost loop possible - block = scheduleOutOfLoops(node, block, earliestBlock); + } + if (block == null) { + block = earliestBlock; + } else if (strategy == SchedulingStrategy.LATEST_OUT_OF_LOOPS && !(node instanceof VirtualObjectNode)) { + // schedule at the latest position possible in the outermost loop possible + latest = block; + block = scheduleOutOfLoops(node, block, earliestBlock); + } + + if (assertionEnabled()) { + if (scheduleRead) { + FloatingReadNode read = (FloatingReadNode) node; + Node lastLocationAccess = read.getLastLocationAccess(); + Block upperBound = blockForFixedNode(lastLocationAccess); + if (!blockForFixedNode(lastLocationAccess).dominates(block)) { + assert false : String.format("out of loop movement voilated memory semantics for %s (location %s). moved to %s but upper bound is %s (earliest: %s, latest: %s)", read, + read.getLocationIdentity(), block, upperBound, earliestBlock, latest); + } + } } break; @@ -516,11 +566,19 @@ blockToNodesMap.get(block).add(node); } + @SuppressWarnings("all") + private static boolean assertionEnabled() { + boolean enabled = false; + assert enabled = true; + return enabled; + } + /** - * this method tries to find the latest position for a read, by taking the information gathered - * by {@link NewMemoryScheduleClosure} into account. 
+ * this method tries to find the "optimal" schedule for a read, by pushing it down towards its + * latest schedule starting from the earliest schedule. By doing this, it takes care of memory + * dependencies using kill sets. * - * The idea is to iterate the dominator tree starting with the latest schedule of the read. + * In terms of domination relation, it looks like this: * *
 * <pre>
 *    U      upperbound block, defined by last access location of the floating read
@@ -532,10 +590,7 @@
 *    L      latest block
 * </pre>
    * - * i.e. upperbound `dom` earliest `dom` optimal `dom` latest. However, there're - * cases where earliest `dom` optimal is not true, because the position is - * (impliclitly) bounded by an anchor of the read's guard. In such cases, the earliest schedule - * is taken. + * i.e. upperbound `dom` earliest `dom` optimal `dom` latest. * */ private Block optimalBlock(FloatingReadNode n, SchedulingStrategy strategy) { @@ -544,76 +599,105 @@ LocationIdentity locid = n.location().getLocationIdentity(); assert locid != FINAL_LOCATION; - Node upperBound = n.getLastLocationAccess(); - Block upperBoundBlock = forKillLocation(upperBound); + Block upperBoundBlock = blockForFixedNode(n.getLastLocationAccess()); Block earliestBlock = earliestBlock(n); assert upperBoundBlock.dominates(earliestBlock) : "upper bound (" + upperBoundBlock + ") should dominate earliest (" + earliestBlock + ")"; - Block currentBlock = latestBlock(n, strategy); - assert currentBlock != null && earliestBlock.dominates(currentBlock) : "earliest (" + earliestBlock + ") should dominate latest block (" + currentBlock + ")"; - Block previousBlock = currentBlock; + Block latestBlock = latestBlock(n, strategy); + assert latestBlock != null && earliestBlock.dominates(latestBlock) : "earliest (" + earliestBlock + ") should dominate latest block (" + latestBlock + ")"; - Debug.printf("processing %s (accessing %s): latest %s, earliest %s, upper bound %s (%s)\n", n, locid, currentBlock, earliestBlock, upperBoundBlock, upperBound); + Debug.printf("processing %s (accessing %s): latest %s, earliest %s, upper bound %s (%s)\n", n, locid, latestBlock, earliestBlock, upperBoundBlock, n.getLastLocationAccess()); + if (earliestBlock == latestBlock) { + // read is fixed to this block, nothing to schedule + return latestBlock; + } + + Stack path = computePathInDominatorTree(earliestBlock, latestBlock); + Debug.printf("|path| is %d: %s\n", path.size(), path); - int iterations = 0; - // iterate the dominator tree - while (true) { - iterations++; - Node lastKill = blockToKillMap.get(currentBlock).get(locid); - assert lastKill != null : "should be never null, due to init of killMaps: " + currentBlock + ", location: " + locid; + // follow path, start at earliest schedule + while (path.size() > 0) { + Block currentBlock = path.pop(); + Block dominatedBlock = path.size() == 0 ? null : path.peek(); + if (dominatedBlock != null && !currentBlock.getSuccessors().contains(dominatedBlock)) { + // the dominated block is not a successor -> we have a split + assert dominatedBlock.getBeginNode() instanceof MergeNode; + + HashSet region = computeRegion(currentBlock, dominatedBlock); + Debug.printf("> merge. %s: region for %s -> %s: %s\n", n, currentBlock, dominatedBlock, region); + + NewMemoryScheduleClosure closure = null; + if (currentBlock == upperBoundBlock) { + assert earliestBlock == upperBoundBlock; + // don't treat lastLocationAccess node as a kill for this read. 
+ closure = new NewMemoryScheduleClosure(n.getLastLocationAccess(), upperBoundBlock); + } else { + closure = new NewMemoryScheduleClosure(); + } + Map states; + states = ReentrantBlockIterator.apply(closure, currentBlock, new KillSet(), region); - if (lastKill.equals(upperBound)) { - // assign node to the block which kills the location - - boolean outOfLoop = false; + KillSet mergeState = states.get(dominatedBlock.getBeginNode()); + if (mergeState.isKilled(locid)) { + // location got killed somewhere in the branches, + // thus we've to move the read above it + return currentBlock; + } + } else { + if (currentBlock == upperBoundBlock) { + assert earliestBlock == upperBoundBlock; + KillSet ks = computeKillSet(upperBoundBlock, n.getLastLocationAccess()); + if (ks.isKilled(locid)) { + return upperBoundBlock; + } + } else if (dominatedBlock == null || computeKillSet(currentBlock).isKilled(locid)) { + return currentBlock; + } + } + } + assert false : "should have found a block for " + n; + return null; + } - // schedule read out of the loop if possible, in terms of killMaps and earliest - // schedule - if (currentBlock != earliestBlock && previousBlock != earliestBlock) { - Block t = currentBlock; - while (t.getLoop() != null && t.getDominator() != null && earliestBlock.dominates(t)) { - Block dom = t.getDominator(); - if (dom.getLoopDepth() < currentBlock.getLoopDepth() && blockToKillMap.get(dom).get(locid) == upperBound && earliestBlock.dominates(dom)) { - printIterations(iterations, "moved out of loop, from " + currentBlock + " to " + dom); - previousBlock = currentBlock = dom; - outOfLoop = true; - } - t = dom; + /** + * compute path in dominator tree from earliest schedule to latest schedule. + * + * @return the order of the stack is such as the first element is the earliest schedule. + */ + private static Stack computePathInDominatorTree(Block earliestBlock, Block latestBlock) { + Stack path = new Stack<>(); + Block currentBlock = latestBlock; + while (currentBlock != null && earliestBlock.dominates(currentBlock)) { + path.push(currentBlock); + currentBlock = currentBlock.getDominator(); + } + assert path.peek() == earliestBlock; + return path; + } + + /** + * compute a set that contains all blocks in a region spanned by dominatorBlock and + * dominatedBlock (exclusive the dominatedBlock). + */ + private static HashSet computeRegion(Block dominatorBlock, Block dominatedBlock) { + HashSet region = new HashSet<>(); + Stack workList = new Stack<>(); + + region.add(dominatorBlock); + workList.addAll(0, dominatorBlock.getSuccessors()); + while (workList.size() > 0) { + Block current = workList.pop(); + if (current != dominatedBlock) { + region.add(current); + for (Block b : current.getSuccessors()) { + if (!region.contains(b) && !workList.contains(b)) { + workList.add(b); } } - - if (!outOfLoop && previousBlock.getBeginNode() instanceof MergeNode) { - // merges kill locations right at the beginning of a block. if a merge is the - // killing node, we assign it to the dominating block. - - MergeNode merge = (MergeNode) previousBlock.getBeginNode(); - Node killer = mergeToKillMap.get(merge).get(locid); - - if (killer != null && killer == merge) { - printIterations(iterations, "kill by merge: " + currentBlock); - return currentBlock; - } - } - - // current block matches last access, that means the previous (dominated) block - // kills the location, therefore schedule read to previous block. 
- printIterations(iterations, "regular kill: " + previousBlock); - return previousBlock; } - - if (upperBoundBlock == currentBlock) { - printIterations(iterations, "upper bound: " + currentBlock + ", previous: " + previousBlock); - return currentBlock; - } - - previousBlock = currentBlock; - currentBlock = currentBlock.getDominator(); - assert currentBlock != null; } - } - - private static void printIterations(int iterations, String desc) { - Debug.printf("iterations: %d, %s\n", iterations, desc); + assert !region.contains(dominatedBlock) && region.containsAll(dominatedBlock.getPredecessors()); + return region; } /** @@ -722,6 +806,16 @@ return earliest; } + /** + * Schedules a node out of loop based on its earliest schedule. Note that this movement is + * only valid if it is applied to every other node in the schedule as well. + * + * @param n Node to schedule + * @param latestBlock latest possible schedule for {@code n} + * @param earliest earliest possible schedule for {@code n} + * @return block schedule for {@code n} which is not inside a loop (if possible) + */ private static Block scheduleOutOfLoops(Node n, Block latestBlock, Block earliest) { if (latestBlock == null) { throw new SchedulingError("no latest : %s", n); diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/ReadRegisterNode.java --- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/ReadRegisterNode.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/ReadRegisterNode.java Thu Nov 21 15:04:54 2013 +0100 @@ -56,6 +56,7 @@ public ReadRegisterNode(Register register, Kind kind, boolean directUse, boolean incoming) { super(StampFactory.forKind(kind)); + assert register != null; this.register = register; this.directUse = directUse; this.incoming = incoming; @@ -67,6 +68,7 @@ */ public ReadRegisterNode(Register register, boolean directUse, boolean incoming) { super(StampFactory.forNodeIntrinsic()); + assert register != null; this.register = register; this.directUse = directUse; this.incoming = incoming; diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/substitutions/CompilerDirectivesSubstitutions.java --- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/substitutions/CompilerDirectivesSubstitutions.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/substitutions/CompilerDirectivesSubstitutions.java Thu Nov 21 15:04:54 2013 +0100 @@ -44,6 +44,11 @@ } @MethodSubstitution + public static void transferToInterpreterAndInvalidate() { + DeoptimizeNode.deopt(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.UnreachedCode); + } + + @MethodSubstitution public static boolean inInterpreter() { return false; } diff -r 790ebab62d23 -r f9f4503a4ab5 graal/com.oracle.truffle.api/src/com/oracle/truffle/api/CompilerDirectives.java --- a/graal/com.oracle.truffle.api/src/com/oracle/truffle/api/CompilerDirectives.java Thu Nov 21 15:04:26 2013 +0100 +++ b/graal/com.oracle.truffle.api/src/com/oracle/truffle/api/CompilerDirectives.java Thu Nov 21 15:04:54 2013 +0100 @@ -68,6 +68,13 @@ } /** + * Directive for the compiler to discontinue compilation at this code position and instead + * insert a transfer to the interpreter, invalidating the currently executing machine code. 
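A directive like this is typically placed on slow paths of Truffle interpreters so that rarely taken branches never get compiled, and already-compiled code is thrown away and reprofiled the first time such a branch is actually hit. A minimal hypothetical usage sketch (the node class and its overflow bookkeeping are invented for illustration; only CompilerDirectives itself comes from this changeset):

    import com.oracle.truffle.api.CompilerDirectives;

    // Invented example node: deoptimizes the first time integer addition overflows.
    class AddNodeSketch {
        private boolean seenOverflow = false;

        int executeInt(int a, int b) {
            int result = a + b;
            if (!seenOverflow && ((a ^ result) & (b ^ result)) < 0) {
                // Uncommon path: discard the current machine code, continue in
                // the interpreter, and reprofile before the next compilation.
                CompilerDirectives.transferToInterpreterAndInvalidate();
                seenOverflow = true;
            }
            return result;
        }
    }

As the substitution earlier in this changeset shows, the call maps to DeoptimizeNode.deopt(InvalidateReprofile, UnreachedCode), so the invalidation also resets the profile before recompilation.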
+ */ + public static void transferToInterpreterAndInvalidate() { + } + + /** * Returns a boolean value indicating whether the method is executed in the interpreter. * * @return {@code true} when executed in the interpreter, {@code false} in compiled code. diff -r 790ebab62d23 -r f9f4503a4ab5 make/Makefile --- a/make/Makefile Thu Nov 21 15:04:26 2013 +0100 +++ b/make/Makefile Thu Nov 21 15:04:54 2013 +0100 @@ -358,6 +358,11 @@ $(install-file) $(EXPORT_SERVER_DIR)/64/%.diz: $(C2_BUILD_DIR)/%.diz $(install-file) +# MacOS X +$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM + $(install-dir) +$(EXPORT_SERVER_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM + $(install-dir) # Graal # Common @@ -401,6 +406,12 @@ $(install-file) $(EXPORT_SERVER_DIR)/64/%.diz: $(GRAAL_BUILD_DIR)/%.diz $(install-file) +# MacOS X +$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(GRAAL_BUILD_DIR)/%.dSYM + $(install-dir) +$(EXPORT_SERVER_DIR)/%.dSYM: $(GRAAL_BUILD_DIR)/%.dSYM + $(install-dir) + endif # Client (C1) @@ -446,6 +457,11 @@ $(install-file) $(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_BUILD_DIR)/%.diz $(install-file) +# MacOS X +$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM + $(install-dir) +$(EXPORT_CLIENT_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM + $(install-dir) endif # Minimal1 @@ -491,6 +507,7 @@ $(install-file) $(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz $(install-file) +# MacOS X does not support Minimal1 config endif # Zero @@ -513,6 +530,11 @@ $(install-file) $(EXPORT_SERVER_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz $(install-file) +# MacOS X +$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM + $(install-dir) +$(EXPORT_SERVER_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM + $(install-dir) endif # Shark @@ -535,6 +557,11 @@ $(install-file) $(EXPORT_SERVER_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz $(install-file) +# MacOS X +$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM + $(install-dir) +$(EXPORT_SERVER_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM + $(install-dir) endif $(EXPORT_JRE_LIB_DIR)/%.jar: $(SHARED_DIR)/%.jar diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/Makefile --- a/make/bsd/Makefile Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/Makefile Thu Nov 21 15:04:54 2013 +0100 @@ -206,6 +206,7 @@ BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX) BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) +BUILDTREE_VARS += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE) BUILDTREE = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS) @@ -351,9 +352,11 @@ # Doc target. This is the same for all build options. # Hence create a docs directory beside ...$(ARCH)_[...] +# We specify 'BUILD_FLAVOR=product' so that the proper +# ENABLE_FULL_DEBUG_SYMBOLS value is used. docs: checks $(QUIETLY) mkdir -p $(SUBDIR_DOCS) - $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs + $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs # Synonyms for win32-like targets. 
compiler2: debug product diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/makefiles/buildtree.make --- a/make/bsd/makefiles/buildtree.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/makefiles/buildtree.make Thu Nov 21 15:04:54 2013 +0100 @@ -271,6 +271,16 @@ echo "$(call gamma-path,commonsrc,gpu)"; \ [ -n "$(CFLAGS_BROWSE)" ] && \ echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \ + [ -n "$(ENABLE_FULL_DEBUG_SYMBOLS)" ] && \ + echo && echo "ENABLE_FULL_DEBUG_SYMBOLS = $(ENABLE_FULL_DEBUG_SYMBOLS)"; \ + [ -n "$(OBJCOPY)" ] && \ + echo && echo "OBJCOPY = $(OBJCOPY)"; \ + [ -n "$(STRIP_POLICY)" ] && \ + echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \ + [ -n "$(ZIP_DEBUGINFO_FILES)" ] && \ + echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \ + [ -n "$(ZIPEXE)" ] && \ + echo && echo "ZIPEXE = $(ZIPEXE)"; \ [ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \ echo && \ echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \ diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/makefiles/defs.make --- a/make/bsd/makefiles/defs.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/makefiles/defs.make Thu Nov 21 15:04:54 2013 +0100 @@ -136,10 +136,127 @@ endif endif +OS_VENDOR:=$(shell uname -s) + +# determine if HotSpot is being built in JDK6 or earlier version +JDK6_OR_EARLIER=0 +ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1" + # if the longer variable names (newer build style) are set, then check those + ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1" + JDK6_OR_EARLIER=1 + endif +else + # the longer variables aren't set so check the shorter variable names + ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1" + JDK6_OR_EARLIER=1 + endif +endif + +ifeq ($(JDK6_OR_EARLIER),0) + # Full Debug Symbols is supported on JDK7 or newer. + # The Full Debug Symbols (FDS) default for BUILD_FLAVOR == product + # builds is enabled with debug info files ZIP'ed to save space. For + # BUILD_FLAVOR != product builds, FDS is always enabled, after all a + # debug build without debug info isn't very useful. + # The ZIP_DEBUGINFO_FILES option only has meaning when FDS is enabled. + # + # If you invoke a build with FULL_DEBUG_SYMBOLS=0, then FDS will be + # disabled for a BUILD_FLAVOR == product build. + # + # Note: Use of a different variable name for the FDS override option + # versus the FDS enabled check is intentional (FULL_DEBUG_SYMBOLS + # versus ENABLE_FULL_DEBUG_SYMBOLS). For auto build systems that pass + # in options via environment variables, use of distinct variables + # prevents strange behaviours. For example, in a BUILD_FLAVOR != + # product build, the FULL_DEBUG_SYMBOLS environment variable will be + # 0, but the ENABLE_FULL_DEBUG_SYMBOLS make variable will be 1. If + # the same variable name is used, then different values can be picked + # up by different parts of the build. Just to be clear, we only need + # two variable names because the incoming option value can be + # overridden in some situations, e.g., a BUILD_FLAVOR != product + # build. + + # Due to the multiple sub-make processes that occur this logic gets + # executed multiple times. We reduce the noise by at least checking that + # BUILD_FLAVOR has been set. 
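The FULL_DEBUG_SYMBOLS versus ENABLE_FULL_DEBUG_SYMBOLS split described in the comment block above boils down to a small decision rule. For illustration only, here is the same rule written out in Java (a sketch; the class and method are invented, the build itself keeps this logic in make):

    // Mirrors the makefile logic above: an incoming FULL_DEBUG_SYMBOLS=0 request
    // is only honoured for product builds; debug flavors always get full symbols.
    final class FdsPolicySketch {
        static boolean enableFullDebugSymbols(String buildFlavor, Boolean fullDebugSymbolsRequest) {
            if (!"product".equals(buildFlavor)) {
                return true; // a debug build without debug info isn't very useful
            }
            // product: enabled by default, disabled only on explicit request
            return fullDebugSymbolsRequest == null || fullDebugSymbolsRequest;
        }

        public static void main(String[] args) {
            System.out.println(enableFullDebugSymbols("product", Boolean.FALSE));   // false
            System.out.println(enableFullDebugSymbols("fastdebug", Boolean.FALSE)); // true, request ignored
        }
    }

Keeping the override variable distinct from the enabled flag means an exported FULL_DEBUG_SYMBOLS=0 in the environment cannot silently disable symbols for a non-product sub-make.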
+ ifneq ($(BUILD_FLAVOR),) + ifeq ($(BUILD_FLAVOR), product) + FULL_DEBUG_SYMBOLS ?= 1 + ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS) + else + # debug variants always get Full Debug Symbols (if available) + ENABLE_FULL_DEBUG_SYMBOLS = 1 + endif + _JUNK_ := $(shell \ + echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)") + # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later + + ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(OS_VENDOR), Darwin) + # MacOS X doesn't use OBJCOPY or STRIP_POLICY + OBJCOPY= + STRIP_POLICY= + ZIP_DEBUGINFO_FILES ?= 1 + else + # Default OBJCOPY comes from GNU Binutils on BSD + ifeq ($(CROSS_COMPILE_ARCH),) + DEF_OBJCOPY=/usr/bin/objcopy + else + # Assume objcopy is part of the cross-compilation toolset + ifneq ($(ALT_COMPILER_PATH),) + DEF_OBJCOPY=$(ALT_COMPILER_PATH)/objcopy + endif + endif + OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY)) + ifneq ($(ALT_OBJCOPY),) + _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)") + OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY)) + endif + + ifeq ($(OBJCOPY),) + _JUNK_ := $(shell \ + echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo" \ + "files. You may need to set ALT_OBJCOPY.") + ENABLE_FULL_DEBUG_SYMBOLS=0 + _JUNK_ := $(shell \ + echo >&2 "INFO:" \ + "ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)") + else + _JUNK_ := $(shell \ + echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo" \ + "files.") + + # Library stripping policies for .debuginfo configs: + # all_strip - strips everything from the library + # min_strip - strips most stuff from the library; leaves + # minimum symbols + # no_strip - does not strip the library at all + # + # Oracle security policy requires "all_strip". A waiver was + # granted on 2011.09.01 that permits using "min_strip" in the + # Java JDK and Java JRE. + # + # Currently, STRIP_POLICY is only used when Full Debug Symbols + # is enabled. 
+ # + STRIP_POLICY ?= min_strip + + _JUNK_ := $(shell \ + echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)") + + ZIP_DEBUGINFO_FILES ?= 1 + endif + + _JUNK_ := $(shell \ + echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)") + endif + endif # ENABLE_FULL_DEBUG_SYMBOLS=1 + endif # BUILD_FLAVOR +endif # JDK_6_OR_EARLIER + JDK_INCLUDE_SUBDIR=bsd # Library suffix -OS_VENDOR:=$(shell uname -s) ifeq ($(OS_VENDOR),Darwin) LIBRARY_SUFFIX=dylib else @@ -150,6 +267,19 @@ # client and server subdirectories have symbolic links to ../libjsig.so EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX) + +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(ZIP_DEBUGINFO_FILES),1) + EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz + else + ifeq ($(OS_VENDOR), Darwin) + EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM + else + EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo + endif + endif +endif + EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal @@ -157,34 +287,76 @@ ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true) EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX) + + ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(ZIP_DEBUGINFO_FILES),1) + EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz + else + ifeq ($(OS_VENDOR), Darwin) + EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM + else + EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo + endif + endif + endif endif ifeq ($(JVM_VARIANT_CLIENT),true) EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX) + + ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(ZIP_DEBUGINFO_FILES),1) + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz + else + ifeq ($(OS_VENDOR), Darwin) + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM + else + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo + endif + endif + endif endif ifeq ($(JVM_VARIANT_MINIMAL1),true) EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/Xusage.txt EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.$(LIBRARY_SUFFIX) - - ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) - ifeq ($(ZIP_DEBUGINFO_FILES),1) - EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.diz - else - EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo - endif - endif endif # Serviceability Binaries # No SA Support for PPC, IA64, ARM or zero ADD_SA_BINARIES/x86 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ $(EXPORT_LIB_DIR)/sa-jdi.jar + +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(ZIP_DEBUGINFO_FILES),1) + ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz + else + ifeq ($(OS_VENDOR), Darwin) + ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM + else + ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo + endif + endif +endif + ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ $(EXPORT_LIB_DIR)/sa-jdi.jar ADD_SA_BINARIES/universal = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ $(EXPORT_LIB_DIR)/sa-jdi.jar + +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(ZIP_DEBUGINFO_FILES),1) + ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz + else + ifeq ($(OS_VENDOR), Darwin) + ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM + else + 
ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo + endif + endif +endif + ADD_SA_BINARIES/ppc = ADD_SA_BINARIES/ia64 = ADD_SA_BINARIES/arm = @@ -225,6 +397,19 @@ # Files to simply copy in place UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt + ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(ZIP_DEBUGINFO_FILES),1) + UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.diz + UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.diz + UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.diz + UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.diz + else + UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX).dSYM + UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX).dSYM + UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM + UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM + endif + endif endif endif diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/makefiles/dtrace.make --- a/make/bsd/makefiles/dtrace.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/makefiles/dtrace.make Thu Nov 21 15:04:54 2013 +0100 @@ -39,9 +39,15 @@ JVM_DB = libjvm_db LIBJVM_DB = libjvm_db.dylib +LIBJVM_DB_DEBUGINFO = libjvm_db.dylib.dSYM +LIBJVM_DB_DIZ = libjvm_db.diz + JVM_DTRACE = jvm_dtrace LIBJVM_DTRACE = libjvm_dtrace.dylib +LIBJVM_DTRACE_DEBUGINFO = libjvm_dtrace.dylib.dSYM +LIBJVM_DTRACE_DIZ = libjvm_dtrace.diz + JVMOFFS = JvmOffsets JVMOFFS.o = $(JVMOFFS).o GENOFFS = generate$(JVMOFFS) @@ -76,21 +82,87 @@ # Making 64/libjvm_db.so: 64-bit version of libjvm_db.so which handles 32-bit libjvm.so ifneq ("${ISA}","${BUILDARCH}") -XLIBJVM_DB = 64/$(LIBJVM_DB) -XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE) +XLIBJVM_DIR = 64 +XLIBJVM_DB = $(XLIBJVM_DIR)/$(LIBJVM_DB) +XLIBJVM_DTRACE = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE) XARCH = $(subst sparcv9,v9,$(shell echo $(ISA))) +XLIBJVM_DB_DEBUGINFO = $(XLIBJVM_DIR)/$(LIBJVM_DB_DEBUGINFO) +XLIBJVM_DB_DIZ = $(XLIBJVM_DIR)/$(LIBJVM_DB_DIZ) +XLIBJVM_DTRACE_DEBUGINFO = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO) +XLIBJVM_DTRACE_DIZ = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ) + $(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE) @echo Making $@ - $(QUIETLY) mkdir -p 64/ ; \ + $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \ $(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. 
-I$(GENERATED) \ $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c #-lc +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(OS_VENDOR), Darwin) + $(DSYMUTIL) $@ + ifeq ($(ZIP_DEBUGINFO_FILES),1) + # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) + # is not in the archived name: + ( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) ) + $(RM) -r $(XLIBJVM_DB_DEBUGINFO) + endif + else + $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO) + # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) + # is not in the link name: + $(QUIETLY) ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) ) + ifeq ($(STRIP_POLICY),all_strip) + $(QUIETLY) $(STRIP) $@ + else + ifeq ($(STRIP_POLICY),min_strip) + $(QUIETLY) $(STRIP) -x $@ + # implied else here is no stripping at all + endif + endif + ifeq ($(ZIP_DEBUGINFO_FILES),1) + # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) + # is not in the archived name: + ( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) ) + $(RM) $(XLIBJVM_DB_DEBUGINFO) + endif + endif +endif $(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE) @echo Making $@ - $(QUIETLY) mkdir -p 64/ ; \ + $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \ $(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. \ $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(OS_VENDOR), Darwin) + $(DSYMUTIL) $@ + ifeq ($(ZIP_DEBUGINFO_FILES),1) + # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) + # is not in the archived name: + ( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) ) + $(RM) -r $(XLIBJVM_DTRACE_DEBUGINFO) + endif + else + $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO) + # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) + # is not in the link name: + ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) ) + ifeq ($(STRIP_POLICY),all_strip) + $(QUIETLY) $(STRIP) $@ + else + ifeq ($(STRIP_POLICY),min_strip) + $(QUIETLY) $(STRIP) -x $@ + # implied else here is no stripping at all + endif + endif + ifeq ($(ZIP_DEBUGINFO_FILES),1) + # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) + # is not in the archived name: + ( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) ) + $(RM) $(XLIBJVM_DTRACE_DEBUGINFO) + endif + endif +endif endif # ifneq ("${ISA}","${BUILDARCH}") @@ -134,11 +206,59 @@ @echo Making $@ $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. 
-I$(GENERATED) \ $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -Wall # -lc +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(OS_VENDOR), Darwin) + $(DSYMUTIL) $@ + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) + $(RM) -r $(LIBJVM_DB_DEBUGINFO) + endif + else + $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO) + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@ + ifeq ($(STRIP_POLICY),all_strip) + $(QUIETLY) $(STRIP) $@ + else + ifeq ($(STRIP_POLICY),min_strip) + $(QUIETLY) $(STRIP) -x $@ + # implied else here is no stripping at all + endif + endif + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) + $(RM) $(LIBJVM_DB_DEBUGINFO) + endif + endif +endif $(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE) @echo Making $@ $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \ $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(OS_VENDOR), Darwin) + $(DSYMUTIL) $@ + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) + $(RM) -r $(LIBJVM_DTRACE_DEBUGINFO) + endif + else + $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO) + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@ + ifeq ($(STRIP_POLICY),all_strip) + $(QUIETLY) $(STRIP) $@ + else + ifeq ($(STRIP_POLICY),min_strip) + $(QUIETLY) $(STRIP) -x $@ + # implied else here is no stripping at all + endif + endif + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) + $(RM) $(LIBJVM_DTRACE_DEBUGINFO) + endif + endif +endif #$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \ # $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/makefiles/gcc.make --- a/make/bsd/makefiles/gcc.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/makefiles/gcc.make Thu Nov 21 15:04:54 2013 +0100 @@ -83,6 +83,11 @@ AS = $(CC) -c endif +ifeq ($(OS_VENDOR), Darwin) + ifeq ($(DSYMUTIL),) + DSYMUTIL=dsymutil + endif +endif ifeq ($(USE_CLANG), true) CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1) @@ -450,6 +455,36 @@ ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) DEBUG_CFLAGS += -gstabs endif + + ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + FASTDEBUG_CFLAGS/ia64 = -g + FASTDEBUG_CFLAGS/amd64 = -g + FASTDEBUG_CFLAGS/arm = -g + FASTDEBUG_CFLAGS/ppc = -g + FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH)) + ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),) + ifeq ($(USE_CLANG), true) + # Clang doesn't understand -gstabs + FASTDEBUG_CFLAGS += -g + else + FASTDEBUG_CFLAGS += -gstabs + endif + endif + + OPT_CFLAGS/ia64 = -g + OPT_CFLAGS/amd64 = -g + OPT_CFLAGS/arm = -g + OPT_CFLAGS/ppc = -g + OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH)) + ifeq ($(OPT_CFLAGS/$(BUILDARCH)),) + ifeq ($(USE_CLANG), true) + # Clang doesn't understand -gstabs + OPT_CFLAGS += -g + else + OPT_CFLAGS += -gstabs + endif + endif + endif endif # If we are building HEADLESS, pass on to VM diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/makefiles/jsig.make --- a/make/bsd/makefiles/jsig.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/makefiles/jsig.make Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. 
All rights reserved. +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -29,13 +29,21 @@ ifeq ($(OS_VENDOR), Darwin) LIBJSIG = lib$(JSIG).dylib + + LIBJSIG_DEBUGINFO = lib$(JSIG).dylib.dSYM + LIBJSIG_DIZ = lib$(JSIG).diz else LIBJSIG = lib$(JSIG).so + + LIBJSIG_DEBUGINFO = lib$(JSIG).debuginfo + LIBJSIG_DIZ = lib$(JSIG).diz endif JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm -DEST_JSIG = $(JDK_LIBDIR)/$(LIBJSIG) +DEST_JSIG = $(JDK_LIBDIR)/$(LIBJSIG) +DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO) +DEST_JSIG_DIZ = $(JDK_LIBDIR)/$(LIBJSIG_DIZ) LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig @@ -55,9 +63,42 @@ @echo Making signal interposition lib... $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \ $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(OS_VENDOR), Darwin) + $(DSYMUTIL) $@ + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -r -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO) + $(RM) -r $(LIBJSIG_DEBUGINFO) + endif + else + $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO) + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@ + ifeq ($(STRIP_POLICY),all_strip) + $(QUIETLY) $(STRIP) $@ + else + ifeq ($(STRIP_POLICY),min_strip) + $(QUIETLY) $(STRIP) -g $@ + # implied else here is no stripping at all + endif + endif + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO) + $(RM) $(LIBJSIG_DEBUGINFO) + endif + endif +endif install_jsig: $(LIBJSIG) @echo "Copying $(LIBJSIG) to $(DEST_JSIG)" +ifeq ($(OS_VENDOR), Darwin) + -$(QUIETLY) test -d $(LIBJSIG_DEBUGINFO) && \ + cp -f -r $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO) +else + $(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \ + cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO) +endif + -$(QUIETLY) test -f $(LIBJSIG_DIZ) && \ + cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ) $(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done" .PHONY: install_jsig diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/makefiles/product.make --- a/make/bsd/makefiles/product.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/makefiles/product.make Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -43,15 +43,17 @@ SYSDEFS += -DPRODUCT VERSION = optimized -# use -g to strip library as -x will discard its symbol table; -x is fine for -# executables. -ifdef CROSS_COMPILE_ARCH - STRIP = $(ALT_COMPILER_PATH)/strip -else - STRIP = strip +ifneq ($(OS_VENDOR), Darwin) + # use -g to strip library as -x will discard its symbol table; -x is fine for + # executables. 
+ ifdef CROSS_COMPILE_ARCH + STRIP = $(ALT_COMPILER_PATH)/strip + else + STRIP = strip + endif + STRIP_LIBJVM = $(STRIP) -g $@ || exit 1; + STRIP_AOUT = $(STRIP) -x $@ || exit 1; + + # Don't strip in VM build; JDK build will strip libraries later + # LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO)) endif -STRIP_LIBJVM = $(STRIP) -g $@ || exit 1; -STRIP_AOUT = $(STRIP) -x $@ || exit 1; - -# Don't strip in VM build; JDK build will strip libraries later -# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO)) diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/makefiles/saproc.make --- a/make/bsd/makefiles/saproc.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/makefiles/saproc.make Thu Nov 21 15:04:54 2013 +0100 @@ -28,9 +28,15 @@ SAPROC = saproc ifeq ($(OS_VENDOR), Darwin) - LIBSAPROC = lib$(SAPROC).dylib + LIBSAPROC = lib$(SAPROC).dylib + + LIBSAPROC_DEBUGINFO = lib$(SAPROC).dylib.dSYM + LIBSAPROC_DIZ = lib$(SAPROC).diz else - LIBSAPROC = lib$(SAPROC).so + LIBSAPROC = lib$(SAPROC).so + + LIBSAPROC_DEBUGINFO = lib$(SAPROC).debuginfo + LIBSAPROC_DIZ = lib$(SAPROC).diz endif AGENT_DIR = $(GAMMADIR)/agent @@ -58,7 +64,11 @@ else ifeq ($(OS_VENDOR), Darwin) SASRCFILES = $(DARWIN_NON_STUB_SASRCFILES) - SALIBS = -g -framework Foundation -F/System/Library/Frameworks/JavaVM.framework/Frameworks -framework JavaNativeFoundation -framework Security -framework CoreFoundation + ifeq ($(wildcard /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/JavaVM.framework/Frameworks),) + SALIBS = -g -framework Foundation -F/System/Library/Frameworks/JavaVM.framework/Frameworks -framework JavaNativeFoundation -framework Security -framework CoreFoundation + else + SALIBS = -g -framework Foundation -F/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/JavaVM.framework/Frameworks -framework JavaNativeFoundation -framework Security -framework CoreFoundation + endif #objc compiler blows up on -march=i586, perhaps it should not be included in the macosx intel 32-bit C++ compiles? 
SAARCH = $(subst -march=i586,,$(ARCHFLAG)) else @@ -70,7 +80,9 @@ SAMAPFILE = $(SASRCDIR)/mapfile -DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC) +DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC) +DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO) +DEST_SAPROC_DIZ = $(JDK_LIBDIR)/$(LIBSAPROC_DIZ) # DEBUG_BINARIES overrides everything, use full -g debug information ifeq ($(DEBUG_BINARIES), true) @@ -117,11 +129,42 @@ $(SA_DEBUG_CFLAGS) \ -o $@ \ $(SALIBS) +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(OS_VENDOR), Darwin) + $(DSYMUTIL) $@ + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -r -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO) + $(RM) -r $(LIBSAPROC_DEBUGINFO) + endif + else + $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO) + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@ + ifeq ($(STRIP_POLICY),all_strip) + $(QUIETLY) $(STRIP) $@ + else + ifeq ($(STRIP_POLICY),min_strip) + $(QUIETLY) $(STRIP) -g $@ + # implied else here is no stripping at all + endif + endif + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO) + $(RM) $(LIBSAPROC_DEBUGINFO) + endif + endif +endif install_saproc: $(BUILDLIBSAPROC) - $(QUIETLY) if [ -e $(LIBSAPROC) ] ; then \ - echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"; \ - cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"; \ - fi + @echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)" +ifeq ($(OS_VENDOR), Darwin) + -$(QUIETLY) test -d $(LIBSAPROC_DEBUGINFO) && \ + cp -f -r $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO) +else + $(QUIETLY) test -f $(LIBSAPROC_DEBUGINFO) && \ + cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO) +endif + -$(QUIETLY) test -f $(LIBSAPROC_DIZ) && \ + cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ) + $(QUIETLY) cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done" .PHONY: install_saproc diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/makefiles/universal.gmk --- a/make/bsd/makefiles/universal.gmk Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/makefiles/universal.gmk Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # macosx universal builds @@ -35,15 +35,15 @@ all_product_universal: # $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS) $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS) - $(QUIETLY) $(MAKE) EXPORT_SUBDIR= universalize + $(QUIETLY) $(MAKE) BUILD_FLAVOR=product EXPORT_SUBDIR= universalize all_fastdebug_universal: # $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS) $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS) - $(QUIETLY) $(MAKE) EXPORT_SUBDIR=/fastdebug universalize + $(QUIETLY) $(MAKE) BUILD_FLAVOR=fastdebug EXPORT_SUBDIR=/fastdebug universalize all_debug_universal: # $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_DEBUG_TARGETS) $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_DEBUG_TARGETS) - $(QUIETLY) $(MAKE) EXPORT_SUBDIR=/debug universalize + $(QUIETLY) $(MAKE) BUILD_FLAVOR=debug EXPORT_SUBDIR=/debug universalize # Consolidate architecture builds into a single Universal binary @@ -57,18 +57,18 @@ if [ -n "$${BUILT_LIPO_FILES}" ]; then \ $(MKDIR) -p $(shell dirname $@); \ lipo -create -output $@ $${BUILT_LIPO_FILES}; \ - fi + fi # Copy built non-universal binaries in place +# - copies directories; including empty dirs +# - copies files, symlinks, other non-directory files $(UNIVERSAL_COPY_LIST): - BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \ + BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`"; \ if [ -n "$${BUILT_COPY_FILES}" ]; then \ for i in $${BUILT_COPY_FILES}; do \ - if [ -f $${i} ]; then \ - $(MKDIR) -p $(shell dirname $@); \ - $(CP) $${i} $@; \ - fi; \ + $(MKDIR) -p $(shell dirname $@); \ + $(CP) -R $${i} $@; \ done; \ fi diff -r 790ebab62d23 -r f9f4503a4ab5 make/bsd/makefiles/vm.make --- a/make/bsd/makefiles/vm.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/bsd/makefiles/vm.make Thu Nov 21 15:04:54 2013 +0100 @@ -60,10 +60,16 @@ # The order is important for the precompiled headers to work. 
INCLUDES += $(PRECOMPILED_HEADER_DIR:%=-I%) $(Src_Dirs_I:%=-I%) -ifeq (${VERSION}, debug) +# SYMFLAG is used by {jsig,saproc}.make +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + # always build with debug info when we can create .dSYM/.debuginfo files SYMFLAG = -g else - SYMFLAG = + ifeq (${VERSION}, debug) + SYMFLAG = -g + else + SYMFLAG = + endif endif # HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined @@ -151,8 +157,14 @@ ifeq (${VERSION}, $(filter ${VERSION}, debug fastdebug)) CFLAGS += -DALLOW_OPERATOR_NEW_USAGE endif + + LIBJVM_DEBUGINFO = lib$(JVM).dylib.dSYM + LIBJVM_DIZ = lib$(JVM).diz else LIBJVM = lib$(JVM).so + + LIBJVM_DEBUGINFO = lib$(JVM).debuginfo + LIBJVM_DIZ = lib$(JVM).diz endif SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt graal @@ -342,10 +354,47 @@ rm -f $@.1; ln -s $@ $@.1; \ } -DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM) +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) + ifeq ($(OS_VENDOR), Darwin) + $(DSYMUTIL) $@ + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -r -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO) + $(RM) -r $(LIBJVM_DEBUGINFO) + endif + else + $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO) + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@ + ifeq ($(STRIP_POLICY),all_strip) + $(QUIETLY) $(STRIP) $@ + else + ifeq ($(STRIP_POLICY),min_strip) + $(QUIETLY) $(STRIP) -g $@ + # implied else here is no stripping at all + endif + endif + ifeq ($(ZIP_DEBUGINFO_FILES),1) + $(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO) + $(RM) $(LIBJVM_DEBUGINFO) + endif + endif +endif + +DEST_SUBDIR = $(JDK_LIBDIR)/$(VM_SUBDIR) +DEST_JVM = $(DEST_SUBDIR)/$(LIBJVM) +DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO) +DEST_JVM_DIZ = $(DEST_SUBDIR)/$(LIBJVM_DIZ) install_jvm: $(LIBJVM) @echo "Copying $(LIBJVM) to $(DEST_JVM)" +ifeq ($(OS_VENDOR), Darwin) + -$(QUIETLY) test -d $(LIBJVM_DEBUGINFO) && \ + cp -f -r $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO) +else + $(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \ + cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO) +endif + -$(QUIETLY) test -f $(LIBJVM_DIZ) && \ + cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ) $(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done" #---------------------------------------------------------------------- @@ -360,11 +409,8 @@ #---------------------------------------------------------------------- ifeq ($(OS_VENDOR), Darwin) -$(LIBJVM).dSYM: $(LIBJVM) - dsymutil $(LIBJVM) - # no libjvm_db for macosx -build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM +build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck echo "Doing vm.make build:" else build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC) diff -r 790ebab62d23 -r f9f4503a4ab5 make/defs.make --- a/make/defs.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/defs.make Thu Nov 21 15:04:54 2013 +0100 @@ -77,6 +77,16 @@ @$(RM) $@ $(CP) $< $@ endef + +# MacOS X strongly discourages 'cp -r' and provides 'cp -R' instead. +# May need to have a MacOS X specific definition of install-dir +# sometime in the future. 
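The install-dir helper added just below performs three steps: create the target's parent directory, remove any stale copy of the target, then copy the source tree recursively. As a rough illustration of the same semantics outside make (a sketch using only standard java.nio calls; the class and method names are invented):

    import java.io.IOException;
    import java.nio.file.*;
    import java.nio.file.attribute.BasicFileAttributes;

    final class InstallDirSketch {
        // Rough equivalent of: @$(MKDIR) -p $(@D); @$(RM) -r $@; $(CP) -r $< $@
        static void installDir(final Path src, final Path dest) throws IOException {
            Files.createDirectories(dest.getParent()); // mkdir -p $(@D)
            if (Files.exists(dest)) {                  // rm -r $@
                Files.walkFileTree(dest, new SimpleFileVisitor<Path>() {
                    @Override
                    public FileVisitResult visitFile(Path f, BasicFileAttributes a) throws IOException {
                        Files.delete(f);
                        return FileVisitResult.CONTINUE;
                    }
                    @Override
                    public FileVisitResult postVisitDirectory(Path d, IOException e) throws IOException {
                        Files.delete(d);
                        return FileVisitResult.CONTINUE;
                    }
                });
            }
            Files.walkFileTree(src, new SimpleFileVisitor<Path>() { // cp -r $< $@
                @Override
                public FileVisitResult preVisitDirectory(Path d, BasicFileAttributes a) throws IOException {
                    Files.createDirectories(dest.resolve(src.relativize(d)));
                    return FileVisitResult.CONTINUE;
                }
                @Override
                public FileVisitResult visitFile(Path f, BasicFileAttributes a) throws IOException {
                    Files.copy(f, dest.resolve(src.relativize(f)));
                    return FileVisitResult.CONTINUE;
                }
            });
        }
    }

A directory-aware install step matters here because the .dSYM bundles exported on MacOS X are directories rather than single debuginfo files, which is why the new %.dSYM rules use install-dir instead of install-file.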
+define install-dir +@$(MKDIR) -p $(@D) +@$(RM) -r $@ +$(CP) -r $< $@ +endef + define prep-target @$(MKDIR) -p $(@D) @$(RM) $@ diff -r 790ebab62d23 -r f9f4503a4ab5 make/hotspot_version --- a/make/hotspot_version Thu Nov 21 15:04:26 2013 +0100 +++ b/make/hotspot_version Thu Nov 21 15:04:54 2013 +0100 @@ -35,7 +35,7 @@ HS_MAJOR_VER=25 HS_MINOR_VER=0 -HS_BUILD_NUMBER=53 +HS_BUILD_NUMBER=59 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 diff -r 790ebab62d23 -r f9f4503a4ab5 make/jprt.properties --- a/make/jprt.properties Thu Nov 21 15:04:26 2013 +0100 +++ b/make/jprt.properties Thu Nov 21 15:04:54 2013 +0100 @@ -24,12 +24,7 @@ # Properties for jprt -# All build result bundles are full jdks, so the 64bit testing does not -# need the 32bit sibling bundle installed. -# Note: If the hotspot/make/Makefile changed to only bundle the 64bit files -# when bundling 64bit, and stripped out the 64bit files from any 32bit -# bundles, then this setting would be need to be "true". - +# All build result bundles are full jdks. jprt.need.sibling.build=false # At submit time, the release supplied will be in jprt.submit.release @@ -52,21 +47,11 @@ # sparc etc. # Define the Solaris platforms we want for the various releases -jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10 -jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10 -jprt.my.solaris.sparc.jdk7u8=${jprt.my.solaris.sparc.jdk7} -jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}} - jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10 jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7} jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}} -jprt.my.solaris.i586.jdk8=solaris_i586_5.10 -jprt.my.solaris.i586.jdk7=solaris_i586_5.10 -jprt.my.solaris.i586.jdk7u8=${jprt.my.solaris.i586.jdk7} -jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}} - jprt.my.solaris.x64.jdk8=solaris_x64_5.10 jprt.my.solaris.x64.jdk7=solaris_x64_5.10 jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7} @@ -133,9 +118,7 @@ # Standard list of jprt build targets for this source tree jprt.build.targets.standard= \ - ${jprt.my.solaris.sparc}-{product|fastdebug}, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \ - ${jprt.my.solaris.i586}-{product|fastdebug}, \ ${jprt.my.solaris.x64}-{product|fastdebug}, \ ${jprt.my.linux.i586}-{product|fastdebug}, \ ${jprt.my.linux.x64}-{product|fastdebug|optimized}, \ @@ -145,7 +128,6 @@ ${jprt.my.linux.armvh}-{product|fastdebug} jprt.build.targets.open= \ - ${jprt.my.solaris.i586}-{productOpen}, \ ${jprt.my.solaris.x64}-{debugOpen}, \ ${jprt.my.linux.x64}-{productOpen} @@ -168,31 +150,6 @@ # Subset lists of test targets for this source tree -jprt.my.solaris.sparc.test.targets= \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jvm98, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jvm98_nontiered, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \ - ${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \ - ${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ - 
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_SerialGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParallelGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParNewGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_CMS, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_nontiered, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_CMS, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_G1, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParOldGC - jprt.my.solaris.sparcv9.test.targets= \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \ @@ -242,37 +199,6 @@ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC -jprt.my.solaris.i586.test.targets= \ - ${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-jvm98, \ - ${jprt.my.solaris.i586}-{product|fastdebug}-c2-jvm98_nontiered, \ - ${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-scimark, \ - ${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \ - ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \ - ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.solaris.i586}-product-c1-GCBasher_SerialGC, \ - ${jprt.my.solaris.i586}-product-c1-GCBasher_ParallelGC, \ - ${jprt.my.solaris.i586}-product-c1-GCBasher_ParNewGC, \ - ${jprt.my.solaris.i586}-product-c1-GCBasher_CMS, \ - ${jprt.my.solaris.i586}-product-c1-GCBasher_G1, \ - ${jprt.my.solaris.i586}-product-c1-GCBasher_ParOldGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_SerialGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParallelGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParNewGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_CMS, \ - ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_G1, \ - ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParOldGC, \ - ${jprt.my.solaris.i586}-product-c1-GCOld_SerialGC, \ - ${jprt.my.solaris.i586}-product-c1-GCOld_ParallelGC, \ - ${jprt.my.solaris.i586}-product-c1-GCOld_ParNewGC, \ - ${jprt.my.solaris.i586}-product-c1-GCOld_CMS, \ - ${jprt.my.solaris.i586}-product-c1-GCOld_G1, \ - ${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_nontiered, \ - ${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \ - ${jprt.my.solaris.i586}-fastdebug-c2-jbb_G1, \ - ${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParOldGC - jprt.my.linux.i586.test.targets = \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \ ${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \ @@ -395,7 +321,6 @@ # Some basic "smoke" tests for OpenJDK builds jprt.test.targets.open = \ ${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \ - ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98, \ ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98 # Testing for actual embedded builds is different to standard @@ -407,9 +332,7 @@ jprt.test.targets.standard = \ ${jprt.my.linux.i586.test.targets.embedded}, \ - ${jprt.my.solaris.sparc.test.targets}, \ ${jprt.my.solaris.sparcv9.test.targets}, \ - 
${jprt.my.solaris.i586.test.targets}, \ ${jprt.my.solaris.x64.test.targets}, \ ${jprt.my.linux.i586.test.targets}, \ ${jprt.my.linux.x64.test.targets}, \ @@ -420,15 +343,12 @@ jprt.test.targets.embedded= \ ${jprt.my.linux.i586.test.targets.embedded}, \ - ${jprt.my.solaris.sparc.test.targets}, \ ${jprt.my.solaris.sparcv9.test.targets}, \ - ${jprt.my.solaris.i586.test.targets}, \ ${jprt.my.solaris.x64.test.targets}, \ ${jprt.my.linux.x64.test.targets}, \ ${jprt.my.windows.i586.test.targets}, \ ${jprt.my.windows.x64.test.targets} - jprt.test.targets.jdk8=${jprt.test.targets.standard} jprt.test.targets.jdk7=${jprt.test.targets.standard} jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7} @@ -439,15 +359,11 @@ #jprt.make.rule.test.targets=*-product-*-packtest jprt.make.rule.test.targets.standard.client = \ - ${jprt.my.solaris.sparc}-*-c1-clienttest, \ - ${jprt.my.solaris.i586}-*-c1-clienttest, \ ${jprt.my.linux.i586}-*-c1-clienttest, \ ${jprt.my.windows.i586}-*-c1-clienttest jprt.make.rule.test.targets.standard.server = \ - ${jprt.my.solaris.sparc}-*-c2-servertest, \ ${jprt.my.solaris.sparcv9}-*-c2-servertest, \ - ${jprt.my.solaris.i586}-*-c2-servertest, \ ${jprt.my.solaris.x64}-*-c2-servertest, \ ${jprt.my.linux.i586}-*-c2-servertest, \ ${jprt.my.linux.x64}-*-c2-servertest, \ @@ -456,9 +372,7 @@ ${jprt.my.windows.x64}-*-c2-servertest jprt.make.rule.test.targets.standard.internalvmtests = \ - ${jprt.my.solaris.sparc}-fastdebug-c2-internalvmtests, \ ${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \ - ${jprt.my.solaris.i586}-fastdebug-c2-internalvmtests, \ ${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \ ${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \ ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \ @@ -467,16 +381,12 @@ ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests jprt.make.rule.test.targets.standard.wbapi = \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-wbapitest, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-wbapitest, \ - ${jprt.my.solaris.i586}-{product|fastdebug}-c2-wbapitest, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-wbapitest, \ ${jprt.my.linux.i586}-{product|fastdebug}-c2-wbapitest, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-wbapitest, \ ${jprt.my.windows.i586}-{product|fastdebug}-c2-wbapitest, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-wbapitest, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-c1-wbapitest, \ - ${jprt.my.solaris.i586}-{product|fastdebug}-c1-wbapitest, \ ${jprt.my.linux.i586}-{product|fastdebug}-c1-wbapitest, \ ${jprt.my.windows.i586}-{product|fastdebug}-c1-wbapitest diff -r 790ebab62d23 -r f9f4503a4ab5 make/solaris/makefiles/buildtree.make --- a/make/solaris/makefiles/buildtree.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/solaris/makefiles/buildtree.make Thu Nov 21 15:04:54 2013 +0100 @@ -231,7 +231,9 @@ echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \ echo "$(call gamma-path,commonsrc,os/posix/vm) \\"; \ echo "$(call gamma-path,altsrc,gpu/ptx/vm) \\"; \ - echo "$(call gamma-path,commonsrc,gpu/ptx/vm)"; \ + echo "$(call gamma-path,commonsrc,gpu/ptx/vm)" \\; \ + echo "$(call gamma-path,altsrc,gpu/hsail/vm) \\"; \ + echo "$(call gamma-path,commonsrc,gpu/hsail/vm)"; \ echo; \ echo "Src_Dirs_I = \\"; \ echo "$(call gamma-path,altsrc,share/vm/prims) \\"; \ diff -r 790ebab62d23 -r f9f4503a4ab5 make/solaris/makefiles/vm.make --- a/make/solaris/makefiles/vm.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/solaris/makefiles/vm.make Thu Nov 21 15:04:54 2013 +0100 @@ -178,6 +178,8 @@ 
SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm +SOURCE_PATHS+=$(HS_COMMON_SRC)/gpu/ptx/vm +SOURCE_PATHS+=$(HS_COMMON_SRC)/gpu/hsail/vm CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles diff -r 790ebab62d23 -r f9f4503a4ab5 make/windows/makefiles/compile.make --- a/make/windows/makefiles/compile.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/windows/makefiles/compile.make Thu Nov 21 15:04:54 2013 +0100 @@ -44,6 +44,7 @@ # /GS Inserts security stack checks in some functions (VS2005 default) # /Oi Use intrinsics (in /O2) # /Od Disable all optimizations +# /MP Use multiple cores for compilation # # NOTE: Normally following any of the above with a '-' will turn off that flag # @@ -180,6 +181,7 @@ PRODUCT_OPT_OPTION = /O2 /Oy- FASTDEBUG_OPT_OPTION = /O2 /Oy- DEBUG_OPT_OPTION = /Od +SAFESEH_FLAG = /SAFESEH !endif !if "$(COMPILER_NAME)" == "VS2005" @@ -198,6 +200,7 @@ !if "x$(MT)" == "x" MT=mt.exe !endif +SAFESEH_FLAG = /SAFESEH !endif !if "$(COMPILER_NAME)" == "VS2008" @@ -206,11 +209,13 @@ DEBUG_OPT_OPTION = /Od GX_OPTION = /EHsc LD_FLAGS = /manifest $(LD_FLAGS) +MP_FLAG = /MP # Manifest Tool - used in VS2005 and later to adjust manifests stored # as resources inside build artifacts. !if "x$(MT)" == "x" MT=mt.exe !endif +SAFESEH_FLAG = /SAFESEH !endif !if "$(COMPILER_NAME)" == "VS2010" @@ -219,6 +224,7 @@ DEBUG_OPT_OPTION = /Od GX_OPTION = /EHsc LD_FLAGS = /manifest $(LD_FLAGS) +MP_FLAG = /MP # Manifest Tool - used in VS2005 and later to adjust manifests stored # as resources inside build artifacts. !if "x$(MT)" == "x" @@ -235,15 +241,20 @@ DEBUG_OPT_OPTION = /Od GX_OPTION = /EHsc LD_FLAGS = /manifest $(LD_FLAGS) +MP_FLAG = /MP # Manifest Tool - used in VS2005 and later to adjust manifests stored # as resources inside build artifacts. 
!if "x$(MT)" == "x" MT=mt.exe !endif +SAFESEH_FLAG = /SAFESEH +!endif + !if "$(BUILDARCH)" == "i486" -LD_FLAGS = /SAFESEH $(LD_FLAGS) +LD_FLAGS = $(SAFESEH_FLAG) $(LD_FLAGS) !endif -!endif + +CXX_FLAGS = $(CXX_FLAGS) $(MP_FLAG) # If NO_OPTIMIZATIONS is defined in the environment, turn everything off !ifdef NO_OPTIMIZATIONS diff -r 790ebab62d23 -r f9f4503a4ab5 make/windows/makefiles/fastdebug.make --- a/make/windows/makefiles/fastdebug.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/windows/makefiles/fastdebug.make Thu Nov 21 15:04:54 2013 +0100 @@ -38,7 +38,7 @@ !include ../local.make !include compile.make -CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION) /D "CHECK_UNHANDLED_OOPS" +CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION) !include $(WorkSpace)/make/windows/makefiles/vm.make !include local.make diff -r 790ebab62d23 -r f9f4503a4ab5 make/windows/makefiles/sa.make --- a/make/windows/makefiles/sa.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/windows/makefiles/sa.make Thu Nov 21 15:04:54 2013 +0100 @@ -102,11 +102,19 @@ !if "$(MT)" != "" SA_LD_FLAGS = -manifest $(SA_LD_FLAGS) !endif -SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp + +SASRCFILES = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp \ + $(AGENT_DIR)/src/share/native/sadis.c + SA_LFLAGS = $(SA_LD_FLAGS) -nologo -subsystem:console -machine:$(MACHINE) !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1" SA_LFLAGS = $(SA_LFLAGS) -map -debug !endif +!if "$(BUILDARCH)" == "i486" +SA_LFLAGS = $(SAFESEH_FLAG) $(SA_LFLAGS) +!endif + +SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG) # Note that we do not keep sawindbj.obj around as it would then # get included in the dumpbin command in build_vm_def.sh @@ -114,16 +122,16 @@ # In VS2005 or VS2008 the link command creates a .manifest file that we want # to insert into the linked artifact so we do not need to track it separately. 
# Use ";#2" for .dll and ";#1" for .exe in the MT command below: -$(SAWINDBG): $(SASRCFILE) +$(SAWINDBG): $(SASRCFILES) set INCLUDE=$(SA_INCLUDE)$(INCLUDE) $(CXX) @<< -I"$(BootStrapDir)/include" -I"$(BootStrapDir)/include/win32" -I"$(GENERATED)" $(SA_CFLAGS) - $(SASRCFILE) + $(SASRCFILES) -out:$*.obj << set LIB=$(SA_LIB)$(LIB) - $(LD) -out:$@ -DLL $*.obj dbgeng.lib $(SA_LFLAGS) + $(LD) -out:$@ -DLL sawindbg.obj sadis.obj dbgeng.lib $(SA_LFLAGS) !if "$(MT)" != "" $(MT) -manifest $(@F).manifest -outputresource:$(@F);#2 !endif diff -r 790ebab62d23 -r f9f4503a4ab5 make/windows/makefiles/trace.make --- a/make/windows/makefiles/trace.make Thu Nov 21 15:04:26 2013 +0100 +++ b/make/windows/makefiles/trace.make Thu Nov 21 15:04:54 2013 +0100 @@ -40,8 +40,7 @@ traceEventIds.hpp \ traceTypes.hpp - -!if "$(OPENJDK)" != "true" +!if EXISTS($(TraceAltSrcDir)) TraceGeneratedNames = $(TraceGeneratedNames) \ traceRequestables.hpp \ traceEventControl.hpp \ @@ -56,7 +55,7 @@ $(TraceOutDir)/traceEventIds.hpp \ $(TraceOutDir)/traceTypes.hpp -!if "$(OPENJDK)" != "true" +!if EXISTS($(TraceAltSrcDir)) TraceGeneratedFiles = $(TraceGeneratedFiles) \ $(TraceOutDir)/traceRequestables.hpp \ $(TraceOutDir)/traceEventControl.hpp \ @@ -68,7 +67,7 @@ XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod -!if "$(OPENJDK)" != "true" +!if EXISTS($(TraceAltSrcDir)) XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml !endif @@ -87,7 +86,7 @@ @echo Generating $@ @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp -!if "$(OPENJDK)" == "true" +!if !EXISTS($(TraceAltSrcDir)) $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) @echo Generating OpenJDK $@ diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/c1_FrameMap_sparc.cpp --- a/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -53,6 +53,8 @@ opr = as_long_opr(reg); } else if (type == T_OBJECT || type == T_ARRAY) { opr = as_oop_opr(reg); + } else if (type == T_METADATA) { + opr = as_metadata_opr(reg); } else { opr = as_opr(reg); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -2565,7 +2565,7 @@ Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias); __ ld_ptr(receiver_addr, tmp1); - __ verify_oop(tmp1); + __ verify_klass_ptr(tmp1); __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test); Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias); @@ -3100,6 +3100,10 @@ } } +void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { + fatal("Type profiling not implemented on this platform"); +} + void LIR_Assembler::align_backward_branch_target() { __ align(OptoLoopAlignment); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/c1_Runtime1_sparc.cpp --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -37,6 +37,9 @@ #include "runtime/vframeArray.hpp" #include "utilities/macros.hpp" #include "vmreg_sparc.inline.hpp" +#if INCLUDE_ALL_GCS +#include 
"gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +#endif // Implementation of StubAssembler @@ -401,7 +404,9 @@ if (id == fast_new_instance_init_check_id) { // make sure the klass is initialized __ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1); - __ cmp_and_br_short(G3_t1, InstanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path); + __ cmp(G3_t1, InstanceKlass::fully_initialized); + __ br(Assembler::notEqual, false, Assembler::pn, slow_path); + __ delayed()->nop(); } #ifdef ASSERT // assert object can be fast path allocated @@ -512,7 +517,9 @@ // check that array length is small enough for fast path __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1); - __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path); + __ cmp(G4_length, G3_t1); + __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path); + __ delayed()->nop(); // if we got here then the TLAB allocation failed, so try // refilling the TLAB or allocating directly from eden. @@ -912,7 +919,7 @@ Register tmp2 = G3_scratch; jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base; - Label not_already_dirty, restart, refill; + Label not_already_dirty, restart, refill, young_card; #ifdef _LP64 __ srlx(addr, CardTableModRefBS::card_shift, addr); @@ -924,9 +931,15 @@ __ set(rs, cardtable); // cardtable := __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] + __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); + + __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); + __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] + assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); + __ bind(young_card); // We didn't take the branch, so we're already dirty: return. // Use return-from-leaf __ retl(); @@ -1067,6 +1080,25 @@ __ verify_not_null_oop(Oexception); +#ifdef ASSERT + // check that fields in JavaThread for exception oop and issuing pc are + // empty before writing to them + Label oop_empty; + Register scratch = I7; // We can use I7 here because it's overwritten later anyway. 
+ __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch); + __ br_null(scratch, false, Assembler::pt, oop_empty); + __ delayed()->nop(); + __ stop("exception oop already set"); + __ bind(oop_empty); + + Label pc_empty; + __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch); + __ br_null(scratch, false, Assembler::pt, pc_empty); + __ delayed()->nop(); + __ stop("exception pc already set"); + __ bind(pc_empty); +#endif + // save the exception and issuing pc in the thread __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset())); __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset())); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/cppInterpreter_sparc.cpp --- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -365,7 +365,7 @@ return entry; } -address CppInterpreter::return_entry(TosState state, int length) { +address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) { // make it look good in the debugger return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/globals_sparc.hpp --- a/src/cpu/sparc/vm/globals_sparc.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/globals_sparc.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -76,6 +76,8 @@ // GC Ergo Flags define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread +define_pd_global(uintx, TypeProfileLevel, 0); + #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \ \ product(intx, UseVIS, 99, \ diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/macroAssembler_sparc.cpp --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -3333,7 +3333,8 @@ if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { // No allocation in the shared eden. - ba_short(slow_case); + ba(slow_case); + delayed()->nop(); } ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); @@ -3358,7 +3359,8 @@ add(t2, 1, t2); stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset())); } - ba_short(try_eden); + ba(try_eden); + delayed()->nop(); bind(discard_tlab); if (TLABStats) { @@ -3420,7 +3422,8 @@ sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); verify_tlab(); - ba_short(retry); + ba(retry); + delayed()->nop(); } void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, @@ -3523,8 +3526,12 @@ delayed()->sub(Rtsp, Roffset, Rtsp); // Bang down shadow pages too. - // The -1 because we already subtracted 1 page. - for (int i = 0; i< StackShadowPages-1; i++) { + // At this point, (tmp-0) is the last address touched, so don't + // touch it again. (It was touched as (tmp-pagesize) but then tmp + // was post-decremented.) Skip this address by starting at i=1, and + // touch a few more pages below. N.B. It is important to touch all + // the way down to and including i=StackShadowPages. + for (int i = 1; i <= StackShadowPages; i++) { set((-i*offset)+STACK_BIAS, Rscratch); st(G0, Rtsp, Rscratch); } @@ -3752,7 +3759,7 @@ #define __ masm. 
address start = __ pc(); - Label not_already_dirty, restart, refill; + Label not_already_dirty, restart, refill, young_card; #ifdef _LP64 __ srlx(O0, CardTableModRefBS::card_shift, O0); @@ -3763,9 +3770,15 @@ __ set(addrlit, O1); // O1 := __ ldub(O0, O1, O2); // O2 := [O0 + O1] + __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); + + __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); + __ ldub(O0, O1, O2); // O2 := [O0 + O1] + assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); + __ bind(young_card); // We didn't take the branch, so we're already dirty: return. // Use return-from-leaf __ retl(); @@ -4090,15 +4103,19 @@ void MacroAssembler::encode_klass_not_null(Register r) { assert (UseCompressedClassPointers, "must be compressed"); - assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); - assert(r != G6_heapbase, "bad register choice"); - set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); - sub(r, G6_heapbase, r); - if (Universe::narrow_klass_shift() != 0) { - assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); - srlx(r, LogKlassAlignmentInBytes, r); + if (Universe::narrow_klass_base() != NULL) { + assert(r != G6_heapbase, "bad register choice"); + set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); + sub(r, G6_heapbase, r); + if (Universe::narrow_klass_shift() != 0) { + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); + srlx(r, LogKlassAlignmentInBytes, r); + } + reinit_heapbase(); + } else { + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); + srlx(r, Universe::narrow_klass_shift(), r); } - reinit_heapbase(); } void MacroAssembler::encode_klass_not_null(Register src, Register dst) { @@ -4106,11 +4123,16 @@ encode_klass_not_null(src); } else { assert (UseCompressedClassPointers, "must be compressed"); - assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); - set((intptr_t)Universe::narrow_klass_base(), dst); - sub(src, dst, dst); - if (Universe::narrow_klass_shift() != 0) { - srlx(dst, LogKlassAlignmentInBytes, dst); + if (Universe::narrow_klass_base() != NULL) { + set((intptr_t)Universe::narrow_klass_base(), dst); + sub(src, dst, dst); + if (Universe::narrow_klass_shift() != 0) { + srlx(dst, LogKlassAlignmentInBytes, dst); + } + } else { + // shift src into dst + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); + srlx(src, Universe::narrow_klass_shift(), dst); } } } @@ -4120,14 +4142,16 @@ // the instructions they generate change, then this method needs to be updated. 
int MacroAssembler::instr_size_for_decode_klass_not_null() { assert (UseCompressedClassPointers, "only for compressed klass ptrs"); - // set + add + set - int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 + - insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); - if (Universe::narrow_klass_shift() == 0) { - return num_instrs * BytesPerInstWord; - } else { // sllx - return (num_instrs + 1) * BytesPerInstWord; + int num_instrs = 1; // shift src,dst or add + if (Universe::narrow_klass_base() != NULL) { + // set + add + set + num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + + insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); + if (Universe::narrow_klass_shift() != 0) { + num_instrs += 1; // sllx + } } + return num_instrs * BytesPerInstWord; } // !!! If the instructions that get generated here change then function @@ -4136,13 +4160,17 @@ // Do not add assert code to this unless you change vtableStubs_sparc.cpp // pd_code_size_limit. assert (UseCompressedClassPointers, "must be compressed"); - assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); - assert(r != G6_heapbase, "bad register choice"); - set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); - if (Universe::narrow_klass_shift() != 0) - sllx(r, LogKlassAlignmentInBytes, r); - add(r, G6_heapbase, r); - reinit_heapbase(); + if (Universe::narrow_klass_base() != NULL) { + assert(r != G6_heapbase, "bad register choice"); + set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); + if (Universe::narrow_klass_shift() != 0) + sllx(r, LogKlassAlignmentInBytes, r); + add(r, G6_heapbase, r); + reinit_heapbase(); + } else { + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); + sllx(r, Universe::narrow_klass_shift(), r); + } } void MacroAssembler::decode_klass_not_null(Register src, Register dst) { @@ -4152,16 +4180,21 @@ // Do not add assert code to this unless you change vtableStubs_sparc.cpp // pd_code_size_limit. assert (UseCompressedClassPointers, "must be compressed"); - assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); - if (Universe::narrow_klass_shift() != 0) { - assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); - set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); - sllx(src, LogKlassAlignmentInBytes, dst); - add(dst, G6_heapbase, dst); - reinit_heapbase(); + if (Universe::narrow_klass_base() != NULL) { + if (Universe::narrow_klass_shift() != 0) { + assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); + set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); + sllx(src, LogKlassAlignmentInBytes, dst); + add(dst, G6_heapbase, dst); + reinit_heapbase(); + } else { + set((intptr_t)Universe::narrow_klass_base(), dst); + add(src, dst, dst); + } } else { - set((intptr_t)Universe::narrow_klass_base(), dst); - add(src, dst, dst); + // shift/mov src into dst. 
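+ // [Editorial aside] This null-base branch is the recurring pattern of the
+ // compressed-class-pointer rework in this changeset: when
+ // Universe::narrow_klass_base() is NULL, encoding and decoding degenerate to
+ // a pure shift. An illustrative C++ model (base and shift stand for
+ // Universe::narrow_klass_base()/narrow_klass_shift(); not HotSpot code):
+ //
+ //   uintptr_t encode(uintptr_t klass)  { if (base != 0) klass -= base; return klass >> shift; }
+ //   uintptr_t decode(uintptr_t nklass) { uintptr_t k = nklass << shift; if (base != 0) k += base; return k; }
+ //
+ // The x86 hunks further below make the same generalization.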
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); + sllx(src, Universe::narrow_klass_shift(), dst); } } } diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/sharedRuntime_sparc.cpp --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1002,18 +1002,6 @@ // and the vm will find there should this case occur. Address callee_target_addr(G2_thread, JavaThread::callee_target_offset()); __ st_ptr(G5_method, callee_target_addr); - - if (StressNonEntrant) { - // Open a big window for deopt failure - __ save_frame(0); - __ mov(G0, L0); - Label loop; - __ bind(loop); - __ sub(L0, 1, L0); - __ br_null_short(L0, Assembler::pt, loop); - __ restore(); - } - __ jmpl(G3, 0, G0); __ delayed()->nop(); } @@ -3606,6 +3594,7 @@ // the pending exception will be picked up the interpreter. __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception); __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset())); + __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset())); __ bind(noException); // deallocate the deoptimization frame taking care to preserve the return values diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/sparc.ad --- a/src/cpu/sparc/vm/sparc.ad Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/sparc.ad Thu Nov 21 15:04:54 2013 +0100 @@ -1660,12 +1660,16 @@ if (UseCompressedClassPointers) { assert(Universe::heap() != NULL, "java heap should be initialized"); st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass"); - st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base"); - if (Universe::narrow_klass_shift() != 0) { - st->print_cr("\tSLL R_G5,3,R_G5"); + if (Universe::narrow_klass_base() != 0) { + st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base"); + if (Universe::narrow_klass_shift() != 0) { + st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5"); + } + st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5"); + st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base"); + } else { + st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5"); } - st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5"); - st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base"); } else { st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! 
Inline cache check"); } @@ -2022,6 +2026,10 @@ return G1_REGI_mask(); } +const RegMask Matcher::mathExactL_result_proj_mask() { + return G1_REGL_mask(); +} + const RegMask Matcher::mathExactI_flags_proj_mask() { return INT_FLAGS_mask(); } @@ -2908,6 +2916,9 @@ __ bind(LSkip2); } + // We have no guarantee that on 64 bit the higher half of limit_reg is 0 + __ signx(limit_reg); + __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg); __ br(Assembler::equal, true, Assembler::pn, Ldone); __ delayed()->mov(O7, result_reg); // result is difference in lengths @@ -2965,6 +2976,9 @@ Register chr1_reg = result_reg; Register chr2_reg = tmp1_reg; + // We have no guarantee that on 64 bit the higher half of limit_reg is 0 + __ signx(limit_reg); + //check for alignment and position the pointers to the ends __ or3(str1_reg, str2_reg, chr1_reg); __ andcc(chr1_reg, 0x3, chr1_reg); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/templateInterpreter_sparc.cpp --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -153,13 +153,9 @@ } -address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { - TosState incoming_state = state; +address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { + address entry = __ pc(); - Label cont; - address compiled_entry = __ pc(); - - address entry = __ pc(); #if !defined(_LP64) && defined(COMPILER2) // All return values are where we want them, except for Longs. C2 returns // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1. @@ -170,14 +166,12 @@ // do this here. Unfortunately if we did a rethrow we'd see an machepilog node // first which would move g1 -> O0/O1 and destroy the exception we were throwing. - if (incoming_state == ltos) { + if (state == ltos) { __ srl (G1, 0, O1); __ srlx(G1, 32, O0); } #endif // !_LP64 && COMPILER2 - __ bind(cont); - // The callee returns with the stack possibly adjusted by adapter transition // We remove that possible adjustment here. // All interpreter local registers are untouched. Any result is passed back @@ -186,29 +180,18 @@ __ mov(Llast_SP, SP); // Remove any adapter added stack space. - Label L_got_cache, L_giant_index; const Register cache = G3_scratch; - const Register size = G1_scratch; - if (EnableInvokeDynamic) { - __ ldub(Address(Lbcp, 0), G1_scratch); // Load current bytecode. 
- __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index); - } - __ get_cache_and_index_at_bcp(cache, G1_scratch, 1); - __ bind(L_got_cache); - __ ld_ptr(cache, ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset(), size); - __ and3(size, 0xFF, size); // argument size in words - __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes - __ add(Lesp, size, Lesp); // pop arguments + const Register index = G1_scratch; + __ get_cache_and_index_at_bcp(cache, index, 1, index_size); + + const Register flags = cache; + __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags); + const Register parameter_size = flags; + __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size); // argument size in words + __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size); // each argument size in bytes + __ add(Lesp, parameter_size, Lesp); // pop arguments __ dispatch_next(state, step); - // out of the main line of code... - if (EnableInvokeDynamic) { - __ bind(L_giant_index); - __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4)); - __ ba_short(L_got_cache); - } - return entry; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/sparc/vm/templateTable_sparc.cpp --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -2932,9 +2932,7 @@ ConstantPoolCacheEntry::verify_tos_state_shift(); // load return address { - const address table_addr = (is_invokeinterface || is_invokedynamic) ? - (address)Interpreter::return_5_addrs_by_index_table() : - (address)Interpreter::return_3_addrs_by_index_table(); + const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); AddressLiteral table(table_addr); __ set(table, temp); __ sll(ra, LogBytesPerWord, ra); @@ -2987,7 +2985,7 @@ __ verify_oop(O0_recv); // get return address - AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + AddressLiteral table(Interpreter::invoke_return_entry_table()); __ set(table, Rtemp); __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type // Make sure we don't need to mask Rret after the above shift @@ -3029,7 +3027,7 @@ __ profile_final_call(O4); // get return address - AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + AddressLiteral table(Interpreter::invoke_return_entry_table()); __ set(table, Rtemp); __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type // Make sure we don't need to mask Rret after the above shift diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/assembler_x86.cpp --- a/src/cpu/x86/vm/assembler_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/assembler_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1405,6 +1405,15 @@ } } +void Assembler::imull(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_int8(0x0F); + emit_int8((unsigned char) 0xAF); + emit_operand(dst, src); +} + + void Assembler::incl(Address dst) { // Don't use it directly. Use MacroAssembler::increment() instead. InstructionMark im(this); @@ -5024,6 +5033,14 @@ } } +void Assembler::imulq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_int8(0x0F); + emit_int8((unsigned char) 0xAF); + emit_operand(dst, src); +} + void Assembler::incl(Register dst) { // Don't use it directly. 
Use MacroAssembler::incrementl() instead. // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/assembler_x86.hpp --- a/src/cpu/x86/vm/assembler_x86.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/assembler_x86.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -1162,9 +1162,13 @@ void imull(Register dst, Register src); void imull(Register dst, Register src, int value); + void imull(Register dst, Address src); void imulq(Register dst, Register src); void imulq(Register dst, Register src, int value); +#ifdef _LP64 + void imulq(Register dst, Address src); +#endif // jcc is the generic conditional branch generator to run- diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/bytecodeInterpreter_x86.cpp --- a/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -40,11 +40,8 @@ #include "runtime/synchronizer.hpp" #include "runtime/vframeArray.hpp" #include "utilities/debug.hpp" -#ifdef TARGET_ARCH_MODEL_x86_32 -# include "interp_masm_x86_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_x86_64 -# include "interp_masm_x86_64.hpp" +#ifdef TARGET_ARCH_x86 +# include "interp_masm_x86.hpp" #endif #ifdef CC_INTERP diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/c1_FrameMap_x86.cpp --- a/src/cpu/x86/vm/c1_FrameMap_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/c1_FrameMap_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -52,6 +52,8 @@ #endif // _LP64 } else if (type == T_OBJECT || type == T_ARRAY) { opr = as_oop_opr(reg); + } else if (type == T_METADATA) { + opr = as_metadata_opr(reg); } else { opr = as_opr(reg); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/c1_LIRAssembler_x86.cpp --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -432,15 +432,16 @@ int offset = code_offset(); // Fetch the exception from TLS and clear out exception related thread state - __ get_thread(rsi); - __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset())); - __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD); - __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD); + Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread); + NOT_LP64(__ get_thread(rsi)); + __ movptr(rax, Address(thread, JavaThread::exception_oop_offset())); + __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD); + __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD); __ bind(_unwind_handler_entry); __ verify_not_null_oop(rax); if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { - __ mov(rsi, rax); // Preserve the exception + __ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved) } // Preform needed unlocking @@ -448,19 +449,24 @@ if (method()->is_synchronized()) { monitor_address(0, FrameMap::rax_opr); stub = new MonitorExitStub(FrameMap::rax_opr, true, 0); - __ unlock_object(rdi, rbx, rax, *stub->entry()); + __ unlock_object(rdi, rsi, rax, *stub->entry()); __ bind(*stub->continuation()); } if (compilation()->env()->dtrace_method_probes()) { +#ifdef _LP64 + __ mov(rdi, r15_thread); + __ mov_metadata(rsi, method()->constant_encoding()); +#else __ get_thread(rax); __ movptr(Address(rsp, 0), rax); __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding()); +#endif __ 
call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); } if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { - __ mov(rax, rsi); // Restore the exception + __ mov(rax, rbx); // Restore the exception } // remove the activation and dispatch to the unwind handler @@ -1206,6 +1212,10 @@ LIR_Address* addr = src->as_address_ptr(); Address from_addr = as_Address(addr); + if (addr->base()->type() == T_OBJECT) { + __ verify_oop(addr->base()->as_pointer_register()); + } + switch (type) { case T_BOOLEAN: // fall through case T_BYTE: // fall through @@ -3632,6 +3642,161 @@ } } +void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { + Register obj = op->obj()->as_register(); + Register tmp = op->tmp()->as_pointer_register(); + Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); + ciKlass* exact_klass = op->exact_klass(); + intptr_t current_klass = op->current_klass(); + bool not_null = op->not_null(); + bool no_conflict = op->no_conflict(); + + Label update, next, none; + + bool do_null = !not_null; + bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; + bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; + + assert(do_null || do_update, "why are we here?"); + assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); + + __ verify_oop(obj); + + if (tmp != obj) { + __ mov(tmp, obj); + } + if (do_null) { + __ testptr(tmp, tmp); + __ jccb(Assembler::notZero, update); + if (!TypeEntries::was_null_seen(current_klass)) { + __ orptr(mdo_addr, TypeEntries::null_seen); + } + if (do_update) { +#ifndef ASSERT + __ jmpb(next); + } +#else + __ jmp(next); + } + } else { + __ testptr(tmp, tmp); + __ jccb(Assembler::notZero, update); + __ stop("unexpected null obj"); +#endif + } + + __ bind(update); + + if (do_update) { +#ifdef ASSERT + if (exact_klass != NULL) { + Label ok; + __ load_klass(tmp, tmp); + __ push(tmp); + __ mov_metadata(tmp, exact_klass->constant_encoding()); + __ cmpptr(tmp, Address(rsp, 0)); + __ jccb(Assembler::equal, ok); + __ stop("exact klass and actual klass differ"); + __ bind(ok); + __ pop(tmp); + } +#endif + if (!no_conflict) { + if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { + if (exact_klass != NULL) { + __ mov_metadata(tmp, exact_klass->constant_encoding()); + } else { + __ load_klass(tmp, tmp); + } + + __ xorptr(tmp, mdo_addr); + __ testptr(tmp, TypeEntries::type_klass_mask); + // klass seen before, nothing to do. The unknown bit may have been + // set already but no need to check. + __ jccb(Assembler::zero, next); + + __ testptr(tmp, TypeEntries::type_unknown); + __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
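+ // [Editorial aside] The branches emitted above and continued below form a
+ // small state machine over the profiled cell. A single-threaded C++ model of
+ // it, assuming mdo_slot points at the cell and k is the observed klass (both
+ // names are illustrative, not the emitted code; the TypeEntries constants
+ // are the real ones):
+ //
+ //   intptr_t cur = *mdo_slot;
+ //   if (k == NULL) { *mdo_slot = cur | TypeEntries::null_seen; return; }
+ //   if ((((intptr_t)k ^ cur) & TypeEntries::type_klass_mask) == 0) return;  // same klass as before
+ //   if (cur & TypeEntries::type_unknown) return;                 // already polluted
+ //   if (cur == 0 || cur == TypeEntries::null_seen) {
+ //     *mdo_slot = (intptr_t)k | (cur & TypeEntries::null_seen);  // first klass seen
+ //   } else {
+ //     *mdo_slot = cur | TypeEntries::type_unknown;               // conflicting klass: give up
+ //   }
+ //
+ // (The emitted code additionally re-reads the cell once before giving up, to
+ // tolerate a race with another thread setting the same klass.)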
+ + if (TypeEntries::is_type_none(current_klass)) { + __ cmpptr(mdo_addr, 0); + __ jccb(Assembler::equal, none); + __ cmpptr(mdo_addr, TypeEntries::null_seen); + __ jccb(Assembler::equal, none); + // There is a chance that the checks above (re-reading profiling + // data from memory) fail if another thread has just set the + // profiling to this obj's klass + __ xorptr(tmp, mdo_addr); + __ testptr(tmp, TypeEntries::type_klass_mask); + __ jccb(Assembler::zero, next); + } + } else { + assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); + + __ movptr(tmp, mdo_addr); + __ testptr(tmp, TypeEntries::type_unknown); + __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. + } + + // different than before. Cannot keep accurate profile. + __ orptr(mdo_addr, TypeEntries::type_unknown); + + if (TypeEntries::is_type_none(current_klass)) { + __ jmpb(next); + + __ bind(none); + // first time here. Set profile type. + __ movptr(mdo_addr, tmp); + } + } else { + // There's a single possible klass at this profile point + assert(exact_klass != NULL, "should be"); + if (TypeEntries::is_type_none(current_klass)) { + __ mov_metadata(tmp, exact_klass->constant_encoding()); + __ xorptr(tmp, mdo_addr); + __ testptr(tmp, TypeEntries::type_klass_mask); +#ifdef ASSERT + __ jcc(Assembler::zero, next); + + { + Label ok; + __ push(tmp); + __ cmpptr(mdo_addr, 0); + __ jcc(Assembler::equal, ok); + __ cmpptr(mdo_addr, TypeEntries::null_seen); + __ jcc(Assembler::equal, ok); + // may have been set by another thread + __ mov_metadata(tmp, exact_klass->constant_encoding()); + __ xorptr(tmp, mdo_addr); + __ testptr(tmp, TypeEntries::type_mask); + __ jcc(Assembler::zero, ok); + + __ stop("unexpected profiling mismatch"); + __ bind(ok); + __ pop(tmp); + } +#else + __ jccb(Assembler::zero, next); +#endif + // first time here. Set profile type. + __ movptr(mdo_addr, tmp); + } else { + assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); + + __ movptr(tmp, mdo_addr); + __ testptr(tmp, TypeEntries::type_unknown); + __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. + + __ orptr(mdo_addr, TypeEntries::type_unknown); + } + } + + __ bind(next); + } +} + void LIR_Assembler::emit_delay(LIR_OpDelay*) { Unimplemented(); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/c1_LIRGenerator_x86.cpp --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1468,19 +1468,18 @@ addr = new LIR_Address(src.result(), offset, type); } - if (data != dst) { - __ move(data, dst); - data = dst; - } + // Because we want a 2-arg form of xchg and xadd + __ move(data, dst); + if (x->is_add()) { - __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr); + __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr); } else { if (is_obj) { // Do the pre-write barrier, if any. 
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */, true /* do_load */, false /* patch */, NULL); } - __ xchg(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr); + __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr); if (is_obj) { // Seems to be a precise address post_barrier(LIR_OprFact::address(addr), data); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/c1_Runtime1_x86.cpp --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -38,6 +38,9 @@ #include "runtime/vframeArray.hpp" #include "utilities/macros.hpp" #include "vmreg_x86.inline.hpp" +#if INCLUDE_ALL_GCS +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +#endif // Implementation of StubAssembler @@ -1753,13 +1756,17 @@ __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index))); #endif - __ cmpb(Address(card_addr, 0), 0); + __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); + __ jcc(Assembler::equal, done); + + __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); + __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); __ jcc(Assembler::equal, done); // storing region crossing non-NULL, card is clean. // dirty card and log. - __ movb(Address(card_addr, 0), 0); + __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); __ cmpl(queue_index, 0); __ jcc(Assembler::equal, runtime); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/cppInterpreter_x86.cpp --- a/src/cpu/x86/vm/cppInterpreter_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -367,7 +367,7 @@ return entry; } -address CppInterpreter::return_entry(TosState state, int length) { +address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) { // make it look good in the debugger return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/globals_x86.hpp --- a/src/cpu/x86/vm/globals_x86.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/globals_x86.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -79,6 +79,8 @@ // GC Ergo Flags define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread +define_pd_global(uintx, TypeProfileLevel, GRAALVM_ONLY(0) NOT_GRAALVM(111)); + #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \ \ develop(bool, IEEEPrecision, true, \ diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/interp_masm_x86.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/x86/vm/interp_masm_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,229 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "interp_masm_x86.hpp" +#include "interpreter/interpreter.hpp" +#include "oops/methodData.hpp" + +#ifndef CC_INTERP +void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) { + Label update, next, none; + + verify_oop(obj); + + testptr(obj, obj); + jccb(Assembler::notZero, update); + orptr(mdo_addr, TypeEntries::null_seen); + jmpb(next); + + bind(update); + load_klass(obj, obj); + + xorptr(obj, mdo_addr); + testptr(obj, TypeEntries::type_klass_mask); + jccb(Assembler::zero, next); // klass seen before, nothing to + // do. The unknown bit may have been + // set already but no need to check. + + testptr(obj, TypeEntries::type_unknown); + jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. + + cmpptr(mdo_addr, 0); + jccb(Assembler::equal, none); + cmpptr(mdo_addr, TypeEntries::null_seen); + jccb(Assembler::equal, none); + // There is a chance that the checks above (re-reading profiling + // data from memory) fail if another thread has just set the + // profiling to this obj's klass + xorptr(obj, mdo_addr); + testptr(obj, TypeEntries::type_klass_mask); + jccb(Assembler::zero, next); + + // different than before. Cannot keep accurate profile. + orptr(mdo_addr, TypeEntries::type_unknown); + jmpb(next); + + bind(none); + // first time here. Set profile type. + movptr(mdo_addr, obj); + + bind(next); +} + +void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) { + if (!ProfileInterpreter) { + return; + } + + if (MethodData::profile_arguments() || MethodData::profile_return()) { + Label profile_continue; + + test_method_data_pointer(mdp, profile_continue); + + int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size()); + + cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? 
DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag); + jcc(Assembler::notEqual, profile_continue); + + if (MethodData::profile_arguments()) { + Label done; + int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset()); + addptr(mdp, off_to_args); + + for (int i = 0; i < TypeProfileArgsLimit; i++) { + if (i > 0 || MethodData::profile_return()) { + // If return value type is profiled we may have no argument to profile + movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args)); + subl(tmp, i*TypeStackSlotEntries::per_arg_count()); + cmpl(tmp, TypeStackSlotEntries::per_arg_count()); + jcc(Assembler::less, done); + } + movptr(tmp, Address(callee, Method::const_offset())); + load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset())); + // stack offset o (zero based) from the start of the argument + // list, for n arguments translates into offset n - o - 1 from + // the end of the argument list + subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args)); + subl(tmp, 1); + Address arg_addr = argument_address(tmp); + movptr(tmp, arg_addr); + + Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args); + profile_obj_type(tmp, mdo_arg_addr); + + int to_add = in_bytes(TypeStackSlotEntries::per_arg_size()); + addptr(mdp, to_add); + off_to_args += to_add; + } + + if (MethodData::profile_return()) { + movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args)); + subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count()); + } + + bind(done); + + if (MethodData::profile_return()) { + // We're right after the type profile for the last + // argument. tmp is the number of cells left in the + // CallTypeData/VirtualCallTypeData to reach its end. Non-null + // if there's a return to profile. + assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type"); + shll(tmp, exact_log2(DataLayout::cell_size)); + addptr(mdp, tmp); + } + movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp); + } else { + assert(MethodData::profile_return(), "either profile call args or call ret"); + update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size())); + } + + // mdp points right after the end of the + // CallTypeData/VirtualCallTypeData, right after the cells for the + // return value type if there's one + + bind(profile_continue); + } +} + +void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) { + assert_different_registers(mdp, ret, tmp, _bcp_register); + if (ProfileInterpreter && MethodData::profile_return()) { + Label profile_continue, done; + + test_method_data_pointer(mdp, profile_continue); + + if (MethodData::profile_return_jsr292_only()) { + // If we don't profile all invoke bytecodes we must make sure + // it's a bytecode we indeed profile. 
We can't go back to the + // beginning of the ProfileData we intend to update to check its + // type because we're right after it and we don't know its + // length + Label do_profile; + cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic); + jcc(Assembler::equal, do_profile); + cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle); + jcc(Assembler::equal, do_profile); + get_method(tmp); + cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm); + jcc(Assembler::notEqual, profile_continue); + + bind(do_profile); + } + + Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size())); + mov(tmp, ret); + profile_obj_type(tmp, mdo_ret_addr); + + bind(profile_continue); + } +} + +void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) { + if (ProfileInterpreter && MethodData::profile_parameters()) { + Label profile_continue, done; + + test_method_data_pointer(mdp, profile_continue); + + // Load the offset of the area within the MDO used for + // parameters. If it's negative we're not profiling any parameters + movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()))); + testl(tmp1, tmp1); + jcc(Assembler::negative, profile_continue); + + // Compute a pointer to the area for parameters from the offset + // and move the pointer to the slot for the last + // parameters. Collect profiling from last parameter down. + // mdo start + parameters offset + array length - 1 + addptr(mdp, tmp1); + movptr(tmp1, Address(mdp, in_bytes(ArrayData::array_len_offset()))); + decrement(tmp1, TypeStackSlotEntries::per_arg_count()); + + Label loop; + bind(loop); + + int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0)); + int type_base = in_bytes(ParametersTypeData::type_offset(0)); + Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size); + Address arg_off(mdp, tmp1, per_arg_scale, off_base); + Address arg_type(mdp, tmp1, per_arg_scale, type_base); + + // load offset on the stack from the slot for this parameter + movptr(tmp2, arg_off); + negptr(tmp2); + // read the parameter from the local area + movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale())); + + // profile the parameter + profile_obj_type(tmp2, arg_type); + + // go to next parameter + decrement(tmp1, TypeStackSlotEntries::per_arg_count()); + jcc(Assembler::positive, loop); + + bind(profile_continue); + } +} +#endif diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/interp_masm_x86.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/x86/vm/interp_masm_x86.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_X86_VM_INTERP_MASM_X86_HPP +#define CPU_X86_VM_INTERP_MASM_X86_HPP + +#include "asm/macroAssembler.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "interpreter/invocationCounter.hpp" +#include "runtime/frame.hpp" + +// This file specializes the assembler with interpreter-specific macros + + +class InterpreterMacroAssembler: public MacroAssembler { + +#ifdef TARGET_ARCH_MODEL_x86_32 +# include "interp_masm_x86_32.hpp" +#endif +#ifdef TARGET_ARCH_MODEL_x86_64 +# include "interp_masm_x86_64.hpp" +#endif + + private: + + Register _locals_register; // register that contains the pointer to the locals + Register _bcp_register; // register that contains the bcp + + public: +#ifndef CC_INTERP + void profile_obj_type(Register obj, const Address& mdo_addr); + void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual); + void profile_return_type(Register mdp, Register ret, Register tmp); + void profile_parameters_type(Register mdp, Register tmp1, Register tmp2); +#endif /* !CC_INTERP */ + +}; + +#endif // CPU_X86_VM_INTERP_MASM_X86_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/interp_masm_x86_32.cpp --- a/src/cpu/x86/vm/interp_masm_x86_32.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -23,7 +23,7 @@ */ #include "precompiled.hpp" -#include "interp_masm_x86_32.hpp" +#include "interp_masm_x86.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "oops/arrayOop.hpp" @@ -196,7 +196,7 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) { assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode"); - movl(reg, Address(rsi, bcp_offset)); + load_unsigned_short(reg, Address(rsi, bcp_offset)); bswapl(reg); shrl(reg, 16); } @@ -1046,7 +1046,6 @@ } } - void InterpreterMacroAssembler::profile_call(Register mdp) { if (ProfileInterpreter) { Label profile_continue; diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/interp_masm_x86_32.hpp --- a/src/cpu/x86/vm/interp_masm_x86_32.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -22,18 +22,6 @@ * */ -#ifndef CPU_X86_VM_INTERP_MASM_X86_32_HPP -#define CPU_X86_VM_INTERP_MASM_X86_32_HPP - -#include "asm/macroAssembler.hpp" -#include "asm/macroAssembler.inline.hpp" -#include "interpreter/invocationCounter.hpp" -#include "runtime/frame.hpp" - -// This file specializes the assember with interpreter-specific macros - - -class InterpreterMacroAssembler: public MacroAssembler { #ifndef CC_INTERP protected: // Interpreter specific version of call_VM_base @@ -59,7 +47,7 @@ #endif /* CC_INTERP */ public: - InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {} + InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), _locals_register(rdi), _bcp_register(rsi) {} void load_earlyret_value(TosState state); @@ -233,7 +221,3 @@ // support for jvmti void notify_method_entry(); void notify_method_exit(TosState state, NotifyMethodExitMode mode); - -}; - -#endif // 
CPU_X86_VM_INTERP_MASM_X86_32_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/interp_masm_x86_64.cpp --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -23,7 +23,7 @@ */ #include "precompiled.hpp" -#include "interp_masm_x86_64.hpp" +#include "interp_masm_x86.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "oops/arrayOop.hpp" @@ -192,7 +192,7 @@ Register reg, int bcp_offset) { assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode"); - movl(reg, Address(r13, bcp_offset)); + load_unsigned_short(reg, Address(r13, bcp_offset)); bswapl(reg); shrl(reg, 16); } @@ -1067,7 +1067,6 @@ } } - void InterpreterMacroAssembler::profile_call(Register mdp) { if (ProfileInterpreter) { Label profile_continue; diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/interp_masm_x86_64.hpp --- a/src/cpu/x86/vm/interp_masm_x86_64.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -22,19 +22,7 @@ * */ -#ifndef CPU_X86_VM_INTERP_MASM_X86_64_HPP -#define CPU_X86_VM_INTERP_MASM_X86_64_HPP - -#include "asm/macroAssembler.hpp" -#include "asm/macroAssembler.inline.hpp" -#include "interpreter/invocationCounter.hpp" -#include "runtime/frame.hpp" - -// This file specializes the assember with interpreter-specific macros - typedef ByteSize (*OffsetFunction)(uint); - -class InterpreterMacroAssembler: public MacroAssembler { #ifndef CC_INTERP protected: // Interpreter specific version of call_VM_base @@ -56,7 +44,7 @@ #endif // CC_INTERP public: - InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {} + InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), _locals_register(r14), _bcp_register(r13) {} void load_earlyret_value(TosState state); @@ -255,6 +243,3 @@ // support for jvmti/dtrace void notify_method_entry(); void notify_method_exit(TosState state, NotifyMethodExitMode mode); -}; - -#endif // CPU_X86_VM_INTERP_MASM_X86_64_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/macroAssembler_x86.cpp --- a/src/cpu/x86/vm/macroAssembler_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1381,8 +1381,12 @@ jcc(Assembler::greater, loop); // Bang down shadow pages too. - // The -1 because we already subtracted 1 page. - for (int i = 0; i< StackShadowPages-1; i++) { + // At this point, (tmp-0) is the last address touched, so don't + // touch it again. (It was touched as (tmp-pagesize) but then tmp + // was post-decremented.) Skip this address by starting at i=1, and + // touch a few more pages below. N.B. It is important to touch all + // the way down to and including i=StackShadowPages. + for (int i = 1; i <= StackShadowPages; i++) { // this could be any sized move but this is can be a debugging crumb // so the bigger the better. movptr(Address(tmp, (-i*os::vm_page_size())), size ); @@ -3389,13 +3393,18 @@ const Register card_addr = tmp; lea(card_addr, as_Address(ArrayAddress(cardtable, index))); #endif - cmpb(Address(card_addr, 0), 0); + cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); jcc(Assembler::equal, done); + membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); + cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); + jcc(Assembler::equal, done); + + // storing a region crossing, non-NULL oop, card is clean. // dirty card and log. 
- movb(Address(card_addr, 0), 0); + movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); cmpl(queue_index, 0); jcc(Assembler::equal, runtime); @@ -5044,25 +5053,32 @@ } void MacroAssembler::encode_klass_not_null(Register r) { - assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); - // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. - assert(r != r12_heapbase, "Encoding a klass in r12"); - mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); - subq(r, r12_heapbase); + if (Universe::narrow_klass_base() != NULL) { + // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. + assert(r != r12_heapbase, "Encoding a klass in r12"); + mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); + subq(r, r12_heapbase); + } if (Universe::narrow_klass_shift() != 0) { assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); shrq(r, LogKlassAlignmentInBytes); } - reinit_heapbase(); + if (Universe::narrow_klass_base() != NULL) { + reinit_heapbase(); + } } void MacroAssembler::encode_klass_not_null(Register dst, Register src) { if (dst == src) { encode_klass_not_null(src); } else { - mov64(dst, (int64_t)Universe::narrow_klass_base()); - negq(dst); - addq(dst, src); + if (Universe::narrow_klass_base() != NULL) { + mov64(dst, (int64_t)Universe::narrow_klass_base()); + negq(dst); + addq(dst, src); + } else { + movptr(dst, src); + } if (Universe::narrow_klass_shift() != 0) { assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); shrq(dst, LogKlassAlignmentInBytes); @@ -5076,15 +5092,19 @@ // generate change, then this method needs to be updated. int MacroAssembler::instr_size_for_decode_klass_not_null() { assert (UseCompressedClassPointers, "only for compressed klass ptrs"); - // mov64 + addq + shlq? + mov64 (for reinit_heapbase()). - return (Universe::narrow_klass_shift() == 0 ? 20 : 24); + if (Universe::narrow_klass_base() != NULL) { + // mov64 + addq + shlq? + mov64 (for reinit_heapbase()). + return (Universe::narrow_klass_shift() == 0 ? 20 : 24); + } else { + // longest load decode klass function, mov64, leaq + return 16; + } } // !!! If the instructions that get generated here change then function // instr_size_for_decode_klass_not_null() needs to get updated. void MacroAssembler::decode_klass_not_null(Register r) { // Note: it will change flags - assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert(r != r12_heapbase, "Decoding a klass in r12"); // Cannot assert, unverified entry point counts instructions (see .ad file) @@ -5095,14 +5115,15 @@ shlq(r, LogKlassAlignmentInBytes); } // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. 
- mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); - addq(r, r12_heapbase); - reinit_heapbase(); + if (Universe::narrow_klass_base() != NULL) { + mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); + addq(r, r12_heapbase); + reinit_heapbase(); + } } void MacroAssembler::decode_klass_not_null(Register dst, Register src) { // Note: it will change flags - assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); assert (UseCompressedClassPointers, "should only be used for compressed headers"); if (dst == src) { decode_klass_not_null(dst); @@ -5110,7 +5131,6 @@ // Cannot assert, unverified entry point counts instructions (see .ad file) // vtableStubs also counts instructions in pd_code_size_limit. // Also do not verify_oop as this is called by verify_oop. - mov64(dst, (int64_t)Universe::narrow_klass_base()); if (Universe::narrow_klass_shift() != 0) { assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/macroAssembler_x86.hpp --- a/src/cpu/x86/vm/macroAssembler_x86.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -773,6 +773,7 @@ void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } + void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); } void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } void testptr(Register src1, Register src2); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/register_definitions_x86.cpp --- a/src/cpu/x86/vm/register_definitions_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/register_definitions_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -26,11 +26,8 @@ #include "asm/assembler.hpp" #include "asm/register.hpp" #include "register_x86.hpp" -#ifdef TARGET_ARCH_MODEL_x86_32 -# include "interp_masm_x86_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_x86_64 -# include "interp_masm_x86_64.hpp" +#ifdef TARGET_ARCH_x86 +# include "interp_masm_x86.hpp" #endif REGISTER_DEFINITION(Register, noreg); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/templateInterpreter_x86.hpp --- a/src/cpu/x86/vm/templateInterpreter_x86.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/templateInterpreter_x86.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -34,9 +34,9 @@ // Run with +PrintInterpreter to get the VM to print out the size. 
// Max size with JVMTI #ifdef AMD64 - const static int InterpreterCodeSize = GRAAL_ONLY(256) NOT_GRAAL(208) * 1024; + const static int InterpreterCodeSize = 256 * 1024; #else - const static int InterpreterCodeSize = 176 * 1024; + const static int InterpreterCodeSize = 224 * 1024; #endif // AMD64 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/templateInterpreter_x86_32.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -150,13 +150,12 @@ } -address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { - TosState incoming_state = state; +address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { address entry = __ pc(); #ifdef COMPILER2 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases - if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) { + if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { for (int i = 1; i < 8; i++) { __ ffree(i); } @@ -164,7 +163,7 @@ __ empty_FPU_stack(); } #endif - if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) { + if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled"); } else { __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled"); @@ -172,12 +171,12 @@ // In SSE mode, interpreter returns FP results in xmm0 but they need // to end up back on the FPU so it can operate on them. - if (incoming_state == ftos && UseSSE >= 1) { + if (state == ftos && UseSSE >= 1) { __ subptr(rsp, wordSize); __ movflt(Address(rsp, 0), xmm0); __ fld_s(Address(rsp, 0)); __ addptr(rsp, wordSize); - } else if (incoming_state == dtos && UseSSE >= 2) { + } else if (state == dtos && UseSSE >= 2) { __ subptr(rsp, 2*wordSize); __ movdbl(Address(rsp, 0), xmm0); __ fld_d(Address(rsp, 0)); @@ -194,27 +193,22 @@ __ restore_bcp(); __ restore_locals(); - Label L_got_cache, L_giant_index; - if (EnableInvokeDynamic) { - __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic); - __ jcc(Assembler::equal, L_giant_index); + if (state == atos) { + Register mdp = rbx; + Register tmp = rcx; + __ profile_return_type(mdp, rax, tmp); } - __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2)); - __ bind(L_got_cache); - __ movl(rbx, Address(rbx, rcx, - Address::times_ptr, ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset())); - __ andptr(rbx, 0xFF); - __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); + + const Register cache = rbx; + const Register index = rcx; + __ get_cache_and_index_at_bcp(cache, index, 1, index_size); + + const Register flags = cache; + __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); + __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask); + __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale())); __ dispatch_next(state, step); - // out of the main line of code... 
- if (EnableInvokeDynamic) { - __ bind(L_giant_index); - __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4)); - __ jmp(L_got_cache); - } - return entry; } @@ -1484,6 +1478,7 @@ in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); __ movbool(do_not_unlock_if_synchronized, true); + __ profile_parameters_type(rax, rcx, rdx); // increment invocation count & check for overflow Label invocation_counter_overflow; Label profile_method; diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/templateInterpreter_x86_64.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -169,7 +169,7 @@ } -address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { +address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { address entry = __ pc(); // Restore stack bottom in case i2c adjusted stack @@ -180,28 +180,22 @@ __ restore_bcp(); __ restore_locals(); - Label L_got_cache, L_giant_index; - if (EnableInvokeDynamic) { - __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic); - __ jcc(Assembler::equal, L_giant_index); + if (state == atos) { + Register mdp = rbx; + Register tmp = rcx; + __ profile_return_type(mdp, rax, tmp); } - __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2)); - __ bind(L_got_cache); - __ movl(rbx, Address(rbx, rcx, - Address::times_ptr, - in_bytes(ConstantPoolCache::base_offset()) + - 3 * wordSize)); - __ andl(rbx, 0xFF); - __ lea(rsp, Address(rsp, rbx, Address::times_8)); + + const Register cache = rbx; + const Register index = rcx; + __ get_cache_and_index_at_bcp(cache, index, 1, index_size); + + const Register flags = cache; + __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); + __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask); + __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale())); __ dispatch_next(state, step); - // out of the main line of code... - if (EnableInvokeDynamic) { - __ bind(L_giant_index); - __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4)); - __ jmp(L_got_cache); - } - return entry; } @@ -1566,6 +1560,7 @@ in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); __ movbool(do_not_unlock_if_synchronized, true); + __ profile_parameters_type(rax, rcx, rdx); // increment invocation count & check for overflow Label invocation_counter_overflow; Label profile_method; diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/templateTable_x86_32.cpp --- a/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -558,7 +558,7 @@ void TemplateTable::locals_index_wide(Register reg) { - __ movl(reg, at_bcp(2)); + __ load_unsigned_short(reg, at_bcp(2)); __ bswapl(reg); __ shrl(reg, 16); __ negptr(reg); @@ -1552,7 +1552,11 @@ InvocationCounter::counter_offset(); // Load up EDX with the branch displacement - __ movl(rdx, at_bcp(1)); + if (is_wide) { + __ movl(rdx, at_bcp(1)); + } else { + __ load_signed_short(rdx, at_bcp(1)); + } __ bswapl(rdx); if (!is_wide) __ sarl(rdx, 16); LP64_ONLY(__ movslq(rdx, rdx)); @@ -2925,9 +2929,7 @@ ConstantPoolCacheEntry::verify_tos_state_shift(); // load return address { - const address table_addr = (is_invokeinterface || is_invokedynamic) ? 
- (address)Interpreter::return_5_addrs_by_index_table() : - (address)Interpreter::return_3_addrs_by_index_table(); + const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); ExternalAddress table(table_addr); __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))); } @@ -2970,6 +2972,7 @@ // profile this call __ profile_final_call(rax); + __ profile_arguments_type(rax, method, rsi, true); __ jump_from_interpreted(method, rax); @@ -2984,6 +2987,7 @@ // get target Method* & entry point __ lookup_virtual_method(rax, index, method); + __ profile_arguments_type(rdx, method, rsi, true); __ jump_from_interpreted(method, rdx); } @@ -3013,6 +3017,7 @@ __ null_check(rcx); // do the call __ profile_call(rax); + __ profile_arguments_type(rax, rbx, rsi, false); __ jump_from_interpreted(rbx, rax); } @@ -3023,6 +3028,7 @@ prepare_invoke(byte_no, rbx); // get f1 Method* // do the call __ profile_call(rax); + __ profile_arguments_type(rax, rbx, rsi, false); __ jump_from_interpreted(rbx, rax); } @@ -3082,6 +3088,8 @@ __ testptr(rbx, rbx); __ jcc(Assembler::zero, no_such_method); + __ profile_arguments_type(rdx, rbx, rsi, true); + // do the call // rcx: receiver // rbx,: Method* @@ -3138,6 +3146,7 @@ // FIXME: profile the LambdaForm also __ profile_final_call(rax); + __ profile_arguments_type(rdx, rbx_method, rsi, true); __ jump_from_interpreted(rbx_method, rdx); } @@ -3171,6 +3180,7 @@ // %%% should make a type profile for any invokedynamic that takes a ref argument // profile this call __ profile_call(rsi); + __ profile_arguments_type(rdx, rbx, rsi, false); __ verify_oop(rax_callsite); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/templateTable_x86_64.cpp --- a/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -568,7 +568,7 @@ } void TemplateTable::locals_index_wide(Register reg) { - __ movl(reg, at_bcp(2)); + __ load_unsigned_short(reg, at_bcp(2)); __ bswapl(reg); __ shrl(reg, 16); __ negptr(reg); @@ -1575,7 +1575,11 @@ InvocationCounter::counter_offset(); // Load up edx with the branch displacement - __ movl(rdx, at_bcp(1)); + if (is_wide) { + __ movl(rdx, at_bcp(1)); + } else { + __ load_signed_short(rdx, at_bcp(1)); + } __ bswapl(rdx); if (!is_wide) { @@ -2980,9 +2984,7 @@ ConstantPoolCacheEntry::verify_tos_state_shift(); // load return address { - const address table_addr = (is_invokeinterface || is_invokedynamic) ? 
- (address)Interpreter::return_5_addrs_by_index_table() : - (address)Interpreter::return_3_addrs_by_index_table(); + const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); ExternalAddress table(table_addr); __ lea(rscratch1, table); __ movptr(flags, Address(rscratch1, flags, Address::times_ptr)); @@ -3026,6 +3028,7 @@ // profile this call __ profile_final_call(rax); + __ profile_arguments_type(rax, method, r13, true); __ jump_from_interpreted(method, rax); @@ -3040,10 +3043,12 @@ // get target Method* & entry point __ lookup_virtual_method(rax, index, method); + __ profile_arguments_type(rdx, method, r13, true); #ifdef GRAAL // r14: MethodDataPointer (r14 is callee saved) __ profile_called_method(method, r14, r13); #endif + __ jump_from_interpreted(method, rdx); } @@ -3073,6 +3078,7 @@ __ null_check(rcx); // do the call __ profile_call(rax); + __ profile_arguments_type(rax, rbx, r13, false); __ jump_from_interpreted(rbx, rax); } @@ -3083,6 +3089,7 @@ prepare_invoke(byte_no, rbx); // get f1 Method* // do the call __ profile_call(rax); + __ profile_arguments_type(rax, rbx, r13, false); __ jump_from_interpreted(rbx, rax); } @@ -3140,6 +3147,8 @@ __ testptr(rbx, rbx); __ jcc(Assembler::zero, no_such_method); + __ profile_arguments_type(rdx, rbx, r13, true); + // do the call // rcx: receiver // rbx,: Method* @@ -3201,6 +3210,7 @@ // FIXME: profile the LambdaForm also __ profile_final_call(rax); + __ profile_arguments_type(rdx, rbx_method, r13, true); __ jump_from_interpreted(rbx_method, rdx); } @@ -3234,6 +3244,7 @@ // %%% should make a type profile for any invokedynamic that takes a ref argument // profile this call __ profile_call(r13); + __ profile_arguments_type(rdx, rbx_method, r13, false); __ verify_oop(rax_callsite); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/vtableStubs_x86_32.cpp --- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.hpp" #include "code/vtableStubs.hpp" -#include "interp_masm_x86_32.hpp" +#include "interp_masm_x86.hpp" #include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" #include "oops/klassVtable.hpp" diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/vtableStubs_x86_64.cpp --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.hpp" #include "code/vtableStubs.hpp" -#include "interp_masm_x86_64.hpp" +#include "interp_masm_x86.hpp" #include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" #include "oops/klassVtable.hpp" diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/x86_32.ad --- a/src/cpu/x86/vm/x86_32.ad Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/x86_32.ad Thu Nov 21 15:04:54 2013 +0100 @@ -1538,6 +1538,11 @@ return EAX_REG_mask(); } +const RegMask Matcher::mathExactL_result_proj_mask() { + ShouldNotReachHere(); + return RegMask(); +} + const RegMask Matcher::mathExactI_flags_proj_mask() { return INT_FLAGS_mask(); } @@ -7519,7 +7524,7 @@ //----------Arithmetic Instructions-------------------------------------------- //----------Addition Instructions---------------------------------------------- -instruct addExactI_rReg(eAXRegI dst, rRegI src, eFlagsReg cr) +instruct addExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr) %{ match(AddExactI dst src); 
effect(DEF cr); @@ -7531,7 +7536,7 @@ ins_pipe(ialu_reg_reg); %} -instruct addExactI_rReg_imm(eAXRegI dst, immI src, eFlagsReg cr) +instruct addExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr) %{ match(AddExactI dst src); effect(DEF cr); @@ -7543,6 +7548,20 @@ ins_pipe(ialu_reg_reg); %} +instruct addExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr) +%{ + match(AddExactI dst (LoadI src)); + effect(DEF cr); + + ins_cost(125); + format %{ "ADD $dst,$src\t# addExact int" %} + ins_encode %{ + __ addl($dst$$Register, $src$$Address); + %} + ins_pipe( ialu_reg_mem ); +%} + + // Integer Addition Instructions instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{ match(Set dst (AddI dst src)); @@ -7851,6 +7870,44 @@ %} //----------Subtraction Instructions------------------------------------------- + +instruct subExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr) +%{ + match(SubExactI dst src); + effect(DEF cr); + + format %{ "SUB $dst, $src\t# subExact int" %} + ins_encode %{ + __ subl($dst$$Register, $src$$Register); + %} + ins_pipe(ialu_reg_reg); +%} + +instruct subExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr) +%{ + match(SubExactI dst src); + effect(DEF cr); + + format %{ "SUB $dst, $src\t# subExact int" %} + ins_encode %{ + __ subl($dst$$Register, $src$$constant); + %} + ins_pipe(ialu_reg_reg); +%} + +instruct subExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr) +%{ + match(SubExactI dst (LoadI src)); + effect(DEF cr); + + ins_cost(125); + format %{ "SUB $dst,$src\t# subExact int" %} + ins_encode %{ + __ subl($dst$$Register, $src$$Address); + %} + ins_pipe( ialu_reg_mem ); +%} + // Integer Subtraction Instructions instruct subI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{ match(Set dst (SubI dst src)); @@ -7919,6 +7976,16 @@ ins_pipe( ialu_reg ); %} +instruct negExactI_eReg(eAXRegI dst, eFlagsReg cr) %{ + match(NegExactI dst); + effect(DEF cr); + + format %{ "NEG $dst\t# negExact int"%} + ins_encode %{ + __ negl($dst$$Register); + %} + ins_pipe(ialu_reg); +%} //----------Multiplication/Division Instructions------------------------------- // Integer Multiplication Instructions @@ -8131,6 +8198,46 @@ ins_pipe( pipe_slow ); %} +instruct mulExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr) +%{ + match(MulExactI dst src); + effect(DEF cr); + + ins_cost(300); + format %{ "IMUL $dst, $src\t# mulExact int" %} + ins_encode %{ + __ imull($dst$$Register, $src$$Register); + %} + ins_pipe(ialu_reg_reg_alu0); +%} + +instruct mulExactI_eReg_imm(eAXRegI dst, rRegI src, immI imm, eFlagsReg cr) +%{ + match(MulExactI src imm); + effect(DEF cr); + + ins_cost(300); + format %{ "IMUL $dst, $src, $imm\t# mulExact int" %} + ins_encode %{ + __ imull($dst$$Register, $src$$Register, $imm$$constant); + %} + ins_pipe(ialu_reg_reg_alu0); +%} + +instruct mulExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr) +%{ + match(MulExactI dst (LoadI src)); + effect(DEF cr); + + ins_cost(350); + format %{ "IMUL $dst, $src\t# mulExact int" %} + ins_encode %{ + __ imull($dst$$Register, $src$$Address); + %} + ins_pipe(ialu_reg_mem_alu0); +%} + + // Integer DIV with Register instruct divI_eReg(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{ match(Set rax (DivI rax div)); diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/x86/vm/x86_64.ad --- a/src/cpu/x86/vm/x86_64.ad Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/x86/vm/x86_64.ad Thu Nov 21 15:04:54 2013 +0100 @@ -1653,6 +1653,10 @@ return INT_RAX_REG_mask(); } +const RegMask Matcher::mathExactL_result_proj_mask() { + return LONG_RAX_REG_mask(); +} + const RegMask 
Matcher::mathExactI_flags_proj_mask() {
   return INT_FLAGS_mask();
 }
@@ -6962,6 +6966,58 @@
   ins_pipe(ialu_reg_reg);
 %}
+instruct addExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
+%{
+  match(AddExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(125); // XXX
+  format %{ "addl $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Address);
+  %}
+
+  ins_pipe(ialu_reg_mem);
+%}
+
+instruct addExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
+%{
+  match(AddExactL dst src);
+  effect(DEF cr);
+
+  format %{ "addq $dst, $src\t# addExact long" %}
+  ins_encode %{
+    __ addq($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
+%{
+  match(AddExactL dst src);
+  effect(DEF cr);
+
+  format %{ "addq $dst, $src\t# addExact long" %}
+  ins_encode %{
+    __ addq($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
+%{
+  match(AddExactL dst (LoadL src));
+  effect(DEF cr);
+
+  ins_cost(125); // XXX
+  format %{ "addq $dst, $src\t# addExact long" %}
+  ins_encode %{
+    __ addq($dst$$Register, $src$$Address);
+  %}
+
+  ins_pipe(ialu_reg_mem);
+%}
+
 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
 %{
   match(Set dst (AddI dst src));
@@ -7574,6 +7630,80 @@
   ins_pipe(ialu_mem_imm);
 %}
+instruct subExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
+%{
+  match(SubExactI dst src);
+  effect(DEF cr);
+
+  format %{ "subl $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
+%{
+  match(SubExactI dst src);
+  effect(DEF cr);
+
+  format %{ "subl $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
+%{
+  match(SubExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(125);
+  format %{ "subl $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
+instruct subExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
+%{
+  match(SubExactL dst src);
+  effect(DEF cr);
+
+  format %{ "subq $dst, $src\t# subExact long" %}
+  ins_encode %{
+    __ subq($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
+%{
+  match(SubExactL dst src);
+  effect(DEF cr);
+
+  format %{ "subq $dst, $src\t# subExact long" %}
+  ins_encode %{
+    __ subq($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
+%{
+  match(SubExactL dst (LoadL src));
+  effect(DEF cr);
+
+  ins_cost(125);
+  format %{ "subq $dst, $src\t# subExact long" %}
+  ins_encode %{
+    __ subq($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
 instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
 %{
   match(Set dst (SubL dst src));
@@ -7690,6 +7820,30 @@
   ins_pipe(ialu_reg);
 %}
+instruct negExactI_rReg(rax_RegI dst, rFlagsReg cr)
+%{
+  match(NegExactI dst);
+  effect(KILL cr);
+
+  format %{ "negl $dst\t# negExact int" %}
+  ins_encode %{
+    __ negl($dst$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct negExactL_rReg(rax_RegL dst, rFlagsReg cr)
+%{
+  match(NegExactL dst);
+  effect(KILL cr);
+
+  format %{ "negq $dst\t# negExact long" %}
+  ins_encode %{
+    __
negq($dst$$Register); + %} + ins_pipe(ialu_reg); +%} + //----------Multiplication/Division Instructions------------------------------- // Integer Multiplication Instructions @@ -7807,6 +7961,86 @@ ins_pipe(ialu_reg_reg_alu0); %} + +instruct mulExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr) +%{ + match(MulExactI dst src); + effect(DEF cr); + + ins_cost(300); + format %{ "imull $dst, $src\t# mulExact int" %} + ins_encode %{ + __ imull($dst$$Register, $src$$Register); + %} + ins_pipe(ialu_reg_reg_alu0); +%} + + +instruct mulExactI_rReg_imm(rax_RegI dst, rRegI src, immI imm, rFlagsReg cr) +%{ + match(MulExactI src imm); + effect(DEF cr); + + ins_cost(300); + format %{ "imull $dst, $src, $imm\t# mulExact int" %} + ins_encode %{ + __ imull($dst$$Register, $src$$Register, $imm$$constant); + %} + ins_pipe(ialu_reg_reg_alu0); +%} + +instruct mulExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr) +%{ + match(MulExactI dst (LoadI src)); + effect(DEF cr); + + ins_cost(350); + format %{ "imull $dst, $src\t# mulExact int" %} + ins_encode %{ + __ imull($dst$$Register, $src$$Address); + %} + ins_pipe(ialu_reg_mem_alu0); +%} + +instruct mulExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr) +%{ + match(MulExactL dst src); + effect(DEF cr); + + ins_cost(300); + format %{ "imulq $dst, $src\t# mulExact long" %} + ins_encode %{ + __ imulq($dst$$Register, $src$$Register); + %} + ins_pipe(ialu_reg_reg_alu0); +%} + +instruct mulExactL_rReg_imm(rax_RegL dst, rRegL src, immL32 imm, rFlagsReg cr) +%{ + match(MulExactL src imm); + effect(DEF cr); + + ins_cost(300); + format %{ "imulq $dst, $src, $imm\t# mulExact long" %} + ins_encode %{ + __ imulq($dst$$Register, $src$$Register, $imm$$constant); + %} + ins_pipe(ialu_reg_reg_alu0); +%} + +instruct mulExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr) +%{ + match(MulExactL dst (LoadL src)); + effect(DEF cr); + + ins_cost(350); + format %{ "imulq $dst, $src\t# mulExact long" %} + ins_encode %{ + __ imulq($dst$$Register, $src$$Address); + %} + ins_pipe(ialu_reg_mem_alu0); +%} + instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div, rFlagsReg cr) %{ diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/zero/vm/cppInterpreter_zero.cpp --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1006,7 +1006,7 @@ istate->set_stack_limit(stack_base - method->max_stack() - 1); } -address CppInterpreter::return_entry(TosState state, int length) { +address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) { ShouldNotCallThis(); return NULL; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/cpu/zero/vm/globals_zero.hpp --- a/src/cpu/zero/vm/globals_zero.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/cpu/zero/vm/globals_zero.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -57,6 +57,8 @@ // GC Ergo Flags define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread +define_pd_global(uintx, TypeProfileLevel, 0); + #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/gpu/hsail/vm/gpu_hsail.cpp --- a/src/gpu/hsail/vm/gpu_hsail.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/gpu/hsail/vm/gpu_hsail.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -154,13 +154,19 @@ return true; } else { // Unable to dlopen okra - tty->print_cr("[HSAIL] library load failed."); + if (TraceGPUInteraction) { + tty->print_cr("[HSAIL] library load 
failed."); + } return false; } } else { - tty->print_cr("Unsupported HSAIL platform"); + if (TraceGPUInteraction) { + tty->print_cr("Unsupported HSAIL platform"); + } return false; } - tty->print_cr("Failed to find HSAIL linkage"); + if (TraceGPUInteraction) { + tty->print_cr("Failed to find HSAIL linkage"); + } return false; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/gpu/hsail/vm/hsailKernelArguments.cpp --- a/src/gpu/hsail/vm/hsailKernelArguments.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/gpu/hsail/vm/hsailKernelArguments.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -93,7 +93,7 @@ void HSAILKernelArguments::do_int() { // The last int is the iteration variable in an IntStream, but we don't pass it // since we use the HSAIL workitemid in place of that int value - if (_index == _length) { + if (_parameter_index == _parameter_count - 1) { if (TraceGPUInteraction) { tty->print_cr("[HSAIL] HSAILKernelArguments::not pushing trailing int"); } @@ -136,17 +136,19 @@ void HSAILKernelArguments::do_object() { if (TraceGPUInteraction) { - tty->print_cr("[HSAIL] HSAILKernelArguments::do_object."); + tty->print_cr("[HSAIL] HSAILKernelArguments::do_object, _parameter_index=%d", _parameter_index); } - if (_index == _length) { - // last arg in object stream lambda is the object stream source array + oop arg = _args->obj_at(_index++); + + // check if this is last arg in signature + // an object as last parameter requires an object stream source array to be passed + if (_parameter_index == _parameter_count - 1) { if (TraceGPUInteraction) { tty->print_cr("[HSAIL] HSAILKernelArguments::trailing object ref should be object source array ref"); } + assert(arg->is_objArray(), "arg type mismatch"); } - oop arg = _args->obj_at(_index++); - assert(arg->is_array(), "arg type mismatch"); if (TraceGPUInteraction) { tty->print_cr("[HSAIL] HSAILKernelArguments::do_object, 0x%08x is a %s", (address) arg, arg->klass()->external_name()); } @@ -157,24 +159,9 @@ void HSAILKernelArguments::do_object(int begin, int end) { if (TraceGPUInteraction) { - tty->print_cr("[HSAIL] HSAILKernelArguments::do_object(int begin, int end)."); - } - - if ((!_is_static && (_index >=(_length-1))) || (_is_static && (_index >=(_length)))) { - // last arg in object stream lambda is the object stream source array - if (TraceGPUInteraction) { - tty->print_cr("[HSAIL] HSAILKernelArguments::trailing object ref should be object source array ref"); - } + tty->print_cr("[HSAIL] HSAILKernelArguments::do_object(int begin, int end), begin=%d, end=%d.", begin, end); } - - oop arg = _args->obj_at(_index++); - assert(arg->is_array(), "arg type mismatch"); - if (TraceGPUInteraction) { - tty->print_cr("[HSAIL] HSAILKernelArguments::do_object(int, int), 0x%08x is a %s", (address) arg, arg->klass()->external_name()); - } - - bool pushed = gpu::Hsail::_okra_push_object(_kernel, arg); - assert(pushed == true, "arg push failed"); + do_object(); } void HSAILKernelArguments::do_void() { diff -r 790ebab62d23 -r f9f4503a4ab5 src/gpu/hsail/vm/hsailKernelArguments.hpp --- a/src/gpu/hsail/vm/hsailKernelArguments.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/gpu/hsail/vm/hsailKernelArguments.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -42,6 +42,8 @@ int _index; // Kernel to push into address _kernel; + // number of parameters in the signature + int _parameter_count; bool _is_static; @@ -55,8 +57,10 @@ _args = args; _kernel = kernel; _is_static = is_static; - + _length = args->length(); + _parameter_count = ArgumentCount(signature).size(); + if (TraceGPUInteraction) { 
tty->print_cr("[HSAIL] sig:%s args length=%d", signature->as_C_string(), _length); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/os/bsd/vm/osThread_bsd.hpp --- a/src/os/bsd/vm/osThread_bsd.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/os/bsd/vm/osThread_bsd.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -42,7 +42,7 @@ #ifdef __APPLE__ typedef thread_t thread_id_t; #else - typedef pthread_t thread_id_t; + typedef pid_t thread_id_t; #endif // _pthread_id is the pthread id, which is used by library calls diff -r 790ebab62d23 -r f9f4503a4ab5 src/os/bsd/vm/os_bsd.cpp --- a/src/os/bsd/vm/os_bsd.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/os/bsd/vm/os_bsd.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -100,6 +100,7 @@ # include # include # include +# include #if defined(__FreeBSD__) || defined(__NetBSD__) # include @@ -152,14 +153,27 @@ // utility functions static int SR_initialize(); +static void unpackTime(timespec* absTime, bool isAbsolute, jlong time); julong os::available_memory() { return Bsd::available_memory(); } +// available here means free julong os::Bsd::available_memory() { - // XXXBSD: this is just a stopgap implementation - return physical_memory() >> 2; + uint64_t available = physical_memory() >> 2; +#ifdef __APPLE__ + mach_msg_type_number_t count = HOST_VM_INFO64_COUNT; + vm_statistics64_data_t vmstat; + kern_return_t kerr = host_statistics64(mach_host_self(), HOST_VM_INFO64, + (host_info64_t)&vmstat, &count); + assert(kerr == KERN_SUCCESS, + "host_statistics64 failed - check mach_host_self() and count"); + if (kerr == KERN_SUCCESS) { + available = vmstat.free_count * os::vm_page_size(); + } +#endif + return available; } julong os::physical_memory() { @@ -247,7 +261,17 @@ * since it returns a 64 bit value) */ mib[0] = CTL_HW; + +#if defined (HW_MEMSIZE) // Apple mib[1] = HW_MEMSIZE; +#elif defined(HW_PHYSMEM) // Most of BSD + mib[1] = HW_PHYSMEM; +#elif defined(HW_REALMEM) // Old FreeBSD + mib[1] = HW_REALMEM; +#else + #error No ways to get physmem +#endif + len = sizeof(mem_val); if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) { assert(len == sizeof(mem_val), "unexpected data size"); @@ -679,18 +703,12 @@ return NULL; } + osthread->set_thread_id(os::Bsd::gettid()); + #ifdef __APPLE__ - // thread_id is mach thread on macos, which pthreads graciously caches and provides for us - mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self()); - guarantee(thread_id != 0, "thread id missing from pthreads"); - osthread->set_thread_id(thread_id); - - uint64_t unique_thread_id = locate_unique_thread_id(thread_id); + uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id()); guarantee(unique_thread_id != 0, "unique thread id was not found"); osthread->set_unique_thread_id(unique_thread_id); -#else - // thread_id is pthread_id on BSD - osthread->set_thread_id(::pthread_self()); #endif // initialize signal mask for this thread os::Bsd::hotspot_sigmask(thread); @@ -847,18 +865,13 @@ return false; } + osthread->set_thread_id(os::Bsd::gettid()); + // Store pthread info into the OSThread #ifdef __APPLE__ - // thread_id is mach thread on macos, which pthreads graciously caches and provides for us - mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self()); - guarantee(thread_id != 0, "just checking"); - osthread->set_thread_id(thread_id); - - uint64_t unique_thread_id = locate_unique_thread_id(thread_id); + uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id()); guarantee(unique_thread_id != 0, "just checking"); osthread->set_unique_thread_id(unique_thread_id); 
-#else
-  osthread->set_thread_id(::pthread_self());
 #endif
   osthread->set_pthread_id(::pthread_self());
@@ -932,17 +945,15 @@
 // Used by VMSelfDestructTimer and the MemProfiler.
 double os::elapsedTime() {
-  return (double)(os::elapsed_counter()) * 0.000001;
+  return ((double)os::elapsed_counter()) / os::elapsed_frequency();
 }
 
 jlong os::elapsed_counter() {
-  timeval time;
-  int status = gettimeofday(&time, NULL);
-  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
+  return javaTimeNanos() - initial_time_count;
 }
 
 jlong os::elapsed_frequency() {
-  return (1000 * 1000);
+  return NANOSECS_PER_SEC; // nanosecond resolution
 }
 
 bool os::supports_vtime() { return true; }
@@ -1125,6 +1136,31 @@
   return n;
 }
 
+// Information about the current thread in a variety of formats
+pid_t os::Bsd::gettid() {
+  int retval = -1;
+
+#ifdef __APPLE__ //XNU kernel
+  // Even though a mach port is not actually a thread id, use it instead
+  // of syscall(SYS_thread_selfid) since it is guaranteed to fit in a u4.
+  retval = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(retval != 0, "just checking");
+  return retval;
+
+#elif __FreeBSD__
+  retval = syscall(SYS_thr_self);
+#elif __OpenBSD__
+  retval = syscall(SYS_getthrid);
+#elif __NetBSD__
+  retval = (pid_t) syscall(SYS__lwp_self);
+#endif
+
+  if (retval == -1) {
+    return getpid();
+  }
+  return retval;
+}
+
 intx os::current_thread_id() {
 #ifdef __APPLE__
   return (intx)::pthread_mach_thread_np(::pthread_self());
@@ -1132,6 +1167,7 @@
   return (intx)::pthread_self();
 #endif
 }
+
 int os::current_process_id() {
 
   // Under the old bsd thread library, bsd gives each thread
@@ -1904,7 +1940,7 @@
   bool timedwait(unsigned int sec, int nsec);
  private:
   jlong currenttime() const;
-  semaphore_t _semaphore;
+  os_semaphore_t _semaphore;
 };
 
 Semaphore::Semaphore() : _semaphore(0) {
@@ -1972,7 +2008,7 @@
 bool Semaphore::timedwait(unsigned int sec, int nsec) {
   struct timespec ts;
-  jlong endtime = unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
+  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
 
   while (1) {
     int result = sem_timedwait(&_semaphore, &ts);
@@ -3544,7 +3580,7 @@
   Bsd::_main_thread = pthread_self();
 
   Bsd::clock_init();
-  initial_time_count = os::elapsed_counter();
+  initial_time_count = javaTimeNanos();
 
 #ifdef __APPLE__
   // XXXDARWIN
@@ -4708,6 +4744,10 @@
 // as libawt.so, and renamed libawt_xawt.so
 //
 bool os::is_headless_jre() {
+#ifdef __APPLE__
+  // We no longer build headless-only on Mac OS X
+  return false;
+#else
   struct stat statbuf;
   char buf[MAXPATHLEN];
   char libmawtpath[MAXPATHLEN];
@@ -4739,6 +4779,7 @@
   if (::stat(libmawtpath, &statbuf) == 0) return false;
 
   return true;
+#endif
 }
 
 // Get the default path to the core file
diff -r 790ebab62d23 -r f9f4503a4ab5 src/os/bsd/vm/os_bsd.hpp
--- a/src/os/bsd/vm/os_bsd.hpp Thu Nov 21 15:04:26 2013 +0100
+++ b/src/os/bsd/vm/os_bsd.hpp Thu Nov 21 15:04:54 2013 +0100
@@ -84,6 +84,7 @@
   static void hotspot_sigmask(Thread* thread);
 
   static bool is_initial_thread(void);
+  static pid_t gettid();
 
   static int page_size(void) { return _page_size; }
   static void set_page_size(int val) { _page_size = val; }
diff -r 790ebab62d23 -r f9f4503a4ab5 src/os/linux/vm/globals_linux.hpp
--- a/src/os/linux/vm/globals_linux.hpp Thu Nov 21 15:04:26 2013 +0100
+++ b/src/os/linux/vm/globals_linux.hpp Thu Nov 21 15:04:54 2013 +0100
@@ -53,7 +53,7 @@
 // Defines Linux-specific default values. The flags are available on all
 // platforms, but they may have different default values on other platforms.
// -define_pd_global(bool, UseLargePages, true); +define_pd_global(bool, UseLargePages, false); define_pd_global(bool, UseLargePagesIndividualAllocation, false); define_pd_global(bool, UseOSErrorReporting, false); define_pd_global(bool, UseThreadPriorities, true) ; diff -r 790ebab62d23 -r f9f4503a4ab5 src/os/linux/vm/os_linux.cpp --- a/src/os/linux/vm/os_linux.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/os/linux/vm/os_linux.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1333,17 +1333,15 @@ // Used by VMSelfDestructTimer and the MemProfiler. double os::elapsedTime() { - return (double)(os::elapsed_counter()) * 0.000001; + return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution } jlong os::elapsed_counter() { - timeval time; - int status = gettimeofday(&time, NULL); - return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count; + return javaTimeNanos() - initial_time_count; } jlong os::elapsed_frequency() { - return (1000 * 1000); + return NANOSECS_PER_SEC; // nanosecond resolution } bool os::supports_vtime() { return true; } @@ -3361,13 +3359,15 @@ if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM) && FLAG_IS_DEFAULT(UseTransparentHugePages)) { - // If UseLargePages is specified on the command line try all methods, - // if it's default, then try only UseTransparentHugePages. - if (FLAG_IS_DEFAULT(UseLargePages)) { - UseTransparentHugePages = true; - } else { - UseHugeTLBFS = UseTransparentHugePages = UseSHM = true; - } + + // The type of large pages has not been specified by the user. + + // Try UseHugeTLBFS and then UseSHM. + UseHugeTLBFS = UseSHM = true; + + // Don't try UseTransparentHugePages since there are known + // performance issues with it turned on. This might change in the future. + UseTransparentHugePages = false; } if (UseTransparentHugePages) { @@ -3393,9 +3393,19 @@ } void os::large_page_init() { - if (!UseLargePages) { + if (!UseLargePages && + !UseTransparentHugePages && + !UseHugeTLBFS && + !UseSHM) { + // Not using large pages. + return; + } + + if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) { + // The user explicitly turned off large pages. + // Ignore the rest of the large pages flags. 
+ UseTransparentHugePages = false; UseHugeTLBFS = false; - UseTransparentHugePages = false; UseSHM = false; return; } @@ -4738,7 +4748,7 @@ Linux::_main_thread = pthread_self(); Linux::clock_init(); - initial_time_count = os::elapsed_counter(); + initial_time_count = javaTimeNanos(); // pthread_condattr initialization for monotonic clock int status; diff -r 790ebab62d23 -r f9f4503a4ab5 src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp --- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -79,6 +79,15 @@ # include #endif +// needed by current_stack_region() workaround for Mavericks +#if defined(__APPLE__) +# include +# include +# include +# define DEFAULT_MAIN_THREAD_STACK_PAGES 2048 +# define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13 +#endif + #ifdef AMD64 #define SPELL_REG_SP "rsp" #define SPELL_REG_FP "rbp" @@ -828,6 +837,21 @@ pthread_t self = pthread_self(); void *stacktop = pthread_get_stackaddr_np(self); *size = pthread_get_stacksize_np(self); + // workaround for OS X 10.9.0 (Mavericks) + // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages + if (pthread_main_np() == 1) { + if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) { + char kern_osrelease[256]; + size_t kern_osrelease_size = sizeof(kern_osrelease); + int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0); + if (ret == 0) { + // get the major number, atoi will ignore the minor amd micro portions of the version string + if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) { + *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize()); + } + } + } + } *bottom = (address) stacktop - *size; #elif defined(__OpenBSD__) stack_t ss; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/adlc/archDesc.cpp --- a/src/share/vm/adlc/archDesc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/adlc/archDesc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1193,6 +1193,13 @@ || strcmp(idealName,"FastLock") == 0 || strcmp(idealName,"FastUnlock") == 0 || strcmp(idealName,"AddExactI") == 0 + || strcmp(idealName,"AddExactL") == 0 + || strcmp(idealName,"SubExactI") == 0 + || strcmp(idealName,"SubExactL") == 0 + || strcmp(idealName,"MulExactI") == 0 + || strcmp(idealName,"MulExactL") == 0 + || strcmp(idealName,"NegExactI") == 0 + || strcmp(idealName,"NegExactL") == 0 || strcmp(idealName,"FlagsProj") == 0 || strcmp(idealName,"Bool") == 0 || strcmp(idealName,"Binary") == 0 ) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/adlc/formssel.cpp --- a/src/share/vm/adlc/formssel.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/adlc/formssel.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -536,12 +536,6 @@ if( data_type != Form::none ) rematerialize = true; - // Ugly: until a better fix is implemented, disable rematerialization for - // negD nodes because they are proved to be problematic. - if (is_ideal_negD()) { - return false; - } - // Constants if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) ) rematerialize = true; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/asm/assembler.cpp --- a/src/share/vm/asm/assembler.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/asm/assembler.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -122,7 +122,7 @@ void AbstractAssembler::generate_stack_overflow_check( int frame_size_in_bytes) { if (UseStackBanging) { // Each code entry causes one stack bang n pages down the stack where n - // is configurable by StackBangPages. 
The setting depends on the maximum + // is configurable by StackShadowPages. The setting depends on the maximum // depth of VM call stack or native before going back into java code, // since only java code can raise a stack overflow exception using the // stack banging mechanism. The VM and native code does not detect stack diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Canonicalizer.cpp --- a/src/share/vm/c1/c1_Canonicalizer.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Canonicalizer.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -935,6 +935,7 @@ void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {} void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {} void Canonicalizer::do_ProfileCall(ProfileCall* x) {} +void Canonicalizer::do_ProfileReturnType(ProfileReturnType* x) {} void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {} void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {} void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {} diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Canonicalizer.hpp --- a/src/share/vm/c1/c1_Canonicalizer.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Canonicalizer.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -104,6 +104,7 @@ virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_ProfileCall (ProfileCall* x); + virtual void do_ProfileReturnType (ProfileReturnType* x); virtual void do_ProfileInvoke (ProfileInvoke* x); virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Compilation.cpp --- a/src/share/vm/c1/c1_Compilation.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Compilation.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -601,6 +601,17 @@ } } +ciKlass* Compilation::cha_exact_type(ciType* type) { + if (type != NULL && type->is_loaded() && type->is_instance_klass()) { + ciInstanceKlass* ik = type->as_instance_klass(); + assert(ik->exact_klass() == NULL, "no cha for final klass"); + if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) { + dependency_recorder()->assert_leaf_type(ik); + return ik; + } + } + return NULL; +} void Compilation::print_timers() { // tty->print_cr(" Native methods : %6.3f s, Average : %2.3f", CompileBroker::_t_native_compilation.seconds(), CompileBroker::_t_native_compilation.seconds() / CompileBroker::_total_native_compile_count); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Compilation.hpp --- a/src/share/vm/c1/c1_Compilation.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Compilation.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -238,7 +238,18 @@ return env()->comp_level() == CompLevel_full_profile && C1UpdateMethodData && C1ProfileCheckcasts; } - + bool profile_parameters() { + return env()->comp_level() == CompLevel_full_profile && + C1UpdateMethodData && MethodData::profile_parameters(); + } + bool profile_arguments() { + return env()->comp_level() == CompLevel_full_profile && + C1UpdateMethodData && MethodData::profile_arguments(); + } + bool profile_return() { + return env()->comp_level() == CompLevel_full_profile && + C1UpdateMethodData && MethodData::profile_return(); + } // will compilation make optimistic assumptions that might lead to // deoptimization and that the runtime will account for? 
bool is_optimistic() const { @@ -246,6 +257,8 @@ (RangeCheckElimination || UseLoopInvariantCodeMotion) && method()->method_data()->trap_count(Deoptimization::Reason_none) == 0; } + + ciKlass* cha_exact_type(ciType* type); }; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Compiler.cpp --- a/src/share/vm/c1/c1_Compiler.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Compiler.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -42,26 +42,17 @@ #include "runtime/interfaceSupport.hpp" #include "runtime/sharedRuntime.hpp" -volatile int Compiler::_runtimes = uninitialized; Compiler::Compiler() : AbstractCompiler(c1) { } - -Compiler::~Compiler() { - Unimplemented(); -} - - -void Compiler::initialize_all() { +void Compiler::init_c1_runtime() { BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob(); Arena* arena = new (mtCompiler) Arena(); Runtime1::initialize(buffer_blob); FrameMap::initialize(); // initialize data structures ValueType::initialize(arena); - // Instruction::initialize(); - // BlockBegin::initialize(); GraphBuilder::initialize(); // note: to use more than one instance of LinearScan at a time this function call has to // be moved somewhere outside of this constructor: @@ -70,32 +61,33 @@ void Compiler::initialize() { - if (_runtimes != initialized) { - initialize_runtimes( initialize_all, &_runtimes); + // Buffer blob must be allocated per C1 compiler thread at startup + BufferBlob* buffer_blob = init_buffer_blob(); + + if (should_perform_init()) { + if (buffer_blob == NULL) { + // When we come here we are in state 'initializing'; entire C1 compilation + // can be shut down. + set_state(failed); + } else { + init_c1_runtime(); + set_state(initialized); + } } - mark_initialized(); } - -BufferBlob* Compiler::get_buffer_blob(ciEnv* env) { +BufferBlob* Compiler::init_buffer_blob() { // Allocate buffer blob once at startup since allocation for each // compilation seems to be too expensive (at least on Intel win32). - BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob(); - if (buffer_blob != NULL) { - return buffer_blob; - } + assert (CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once"); // setup CodeBuffer. Preallocate a BufferBlob of size // NMethodSizeLimit plus some extra space for constants. int code_buffer_size = Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size(); - buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer", - code_buffer_size); - if (buffer_blob == NULL) { - CompileBroker::handle_full_code_cache(); - env->record_failure("CodeCache is full"); - } else { + BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size); + if (buffer_blob != NULL) { CompilerThread::current()->set_buffer_blob(buffer_blob); } @@ -104,15 +96,8 @@ void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) { - BufferBlob* buffer_blob = Compiler::get_buffer_blob(env); - if (buffer_blob == NULL) { - return; - } - - if (!is_initialized()) { - initialize(); - } - + BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob(); + assert(buffer_blob != NULL, "Must exist"); // invoke compilation { // We are nested here because we need for the destructor diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Compiler.hpp --- a/src/share/vm/c1/c1_Compiler.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Compiler.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -30,11 +30,9 @@ // There is one instance of the Compiler per CompilerThread. 
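[Editorial sketch] The c1_Compiler.cpp rework above allocates the BufferBlob once per compiler thread at startup and funnels the one-time runtime setup through should_perform_init(). A rough freestanding sketch of that protocol, with invented stand-in types; the real BufferBlob contents and compiler state machine are simplified:

#include <atomic>
#include <new>

// Hypothetical stand-ins for C1's BufferBlob and CompilerThread state;
// only the initialization control flow mirrors the patch.
struct BufferBlob { char code[64 * 1024]; };

thread_local BufferBlob* tl_buffer_blob = nullptr;  // one blob per compiler thread

enum InitState { uninitialized, initializing, initialized, failed };
std::atomic<InitState> init_state{uninitialized};

// Allocate the per-thread code buffer once, at thread startup, instead of
// once per compilation (the patch calls this too expensive).
BufferBlob* init_buffer_blob() {
  if (tl_buffer_blob == nullptr) {
    tl_buffer_blob = new (std::nothrow) BufferBlob();
  }
  return tl_buffer_blob;
}

void compiler_thread_initialize() {
  BufferBlob* blob = init_buffer_blob();
  // Exactly one thread wins the race and performs the global one-time init;
  // if its buffer allocation failed, the whole compiler is marked failed.
  InitState expected = uninitialized;
  if (init_state.compare_exchange_strong(expected, initializing)) {
    // ... global runtime setup would run here ...
    init_state.store(blob == nullptr ? failed : initialized);
  }
}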
class Compiler: public AbstractCompiler { - private: - - // Tracks whether runtime has been initialized - static volatile int _runtimes; + static void init_c1_runtime(); + BufferBlob* init_buffer_blob(); public: // Creation @@ -46,19 +44,12 @@ virtual bool is_c1() { return true; }; - BufferBlob* get_buffer_blob(ciEnv* env); - // Missing feature tests virtual bool supports_native() { return true; } virtual bool supports_osr () { return true; } - // Customization - virtual bool needs_adapters () { return false; } - virtual bool needs_stubs () { return false; } - // Initialization virtual void initialize(); - static void initialize_all(); // Compilation entry point for methods virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_GraphBuilder.cpp --- a/src/share/vm/c1/c1_GraphBuilder.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1466,9 +1466,22 @@ // State at end of inlined method is the state of the caller // without the method parameters on stack, including the // return value, if any, of the inlined method on operand stack. + int invoke_bci = state()->caller_state()->bci(); set_state(state()->caller_state()->copy_for_parsing()); if (x != NULL) { state()->push(x->type(), x); + if (profile_return() && x->type()->is_object_kind()) { + ciMethod* caller = state()->scope()->method(); + ciMethodData* md = caller->method_data_or_null(); + ciProfileData* data = md->bci_to_data(invoke_bci); + if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) { + bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return(); + // May not be true in case of an inlined call through a method handle intrinsic. + if (has_return) { + profile_return_type(x, method(), caller, invoke_bci); + } + } + } } Goto* goto_callee = new Goto(continuation(), false); @@ -1658,6 +1671,50 @@ return compilation()->dependency_recorder(); } +// How many arguments do we want to profile? +Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) { + int n = 0; + bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci())); + start = has_receiver ? 1 : 0; + if (profile_arguments()) { + ciProfileData* data = method()->method_data()->bci_to_data(bci()); + if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) { + n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments(); + } + } + // If we are inlining then we need to collect arguments to profile parameters for the target + if (profile_parameters() && target != NULL) { + if (target->method_data() != NULL && target->method_data()->parameters_type_data() != NULL) { + // The receiver is profiled on method entry so it's included in + // the number of parameters but here we're only interested in + // actual arguments. 
+ n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start); + } + } + if (n > 0) { + return new Values(n); + } + return NULL; +} + +// Collect arguments that we want to profile in a list +Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) { + int start = 0; + Values* obj_args = args_list_for_profiling(target, start, may_have_receiver); + if (obj_args == NULL) { + return NULL; + } + int s = obj_args->size(); + for (int i = start, j = 0; j < s; i++) { + if (args->at(i)->type()->is_object_kind()) { + obj_args->push(args->at(i)); + j++; + } + } + assert(s == obj_args->length(), "missed on arg?"); + return obj_args; +} + void GraphBuilder::invoke(Bytecodes::Code code) { bool will_link; @@ -1816,7 +1873,7 @@ // number of implementors for decl_interface is 0 or 1. If // it's 0 then no class implements decl_interface and there's // no point in inlining. - if (!holder->is_loaded() || decl_interface->nof_implementors() != 1) { + if (!holder->is_loaded() || decl_interface->nof_implementors() != 1 || decl_interface->has_default_methods()) { singleton = NULL; } } @@ -1957,7 +2014,7 @@ } else if (exact_target != NULL) { target_klass = exact_target->holder(); } - profile_call(target, recv, target_klass); + profile_call(target, recv, target_klass, collect_args_for_profiling(args, NULL, false), false); } } @@ -1972,6 +2029,9 @@ push(result_type, result); } } + if (profile_return() && result_type->is_object_kind()) { + profile_return_type(result, target); + } } @@ -3511,7 +3571,7 @@ recv = args->at(0); null_check(recv); } - profile_call(callee, recv, NULL); + profile_call(callee, recv, NULL, collect_args_for_profiling(args, callee, true), true); } } } @@ -3522,6 +3582,10 @@ Value value = append_split(result); if (result_type != voidType) push(result_type, value); + if (callee != method() && profile_return() && result_type->is_object_kind()) { + profile_return_type(result, callee); + } + // done return true; } @@ -3706,6 +3770,7 @@ // now perform tests that are based on flag settings if (callee->force_inline()) { + if (inline_level() > MaxForceInlineLevel) INLINE_BAILOUT("MaxForceInlineLevel"); print_inlining(callee, "force inline by annotation"); } else if (callee->should_inline()) { print_inlining(callee, "force inline by CompileOracle"); @@ -3765,7 +3830,28 @@ compilation()->set_would_profile(true); if (profile_calls()) { - profile_call(callee, recv, holder_known ? callee->holder() : NULL); + int start = 0; + Values* obj_args = args_list_for_profiling(callee, start, has_receiver); + if (obj_args != NULL) { + int s = obj_args->size(); + // if called through method handle invoke, some arguments may have been popped + for (int i = args_base+start, j = 0; j < obj_args->size() && i < state()->stack_size(); ) { + Value v = state()->stack_at_inc(i); + if (v->type()->is_object_kind()) { + obj_args->push(v); + j++; + } + } +#ifdef ASSERT + { + bool ignored_will_link; + ciSignature* declared_signature = NULL; + ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature); + assert(s == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?"); + } +#endif + } + profile_call(callee, recv, holder_known ? 
callee->holder() : NULL, obj_args, true); } } @@ -4253,8 +4339,28 @@ } #endif // PRODUCT -void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) { - append(new ProfileCall(method(), bci(), callee, recv, known_holder)); +void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) { + // A default method's holder is an interface + if (known_holder != NULL && known_holder->is_interface()) { + assert(known_holder->is_instance_klass() && ((ciInstanceKlass*)known_holder)->has_default_methods(), "should be default method"); + known_holder = NULL; + } + append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined)); +} + +void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) { + assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together"); + if (m == NULL) { + m = method(); + } + if (invoke_bci < 0) { + invoke_bci = bci(); + } + ciMethodData* md = m->method_data_or_null(); + ciProfileData* data = md->bci_to_data(invoke_bci); + if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) { + append(new ProfileReturnType(m , invoke_bci, callee, ret)); + } } void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_GraphBuilder.hpp --- a/src/share/vm/c1/c1_GraphBuilder.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_GraphBuilder.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -374,7 +374,8 @@ void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true); - void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder); + void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined); + void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1); void profile_invocation(ciMethod* inlinee, ValueStack* state); // Shortcuts to profiling control. 
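[Editorial sketch] args_list_for_profiling/collect_args_for_profiling above size a list from what the MethodData wants and then keep only the object-kind argument values. A freestanding sketch of that filtering step, using std::vector in place of C1's Values (all names invented for illustration):

#include <cassert>
#include <vector>

// Stand-ins for C1's Value/Values; only "is this an object?" matters here.
struct Value { bool is_object_kind; };
typedef std::vector<Value*> Values;

// Keep the first `want` object-kind arguments, starting past the receiver
// when start == 1.
Values* collect_object_args(const Values& args, int start, int want) {
  if (want <= 0) {
    return nullptr;  // nothing to profile at this call site
  }
  Values* obj_args = new Values();
  obj_args->reserve(want);
  for (int i = start, j = 0; j < want && i < (int)args.size(); i++) {
    if (args[i]->is_object_kind) {
      obj_args->push_back(args[i]);
      j++;
    }
  }
  // In the patch a shortfall is tolerated only for method handle intrinsics,
  // where some arguments may already have been popped.
  assert((int)obj_args->size() <= want);
  return obj_args;
}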
@@ -385,6 +386,12 @@ bool profile_calls() { return _compilation->profile_calls(); } bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); } bool profile_checkcasts() { return _compilation->profile_checkcasts(); } + bool profile_parameters() { return _compilation->profile_parameters(); } + bool profile_arguments() { return _compilation->profile_arguments(); } + bool profile_return() { return _compilation->profile_return(); } + + Values* args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver); + Values* collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver); public: NOT_PRODUCT(void print_stats();) diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Instruction.cpp --- a/src/share/vm/c1/c1_Instruction.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Instruction.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -104,6 +104,14 @@ } } +ciType* Instruction::exact_type() const { + ciType* t = declared_type(); + if (t != NULL && t->is_klass()) { + return t->as_klass()->exact_klass(); + } + return NULL; +} + #ifndef PRODUCT void Instruction::check_state(ValueStack* state) { @@ -135,9 +143,7 @@ // perform constant and interval tests on index value bool AccessIndexed::compute_needs_range_check() { - if (length()) { - Constant* clength = length()->as_Constant(); Constant* cindex = index()->as_Constant(); if (clength && cindex) { @@ -157,34 +163,8 @@ } -ciType* Local::exact_type() const { - ciType* type = declared_type(); - - // for primitive arrays, the declared type is the exact type - if (type->is_type_array_klass()) { - return type; - } else if (type->is_instance_klass()) { - ciInstanceKlass* ik = (ciInstanceKlass*)type; - if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) { - return type; - } - } else if (type->is_obj_array_klass()) { - ciObjArrayKlass* oak = (ciObjArrayKlass*)type; - ciType* base = oak->base_element_type(); - if (base->is_instance_klass()) { - ciInstanceKlass* ik = base->as_instance_klass(); - if (ik->is_loaded() && ik->is_final()) { - return type; - } - } else if (base->is_primitive_type()) { - return type; - } - } - return NULL; -} - ciType* Constant::exact_type() const { - if (type()->is_object()) { + if (type()->is_object() && type()->as_ObjectType()->is_loaded()) { return type()->as_ObjectType()->exact_type(); } return NULL; @@ -192,19 +172,18 @@ ciType* LoadIndexed::exact_type() const { ciType* array_type = array()->exact_type(); - if (array_type == NULL) { - return NULL; - } - assert(array_type->is_array_klass(), "what else?"); - ciArrayKlass* ak = (ciArrayKlass*)array_type; + if (array_type != NULL) { + assert(array_type->is_array_klass(), "what else?"); + ciArrayKlass* ak = (ciArrayKlass*)array_type; - if (ak->element_type()->is_instance_klass()) { - ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type(); - if (ik->is_loaded() && ik->is_final()) { - return ik; + if (ak->element_type()->is_instance_klass()) { + ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type(); + if (ik->is_loaded() && ik->is_final()) { + return ik; + } } } - return NULL; + return Instruction::exact_type(); } @@ -224,22 +203,6 @@ } -ciType* LoadField::exact_type() const { - ciType* type = declared_type(); - // for primitive arrays, the declared type is the exact type - if (type->is_type_array_klass()) { - return type; - } - if (type->is_instance_klass()) { - ciInstanceKlass* ik = (ciInstanceKlass*)type; - if (ik->is_loaded() && ik->is_final()) { - return type; - } - } - return NULL; -} - - ciType* 
NewTypeArray::exact_type() const { return ciTypeArrayKlass::make(elt_type()); } @@ -264,16 +227,6 @@ return klass(); } -ciType* CheckCast::exact_type() const { - if (klass()->is_instance_klass()) { - ciInstanceKlass* ik = (ciInstanceKlass*)klass(); - if (ik->is_loaded() && ik->is_final()) { - return ik; - } - } - return NULL; -} - // Implementation of ArithmeticOp bool ArithmeticOp::is_commutative() const { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Instruction.hpp --- a/src/share/vm/c1/c1_Instruction.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Instruction.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -107,6 +107,7 @@ class UnsafePrefetchRead; class UnsafePrefetchWrite; class ProfileCall; +class ProfileReturnType; class ProfileInvoke; class RuntimeCall; class MemBar; @@ -211,6 +212,7 @@ virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x) = 0; virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0; virtual void do_ProfileCall (ProfileCall* x) = 0; + virtual void do_ProfileReturnType (ProfileReturnType* x) = 0; virtual void do_ProfileInvoke (ProfileInvoke* x) = 0; virtual void do_RuntimeCall (RuntimeCall* x) = 0; virtual void do_MemBar (MemBar* x) = 0; @@ -322,6 +324,36 @@ _type = type; } + // Helper class to keep track of which arguments need a null check + class ArgsNonNullState { + private: + int _nonnull_state; // mask identifying which args are nonnull + public: + ArgsNonNullState() + : _nonnull_state(AllBits) {} + + // Does argument number i needs a null check? + bool arg_needs_null_check(int i) const { + // No data is kept for arguments starting at position 33 so + // conservatively assume that they need a null check. + if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { + return is_set_nth_bit(_nonnull_state, i); + } + return true; + } + + // Set whether argument number i needs a null check or not + void set_arg_needs_null_check(int i, bool check) { + if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { + if (check) { + _nonnull_state |= nth_bit(i); + } else { + _nonnull_state &= ~(nth_bit(i)); + } + } + } + }; + public: void* operator new(size_t size) throw() { Compilation* c = Compilation::current(); @@ -566,7 +598,7 @@ virtual void other_values_do(ValueVisitor* f) { /* usually no other - override on demand */ } void values_do(ValueVisitor* f) { input_values_do(f); state_values_do(f); other_values_do(f); } - virtual ciType* exact_type() const { return NULL; } + virtual ciType* exact_type() const; virtual ciType* declared_type() const { return NULL; } // hashing @@ -689,7 +721,6 @@ int java_index() const { return _java_index; } virtual ciType* declared_type() const { return _declared_type; } - virtual ciType* exact_type() const; // generic virtual void input_values_do(ValueVisitor* f) { /* no values */ } @@ -806,7 +837,6 @@ {} ciType* declared_type() const; - ciType* exact_type() const; // generic HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset()) // cannot be eliminated if needs patching or if volatile @@ -1299,6 +1329,7 @@ virtual bool needs_exception_state() const { return false; } + ciType* exact_type() const { return NULL; } ciType* declared_type() const; // generic @@ -1422,7 +1453,6 @@ } ciType* declared_type() const; - ciType* exact_type() const; }; @@ -1490,7 +1520,7 @@ vmIntrinsics::ID _id; Values* _args; Value _recv; - int _nonnull_state; // mask identifying which args are nonnull + ArgsNonNullState _nonnull_state; public: // preserves_state can be set to true for Intrinsics 
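[Editorial sketch] The new ArgsNonNullState helper packs "does argument i still need a null check?" into a single int, and conservatively answers yes for anything past bit 31, where no data is kept. A self-contained sketch of the same bitmask logic:

#include <climits>

// Bit set = a null check is still needed for that argument.
class ArgsNonNullStateSketch {
 private:
  unsigned int _nonnull_state = ~0u;  // start pessimistic: all args may be null

 public:
  bool arg_needs_null_check(int i) const {
    // No data is kept past bit 31, so conservatively require a check there.
    if (i >= 0 && i < (int)(sizeof(_nonnull_state) * CHAR_BIT)) {
      return (_nonnull_state >> i) & 1u;
    }
    return true;
  }

  void set_arg_needs_null_check(int i, bool check) {
    if (i >= 0 && i < (int)(sizeof(_nonnull_state) * CHAR_BIT)) {
      if (check) {
        _nonnull_state |= (1u << i);
      } else {
        _nonnull_state &= ~(1u << i);
      }
    }
  }
};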
@@ -1511,7 +1541,6 @@ , _id(id) , _args(args) , _recv(NULL) - , _nonnull_state(AllBits) { assert(args != NULL, "args must exist"); ASSERT_VALUES @@ -1537,21 +1566,12 @@ Value receiver() const { assert(has_receiver(), "must have receiver"); return _recv; } bool preserves_state() const { return check_flag(PreservesStateFlag); } - bool arg_needs_null_check(int i) { - if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { - return is_set_nth_bit(_nonnull_state, i); - } - return true; + bool arg_needs_null_check(int i) const { + return _nonnull_state.arg_needs_null_check(i); } void set_arg_needs_null_check(int i, bool check) { - if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { - if (check) { - _nonnull_state |= nth_bit(i); - } else { - _nonnull_state &= ~(nth_bit(i)); - } - } + _nonnull_state.set_arg_needs_null_check(i, check); } // generic @@ -2450,34 +2470,87 @@ LEAF(ProfileCall, Instruction) private: - ciMethod* _method; - int _bci_of_invoke; - ciMethod* _callee; // the method that is called at the given bci - Value _recv; - ciKlass* _known_holder; + ciMethod* _method; + int _bci_of_invoke; + ciMethod* _callee; // the method that is called at the given bci + Value _recv; + ciKlass* _known_holder; + Values* _obj_args; // arguments for type profiling + ArgsNonNullState _nonnull_state; // Do we know whether some arguments are never null? + bool _inlined; // Are we profiling a call that is inlined public: - ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder) + ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) : Instruction(voidType) , _method(method) , _bci_of_invoke(bci) , _callee(callee) , _recv(recv) , _known_holder(known_holder) + , _obj_args(obj_args) + , _inlined(inlined) { // The ProfileCall has side-effects and must occur precisely where located pin(); } - ciMethod* method() { return _method; } - int bci_of_invoke() { return _bci_of_invoke; } - ciMethod* callee() { return _callee; } - Value recv() { return _recv; } - ciKlass* known_holder() { return _known_holder; } - - virtual void input_values_do(ValueVisitor* f) { if (_recv != NULL) f->visit(&_recv); } + ciMethod* method() const { return _method; } + int bci_of_invoke() const { return _bci_of_invoke; } + ciMethod* callee() const { return _callee; } + Value recv() const { return _recv; } + ciKlass* known_holder() const { return _known_holder; } + int nb_profiled_args() const { return _obj_args == NULL ? 
0 : _obj_args->length(); } + Value profiled_arg_at(int i) const { return _obj_args->at(i); } + bool arg_needs_null_check(int i) const { + return _nonnull_state.arg_needs_null_check(i); + } + bool inlined() const { return _inlined; } + + void set_arg_needs_null_check(int i, bool check) { + _nonnull_state.set_arg_needs_null_check(i, check); + } + + virtual void input_values_do(ValueVisitor* f) { + if (_recv != NULL) { + f->visit(&_recv); + } + for (int i = 0; i < nb_profiled_args(); i++) { + f->visit(_obj_args->adr_at(i)); + } + } }; +LEAF(ProfileReturnType, Instruction) + private: + ciMethod* _method; + ciMethod* _callee; + int _bci_of_invoke; + Value _ret; + + public: + ProfileReturnType(ciMethod* method, int bci, ciMethod* callee, Value ret) + : Instruction(voidType) + , _method(method) + , _callee(callee) + , _bci_of_invoke(bci) + , _ret(ret) + { + set_needs_null_check(true); + // The ProfileReturnType has side-effects and must occur precisely where located + pin(); + } + + ciMethod* method() const { return _method; } + ciMethod* callee() const { return _callee; } + int bci_of_invoke() const { return _bci_of_invoke; } + Value ret() const { return _ret; } + + virtual void input_values_do(ValueVisitor* f) { + if (_ret != NULL) { + f->visit(&_ret); + } + } +}; // Call some C runtime function that doesn't safepoint, // optionally passing the current thread as the first argument. diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_InstructionPrinter.cpp --- a/src/share/vm/c1/c1_InstructionPrinter.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -892,10 +892,24 @@ if (x->known_holder() != NULL) { output()->print(", "); print_klass(x->known_holder()); + output()->print(" "); + } + for (int i = 0; i < x->nb_profiled_args(); i++) { + if (i > 0) output()->print(", "); + print_value(x->profiled_arg_at(i)); + if (x->arg_needs_null_check(i)) { + output()->print(" [NC]"); + } } output()->put(')'); } +void InstructionPrinter::do_ProfileReturnType(ProfileReturnType* x) { + output()->print("profile ret type "); + print_value(x->ret()); + output()->print(" %s.%s", x->method()->holder()->name()->as_utf8(), x->method()->name()->as_utf8()); + output()->put(')'); +} void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) { output()->print("profile_invoke "); output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8()); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_InstructionPrinter.hpp --- a/src/share/vm/c1/c1_InstructionPrinter.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_InstructionPrinter.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -132,6 +132,7 @@ virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_ProfileCall (ProfileCall* x); + virtual void do_ProfileReturnType (ProfileReturnType* x); virtual void do_ProfileInvoke (ProfileInvoke* x); virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_LIR.cpp --- a/src/share/vm/c1/c1_LIR.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_LIR.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -183,10 +183,10 @@ case T_LONG: case T_OBJECT: case T_ADDRESS: - case T_METADATA: case T_VOID: return ::type2char(t); - + case T_METADATA: + return 'M'; case T_ILLEGAL: return '?'; @@ -1001,6 +1001,17 @@ assert(opProfileCall->_tmp1->is_valid(), "used"); do_temp(opProfileCall->_tmp1); break;
} + +// LIR_OpProfileType: + case lir_profile_type: { + assert(op->as_OpProfileType() != NULL, "must be"); + LIR_OpProfileType* opProfileType = (LIR_OpProfileType*)op; + + do_input(opProfileType->_mdp); do_temp(opProfileType->_mdp); + do_input(opProfileType->_obj); + do_temp(opProfileType->_tmp); + break; + } default: ShouldNotReachHere(); } @@ -1151,6 +1162,10 @@ masm->emit_profile_call(this); } +void LIR_OpProfileType::emit_code(LIR_Assembler* masm) { + masm->emit_profile_type(this); +} + // LIR_List LIR_List::LIR_List(Compilation* compilation, BlockBegin* block) : _operations(8) @@ -1803,6 +1818,8 @@ case lir_cas_int: s = "cas_int"; break; // LIR_OpProfileCall case lir_profile_call: s = "profile_call"; break; + // LIR_OpProfileType + case lir_profile_type: s = "profile_type"; break; // LIR_OpAssert #ifdef ASSERT case lir_assert: s = "assert"; break; @@ -2086,6 +2103,15 @@ tmp1()->print(out); out->print(" "); } +// LIR_OpProfileType +void LIR_OpProfileType::print_instr(outputStream* out) const { + out->print("exact = "); exact_klass()->print_name_on(out); + out->print("current = "); ciTypeEntries::print_ciklass(out, current_klass()); + mdp()->print(out); out->print(" "); + obj()->print(out); out->print(" "); + tmp()->print(out); out->print(" "); +} + #endif // PRODUCT // Implementation of LIR_InsertionBuffer diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_LIR.hpp --- a/src/share/vm/c1/c1_LIR.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_LIR.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -882,6 +882,7 @@ class LIR_OpTypeCheck; class LIR_OpCompareAndSwap; class LIR_OpProfileCall; +class LIR_OpProfileType; #ifdef ASSERT class LIR_OpAssert; #endif @@ -1005,6 +1006,7 @@ , end_opCompareAndSwap , begin_opMDOProfile , lir_profile_call + , lir_profile_type , end_opMDOProfile , begin_opAssert , lir_assert @@ -1145,6 +1147,7 @@ virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; } virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; } + virtual LIR_OpProfileType* as_OpProfileType() { return NULL; } #ifdef ASSERT virtual LIR_OpAssert* as_OpAssert() { return NULL; } #endif @@ -1925,8 +1928,8 @@ public: // Destroys recv - LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder) - : LIR_Op(code, LIR_OprFact::illegalOpr, NULL) // no result, no info + LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder) + : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL) // no result, no info , _profiled_method(profiled_method) , _profiled_bci(profiled_bci) , _profiled_callee(profiled_callee) @@ -1948,6 +1951,45 @@ virtual void print_instr(outputStream* out) const PRODUCT_RETURN; }; +// LIR_OpProfileType +class LIR_OpProfileType : public LIR_Op { + friend class LIR_OpVisitState; + + private: + LIR_Opr _mdp; + LIR_Opr _obj; + LIR_Opr _tmp; + ciKlass* _exact_klass; // non NULL if we know the klass statically (no need to load it from _obj) + intptr_t _current_klass; // what the profiling currently reports + bool _not_null; // true if we know statically that _obj cannot be null + bool _no_conflict; // true if we're profiling parameters, _exact_klass is not NULL and we know + // _exact_klass is the only possible type for this parameter in any context.
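Note that _current_klass above is not a plain pointer: like the TypeEntries cells it mirrors, it is an intptr_t whose low bits carry profile status (null seen, type unknown) next to the klass bits, which is what ciTypeEntries::print_ciklass decodes in print_instr. A standalone model of such a tagged word (the bit layout and names are assumptions for illustration, not the real TypeEntries encoding):

  #include <cstdint>
  #include <cstdio>

  struct Klass { const char* name; };             // stand-in for a klass, pointer-aligned

  typedef intptr_t prof_t;                        // tagged word: klass bits plus status bits
  const prof_t null_seen_bit = 1;                 // a null was observed at this profile point
  const prof_t unknown_bit   = 2;                 // conflicting types were observed
  const prof_t status_mask   = null_seen_bit | unknown_bit;

  prof_t with_klass(Klass* k, prof_t status) { return (prof_t)k | (status & status_mask); }
  bool   is_unknown(prof_t p)    { return (p & unknown_bit) != 0; }
  bool   was_null_seen(prof_t p) { return (p & null_seen_bit) != 0; }
  Klass* valid_klass(prof_t p) {                  // null when no single type is recorded
    if (is_unknown(p)) return nullptr;
    return (Klass*)(p & ~status_mask);
  }

  int main() {
    static Klass string_klass = { "java/lang/String" };
    prof_t p = with_klass(&string_klass, null_seen_bit);
    std::printf("%s%s\n", valid_klass(p)->name, was_null_seen(p) ? " (null seen)" : "");
    return 0;
  }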
+ + public: + // Destroys mdp and tmp + LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) + : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL) // no result, no info + , _mdp(mdp) + , _obj(obj) + , _exact_klass(exact_klass) + , _current_klass(current_klass) + , _tmp(tmp) + , _not_null(not_null) + , _no_conflict(no_conflict) { } + + LIR_Opr mdp() const { return _mdp; } + LIR_Opr obj() const { return _obj; } + LIR_Opr tmp() const { return _tmp; } + ciKlass* exact_klass() const { return _exact_klass; } + intptr_t current_klass() const { return _current_klass; } + bool not_null() const { return _not_null; } + bool no_conflict() const { return _no_conflict; } + + virtual void emit_code(LIR_Assembler* masm); + virtual LIR_OpProfileType* as_OpProfileType() { return this; } + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; +}; + class LIR_InsertionBuffer; //--------------------------------LIR_List--------------------------------------------------- @@ -2247,7 +2289,10 @@ ciMethod* profiled_method, int profiled_bci); // MethodData* profiling void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { - append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass)); + append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass)); + } + void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) { + append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict)); } void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_LIRAssembler.hpp --- a/src/share/vm/c1/c1_LIRAssembler.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -208,6 +208,7 @@ void emit_call(LIR_OpJavaCall* op); void emit_rtcall(LIR_OpRTCall* op); void emit_profile_call(LIR_OpProfileCall* op); + void emit_profile_type(LIR_OpProfileType* op); void emit_delay(LIR_OpDelay* op); void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_LIRGenerator.cpp --- a/src/share/vm/c1/c1_LIRGenerator.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1175,7 +1175,7 @@ if (compilation()->env()->dtrace_method_probes()) { BasicTypeList signature; signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread - signature.append(T_OBJECT); // Method* + signature.append(T_METADATA); // Method* LIR_OprList* args = new LIR_OprList(); args->append(getThreadPointer()); LIR_Opr meth = new_register(T_METADATA); @@ -1265,6 +1265,7 @@ LIRItem rcvr(x->argument_at(0), this); rcvr.load_item(); + LIR_Opr temp = new_register(T_METADATA); LIR_Opr result = rlock_result(x); // need to perform the null check on the rcvr @@ -1272,8 +1273,11 @@ if (x->needs_null_check()) { info = state_for(x); } - __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), result, info); - __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result); + + // FIXME T_ADDRESS should actually be T_METADATA but it can't because the + // meaning of these
two is mixed up (see JDK-8026837). + __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info); + __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result); } @@ -2570,6 +2574,138 @@ __ jump(x->default_sux()); } +/** + * Emit profiling code if needed for arguments, parameters, return value types + * + * @param md MDO the code will update at runtime + * @param md_base_offset common offset in the MDO for this profile and subsequent ones + * @param md_offset offset in the MDO (on top of md_base_offset) for this profile + * @param profiled_k current profile + * @param obj IR node for the object to be profiled + * @param mdp register to hold the pointer inside the MDO (md + md_base_offset). + * Set once we find an update to make and use for next ones. + * @param not_null true if we know obj cannot be null + * @param signature_at_call_k signature at call for obj + * @param callee_signature_k signature of callee for obj + * at call and callee signatures differ at method handle call + * @return the only klass we know will ever be seen at this profile point + */ +ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, + Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k, + ciKlass* callee_signature_k) { + ciKlass* result = NULL; + bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k); + bool do_update = !TypeEntries::is_type_unknown(profiled_k); + // known not to be null or null bit already set and already set to + // unknown: nothing we can do to improve profiling + if (!do_null && !do_update) { + return result; + } + + ciKlass* exact_klass = NULL; + Compilation* comp = Compilation::current(); + if (do_update) { + // try to find exact type, using CHA if possible, so that loading + // the klass from the object can be avoided + ciType* type = obj->exact_type(); + if (type == NULL) { + type = obj->declared_type(); + type = comp->cha_exact_type(type); + } + assert(type == NULL || type->is_klass(), "type should be class"); + exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL; + + do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass; + } + + if (!do_null && !do_update) { + return result; + } + + ciKlass* exact_signature_k = NULL; + if (do_update) { + // Is the type from the signature exact (the only one possible)? + exact_signature_k = signature_at_call_k->exact_klass(); + if (exact_signature_k == NULL) { + exact_signature_k = comp->cha_exact_type(signature_at_call_k); + } else { + result = exact_signature_k; + // Known statically. 
No need to emit any code: prevent + // LIR_Assembler::emit_profile_type() from emitting useless code + profiled_k = ciTypeEntries::with_status(result, profiled_k); + } + if (exact_signature_k != NULL && exact_klass != exact_signature_k) { + assert(exact_klass == NULL, "obj and signature disagree?"); + // sometimes the type of the signature is better than the best type + // the compiler has + exact_klass = exact_signature_k; + } + if (callee_signature_k != NULL && + callee_signature_k != signature_at_call_k) { + ciKlass* improved_klass = callee_signature_k->exact_klass(); + if (improved_klass == NULL) { + improved_klass = comp->cha_exact_type(callee_signature_k); + } + if (improved_klass != NULL && exact_klass != improved_klass) { + assert(exact_klass == NULL, "obj and signature disagree?"); + exact_klass = exact_signature_k; + } + } + do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass; + } + + if (!do_null && !do_update) { + return result; + } + + if (mdp == LIR_OprFact::illegalOpr) { + mdp = new_register(T_METADATA); + __ metadata2reg(md->constant_encoding(), mdp); + if (md_base_offset != 0) { + LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS); + mdp = new_pointer_register(); + __ leal(LIR_OprFact::address(base_type_address), mdp); + } + } + LIRItem value(obj, this); + value.load_item(); + __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA), + value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL); + return result; +} + +// profile parameters on entry to the root of the compilation +void LIRGenerator::profile_parameters(Base* x) { + if (compilation()->profile_parameters()) { + CallingConvention* args = compilation()->frame_map()->incoming_arguments(); + ciMethodData* md = scope()->method()->method_data_or_null(); + assert(md != NULL, "Sanity"); + + if (md->parameters_type_data() != NULL) { + ciParametersTypeData* parameters_type_data = md->parameters_type_data(); + ciTypeStackSlotEntries* parameters = parameters_type_data->parameters(); + LIR_Opr mdp = LIR_OprFact::illegalOpr; + for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) { + LIR_Opr src = args->at(i); + assert(!src->is_illegal(), "check"); + BasicType t = src->type(); + if (t == T_OBJECT || t == T_ARRAY) { + intptr_t profiled_k = parameters->type(j); + Local* local = x->state()->local_at(java_index)->as_Local(); + ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)), + in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)), + profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL); + // If the profile is known statically set it once for all and do not emit any code + if (exact != NULL) { + md->set_parameter_type(j, exact); + } + j++; + } + java_index += type2size[t]; + } + } + } +} void LIRGenerator::do_Base(Base* x) { __ std_entry(LIR_OprFact::illegalOpr); @@ -2611,7 +2747,7 @@ if (compilation()->env()->dtrace_method_probes()) { BasicTypeList signature; signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread - signature.append(T_OBJECT); // Method* + signature.append(T_METADATA); // Method* LIR_OprList* args = new LIR_OprList(); args->append(getThreadPointer()); LIR_Opr meth = new_register(T_METADATA); @@ -2646,6 +2782,7 @@ // increment invocation counters if needed if (!method()->is_accessor()) { // Accessors do not have MDOs, so no 
counting. + profile_parameters(x); CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false); increment_invocation_counter(info); } @@ -3004,12 +3141,132 @@ } } +void LIRGenerator::profile_arguments(ProfileCall* x) { + if (compilation()->profile_arguments()) { + int bci = x->bci_of_invoke(); + ciMethodData* md = x->method()->method_data_or_null(); + ciProfileData* data = md->bci_to_data(bci); + if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) || + (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) { + ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset(); + int base_offset = md->byte_offset_of_slot(data, extra); + LIR_Opr mdp = LIR_OprFact::illegalOpr; + ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args(); + + Bytecodes::Code bc = x->method()->java_code_at_bci(bci); + int start = 0; + int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments(); + if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) { + // first argument is not profiled at call (method handle invoke) + assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected"); + start = 1; + } + ciSignature* callee_signature = x->callee()->signature(); + // method handle call to virtual method + bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc); + ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL); + + bool ignored_will_link; + ciSignature* signature_at_call = NULL; + x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); + ciSignatureStream signature_at_call_stream(signature_at_call); + + // if called through method handle invoke, some arguments may have been popped + for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) { + int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset()); + ciKlass* exact = profile_type(md, base_offset, off, + args->type(i), x->profiled_arg_at(i+start), mdp, + !x->arg_needs_null_check(i+start), + signature_at_call_stream.next_klass(), callee_signature_stream.next_klass()); + if (exact != NULL) { + md->set_argument_type(bci, i, exact); + } + } + } else { +#ifdef ASSERT + Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke()); + int n = x->nb_profiled_args(); + assert(MethodData::profile_parameters() && x->inlined() && + ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)), + "only at JSR292 bytecodes"); +#endif + } + } +} + +// profile parameters on entry to an inlined method +void LIRGenerator::profile_parameters_at_call(ProfileCall* x) { + if (compilation()->profile_parameters() && x->inlined()) { + ciMethodData* md = x->callee()->method_data_or_null(); + if (md != NULL) { + ciParametersTypeData* parameters_type_data = md->parameters_type_data(); + if (parameters_type_data != NULL) { + ciTypeStackSlotEntries* parameters = parameters_type_data->parameters(); + LIR_Opr mdp = LIR_OprFact::illegalOpr; + bool has_receiver = !x->callee()->is_static(); + ciSignature* sig = x->callee()->signature(); + ciSignatureStream sig_stream(sig, has_receiver ? 
x->callee()->holder() : NULL); + int i = 0; // to iterate on the Instructions + Value arg = x->recv(); + bool not_null = false; + int bci = x->bci_of_invoke(); + Bytecodes::Code bc = x->method()->java_code_at_bci(bci); + // The first parameter is the receiver so that's what we start + // with if it exists. One exception is method handle call to + // virtual method: the receiver is in the args list + if (arg == NULL || !Bytecodes::has_receiver(bc)) { + i = 1; + arg = x->profiled_arg_at(0); + not_null = !x->arg_needs_null_check(0); + } + int k = 0; // to iterate on the profile data + for (;;) { + intptr_t profiled_k = parameters->type(k); + ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)), + in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)), + profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL); + // If the profile is known statically set it once for all and do not emit any code + if (exact != NULL) { + md->set_parameter_type(k, exact); + } + k++; + if (k >= parameters_type_data->number_of_parameters()) { +#ifdef ASSERT + int extra = 0; + if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 && + x->nb_profiled_args() >= TypeProfileParmsLimit && + x->recv() != NULL && Bytecodes::has_receiver(bc)) { + extra += 1; + } + assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?"); +#endif + break; + } + arg = x->profiled_arg_at(i); + not_null = !x->arg_needs_null_check(i); + i++; + } + } + } + } +} + void LIRGenerator::do_ProfileCall(ProfileCall* x) { // Need recv in a temporary register so it interferes with the other temporaries LIR_Opr recv = LIR_OprFact::illegalOpr; LIR_Opr mdo = new_register(T_OBJECT); // tmp is used to hold the counters on SPARC LIR_Opr tmp = new_pointer_register(); + + if (x->nb_profiled_args() > 0) { + profile_arguments(x); + } + + // profile parameters on inlined method entry including receiver + if (x->recv() != NULL || x->nb_profiled_args() > 0) { + profile_parameters_at_call(x); + } + if (x->recv() != NULL) { LIRItem value(x->recv(), this); value.load_item(); @@ -3019,6 +3276,28 @@ __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder()); } +void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) { + int bci = x->bci_of_invoke(); + ciMethodData* md = x->method()->method_data_or_null(); + ciProfileData* data = md->bci_to_data(bci); + assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type"); + ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret(); + LIR_Opr mdp = LIR_OprFact::illegalOpr; + + bool ignored_will_link; + ciSignature* signature_at_call = NULL; + x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); + + ciKlass* exact = profile_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()), + ret->type(), x->ret(), mdp, + !x->needs_null_check(), + signature_at_call->return_type()->as_klass(), + x->callee()->signature()->return_type()->as_klass()); + if (exact != NULL) { + md->set_return_type(bci, exact); + } +} + void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) { // We can safely ignore accessors here, since c2 will inline them anyway, // accessors are also always mature. 
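The heart of LIRGenerator::profile_type above is the pair of flags do_null and do_update: runtime profiling code is emitted only for the parts of the profile that compile-time knowledge cannot already settle. A condensed standalone sketch of that decision (names hypothetical; the real code additionally refines the static type through declared types, CHA and call signatures):

  #include <cstdio>

  struct ProfileView {
    bool null_seen;           // the profile already recorded a null
    bool type_unknown;        // the profile already gave up on a single type
    const char* klass;        // the single klass currently recorded, or nullptr
  };

  enum Action { kNoCode, kUpdateNullOnly, kUpdateTypeOnly, kUpdateBoth };

  Action decide(const ProfileView& p, bool known_not_null, const char* static_exact) {
    bool do_null   = !known_not_null && !p.null_seen;                       // still need to track nulls?
    bool do_update = !p.type_unknown &&
                     (static_exact == nullptr || p.klass != static_exact);  // type cell still unsettled?
    if (do_null && do_update) return kUpdateBoth;
    if (do_null)              return kUpdateNullOnly;
    if (do_update)            return kUpdateTypeOnly;
    return kNoCode;                                                         // nothing left to learn: emit no code
  }

  int main() {
    const char* foo = "Foo";
    ProfileView settled = { true, false, foo };                 // null seen, single type recorded
    std::printf("%d\n", decide(settled, false, foo));           // prints 0 (kNoCode)
    return 0;
  }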
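Separately, profile_parameters and profile_parameters_at_call above both advance two cursors at once: a Java local-slot index that longs and doubles move by two, and a profile-row index that only reference-typed values move. A standalone illustration of that bookkeeping (hypothetical types, not the HotSpot BasicType enum):

  #include <cstdio>

  enum BasicType { T_INT, T_LONG, T_DOUBLE, T_OBJECT, T_ARRAY };
  int type2size(BasicType t) { return (t == T_LONG || t == T_DOUBLE) ? 2 : 1; }

  int main() {
    BasicType sig[] = { T_OBJECT, T_LONG, T_INT, T_ARRAY };
    int java_index = 0;                  // Java local slot of the current parameter
    int j = 0;                           // row in the parameter type profile
    for (BasicType t : sig) {
      if (t == T_OBJECT || t == T_ARRAY) {
        std::printf("profile row %d <- local slot %d\n", j, java_index);
        j++;                             // only reference-typed parameters are profiled
      }
      java_index += type2size(t);        // longs and doubles occupy two local slots
    }
    return 0;
  }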
@@ -3053,7 +3332,11 @@ int offset = -1; LIR_Opr counter_holder; if (level == CompLevel_limited_profile) { - address counters_adr = method->ensure_method_counters(); + MethodCounters* counters_adr = method->ensure_method_counters(); + if (counters_adr == NULL) { + bailout("method counters allocation failed"); + return; + } counter_holder = new_pointer_register(); __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder); offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() : @@ -3091,7 +3374,7 @@ BasicTypeList* signature = new BasicTypeList(x->number_of_arguments()); if (x->pass_thread()) { - signature->append(T_ADDRESS); + signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread args->append(getThreadPointer()); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_LIRGenerator.hpp --- a/src/share/vm/c1/c1_LIRGenerator.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -434,6 +434,12 @@ void do_ThreadIDIntrinsic(Intrinsic* x); void do_ClassIDIntrinsic(Intrinsic* x); #endif + ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, + Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k, + ciKlass* callee_signature_k); + void profile_arguments(ProfileCall* x); + void profile_parameters(Base* x); + void profile_parameters_at_call(ProfileCall* x); public: Compilation* compilation() const { return _compilation; } @@ -534,6 +540,7 @@ virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_ProfileCall (ProfileCall* x); + virtual void do_ProfileReturnType (ProfileReturnType* x); virtual void do_ProfileInvoke (ProfileInvoke* x); virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_LinearScan.cpp --- a/src/share/vm/c1/c1_LinearScan.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_LinearScan.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -75,9 +75,9 @@ // Map BasicType to spill size in 32-bit words, matching VMReg's notion of words #ifdef _LP64 -static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 1, -1}; +static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 2, 1, 2, 1, -1}; #else -static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1}; +static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1, 1, 1, -1}; #endif @@ -1138,8 +1138,10 @@ } } } - - } else if (opr_type != T_LONG) { + // We want to sometimes use logical operations on pointers, in particular in GC barriers. + // Since 64bit logical operations do not currently support operands on stack, we have to make sure + // T_OBJECT doesn't get spilled along with T_LONG.
+ } else if (opr_type != T_LONG LP64_ONLY(&& opr_type != T_OBJECT)) { // integer instruction (note: long operands must always be in register) switch (op->code()) { case lir_cmp: diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Optimizer.cpp --- a/src/share/vm/c1/c1_Optimizer.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Optimizer.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -531,6 +531,7 @@ void do_UnsafePrefetchRead (UnsafePrefetchRead* x); void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); void do_ProfileCall (ProfileCall* x); + void do_ProfileReturnType (ProfileReturnType* x); void do_ProfileInvoke (ProfileInvoke* x); void do_RuntimeCall (RuntimeCall* x); void do_MemBar (MemBar* x); @@ -657,6 +658,8 @@ void handle_Intrinsic (Intrinsic* x); void handle_ExceptionObject (ExceptionObject* x); void handle_Phi (Phi* x); + void handle_ProfileCall (ProfileCall* x); + void handle_ProfileReturnType (ProfileReturnType* x); }; @@ -715,7 +718,9 @@ void NullCheckVisitor::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {} void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {} void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {} -void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check(); } +void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check(); + nce()->handle_ProfileCall(x); } +void NullCheckVisitor::do_ProfileReturnType (ProfileReturnType* x) { nce()->handle_ProfileReturnType(x); } void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {} void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {} void NullCheckVisitor::do_MemBar (MemBar* x) {} @@ -1134,6 +1139,15 @@ } } +void NullCheckEliminator::handle_ProfileCall(ProfileCall* x) { + for (int i = 0; i < x->nb_profiled_args(); i++) { + x->set_arg_needs_null_check(i, !set_contains(x->profiled_arg_at(i))); + } +} + +void NullCheckEliminator::handle_ProfileReturnType(ProfileReturnType* x) { + x->set_needs_null_check(!set_contains(x->ret())); +} void Optimizer::eliminate_null_checks() { ResourceMark rm; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_RangeCheckElimination.hpp --- a/src/share/vm/c1/c1_RangeCheckElimination.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_RangeCheckElimination.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -162,7 +162,8 @@ void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ }; void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }; void do_ProfileCall (ProfileCall* x) { /* nothing to do */ }; - void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ }; + void do_ProfileReturnType (ProfileReturnType* x) { /* nothing to do */ }; + void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ }; void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ }; void do_MemBar (MemBar* x) { /* nothing to do */ }; void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ }; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_Runtime1.cpp --- a/src/share/vm/c1/c1_Runtime1.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_Runtime1.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -542,8 +542,7 @@ // exception handler can cause class loading, which might throw an // exception and those fields are expected to be clear during // normal bytecode execution. 
- thread->set_exception_oop(NULL); - thread->set_exception_pc(NULL); + thread->clear_exception_oop_and_pc(); continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false); // If an exception was thrown during exception dispatch, the exception oop may have changed diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_ValueMap.hpp --- a/src/share/vm/c1/c1_ValueMap.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_ValueMap.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -203,6 +203,7 @@ void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ } void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ } void do_ProfileCall (ProfileCall* x) { /* nothing to do */ } + void do_ProfileReturnType (ProfileReturnType* x) { /* nothing to do */ } void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ }; void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ }; void do_MemBar (MemBar* x) { /* nothing to do */ }; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/c1/c1_globals.hpp --- a/src/share/vm/c1/c1_globals.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/c1/c1_globals.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -341,7 +341,6 @@ diagnostic(bool, C1PatchInvokeDynamic, true, \ "Patch invokedynamic appendix not known at compile time") \ \ - \ // Read default values for c1 globals diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciClassList.hpp --- a/src/share/vm/ci/ciClassList.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciClassList.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -102,6 +102,7 @@ friend class ciMethodHandle; \ friend class ciMethodType; \ friend class ciReceiverTypeData; \ +friend class ciTypeEntries; \ friend class ciSymbol; \ friend class ciArray; \ friend class ciObjArray; \ diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciEnv.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -483,8 +483,7 @@ { // We have to lock the cpool to keep the oop from being resolved // while we are accessing it. - oop cplock = cpool->lock(); - ObjectLocker ol(cplock, THREAD, cplock != NULL); + MonitorLockerEx ml(cpool->lock()); constantTag tag = cpool->tag_at(index); if (tag.is_klass()) { // The klass has been inserted into the constant pool @@ -936,7 +935,9 @@ // Prevent SystemDictionary::add_to_hierarchy from running // and invalidating our dependencies until we install this method. + // No safepoints are allowed. Otherwise, class redefinition can occur in between. MutexLocker ml(Compile_lock); + No_Safepoint_Verifier nsv; // Change in Jvmti state may invalidate compilation. if (!failing() && @@ -1002,16 +1003,6 @@ // Free codeBlobs code_buffer->free_blob(); - // stress test 6243940 by immediately making the method - // non-entrant behind the system's back. This has serious - // side effects on the code cache and is not meant for - // general stress testing - if (nm != NULL && StressNonEntrant) { - MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); - NativeJump::patch_verified_entry(nm->entry_point(), nm->verified_entry_point(), - SharedRuntime::get_handle_wrong_method_stub()); - } - if (nm == NULL) { // The CodeCache is full. Print out warning and disable compilation. 
record_failure("code cache is full"); @@ -1037,11 +1028,11 @@ char *method_name = method->name_and_sig_as_C_string(); tty->print_cr("Replacing method %s", method_name); } - if (old != NULL ) { + if (old != NULL) { old->make_not_entrant(); } } - if (TraceNMethodInstalls ) { + if (TraceNMethodInstalls) { ResourceMark rm; char *method_name = method->name_and_sig_as_C_string(); ttyLocker ttyl; @@ -1052,7 +1043,7 @@ // Allow the code to be executed method->set_code(method, nm); } else { - if (TraceNMethodInstalls ) { + if (TraceNMethodInstalls) { ResourceMark rm; char *method_name = method->name_and_sig_as_C_string(); ttyLocker ttyl; @@ -1062,7 +1053,6 @@ entry_bci); } method->method_holder()->add_osr_nmethod(nm); - } } } @@ -1154,9 +1144,12 @@ GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();) } -void ciEnv::dump_replay_data(outputStream* out) { - VM_ENTRY_MARK; - MutexLocker ml(Compile_lock); +// ------------------------------------------------------------------ +// ciEnv::dump_replay_data* + +// Don't change thread state and acquire any locks. +// Safe to call from VM error reporter. +void ciEnv::dump_replay_data_unsafe(outputStream* out) { ResourceMark rm; #if INCLUDE_JVMTI out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables); @@ -1181,3 +1174,10 @@ entry_bci, comp_level); out->flush(); } + +void ciEnv::dump_replay_data(outputStream* out) { + GUARDED_VM_ENTRY( + MutexLocker ml(Compile_lock); + dump_replay_data_unsafe(out); + ) +} diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciEnv.hpp --- a/src/share/vm/ci/ciEnv.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciEnv.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -452,6 +452,7 @@ // Dump the compilation replay data for the ciEnv to the stream. 
void dump_replay_data(outputStream* out); + void dump_replay_data_unsafe(outputStream* out); }; #endif // SHARE_VM_CI_CIENV_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciInstanceKlass.cpp --- a/src/share/vm/ci/ciInstanceKlass.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciInstanceKlass.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -57,6 +57,7 @@ _init_state = ik->init_state(); _nonstatic_field_size = ik->nonstatic_field_size(); _has_nonstatic_fields = ik->has_nonstatic_fields(); + _has_default_methods = ik->has_default_methods(); _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields: _implementor = NULL; // we will fill these lazily @@ -671,7 +672,6 @@ void ciInstanceKlass::dump_replay_data(outputStream* out) { - ASSERT_IN_VM; ResourceMark rm; InstanceKlass* ik = get_instanceKlass(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciInstanceKlass.hpp --- a/src/share/vm/ci/ciInstanceKlass.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciInstanceKlass.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -52,6 +52,7 @@ bool _has_finalizer; bool _has_subklass; bool _has_nonstatic_fields; + bool _has_default_methods; ciFlags _flags; jint _nonstatic_field_size; @@ -171,6 +172,11 @@ } } + bool has_default_methods() { + assert(is_loaded(), "must be loaded"); + return _has_default_methods; + } + ciInstanceKlass* get_canonical_holder(int offset); ciField* get_field_by_offset(int field_offset, bool is_static); ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static); @@ -235,6 +241,13 @@ bool is_instance_klass() const { return true; } bool is_java_klass() const { return true; } + virtual ciKlass* exact_klass() { + if (is_loaded() && is_final() && !is_interface()) { + return this; + } + return NULL; + } + // Dump the current state of this klass for compilation replay. virtual void dump_replay_data(outputStream* out); }; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciKlass.cpp --- a/src/share/vm/ci/ciKlass.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciKlass.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -66,7 +66,9 @@ // ------------------------------------------------------------------ // ciKlass::is_subtype_of bool ciKlass::is_subtype_of(ciKlass* that) { - assert(is_loaded() && that->is_loaded(), "must be loaded"); + assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii())); + assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii())); + // Check to see if the klasses are identical. if (this == that) { return true; @@ -83,8 +85,8 @@ // ------------------------------------------------------------------ // ciKlass::is_subclass_of bool ciKlass::is_subclass_of(ciKlass* that) { - assert(is_loaded() && that->is_loaded(), "must be loaded"); - // Check to see if the klasses are identical. + assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii())); + assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii())); VM_ENTRY_MARK; Klass* this_klass = get_Klass(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciKlass.hpp --- a/src/share/vm/ci/ciKlass.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciKlass.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -41,6 +41,7 @@ friend class ciEnv; friend class ciField; friend class ciMethod; + friend class ciMethodData; friend class ciObjArrayKlass; private: @@ -121,6 +122,8 @@ // What kind of ciObject is this? 
bool is_klass() const { return true; } + virtual ciKlass* exact_klass() = 0; + void print_name_on(outputStream* st); }; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciMethod.cpp --- a/src/share/vm/ci/ciMethod.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciMethod.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -565,6 +565,116 @@ if (_limit < MorphismLimit) _limit++; } + +void ciMethod::assert_virtual_call_type_ok(int bci) { + assert(java_code_at_bci(bci) == Bytecodes::_invokevirtual || + java_code_at_bci(bci) == Bytecodes::_invokeinterface, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci)))); +} + +void ciMethod::assert_call_type_ok(int bci) { + assert(java_code_at_bci(bci) == Bytecodes::_invokestatic || + java_code_at_bci(bci) == Bytecodes::_invokespecial || + java_code_at_bci(bci) == Bytecodes::_invokedynamic, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci)))); +} + +/** + * Check whether profiling provides a type for the argument i to the + * call at bci bci + * + * @param bci bci of the call + * @param i argument number + * @return profiled type + * + * If the profile reports that the argument may be null, return NULL + * at least for now. + */ +ciKlass* ciMethod::argument_profiled_type(int bci, int i) { + if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) { + ciProfileData* data = method_data()->bci_to_data(bci); + if (data != NULL) { + if (data->is_VirtualCallTypeData()) { + assert_virtual_call_type_ok(bci); + ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData(); + if (i >= call->number_of_arguments()) { + return NULL; + } + ciKlass* type = call->valid_argument_type(i); + if (type != NULL && !call->argument_maybe_null(i)) { + return type; + } + } else if (data->is_CallTypeData()) { + assert_call_type_ok(bci); + ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData(); + if (i >= call->number_of_arguments()) { + return NULL; + } + ciKlass* type = call->valid_argument_type(i); + if (type != NULL && !call->argument_maybe_null(i)) { + return type; + } + } + } + } + return NULL; +} + +/** + * Check whether profiling provides a type for the return value from + * the call at bci bci + * + * @param bci bci of the call + * @return profiled type + * + * If the profile reports that the return value may be null, return NULL + * at least for now. + */ +ciKlass* ciMethod::return_profiled_type(int bci) { + if (MethodData::profile_return() && method_data() != NULL && method_data()->is_mature()) { + ciProfileData* data = method_data()->bci_to_data(bci); + if (data != NULL) { + if (data->is_VirtualCallTypeData()) { + assert_virtual_call_type_ok(bci); + ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData(); + ciKlass* type = call->valid_return_type(); + if (type != NULL && !call->return_maybe_null()) { + return type; + } + } else if (data->is_CallTypeData()) { + assert_call_type_ok(bci); + ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData(); + ciKlass* type = call->valid_return_type(); + if (type != NULL && !call->return_maybe_null()) { + return type; + } + } + } + } + return NULL; +} + +/** + * Check whether profiling provides a type for the parameter i + * + * @param i parameter number + * @return profiled type + * + * If the profile reports that the parameter may be null, return NULL + * at least for now.
+ */ +ciKlass* ciMethod::parameter_profiled_type(int i) { + if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) { + ciParametersTypeData* parameters = method_data()->parameters_type_data(); + if (parameters != NULL && i < parameters->number_of_parameters()) { + ciKlass* type = parameters->valid_parameter_type(i); + if (type != NULL && !parameters->parameter_maybe_null(i)) { + return type; + } + } + } + return NULL; +} + + // ------------------------------------------------------------------ // ciMethod::find_monomorphic_target // @@ -846,7 +956,9 @@ // Return true if allocation was successful or no MDO is required. bool ciMethod::ensure_method_data(methodHandle h_m) { EXCEPTION_CONTEXT; - if (is_native() || is_abstract() || h_m()->is_accessor()) return true; + if (is_native() || is_abstract() || h_m()->is_accessor()) { + return true; + } if (h_m()->method_data() == NULL) { Method::build_interpreter_method_data(h_m, THREAD); if (HAS_PENDING_EXCEPTION) { @@ -903,22 +1015,21 @@ // NULL otherwise. ciMethodData* ciMethod::method_data_or_null() { ciMethodData *md = method_data(); - if (md->is_empty()) return NULL; + if (md->is_empty()) { + return NULL; + } return md; } // ------------------------------------------------------------------ // ciMethod::ensure_method_counters // -address ciMethod::ensure_method_counters() { +MethodCounters* ciMethod::ensure_method_counters() { check_is_loaded(); VM_ENTRY_MARK; methodHandle mh(THREAD, get_Method()); - MethodCounters *counter = mh->method_counters(); - if (counter == NULL) { - counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL); - } - return (address)counter; + MethodCounters* method_counters = mh->get_method_counters(CHECK_NULL); + return method_counters; } // ------------------------------------------------------------------ @@ -1247,7 +1358,6 @@ #undef FETCH_FLAG_FROM_VM void ciMethod::dump_replay_data(outputStream* st) { - ASSERT_IN_VM; ResourceMark rm; Method* method = get_Method(); MethodCounters* mcs = method->method_counters(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciMethod.hpp --- a/src/share/vm/ci/ciMethod.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciMethod.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -117,6 +117,10 @@ *bcp = code; } + // Check bytecode and profile data collected are compatible + void assert_virtual_call_type_ok(int bci); + void assert_call_type_ok(int bci); + public: // Basic method information. ciFlags flags() const { check_is_loaded(); return _flags; } @@ -230,6 +234,11 @@ ciCallProfile call_profile_at_bci(int bci); int interpreter_call_site_count(int bci); + // Does type profiling provide a useful type at this point? 
+ ciKlass* argument_profiled_type(int bci, int i); + ciKlass* parameter_profiled_type(int i); + ciKlass* return_profiled_type(int bci); + ciField* get_field_at_bci( int bci, bool &will_link); ciMethod* get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature); @@ -265,7 +274,7 @@ bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const; bool check_call(int refinfo_index, bool is_static) const; bool ensure_method_data(); // make sure it exists in the VM also - address ensure_method_counters(); + MethodCounters* ensure_method_counters(); int instructions_size(); int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciMethodData.cpp --- a/src/share/vm/ci/ciMethodData.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciMethodData.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -53,6 +53,7 @@ _hint_di = first_di(); // Initialize the escape information (to "don't know."); _eflags = _arg_local = _arg_stack = _arg_returned = 0; + _parameters = NULL; } // ------------------------------------------------------------------ @@ -74,11 +75,14 @@ _hint_di = first_di(); // Initialize the escape information (to "don't know."); _eflags = _arg_local = _arg_stack = _arg_returned = 0; + _parameters = NULL; } void ciMethodData::load_data() { MethodData* mdo = get_MethodData(); - if (mdo == NULL) return; + if (mdo == NULL) { + return; + } // To do: don't copy the data if it is not "ripe" -- require a minimum # // of invocations. @@ -106,6 +110,12 @@ ci_data = next_data(ci_data); data = mdo->next_data(data); } + if (mdo->parameters_type_data() != NULL) { + _parameters = data_layout_at(mdo->parameters_type_data_di()); + ciParametersTypeData* parameters = new ciParametersTypeData(_parameters); + parameters->translate_from(mdo->parameters_type_data()); + } + // Note: Extra data are all BitData, and do not need translation. _current_mileage = MethodData::mileage_of(mdo->method()); _invocation_counter = mdo->invocation_count(); @@ -123,7 +133,7 @@ #endif } -void ciReceiverTypeData::translate_receiver_data_from(ProfileData* data) { +void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) { for (uint row = 0; row < row_limit(); row++) { Klass* k = data->as_ReceiverTypeData()->receiver(row); if (k != NULL) { @@ -134,6 +144,18 @@ } +void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries* entries) { + for (int i = 0; i < _number_of_entries; i++) { + intptr_t k = entries->type(i); + TypeStackSlotEntries::set_type(i, translate_klass(k)); + } +} + +void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) { + intptr_t k = ret->type(); + set_type(translate_klass(k)); +} + // Get the data at an arbitrary (sort of) data index. 
ciProfileData* ciMethodData::data_at(int data_index) { if (out_of_bounds(data_index)) { @@ -164,6 +186,12 @@ return new ciMultiBranchData(data_layout); case DataLayout::arg_info_data_tag: return new ciArgInfoData(data_layout); + case DataLayout::call_type_data_tag: + return new ciCallTypeData(data_layout); + case DataLayout::virtual_call_type_data_tag: + return new ciVirtualCallTypeData(data_layout); + case DataLayout::parameters_type_data_tag: + return new ciParametersTypeData(data_layout); }; } @@ -286,6 +314,42 @@ } } +void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) { + VM_ENTRY_MARK; + MethodData* mdo = get_MethodData(); + if (mdo != NULL) { + ProfileData* data = mdo->bci_to_data(bci); + if (data->is_CallTypeData()) { + data->as_CallTypeData()->set_argument_type(i, k->get_Klass()); + } else { + assert(data->is_VirtualCallTypeData(), "no arguments!"); + data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass()); + } + } +} + +void ciMethodData::set_parameter_type(int i, ciKlass* k) { + VM_ENTRY_MARK; + MethodData* mdo = get_MethodData(); + if (mdo != NULL) { + mdo->parameters_type_data()->set_type(i, k->get_Klass()); + } +} + +void ciMethodData::set_return_type(int bci, ciKlass* k) { + VM_ENTRY_MARK; + MethodData* mdo = get_MethodData(); + if (mdo != NULL) { + ProfileData* data = mdo->bci_to_data(bci); + if (data->is_CallTypeData()) { + data->as_CallTypeData()->set_return_type(k->get_Klass()); + } else { + assert(data->is_VirtualCallTypeData(), "no arguments!"); + data->as_VirtualCallTypeData()->set_return_type(k->get_Klass()); + } + } +} + bool ciMethodData::has_escape_info() { return eflag_set(MethodData::estimated); } @@ -373,7 +437,6 @@ } void ciMethodData::dump_replay_data(outputStream* out) { - ASSERT_IN_VM; ResourceMark rm; MethodData* mdo = get_MethodData(); Method* method = mdo->method(); @@ -477,7 +540,50 @@ } } -void ciReceiverTypeData::print_receiver_data_on(outputStream* st) { +void ciTypeEntries::print_ciklass(outputStream* st, intptr_t k) { + if (TypeEntries::is_type_none(k)) { + st->print("none"); + } else if (TypeEntries::is_type_unknown(k)) { + st->print("unknown"); + } else { + valid_ciklass(k)->print_name_on(st); + } + if (TypeEntries::was_null_seen(k)) { + st->print(" (null seen)"); + } +} + +void ciTypeStackSlotEntries::print_data_on(outputStream* st) const { + for (int i = 0; i < _number_of_entries; i++) { + _pd->tab(st); + st->print("%d: stack (%u) ", i, stack_slot(i)); + print_ciklass(st, type(i)); + st->cr(); + } +} + +void ciReturnTypeEntry::print_data_on(outputStream* st) const { + _pd->tab(st); + st->print("ret "); + print_ciklass(st, type()); + st->cr(); +} + +void ciCallTypeData::print_data_on(outputStream* st) const { + print_shared(st, "ciCallTypeData"); + if (has_arguments()) { + tab(st, true); + st->print("argument types"); + args()->print_data_on(st); + } + if (has_return()) { + tab(st, true); + st->print("return type"); + ret()->print_data_on(st); + } +} + +void ciReceiverTypeData::print_receiver_data_on(outputStream* st) const { uint row; int entries = 0; for (row = 0; row < row_limit(); row++) { @@ -493,13 +599,33 @@ } } -void ciReceiverTypeData::print_data_on(outputStream* st) { +void ciReceiverTypeData::print_data_on(outputStream* st) const { print_shared(st, "ciReceiverTypeData"); print_receiver_data_on(st); } -void ciVirtualCallData::print_data_on(outputStream* st) { +void ciVirtualCallData::print_data_on(outputStream* st) const { print_shared(st, "ciVirtualCallData"); rtd_super()->print_receiver_data_on(st); } + 
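The translate_type_data_from methods above implement the ci snapshot discipline: a ciMethodData starts as a raw copy of the VM MethodData, so each profiled type cell is then rewritten in place, swapping the Klass* payload for a compiler-interface handle while keeping the status bits intact. A standalone sketch of that in-place translation (layout and names are illustrative, not the real encoding):

  #include <cstdint>
  #include <cstdio>

  const intptr_t status_mask = 3;       // low bits: null-seen / unknown flags (assumed layout)

  struct VMKlass { int id; };           // stand-in for the VM-side Klass
  struct CiKlass { VMKlass* vm; };      // stand-in for the compiler-interface handle

  CiKlass* make_ci_handle(VMKlass* k) { // toy handle cache, one slot per klass id
    static CiKlass pool[16];
    pool[k->id].vm = k;
    return &pool[k->id];
  }

  void translate(intptr_t* cells, int n) {
    for (int i = 0; i < n; i++) {
      intptr_t status = cells[i] & status_mask;
      VMKlass* vm = (VMKlass*)(cells[i] & ~status_mask);
      if (vm != nullptr) {
        cells[i] = (intptr_t)make_ci_handle(vm) | status;  // same slot, translated payload
      }
    }
  }

  int main() {
    static VMKlass k = { 1 };
    intptr_t cells[1] = { (intptr_t)&k | 1 };              // raw Klass* plus null-seen bit
    translate(cells, 1);
    CiKlass* ci = (CiKlass*)(cells[0] & ~status_mask);
    std::printf("klass id %d, null seen: %d\n", ci->vm->id, (int)(cells[0] & 1));
    return 0;
  }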
+void ciVirtualCallTypeData::print_data_on(outputStream* st) const { + print_shared(st, "ciVirtualCallTypeData"); + rtd_super()->print_receiver_data_on(st); + if (has_arguments()) { + tab(st, true); + st->print("argument types"); + args()->print_data_on(st); + } + if (has_return()) { + tab(st, true); + st->print("return type"); + ret()->print_data_on(st); + } +} + +void ciParametersTypeData::print_data_on(outputStream* st) const { + st->print_cr("Parametertypes"); + parameters()->print_data_on(st); +} #endif diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciMethodData.hpp --- a/src/share/vm/ci/ciMethodData.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciMethodData.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -41,6 +41,9 @@ class ciArrayData; class ciMultiBranchData; class ciArgInfoData; +class ciCallTypeData; +class ciVirtualCallTypeData; +class ciParametersTypeData; typedef ProfileData ciProfileData; @@ -59,6 +62,121 @@ ciJumpData(DataLayout* layout) : JumpData(layout) {}; }; +class ciTypeEntries { +protected: + static intptr_t translate_klass(intptr_t k) { + Klass* v = TypeEntries::valid_klass(k); + if (v != NULL) { + ciKlass* klass = CURRENT_ENV->get_klass(v); + return with_status(klass, k); + } + return with_status(NULL, k); + } + +public: + static ciKlass* valid_ciklass(intptr_t k) { + if (!TypeEntries::is_type_none(k) && + !TypeEntries::is_type_unknown(k)) { + ciKlass* res = (ciKlass*)TypeEntries::klass_part(k); + assert(res != NULL, "invalid"); + return res; + } else { + return NULL; + } + } + + static intptr_t with_status(ciKlass* k, intptr_t in) { + return TypeEntries::with_status((intptr_t)k, in); + } + +#ifndef PRODUCT + static void print_ciklass(outputStream* st, intptr_t k); +#endif +}; + +class ciTypeStackSlotEntries : public TypeStackSlotEntries, ciTypeEntries { +public: + void translate_type_data_from(const TypeStackSlotEntries* args); + + ciKlass* valid_type(int i) const { + return valid_ciklass(type(i)); + } + + bool maybe_null(int i) const { + return was_null_seen(type(i)); + } + +#ifndef PRODUCT + void print_data_on(outputStream* st) const; +#endif +}; + +class ciReturnTypeEntry : public ReturnTypeEntry, ciTypeEntries { +public: + void translate_type_data_from(const ReturnTypeEntry* ret); + + ciKlass* valid_type() const { + return valid_ciklass(type()); + } + + bool maybe_null() const { + return was_null_seen(type()); + } + +#ifndef PRODUCT + void print_data_on(outputStream* st) const; +#endif +}; + +class ciCallTypeData : public CallTypeData { +public: + ciCallTypeData(DataLayout* layout) : CallTypeData(layout) {} + + ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)CallTypeData::args(); } + ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)CallTypeData::ret(); } + + void translate_from(const ProfileData* data) { + if (has_arguments()) { + args()->translate_type_data_from(data->as_CallTypeData()->args()); + } + if (has_return()) { + ret()->translate_type_data_from(data->as_CallTypeData()->ret()); + } + } + + intptr_t argument_type(int i) const { + assert(has_arguments(), "no arg type profiling data"); + return args()->type(i); + } + + ciKlass* valid_argument_type(int i) const { + assert(has_arguments(), "no arg type profiling data"); + return args()->valid_type(i); + } + + intptr_t return_type() const { + assert(has_return(), "no ret type profiling data"); + return ret()->type(); + } + + ciKlass* valid_return_type() const { + assert(has_return(), "no ret type profiling data"); + return ret()->valid_type(); + } + + bool 
argument_maybe_null(int i) const { + return args()->maybe_null(i); + } + + bool return_maybe_null() const { + return ret()->maybe_null(); + } + +#ifndef PRODUCT + void print_data_on(outputStream* st) const; +#endif +}; + class ciReceiverTypeData : public ReceiverTypeData { public: ciReceiverTypeData(DataLayout* layout) : ReceiverTypeData(layout) {}; @@ -69,7 +187,7 @@ (intptr_t) recv); } - ciKlass* receiver(uint row) { + ciKlass* receiver(uint row) const { assert((uint)row < row_limit(), "oob"); ciKlass* recv = (ciKlass*)intptr_at(receiver0_offset + row * receiver_type_row_cell_count); assert(recv == NULL || recv->is_klass(), "wrong type"); @@ -77,19 +195,19 @@ } // Copy & translate from oop based ReceiverTypeData - virtual void translate_from(ProfileData* data) { + virtual void translate_from(const ProfileData* data) { translate_receiver_data_from(data); } - void translate_receiver_data_from(ProfileData* data); + void translate_receiver_data_from(const ProfileData* data); #ifndef PRODUCT - void print_data_on(outputStream* st); - void print_receiver_data_on(outputStream* st); + void print_data_on(outputStream* st) const; + void print_receiver_data_on(outputStream* st) const; #endif }; class ciVirtualCallData : public VirtualCallData { // Fake multiple inheritance... It's a ciReceiverTypeData also. - ciReceiverTypeData* rtd_super() { return (ciReceiverTypeData*) this; } + ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; } public: ciVirtualCallData(DataLayout* layout) : VirtualCallData(layout) {}; @@ -103,11 +221,73 @@ } // Copy & translate from oop based VirtualCallData - virtual void translate_from(ProfileData* data) { + virtual void translate_from(const ProfileData* data) { rtd_super()->translate_receiver_data_from(data); } #ifndef PRODUCT - void print_data_on(outputStream* st); + void print_data_on(outputStream* st) const; +#endif +}; + +class ciVirtualCallTypeData : public VirtualCallTypeData { +private: + // Fake multiple inheritance... It's a ciReceiverTypeData also. 
+ ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; } +public: + ciVirtualCallTypeData(DataLayout* layout) : VirtualCallTypeData(layout) {} + + void set_receiver(uint row, ciKlass* recv) { + rtd_super()->set_receiver(row, recv); + } + + ciKlass* receiver(uint row) const { + return rtd_super()->receiver(row); + } + + ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)VirtualCallTypeData::args(); } + ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)VirtualCallTypeData::ret(); } + + // Copy & translate from oop based VirtualCallData + virtual void translate_from(const ProfileData* data) { + rtd_super()->translate_receiver_data_from(data); + if (has_arguments()) { + args()->translate_type_data_from(data->as_VirtualCallTypeData()->args()); + } + if (has_return()) { + ret()->translate_type_data_from(data->as_VirtualCallTypeData()->ret()); + } + } + + intptr_t argument_type(int i) const { + assert(has_arguments(), "no arg type profiling data"); + return args()->type(i); + } + + ciKlass* valid_argument_type(int i) const { + assert(has_arguments(), "no arg type profiling data"); + return args()->valid_type(i); + } + + intptr_t return_type() const { + assert(has_return(), "no ret type profiling data"); + return ret()->type(); + } + + ciKlass* valid_return_type() const { + assert(has_return(), "no ret type profiling data"); + return ret()->valid_type(); + } + + bool argument_maybe_null(int i) const { + return args()->maybe_null(i); + } + + bool return_maybe_null() const { + return ret()->maybe_null(); + } + +#ifndef PRODUCT + void print_data_on(outputStream* st) const; #endif }; @@ -137,6 +317,29 @@ ciArgInfoData(DataLayout* layout) : ArgInfoData(layout) {}; }; +class ciParametersTypeData : public ParametersTypeData { +public: + ciParametersTypeData(DataLayout* layout) : ParametersTypeData(layout) {} + + virtual void translate_from(const ProfileData* data) { + parameters()->translate_type_data_from(data->as_ParametersTypeData()->parameters()); + } + + ciTypeStackSlotEntries* parameters() const { return (ciTypeStackSlotEntries*)ParametersTypeData::parameters(); } + + ciKlass* valid_parameter_type(int i) const { + return parameters()->valid_type(i); + } + + bool parameter_maybe_null(int i) const { + return parameters()->maybe_null(i); + } + +#ifndef PRODUCT + void print_data_on(outputStream* st) const; +#endif +}; + // ciMethodData // // This class represents a MethodData* in the HotSpot virtual @@ -182,6 +385,10 @@ // Coherent snapshot of original header. MethodData _orig; + // Area dedicated to parameters. Null if no parameter + // profiling for this method. + DataLayout* _parameters; + ciMethodData(MethodData* md); ciMethodData(); @@ -232,8 +439,6 @@ public: bool is_method_data() const { return true; } - void set_mature() { _state = mature_state; } - bool is_empty() { return _state == empty_state; } bool is_mature() { return _state == mature_state; } @@ -249,6 +454,11 @@ // Also set the number of loops and blocks in the method. // Again, this is used to determine if a method is trivial.
void set_compilation_stats(short loops, short blocks); + // If the compiler finds a profiled type that is known statically + // for sure, set it in the MethodData + void set_argument_type(int bci, int i, ciKlass* k); + void set_parameter_type(int i, ciKlass* k); + void set_return_type(int bci, ciKlass* k); void load_data(); @@ -312,6 +522,10 @@ bool is_arg_returned(int i) const; uint arg_modified(int arg) const; + ciParametersTypeData* parameters_type_data() const { + return _parameters != NULL ? new ciParametersTypeData(_parameters) : NULL; + } + // Code generation helper ByteSize offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data); int byte_offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data) { return in_bytes(offset_of_slot(data, slot_offset_in_data)); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciObjArrayKlass.cpp --- a/src/share/vm/ci/ciObjArrayKlass.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciObjArrayKlass.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -179,3 +179,16 @@ ciObjArrayKlass* ciObjArrayKlass::make(ciKlass* element_klass) { GUARDED_VM_ENTRY(return make_impl(element_klass);) } + +ciKlass* ciObjArrayKlass::exact_klass() { + ciType* base = base_element_type(); + if (base->is_instance_klass()) { + ciInstanceKlass* ik = base->as_instance_klass(); + if (ik->exact_klass() != NULL) { + return this; + } + } else if (base->is_primitive_type()) { + return this; + } + return NULL; +} diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciObjArrayKlass.hpp --- a/src/share/vm/ci/ciObjArrayKlass.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciObjArrayKlass.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -73,6 +73,8 @@ bool is_obj_array_klass() const { return true; } static ciObjArrayKlass* make(ciKlass* element_klass); + + virtual ciKlass* exact_klass(); }; #endif // SHARE_VM_CI_CIOBJARRAYKLASS_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciReplay.cpp --- a/src/share/vm/ci/ciReplay.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciReplay.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -965,14 +965,12 @@ tty->cr(); } else { EXCEPTION_CONTEXT; - MethodCounters* mcs = method->method_counters(); // m->_instructions_size = rec->instructions_size; m->_instructions_size = -1; m->_interpreter_invocation_count = rec->interpreter_invocation_count; m->_interpreter_throwout_count = rec->interpreter_throwout_count; - if (mcs == NULL) { - mcs = Method::build_method_counters(method, CHECK_AND_CLEAR); - } + MethodCounters* mcs = method->get_method_counters(CHECK_AND_CLEAR); + guarantee(mcs != NULL, "method counters allocation failed"); mcs->invocation_counter()->_counter = rec->invocation_counter; mcs->backedge_counter()->_counter = rec->backedge_counter; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciStreams.hpp --- a/src/share/vm/ci/ciStreams.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciStreams.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -277,11 +277,14 @@ class ciSignatureStream : public StackObj { private: ciSignature* _sig; - int _pos; + int _pos; + // holder is a method's holder + ciKlass* _holder; public: - ciSignatureStream(ciSignature* signature) { + ciSignatureStream(ciSignature* signature, ciKlass* holder = NULL) { _sig = signature; _pos = 0; + _holder = holder; } bool at_return_type() { return _pos == _sig->count(); } @@ -301,6 +304,23 @@ return _sig->type_at(_pos); } } + + // next klass in the signature + ciKlass* next_klass() { + ciKlass* sig_k; + if (_holder != NULL) { + sig_k = _holder; + _holder = NULL; + } else { 
+ while (!type()->is_klass()) { + next(); + } + assert(!at_return_type(), "passed end of signature"); + sig_k = type()->as_klass(); + next(); + } + return sig_k; + } }; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/ci/ciTypeArrayKlass.hpp --- a/src/share/vm/ci/ciTypeArrayKlass.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/ci/ciTypeArrayKlass.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -57,6 +57,10 @@ // Make an array klass corresponding to the specified primitive type. static ciTypeArrayKlass* make(BasicType type); + + virtual ciKlass* exact_klass() { + return this; + } }; #endif // SHARE_VM_CI_CITYPEARRAYKLASS_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/classFileParser.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -2197,8 +2197,8 @@ } if (lvt_cnt == max_lvt_cnt) { max_lvt_cnt <<= 1; - REALLOC_RESOURCE_ARRAY(u2, localvariable_table_length, lvt_cnt, max_lvt_cnt); - REALLOC_RESOURCE_ARRAY(u2*, localvariable_table_start, lvt_cnt, max_lvt_cnt); + localvariable_table_length = REALLOC_RESOURCE_ARRAY(u2, localvariable_table_length, lvt_cnt, max_lvt_cnt); + localvariable_table_start = REALLOC_RESOURCE_ARRAY(u2*, localvariable_table_start, lvt_cnt, max_lvt_cnt); } localvariable_table_start[lvt_cnt] = parse_localvariable_table(code_length, @@ -2226,8 +2226,8 @@ // Parse local variable type table if (lvtt_cnt == max_lvtt_cnt) { max_lvtt_cnt <<= 1; - REALLOC_RESOURCE_ARRAY(u2, localvariable_type_table_length, lvtt_cnt, max_lvtt_cnt); - REALLOC_RESOURCE_ARRAY(u2*, localvariable_type_table_start, lvtt_cnt, max_lvtt_cnt); + localvariable_type_table_length = REALLOC_RESOURCE_ARRAY(u2, localvariable_type_table_length, lvtt_cnt, max_lvtt_cnt); + localvariable_type_table_start = REALLOC_RESOURCE_ARRAY(u2*, localvariable_type_table_start, lvtt_cnt, max_lvtt_cnt); } localvariable_type_table_start[lvtt_cnt] = parse_localvariable_table(code_length, @@ -4080,8 +4080,7 @@ // Generate any default methods - default methods are interface methods // that have a default implementation. This is new with Lambda project. - if (has_default_methods && !access_flags.is_interface() && - local_interfaces->length() > 0) { + if (has_default_methods ) { DefaultMethods::generate_default_methods( this_klass(), &all_mirandas, CHECK_(nullHandle)); } @@ -4484,9 +4483,8 @@ for (int index = 0; index < num_methods; index++) { Method* m = methods->at(index); - // skip private, static and methods - if ((!m->is_private()) && - (!m->is_static()) && + // skip static and methods + if ((!m->is_static()) && (m->name() != vmSymbols::object_initializer_name())) { Symbol* name = m->name(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/classLoaderData.cpp --- a/src/share/vm/classfile/classLoaderData.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/classLoaderData.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -131,6 +131,17 @@ } } +void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) { + // Lock to avoid classes being modified/added/removed during iteration + MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag); + for (Klass* k = _klasses; k != NULL; k = k->next_link()) { + // Do not filter ArrayKlass oops here... 
+ if (k->oop_is_array() || (k->oop_is_instance() && InstanceKlass::cast(k)->is_loaded())) { + klass_closure->do_klass(k); + } + } +} + void ClassLoaderData::classes_do(void f(InstanceKlass*)) { for (Klass* k = _klasses; k != NULL; k = k->next_link()) { if (k->oop_is_instance()) { @@ -600,6 +611,12 @@ } } +void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) { + for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { + cld->loaded_classes_do(klass_closure); + } +} + void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/classLoaderData.hpp --- a/src/share/vm/classfile/classLoaderData.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/classLoaderData.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -78,6 +78,7 @@ static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void classes_do(KlassClosure* klass_closure); static void classes_do(void f(Klass* const)); + static void loaded_classes_do(KlassClosure* klass_closure); static void classes_unloading_do(void f(Klass* const)); static bool do_unloading(BoolObjectClosure* is_alive); @@ -186,6 +187,7 @@ bool keep_alive() const { return _keep_alive; } bool is_alive(BoolObjectClosure* is_alive_closure) const; void classes_do(void f(Klass*)); + void loaded_classes_do(KlassClosure* klass_closure); void classes_do(void f(InstanceKlass*)); // Deallocate free list during class unloading. diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/defaultMethods.cpp --- a/src/share/vm/classfile/defaultMethods.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/defaultMethods.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -171,8 +171,12 @@ } bool is_cancelled() const { return _cancelled; } + // This code used to skip interface classes because their only + // superclass was j.l.Object which would be also covered by class + // superclass hierarchy walks. Now that the starting point can be + // an interface, we must ensure we catch j.l.Object as the super. 
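
Concretely, the function below now answers true for an interface whose super is java.lang.Object, so a hierarchy walk started at an interface still visits Object. A standalone sketch of that walk, using an illustrative stand-in for InstanceKlass rather than the real type:

#include <cstdio>

// Illustrative stand-in: every class or interface except java.lang.Object
// has a super; interfaces are no longer filtered out of the walk.
struct K {
  const char* name;
  const K*    super;        // NULL only for java.lang.Object
  bool        is_interface;
};

// After the change: stop only when there is no super at all.
static bool has_super(const K* k) { return k->super != nullptr; }

static void walk_hierarchy(const K* k) {
  for (const K* c = k; c != nullptr; c = has_super(c) ? c->super : nullptr) {
    std::printf("%s%s\n", c->name, c->is_interface ? " (interface)" : "");
  }
}

int main() {
  K object = { "java/lang/Object", nullptr, false };
  K iface  = { "MyInterface", &object, true };
  walk_hierarchy(&iface);   // visits MyInterface, then java.lang.Object
  return 0;
}
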
static bool has_super(InstanceKlass* cls) { - return cls->super() != NULL && !cls->is_interface(); + return cls->super() != NULL; } Node* node_at_depth(int i) const { @@ -345,7 +349,6 @@ } Symbol* generate_no_defaults_message(TRAPS) const; - Symbol* generate_abstract_method_message(Method* method, TRAPS) const; Symbol* generate_conflicts_message(GrowableArray* methods, TRAPS) const; public: @@ -392,32 +395,40 @@ return; } + // Qualified methods are maximally-specific methods + // These include public, instance concrete (=default) and abstract methods GrowableArray qualified_methods; + int num_defaults = 0; + int default_index = -1; + int qualified_index = -1; for (int i = 0; i < _members.length(); ++i) { Pair entry = _members.at(i); if (entry.second == QUALIFIED) { qualified_methods.append(entry.first); + qualified_index++; + if (entry.first->is_default_method()) { + num_defaults++; + default_index = qualified_index; + + } } } if (qualified_methods.length() == 0) { _exception_message = generate_no_defaults_message(CHECK); _exception_name = vmSymbols::java_lang_AbstractMethodError(); - } else if (qualified_methods.length() == 1) { - Method* method = qualified_methods.at(0); - if (method->is_abstract()) { - _exception_message = generate_abstract_method_message(method, CHECK); - _exception_name = vmSymbols::java_lang_AbstractMethodError(); - } else { - _selected_target = qualified_methods.at(0); - } - } else { + // If only one qualified method is default, select that + } else if (num_defaults == 1) { + _selected_target = qualified_methods.at(default_index); + } else if (num_defaults > 1) { _exception_message = generate_conflicts_message(&qualified_methods,CHECK); _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError(); + if (TraceDefaultMethods) { + _exception_message->print_value_on(tty); + tty->print_cr(""); + } } - - assert((has_target() ^ throws_exception()) == 1, - "One and only one must be true"); + // leave abstract methods alone, they will be found via normal search path } bool contains_signature(Symbol* query) { @@ -475,20 +486,6 @@ return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL); } -Symbol* MethodFamily::generate_abstract_method_message(Method* method, TRAPS) const { - Symbol* klass = method->klass_name(); - Symbol* name = method->name(); - Symbol* sig = method->signature(); - stringStream ss; - ss.print("Method "); - ss.write((const char*)klass->bytes(), klass->utf8_length()); - ss.print("."); - ss.write((const char*)name->bytes(), name->utf8_length()); - ss.write((const char*)sig->bytes(), sig->utf8_length()); - ss.print(" is abstract"); - return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL); -} - Symbol* MethodFamily::generate_conflicts_message(GrowableArray* methods, TRAPS) const { stringStream ss; ss.print("Conflicting default methods:"); @@ -595,6 +592,18 @@ #endif // ndef PRODUCT }; +static bool already_in_vtable_slots(GrowableArray* slots, Method* m) { + bool found = false; + for (int j = 0; j < slots->length(); ++j) { + if (slots->at(j)->name() == m->name() && + slots->at(j)->signature() == m->signature() ) { + found = true; + break; + } + } + return found; +} + static GrowableArray* find_empty_vtable_slots( InstanceKlass* klass, GrowableArray* mirandas, TRAPS) { @@ -604,8 +613,10 @@ // All miranda methods are obvious candidates for (int i = 0; i < mirandas->length(); ++i) { - EmptyVtableSlot* slot = new EmptyVtableSlot(mirandas->at(i)); - slots->append(slot); + Method* m = mirandas->at(i); + if 
(!already_in_vtable_slots(slots, m)) { + slots->append(new EmptyVtableSlot(m)); + } } // Also any overpasses in our superclasses, that we haven't implemented. @@ -621,7 +632,26 @@ // unless we have a real implementation of it in the current class. Method* impl = klass->lookup_method(m->name(), m->signature()); if (impl == NULL || impl->is_overpass()) { - slots->append(new EmptyVtableSlot(m)); + if (!already_in_vtable_slots(slots, m)) { + slots->append(new EmptyVtableSlot(m)); + } + } + } + } + + // also any default methods in our superclasses + if (super->default_methods() != NULL) { + for (int i = 0; i < super->default_methods()->length(); ++i) { + Method* m = super->default_methods()->at(i); + // m is a method that would have been a miranda if not for the + // default method processing that occurred on behalf of our superclass, + // so it's a method we want to re-examine in this new context. That is, + // unless we have a real implementation of it in the current class. + Method* impl = klass->lookup_method(m->name(), m->signature()); + if (impl == NULL || impl->is_overpass()) { + if (!already_in_vtable_slots(slots, m)) { + slots->append(new EmptyVtableSlot(m)); + } } } } @@ -678,8 +708,10 @@ Method* m = iklass->find_method(_method_name, _method_signature); // private interface methods are not candidates for default methods // invokespecial to private interface methods doesn't use default method logic + // The overpasses are your supertypes' errors, we do not include them // future: take access controls into account for superclass methods - if (m != NULL && (!iklass->is_interface() || m->is_public())) { + if (m != NULL && !m->is_static() && !m->is_overpass() && + (!iklass->is_interface() || m->is_public())) { if (_family == NULL) { _family = new StatefulMethodFamily(); } @@ -700,7 +732,7 @@ -static void create_overpasses( +static void create_defaults_and_exceptions( GrowableArray* slots, InstanceKlass* klass, TRAPS); static void generate_erased_defaults( @@ -721,6 +753,8 @@ static void merge_in_new_methods(InstanceKlass* klass, GrowableArray* new_methods, TRAPS); +static void create_default_methods( InstanceKlass* klass, + GrowableArray* new_methods, TRAPS); // This is the guts of the default methods implementation. This is called just // after the classfile has been parsed if some ancestor has default methods. @@ -753,7 +787,8 @@ #ifndef PRODUCT if (TraceDefaultMethods) { ResourceMark rm; // be careful with these! - tty->print_cr("Class %s requires default method processing", + tty->print_cr("%s %s requires default method processing", + klass->is_interface() ? 
"Interface" : "Class", klass->name()->as_klass_external_name()); PrintHierarchy printer; printer.run(klass); @@ -778,11 +813,11 @@ } #ifndef PRODUCT if (TraceDefaultMethods) { - tty->print_cr("Creating overpasses..."); + tty->print_cr("Creating defaults and overpasses..."); } #endif // ndef PRODUCT - create_overpasses(empty_slots, klass, CHECK); + create_defaults_and_exceptions(empty_slots, klass, CHECK); #ifndef PRODUCT if (TraceDefaultMethods) { @@ -791,66 +826,6 @@ #endif // ndef PRODUCT } - - -#ifdef ASSERT -// Return true is broad type is a covariant return of narrow type -static bool covariant_return_type(BasicType narrow, BasicType broad) { - if (narrow == broad) { - return true; - } - if (broad == T_OBJECT) { - return true; - } - return false; -} -#endif - -static int assemble_redirect( - BytecodeConstantPool* cp, BytecodeBuffer* buffer, - Symbol* incoming, Method* target, TRAPS) { - - BytecodeAssembler assem(buffer, cp); - - SignatureStream in(incoming, true); - SignatureStream out(target->signature(), true); - u2 parameter_count = 0; - - assem.aload(parameter_count++); // load 'this' - - while (!in.at_return_type()) { - assert(!out.at_return_type(), "Parameter counts do not match"); - BasicType bt = in.type(); - assert(out.type() == bt, "Parameter types are not compatible"); - assem.load(bt, parameter_count); - if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) { - assem.checkcast(out.as_symbol(THREAD)); - } else if (bt == T_LONG || bt == T_DOUBLE) { - ++parameter_count; // longs and doubles use two slots - } - ++parameter_count; - in.next(); - out.next(); - } - assert(out.at_return_type(), "Parameter counts do not match"); - assert(covariant_return_type(out.type(), in.type()), "Return types are not compatible"); - - if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) { - ++parameter_count; // need room for return value - } - if (target->method_holder()->is_interface()) { - assem.invokespecial(target); - } else { - assem.invokevirtual(target); - } - - if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) { - assem.checkcast(in.as_symbol(THREAD)); - } - assem._return(in.type()); - return parameter_count; -} - static int assemble_method_error( BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* errorName, Symbol* message, TRAPS) { @@ -898,7 +873,6 @@ m->set_max_locals(params); m->constMethod()->set_stackmap_data(NULL); m->set_code(code_start); - m->set_force_inline(true); return m; } @@ -924,18 +898,18 @@ } } -// A "bridge" is a method created by javac to bridge the gap between -// an implementation and a generically-compatible, but different, signature. -// Bridges have actual bytecode implementation in classfiles. -// An "overpass", on the other hand, performs the same function as a bridge -// but does not occur in a classfile; the VM creates overpass itself, -// when it needs a path to get from a call site to an default method, and -// a bridge doesn't exist. -static void create_overpasses( +// Create default_methods list for the current class. +// With the VM only processing erased signatures, the VM only +// creates an overpass in a conflict case or a case with no candidates. +// This allows virtual methods to override the overpass, but ensures +// that a local method search will find the exception rather than an abstract +// or default method that is not a valid candidate. 
+static void create_defaults_and_exceptions( GrowableArray* slots, InstanceKlass* klass, TRAPS) { GrowableArray overpasses; + GrowableArray defaults; BytecodeConstantPool bpool(klass->constants()); for (int i = 0; i < slots->length(); ++i) { @@ -943,7 +917,6 @@ if (slot->is_bound()) { MethodFamily* method = slot->get_binding(); - int max_stack = 0; BytecodeBuffer buffer; #ifndef PRODUCT @@ -953,26 +926,27 @@ tty->print_cr(""); if (method->has_target()) { method->print_selected(tty, 1); - } else { + } else if (method->throws_exception()) { method->print_exception(tty, 1); } } #endif // ndef PRODUCT + if (method->has_target()) { Method* selected = method->get_selected_target(); if (selected->method_holder()->is_interface()) { - max_stack = assemble_redirect( - &bpool, &buffer, slot->signature(), selected, CHECK); + defaults.push(selected); } } else if (method->throws_exception()) { - max_stack = assemble_method_error(&bpool, &buffer, method->get_exception_name(), method->get_exception_message(), CHECK); - } - if (max_stack != 0) { + int max_stack = assemble_method_error(&bpool, &buffer, + method->get_exception_name(), method->get_exception_message(), CHECK); AccessFlags flags = accessFlags_from( JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE); - Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(), + Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(), flags, max_stack, slot->size_of_parameters(), ConstMethod::OVERPASS, CHECK); + // We push to the methods list: + // overpass methods which are exception throwing methods if (m != NULL) { overpasses.push(m); } @@ -983,11 +957,31 @@ #ifndef PRODUCT if (TraceDefaultMethods) { tty->print_cr("Created %d overpass methods", overpasses.length()); + tty->print_cr("Created %d default methods", defaults.length()); } #endif // ndef PRODUCT - switchover_constant_pool(&bpool, klass, &overpasses, CHECK); - merge_in_new_methods(klass, &overpasses, CHECK); + if (overpasses.length() > 0) { + switchover_constant_pool(&bpool, klass, &overpasses, CHECK); + merge_in_new_methods(klass, &overpasses, CHECK); + } + if (defaults.length() > 0) { + create_default_methods(klass, &defaults, CHECK); + } +} + +static void create_default_methods( InstanceKlass* klass, + GrowableArray* new_methods, TRAPS) { + + int new_size = new_methods->length(); + Array* total_default_methods = MetadataFactory::new_array( + klass->class_loader_data(), new_size, NULL, CHECK); + for (int index = 0; index < new_size; index++ ) { + total_default_methods->at_put(index, new_methods->at(index)); + } + Method::sort_methods(total_default_methods, false, false); + + klass->set_default_methods(total_default_methods); } static void sort_methods(GrowableArray* methods) { @@ -1089,7 +1083,9 @@ klass->set_initial_method_idnum(new_size); ClassLoaderData* cld = klass->class_loader_data(); - MetadataFactory::free_array(cld, original_methods); + if (original_methods ->length() > 0) { + MetadataFactory::free_array(cld, original_methods); + } if (original_ordering->length() > 0) { klass->set_method_ordering(merged_ordering); MetadataFactory::free_array(cld, original_ordering); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/dictionary.cpp --- a/src/share/vm/classfile/dictionary.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/dictionary.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/dictionary.hpp" #include "classfile/systemDictionary.hpp" +#include "memory/iterator.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "utilities/hashtable.inline.hpp" @@ -38,17 +39,21 @@ : TwoOopHashtable(table_size, sizeof(DictionaryEntry)) { _current_class_index = 0; _current_class_entry = NULL; + _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize); }; - Dictionary::Dictionary(int table_size, HashtableBucket* t, int number_of_entries) : TwoOopHashtable(table_size, sizeof(DictionaryEntry), t, number_of_entries) { _current_class_index = 0; _current_class_entry = NULL; + _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize); }; +ProtectionDomainCacheEntry* Dictionary::cache_get(oop protection_domain) { + return _pd_cache_table->get(protection_domain); +} DictionaryEntry* Dictionary::new_entry(unsigned int hash, Klass* klass, ClassLoaderData* loader_data) { @@ -105,11 +110,12 @@ } -void DictionaryEntry::add_protection_domain(oop protection_domain) { +void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_domain) { assert_locked_or_safepoint(SystemDictionary_lock); if (!contains_protection_domain(protection_domain)) { + ProtectionDomainCacheEntry* entry = dict->cache_get(protection_domain); ProtectionDomainEntry* new_head = - new ProtectionDomainEntry(protection_domain, _pd_set); + new ProtectionDomainEntry(entry, _pd_set); // Warning: Preserve store ordering. The SystemDictionary is read // without locks. The new ProtectionDomainEntry must be // complete before other threads can be allowed to see it @@ -193,7 +199,10 @@ void Dictionary::always_strong_oops_do(OopClosure* blk) { - // Follow all system classes and temporary placeholders in dictionary + // Follow all system classes and temporary placeholders in dictionary; only + // protection domain oops contain references into the heap. In a first + // pass over the system dictionary determine which need to be treated as + // strongly reachable and mark them as such. for (int index = 0; index < table_size(); index++) { for (DictionaryEntry *probe = bucket(index); probe != NULL; @@ -201,10 +210,13 @@ Klass* e = probe->klass(); ClassLoaderData* loader_data = probe->loader_data(); if (is_strongly_reachable(loader_data, e)) { - probe->protection_domain_set_oops_do(blk); + probe->set_strongly_reachable(); } } } + // Then iterate over the protection domain cache to apply the closure on the + // previously marked ones. + _pd_cache_table->always_strong_oops_do(blk); } @@ -266,18 +278,12 @@ } } - void Dictionary::oops_do(OopClosure* f) { - for (int index = 0; index < table_size(); index++) { - for (DictionaryEntry* probe = bucket(index); - probe != NULL; - probe = probe->next()) { - probe->protection_domain_set_oops_do(f); - } - } + // Only the protection domain oops contain references into the heap. Iterate + // over all of them. + _pd_cache_table->oops_do(f); } - void Dictionary::methods_do(void f(Method*)) { for (int index = 0; index < table_size(); index++) { for (DictionaryEntry* probe = bucket(index); @@ -292,6 +298,11 @@ } } +void Dictionary::unlink(BoolObjectClosure* is_alive) { + // Only the protection domain cache table may contain references to the heap + // that need to be unlinked. 
+ _pd_cache_table->unlink(is_alive); +} Klass* Dictionary::try_get_next_class() { while (true) { @@ -306,7 +317,6 @@ // never reached } - // Add a loaded class to the system dictionary. // Readers of the SystemDictionary aren't always locked, so _buckets // is volatile. The store of the next field in the constructor is @@ -396,7 +406,7 @@ assert(protection_domain() != NULL, "real protection domain should be present"); - entry->add_protection_domain(protection_domain()); + entry->add_protection_domain(this, protection_domain()); assert(entry->contains_protection_domain(protection_domain()), "now protection domain should be present"); @@ -446,6 +456,146 @@ } } +ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size) + : Hashtable(table_size, sizeof(ProtectionDomainCacheEntry)) +{ +} + +void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) { + assert(SafepointSynchronize::is_at_safepoint(), "must be"); + for (int i = 0; i < table_size(); ++i) { + ProtectionDomainCacheEntry** p = bucket_addr(i); + ProtectionDomainCacheEntry* entry = bucket(i); + while (entry != NULL) { + if (is_alive->do_object_b(entry->literal())) { + p = entry->next_addr(); + } else { + *p = entry->next(); + free_entry(entry); + } + entry = *p; + } + } +} + +void ProtectionDomainCacheTable::oops_do(OopClosure* f) { + for (int index = 0; index < table_size(); index++) { + for (ProtectionDomainCacheEntry* probe = bucket(index); + probe != NULL; + probe = probe->next()) { + probe->oops_do(f); + } + } +} + +uint ProtectionDomainCacheTable::bucket_size() { + return sizeof(ProtectionDomainCacheEntry); +} + +#ifndef PRODUCT +void ProtectionDomainCacheTable::print() { + tty->print_cr("Protection domain cache table (table_size=%d, classes=%d)", + table_size(), number_of_entries()); + for (int index = 0; index < table_size(); index++) { + for (ProtectionDomainCacheEntry* probe = bucket(index); + probe != NULL; + probe = probe->next()) { + probe->print(); + } + } +} + +void ProtectionDomainCacheEntry::print() { + tty->print_cr("entry "PTR_FORMAT" value "PTR_FORMAT" strongly_reachable %d next "PTR_FORMAT, + this, (void*)literal(), _strongly_reachable, next()); +} +#endif + +void ProtectionDomainCacheTable::verify() { + int element_count = 0; + for (int index = 0; index < table_size(); index++) { + for (ProtectionDomainCacheEntry* probe = bucket(index); + probe != NULL; + probe = probe->next()) { + probe->verify(); + element_count++; + } + } + guarantee(number_of_entries() == element_count, + "Verify of protection domain cache table failed"); + debug_only(verify_lookup_length((double)number_of_entries() / table_size())); +} + +void ProtectionDomainCacheEntry::verify() { + guarantee(literal()->is_oop(), "must be an oop"); +} + +void ProtectionDomainCacheTable::always_strong_oops_do(OopClosure* f) { + // the caller marked the protection domain cache entries that we need to apply + // the closure on. Only process them. 
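
The unlink() walk above, whose loop continues below, is the classic pointer-to-pointer deletion over a chained hash bucket: keep a pointer to the link that reaches the current entry, so a dead entry can be spliced out without special-casing the bucket head. A standalone sketch of the idiom; Entry and is_alive are illustrative stand-ins for the table's entry type and the BoolObjectClosure.

#include <cstdio>

struct Entry { int value; Entry* next; };

// Stand-in liveness predicate: keep even values only.
static bool is_alive(const Entry* e) { return (e->value & 1) == 0; }

// Splice dead entries out of a singly-linked bucket, as unlink() does:
// 'p' always points at the link through which 'entry' was reached.
static void unlink_dead(Entry** bucket) {
  Entry** p = bucket;
  Entry* entry = *bucket;
  while (entry != nullptr) {
    if (is_alive(entry)) {
      p = &entry->next;     // advance: this entry survives
    } else {
      *p = entry->next;     // splice out and free the dead entry
      delete entry;
    }
    entry = *p;
  }
}

int main() {
  Entry* bucket = nullptr;
  for (int v = 1; v <= 5; v++) bucket = new Entry{ v, bucket };
  unlink_dead(&bucket);
  for (Entry* e = bucket; e != nullptr; e = e->next) {
    std::printf("%d ", e->value);  // prints the surviving even values
  }
  std::printf("\n");
  return 0;
}
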
+ for (int index = 0; index < table_size(); index++) { + for (ProtectionDomainCacheEntry* probe = bucket(index); + probe != NULL; + probe = probe->next()) { + if (probe->is_strongly_reachable()) { + probe->reset_strongly_reachable(); + probe->oops_do(f); + } + } + } +} + +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) { + unsigned int hash = compute_hash(protection_domain); + int index = hash_to_index(hash); + + ProtectionDomainCacheEntry* entry = find_entry(index, protection_domain); + if (entry == NULL) { + entry = add_entry(index, hash, protection_domain); + } + return entry; +} + +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, oop protection_domain) { + for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) { + if (e->protection_domain() == protection_domain) { + return e; + } + } + + return NULL; +} + +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash, oop protection_domain) { + assert_locked_or_safepoint(SystemDictionary_lock); + assert(index == index_for(protection_domain), "incorrect index?"); + assert(find_entry(index, protection_domain) == NULL, "no double entry"); + + ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain); + Hashtable::add_entry(index, p); + return p; +} + +void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) { + unsigned int hash = compute_hash(to_delete->protection_domain()); + int index = hash_to_index(hash); + + ProtectionDomainCacheEntry** p = bucket_addr(index); + ProtectionDomainCacheEntry* entry = bucket(index); + while (true) { + assert(entry != NULL, "sanity"); + + if (entry == to_delete) { + *p = entry->next(); + Hashtable::free_entry(entry); + break; + } else { + p = entry->next_addr(); + entry = *p; + } + } +} + SymbolPropertyTable::SymbolPropertyTable(int table_size) : Hashtable(table_size, sizeof(SymbolPropertyEntry)) { @@ -532,11 +682,13 @@ tty->cr(); } } + tty->cr(); + _pd_cache_table->print(); + tty->cr(); } #endif - void Dictionary::verify() { guarantee(number_of_entries() >= 0, "Verify of system dictionary failed"); @@ -563,5 +715,7 @@ guarantee(number_of_entries() == element_count, "Verify of system dictionary failed"); debug_only(verify_lookup_length((double)number_of_entries() / table_size())); + + _pd_cache_table->verify(); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/dictionary.hpp --- a/src/share/vm/classfile/dictionary.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/dictionary.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -27,11 +27,14 @@ #include "classfile/systemDictionary.hpp" #include "oops/instanceKlass.hpp" -#include "oops/oop.hpp" +#include "oops/oop.inline.hpp" #include "utilities/hashtable.hpp" class DictionaryEntry; class PSPromotionManager; +class ProtectionDomainCacheTable; +class ProtectionDomainCacheEntry; +class BoolObjectClosure; //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // The data structure for the system dictionary (and the shared system @@ -45,6 +48,8 @@ // pointer to the current hash table entry. static DictionaryEntry* _current_class_entry; + ProtectionDomainCacheTable* _pd_cache_table; + DictionaryEntry* get_entry(int index, unsigned int hash, Symbol* name, ClassLoaderData* loader_data); @@ -93,6 +98,7 @@ void methods_do(void f(Method*)); + void unlink(BoolObjectClosure* is_alive); // Classes loaded by the bootstrap loader are always strongly reachable. 
// If we're not doing class unloading, all classes are strongly reachable. @@ -118,6 +124,7 @@ // Sharing support void reorder_dictionary(); + ProtectionDomainCacheEntry* cache_get(oop protection_domain); #ifndef PRODUCT void print(); @@ -126,21 +133,112 @@ }; // The following classes can be in dictionary.cpp, but we need these -// to be in header file so that SA's vmStructs can access. +// to be in header file so that SA's vmStructs can access them. +class ProtectionDomainCacheEntry : public HashtableEntry { + friend class VMStructs; + private: + // Flag indicating whether this protection domain entry is strongly reachable. + // Used during iterating over the system dictionary to remember oops that need + // to be updated. + bool _strongly_reachable; + public: + oop protection_domain() { return literal(); } + + void init() { + _strongly_reachable = false; + } + + ProtectionDomainCacheEntry* next() { + return (ProtectionDomainCacheEntry*)HashtableEntry::next(); + } + + ProtectionDomainCacheEntry** next_addr() { + return (ProtectionDomainCacheEntry**)HashtableEntry::next_addr(); + } + + void oops_do(OopClosure* f) { + f->do_oop(literal_addr()); + } + + void set_strongly_reachable() { _strongly_reachable = true; } + bool is_strongly_reachable() { return _strongly_reachable; } + void reset_strongly_reachable() { _strongly_reachable = false; } + + void print() PRODUCT_RETURN; + void verify(); +}; + +// The ProtectionDomainCacheTable contains all protection domain oops. The system +// dictionary entries reference its entries instead of having references to oops +// directly. +// This is used to speed up system dictionary iteration: the oops in the +// protection domain are the only ones referring the Java heap. So when there is +// need to update these, instead of going over every entry of the system dictionary, +// we only need to iterate over this set. +// The amount of different protection domains used is typically magnitudes smaller +// than the number of system dictionary entries (loaded classes). +class ProtectionDomainCacheTable : public Hashtable { + friend class VMStructs; +private: + ProtectionDomainCacheEntry* bucket(int i) { + return (ProtectionDomainCacheEntry*) Hashtable::bucket(i); + } + + // The following method is not MT-safe and must be done under lock. 
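
The strongly_reachable flag above is what enables the two-pass scheme used by always_strong_oops_do(): a first pass over dictionary entries only sets marks on the shared cache entries, and a second pass visits each distinct protection domain exactly once, however many dictionary entries reference it. A standalone sketch of that mark-then-visit pattern, before the table's internals continue; CacheSlot and ListEntry are illustrative stand-ins.

#include <cstdio>

// Many dictionary entries may share one cache slot per distinct domain, so
// the visit pass walks the small cache instead of every entry's oop.
struct CacheSlot { int domain_id; bool strongly_reachable; };
struct ListEntry { CacheSlot* cached; ListEntry* next; };

// Pass 1: mark reachable domains through the entries that use them.
static void mark_strongly_reachable(ListEntry* head) {
  for (ListEntry* e = head; e != nullptr; e = e->next) {
    e->cached->strongly_reachable = true;
  }
}

// Pass 2: visit each marked domain once, resetting the mark as we go.
static void visit_marked(CacheSlot* slots, int n) {
  for (int i = 0; i < n; i++) {
    if (slots[i].strongly_reachable) {
      slots[i].strongly_reachable = false;
      std::printf("visiting domain %d exactly once\n", slots[i].domain_id);
    }
  }
}

int main() {
  CacheSlot slots[2] = { { 1, false }, { 2, false } };
  ListEntry c = { &slots[0], nullptr };
  ListEntry b = { &slots[0], &c };   // two entries share domain 1
  ListEntry a = { &slots[1], &b };
  mark_strongly_reachable(&a);
  visit_marked(slots, 2);
  return 0;
}
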
+ ProtectionDomainCacheEntry** bucket_addr(int i) { + return (ProtectionDomainCacheEntry**) Hashtable::bucket_addr(i); + } + + ProtectionDomainCacheEntry* new_entry(unsigned int hash, oop protection_domain) { + ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable::new_entry(hash, protection_domain); + entry->init(); + return entry; + } + + static unsigned int compute_hash(oop protection_domain) { + return (unsigned int)(protection_domain->identity_hash()); + } + + int index_for(oop protection_domain) { + return hash_to_index(compute_hash(protection_domain)); + } + + ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain); + ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain); + +public: + + ProtectionDomainCacheTable(int table_size); + + ProtectionDomainCacheEntry* get(oop protection_domain); + void free(ProtectionDomainCacheEntry* entry); + + void unlink(BoolObjectClosure* cl); + + // GC support + void oops_do(OopClosure* f); + void always_strong_oops_do(OopClosure* f); + + static uint bucket_size(); + + void print() PRODUCT_RETURN; + void verify(); +}; + class ProtectionDomainEntry :public CHeapObj { friend class VMStructs; public: ProtectionDomainEntry* _next; - oop _protection_domain; + ProtectionDomainCacheEntry* _pd_cache; - ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) { - _protection_domain = protection_domain; - _next = next; + ProtectionDomainEntry(ProtectionDomainCacheEntry* pd_cache, ProtectionDomainEntry* next) { + _pd_cache = pd_cache; + _next = next; } ProtectionDomainEntry* next() { return _next; } - oop protection_domain() { return _protection_domain; } + oop protection_domain() { return _pd_cache->protection_domain(); } }; // An entry in the system dictionary, this describes a class as @@ -151,6 +249,24 @@ private: // Contains the set of approved protection domains that can access // this system dictionary entry. + // + // This protection domain set is a set of tuples: + // + // (InstanceKlass C, initiating class loader ICL, Protection Domain PD) + // + // [Note that C.protection_domain(), which is stored in the java.lang.Class + // mirror of C, is NOT the same as PD] + // + // If such an entry (C, ICL, PD) exists in the table, it means that + // it is okay for a class Foo to reference C, where + // + // Foo.protection_domain() == PD, and + // Foo's defining class loader == ICL + // + // The usage of the PD set can be seen in SystemDictionary::validate_protection_domain() + // It is essentially a cache to avoid repeated Java up-calls to + // ClassLoader.checkPackageAccess(). + // ProtectionDomainEntry* _pd_set; ClassLoaderData* _loader_data; @@ -158,7 +274,7 @@ // Tells whether a protection is in the approved set. bool contains_protection_domain(oop protection_domain) const; // Adds a protection domain to the approved set. 
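
The find_entry()/add_entry()/get() trio declared above is a get-or-add on a chained hash table: look up by identity hash first, insert at the bucket head only on a miss, and return the shared entry either way, so every DictionaryEntry referencing the same protection domain ends up pointing at one cache entry. A self-contained sketch of the discipline; the types and the hash are illustrative stand-ins, not the real Hashtable machinery.

#include <cstdint>
#include <cstdio>

struct Entry { const void* key; Entry* next; };

struct Cache {
  static const int SIZE = 16;
  Entry* buckets[SIZE];

  Cache() { for (int i = 0; i < SIZE; i++) buckets[i] = nullptr; }

  // Stand-in for identity_hash(): derive a hash from the address.
  static unsigned hash(const void* key) { return (unsigned)((uintptr_t)key >> 4); }

  Entry* find_entry(int index, const void* key) {
    for (Entry* e = buckets[index]; e != nullptr; e = e->next) {
      if (e->key == key) return e;
    }
    return nullptr;
  }

  // get-or-add: every caller ends up sharing the same entry for a key.
  Entry* get(const void* key) {
    int index = (int)(hash(key) % SIZE);
    Entry* e = find_entry(index, key);
    if (e == nullptr) {
      e = new Entry();
      e->key = key;
      e->next = buckets[index];   // insert at bucket head
      buckets[index] = e;
    }
    return e;
  }
};

int main() {
  Cache cache;
  int domain;
  bool shared = cache.get(&domain) == cache.get(&domain);
  std::printf("same entry on repeat lookup: %s\n", shared ? "yes" : "no");
  return 0;
}
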
- void add_protection_domain(oop protection_domain); + void add_protection_domain(Dictionary* dict, oop protection_domain); Klass* klass() const { return (Klass*)literal(); } Klass** klass_addr() { return (Klass**)literal_addr(); } @@ -189,12 +305,11 @@ : contains_protection_domain(protection_domain()); } - - void protection_domain_set_oops_do(OopClosure* f) { + void set_strongly_reachable() { for (ProtectionDomainEntry* current = _pd_set; current != NULL; current = current->_next) { - f->do_oop(&(current->_protection_domain)); + current->_pd_cache->set_strongly_reachable(); } } @@ -202,7 +317,7 @@ for (ProtectionDomainEntry* current = _pd_set; current != NULL; current = current->_next) { - current->_protection_domain->verify(); + current->_pd_cache->protection_domain()->verify(); } } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/javaClasses.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1382,8 +1382,15 @@ const char* klass_name = holder->external_name(); int buf_len = (int)strlen(klass_name); - // pushing to the stack trace added one. + // The method id may point to an obsolete method, can't get more stack information Method* method = holder->method_with_idnum(method_id); + if (method == NULL) { + char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64); + // This is what the java code prints in this case - added Redefined + sprintf(buf, "\tat %s.null (Redefined)", klass_name); + return buf; + } + char* method_name = method->name()->as_C_string(); buf_len += (int)strlen(method_name); @@ -1779,7 +1786,8 @@ return element; } -oop java_lang_StackTraceElement::create(Handle mirror, int method_id, int version, int bci, TRAPS) { +oop java_lang_StackTraceElement::create(Handle mirror, int method_id, + int version, int bci, TRAPS) { // Allocate java.lang.StackTraceElement instance Klass* k = SystemDictionary::StackTraceElement_klass(); assert(k != NULL, "must be loaded in 1.4+"); @@ -1796,8 +1804,16 @@ oop classname = StringTable::intern((char*) str, CHECK_0); java_lang_StackTraceElement::set_declaringClass(element(), classname); + Method* method = holder->method_with_idnum(method_id); + // Method on stack may be obsolete because it was redefined so cannot be + // found by idnum. 
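
Before the guard itself, a sketch of the defensive formatting this javaClasses.cpp change introduces: when the lookup by method id yields NULL because the method was made obsolete by class redefinition, print the same placeholder the Java-level code would, instead of dereferencing a missing method. The lookup function here is a hypothetical stand-in for method_with_idnum().

#include <cstdio>

// Stand-in lookup: returns NULL when the method id refers to a method that
// was made obsolete by class redefinition.
static const char* method_name_or_null(int method_id) {
  return method_id == 42 ? "run" : nullptr;
}

// Mirrors the fallback above: never dereference a missing method; emit the
// "(Redefined)" placeholder instead.
static void print_frame(const char* klass_name, int method_id) {
  const char* m = method_name_or_null(method_id);
  if (m == nullptr) {
    std::printf("\tat %s.null (Redefined)\n", klass_name);
    return;
  }
  std::printf("\tat %s.%s\n", klass_name, m);
}

int main() {
  print_frame("com/example/Task", 42);  // normal frame
  print_frame("com/example/Task", 7);   // obsolete method id
  return 0;
}
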
+ if (method == NULL) { + // leave name and fileName null + java_lang_StackTraceElement::set_lineNumber(element(), -1); + return element(); + } + // Fill in method name - Method* method = holder->method_with_idnum(method_id); oop methodname = StringTable::intern(method->name(), CHECK_0); java_lang_StackTraceElement::set_methodName(element(), methodname); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/metadataOnStackMark.cpp --- a/src/share/vm/classfile/metadataOnStackMark.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/metadataOnStackMark.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -27,6 +27,7 @@ #include "code/codeCache.hpp" #include "compiler/compileBroker.hpp" #include "oops/metadata.hpp" +#include "prims/jvmtiImpl.hpp" #include "runtime/synchronizer.hpp" #include "runtime/thread.hpp" #include "utilities/growableArray.hpp" @@ -48,6 +49,7 @@ Threads::metadata_do(Metadata::mark_on_stack); CodeCache::alive_nmethods_do(nmethod::mark_on_stack); CompileBroker::mark_on_stack(); + JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack); } MetadataOnStackMark::~MetadataOnStackMark() { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/symbolTable.hpp --- a/src/share/vm/classfile/symbolTable.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/symbolTable.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -107,18 +107,13 @@ add(loader_data, cp, names_count, name, lengths, cp_indices, hashValues, THREAD); } - // Table size - enum { - symbol_table_size = 20011 - }; - Symbol* lookup(int index, const char* name, int len, unsigned int hash); SymbolTable() - : Hashtable(symbol_table_size, sizeof (HashtableEntry)) {} + : Hashtable(SymbolTableSize, sizeof (HashtableEntry)) {} SymbolTable(HashtableBucket* t, int number_of_entries) - : Hashtable(symbol_table_size, sizeof (HashtableEntry), t, + : Hashtable(SymbolTableSize, sizeof (HashtableEntry), t, number_of_entries) {} // Arena for permanent symbols (null class loader) that are never unloaded @@ -136,6 +131,9 @@ // The symbol table static SymbolTable* the_table() { return _the_table; } + // Size of one bucket in the string table. Used when checking for rollover. + static uint bucket_size() { return sizeof(HashtableBucket); } + static void create_table() { assert(_the_table == NULL, "One symbol table allowed."); _the_table = new SymbolTable(); @@ -145,8 +143,11 @@ static void create_table(HashtableBucket* t, int length, int number_of_entries) { assert(_the_table == NULL, "One symbol table allowed."); - assert(length == symbol_table_size * sizeof(HashtableBucket), - "bad shared symbol size."); + + // If CDS archive used a different symbol table size, use that size instead + // which is better than giving an error. + SymbolTableSize = length/bucket_size(); + _the_table = new SymbolTable(t, number_of_entries); // if CDS give symbol table a default arena size since most symbols // are already allocated in the shared misc section. 
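
The shared-archive path above now derives the table size from the archived data, SymbolTableSize = length / bucket_size(), rather than asserting against a fixed 20011-bucket default. The arithmetic in isolation; Bucket is an illustrative stand-in for HashtableBucket.

#include <cstdio>

// Stand-in: the archive stores the bucket array as raw bytes, so the
// bucket count is just the byte length divided by one bucket's size.
struct Bucket { void* entry; };

static unsigned bucket_size() { return (unsigned)sizeof(Bucket); }

static unsigned table_size_from_archive(unsigned byte_length) {
  // Adopt whatever size the CDS archive was built with instead of failing.
  return byte_length / bucket_size();
}

int main() {
  unsigned byte_length = 20011 * bucket_size(); // e.g. the old default size
  std::printf("table_size = %u buckets\n", table_size_from_archive(byte_length));
  return 0;
}
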
diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/systemDictionary.cpp --- a/src/share/vm/classfile/systemDictionary.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/systemDictionary.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1697,6 +1697,24 @@ return newsize; } +#ifdef ASSERT +class VerifySDReachableAndLiveClosure : public OopClosure { +private: + BoolObjectClosure* _is_alive; + + template void do_oop_work(T* p) { + oop obj = oopDesc::load_decode_heap_oop(p); + guarantee(_is_alive->do_object_b(obj), "Oop in system dictionary must be live"); + } + +public: + VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { } + + virtual void do_oop(oop* p) { do_oop_work(p); } + virtual void do_oop(narrowOop* p) { do_oop_work(p); } +}; +#endif + // Assumes classes in the SystemDictionary are only unloaded at a safepoint // Note: anonymous classes are not in the SD. bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) { @@ -1707,7 +1725,15 @@ unloading_occurred = dictionary()->do_unloading(); constraints()->purge_loader_constraints(); resolution_errors()->purge_resolution_errors(); -} + } + // Oops referenced by the system dictionary may get unreachable independently + // of the class loader (eg. cached protection domain oops). So we need to + // explicitly unlink them here instead of in Dictionary::do_unloading. + dictionary()->unlink(is_alive); +#ifdef ASSERT + VerifySDReachableAndLiveClosure cl(is_alive); + dictionary()->oops_do(&cl); +#endif return unloading_occurred; } @@ -2334,6 +2360,11 @@ objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty)); assert(appendix_box->obj_at(0) == NULL, ""); + // This should not happen. JDK code should take care of that. + if (accessing_klass.is_null() || method_type.is_null()) { + THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokehandle", empty); + } + // call java.lang.invoke.MethodHandleNatives::linkMethod(... String, MethodType) -> MemberName JavaCallArguments args; args.push_oop(accessing_klass()->java_mirror()); @@ -2459,6 +2490,9 @@ Handle type; if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') { type = find_method_handle_type(signature, caller, CHECK_(empty)); + } else if (caller.is_null()) { + // This should not happen. JDK code should take care of that. + THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad MH constant", empty); } else { ResourceMark rm(THREAD); SignatureStream ss(signature, false); @@ -2522,6 +2556,11 @@ Handle method_name = java_lang_String::create_from_symbol(name, CHECK_(empty)); Handle method_type = find_method_handle_type(type, caller, CHECK_(empty)); + // This should not happen. JDK code should take care of that. + if (caller.is_null() || method_type.is_null()) { + THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokedynamic", empty); + } + objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty)); assert(appendix_box->obj_at(0) == NULL, ""); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/systemDictionary.hpp --- a/src/share/vm/classfile/systemDictionary.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/systemDictionary.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -173,8 +173,6 @@ /* It's okay if this turns out to be NULL in non-1.4 JDKs. 
*/ \ do_klass(nio_Buffer_klass, java_nio_Buffer, Opt ) \ \ - do_klass(PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt ) \ - \ /* Preload boxing klasses */ \ do_klass(Boolean_klass, java_lang_Boolean, Pre ) \ do_klass(Character_klass, java_lang_Character, Pre ) \ diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/verifier.cpp --- a/src/share/vm/classfile/verifier.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/verifier.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -2439,8 +2439,14 @@ && !ref_class_type.equals(current_type()) && !ref_class_type.equals(VerificationType::reference_type( current_class()->super()->name()))) { - bool subtype = ref_class_type.is_assignable_from( - current_type(), this, CHECK_VERIFY(this)); + bool subtype = false; + if (!current_class()->is_anonymous()) { + subtype = ref_class_type.is_assignable_from( + current_type(), this, CHECK_VERIFY(this)); + } else { + subtype = ref_class_type.is_assignable_from(VerificationType::reference_type( + current_class()->host_klass()->name()), this, CHECK_VERIFY(this)); + } if (!subtype) { verify_error(ErrorContext::bad_code(bci), "Bad invokespecial instruction: " @@ -2461,7 +2467,24 @@ } else { // other methods // Ensures that target class is assignable to method class. if (opcode == Bytecodes::_invokespecial) { - current_frame->pop_stack(current_type(), CHECK_VERIFY(this)); + if (!current_class()->is_anonymous()) { + current_frame->pop_stack(current_type(), CHECK_VERIFY(this)); + } else { + // anonymous class invokespecial calls: check if the + // objectref is a subtype of the host_klass of the current class + // to allow an anonymous class to reference methods in the host_klass + VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this)); + VerificationType hosttype = + VerificationType::reference_type(current_class()->host_klass()->name()); + bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this)); + if (!subtype) { + verify_error( ErrorContext::bad_type(current_frame->offset(), + current_frame->stack_top_ctx(), + TypeOrigin::implicit(top)), + "Bad type on operand stack"); + return; + } + } } else if (opcode == Bytecodes::_invokevirtual) { VerificationType stack_object_type = current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this)); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/classfile/vmSymbols.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -574,6 +574,7 @@ template(char_array_void_signature, "([C)V") \ template(int_int_void_signature, "(II)V") \ template(long_long_void_signature, "(JJ)V") \ + template(boolean_long_void_signature, "(ZJ)V") \ template(void_classloader_signature, "()Ljava/lang/ClassLoader;") \ template(void_object_signature, "()Ljava/lang/Object;") \ template(void_class_signature, "()Ljava/lang/Class;") \ @@ -731,6 +732,7 @@ do_class(java_lang_StrictMath, "java/lang/StrictMath") \ do_signature(double2_double_signature, "(DD)D") \ do_signature(int2_int_signature, "(II)I") \ + do_signature(long2_long_signature, "(JJ)J") \ \ /* here are the math names, all together: */ \ do_name(abs_name,"abs") do_name(sin_name,"sin") do_name(cos_name,"cos") \ @@ -739,8 +741,11 @@ do_name(exp_name,"exp") do_name(min_name,"min") do_name(max_name,"max") \ \ do_name(addExact_name,"addExact") \ + do_name(decrementExact_name,"decrementExact") \ + do_name(incrementExact_name,"incrementExact") \ + do_name(multiplyExact_name,"multiplyExact") \ + 
do_name(negateExact_name,"negateExact") \ do_name(subtractExact_name,"subtractExact") \ - do_name(multiplyExact_name,"multiplyExact") \ \ do_intrinsic(_dabs, java_lang_Math, abs_name, double_double_signature, F_S) \ do_intrinsic(_dsin, java_lang_Math, sin_name, double_double_signature, F_S) \ @@ -754,7 +759,18 @@ do_intrinsic(_dexp, java_lang_Math, exp_name, double_double_signature, F_S) \ do_intrinsic(_min, java_lang_Math, min_name, int2_int_signature, F_S) \ do_intrinsic(_max, java_lang_Math, max_name, int2_int_signature, F_S) \ - do_intrinsic(_addExact, java_lang_Math, addExact_name, int2_int_signature, F_S) \ + do_intrinsic(_addExactI, java_lang_Math, addExact_name, int2_int_signature, F_S) \ + do_intrinsic(_addExactL, java_lang_Math, addExact_name, long2_long_signature, F_S) \ + do_intrinsic(_decrementExactI, java_lang_Math, decrementExact_name, int_int_signature, F_S) \ + do_intrinsic(_decrementExactL, java_lang_Math, decrementExact_name, long2_long_signature, F_S) \ + do_intrinsic(_incrementExactI, java_lang_Math, incrementExact_name, int_int_signature, F_S) \ + do_intrinsic(_incrementExactL, java_lang_Math, incrementExact_name, long2_long_signature, F_S) \ + do_intrinsic(_multiplyExactI, java_lang_Math, multiplyExact_name, int2_int_signature, F_S) \ + do_intrinsic(_multiplyExactL, java_lang_Math, multiplyExact_name, long2_long_signature, F_S) \ + do_intrinsic(_negateExactI, java_lang_Math, negateExact_name, int_int_signature, F_S) \ + do_intrinsic(_negateExactL, java_lang_Math, negateExact_name, long_long_signature, F_S) \ + do_intrinsic(_subtractExactI, java_lang_Math, subtractExact_name, int2_int_signature, F_S) \ + do_intrinsic(_subtractExactL, java_lang_Math, subtractExact_name, long2_long_signature, F_S) \ \ do_intrinsic(_floatToRawIntBits, java_lang_Float, floatToRawIntBits_name, float_int_signature, F_S) \ do_name( floatToRawIntBits_name, "floatToRawIntBits") \ @@ -903,7 +919,7 @@ do_signature(copyMemory_signature, "(Ljava/lang/Object;JLjava/lang/Object;JJ)V") \ do_intrinsic(_park, sun_misc_Unsafe, park_name, park_signature, F_RN) \ do_name( park_name, "park") \ - do_signature(park_signature, "(ZJ)V") \ + do_alias(park_signature, boolean_long_void_signature) \ do_intrinsic(_unpark, sun_misc_Unsafe, unpark_name, unpark_signature, F_RN) \ do_name( unpark_name, "unpark") \ do_alias( unpark_signature, /*(LObject;)V*/ object_void_signature) \ diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/code/codeBlob.cpp --- a/src/share/vm/code/codeBlob.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/code/codeBlob.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -245,8 +245,8 @@ } -void* BufferBlob::operator new(size_t s, unsigned size) throw() { - void* p = CodeCache::allocate(size); +void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() { + void* p = CodeCache::allocate(size, is_critical); return p; } @@ -277,7 +277,10 @@ unsigned int size = allocation_size(cb, sizeof(AdapterBlob)); { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) AdapterBlob(size, cb); + // The parameter 'true' indicates a critical memory allocation. 
+ // This means that CodeCacheMinimumFreeSpace is used, if necessary + const bool is_critical = true; + blob = new (size, is_critical) AdapterBlob(size, cb); } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); @@ -299,7 +302,10 @@ size += round_to(buffer_size, oopSize); { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) MethodHandlesAdapterBlob(size); + // The parameter 'true' indicates a critical memory allocation. + // This means that CodeCacheMinimumFreeSpace is used, if necessary + const bool is_critical = true; + blob = new (size, is_critical) MethodHandlesAdapterBlob(size); } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/code/codeBlob.hpp --- a/src/share/vm/code/codeBlob.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/code/codeBlob.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -210,7 +210,7 @@ BufferBlob(const char* name, int size); BufferBlob(const char* name, int size, CodeBuffer* cb); - void* operator new(size_t s, unsigned size) throw(); + void* operator new(size_t s, unsigned size, bool is_critical = false) throw(); public: // Creation @@ -254,7 +254,6 @@ class MethodHandlesAdapterBlob: public BufferBlob { private: MethodHandlesAdapterBlob(int size) : BufferBlob("MethodHandles adapters", size) {} - MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {} public: // Creation diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/code/dependencies.cpp --- a/src/share/vm/code/dependencies.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/code/dependencies.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1024,8 +1024,8 @@ Klass* k = ctxk; Method* lm = k->lookup_method(m->name(), m->signature()); if (lm == NULL && k->oop_is_instance()) { - // It might be an abstract interface method, devoid of mirandas. - lm = ((InstanceKlass*)k)->lookup_method_in_all_interfaces(m->name(), + // It might be an interface method + lm = ((InstanceKlass*)k)->lookup_method_in_ordered_interfaces(m->name(), m->signature()); } if (lm == m) diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/code/nmethod.cpp --- a/src/share/vm/code/nmethod.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/code/nmethod.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -127,6 +127,7 @@ // PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation. // (In the latter two cases, they like other stats are printed to the log only.) +#ifndef PRODUCT // These variables are put into one block to reduce relocations // and make it simpler to print from the debugger. 
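
Before continuing with the nmethod statistics, a note on the is_critical flag threaded through BufferBlob::operator new in the codeBlob.cpp hunk above: it lets a few must-succeed allocations (adapter blobs) consume the CodeCacheMinimumFreeSpace reserve that ordinary requests must leave untouched. A toy allocator showing the effect of the flag; CodeArena is illustrative, not the real CodeCache.

#include <cstddef>
#include <cstdio>

// Illustrative bump allocator: only critical requests may dip into the
// reserve (standing in for CodeCacheMinimumFreeSpace).
struct CodeArena {
  size_t capacity;
  size_t reserve;
  size_t used;

  bool allocate(size_t size, bool is_critical) {
    size_t limit = is_critical ? capacity : capacity - reserve;
    if (used + size > limit) return false; // ordinary requests fail first
    used += size;
    return true;
  }
};

int main() {
  CodeArena arena = { 1024, 256, 0 };
  std::printf("ordinary 800: %d\n", (int)arena.allocate(800, false)); // 0: would eat the reserve
  std::printf("critical 800: %d\n", (int)arena.allocate(800, true));  // 1: allowed to use it
  return 0;
}
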
struct java_nmethod_stats_struct { @@ -254,7 +255,7 @@ unknown_java_nmethod_stats.note_nmethod(nm); } } - +#endif //--------------------------------------------------------------------------------- @@ -557,7 +558,7 @@ code_buffer, frame_size, basic_lock_owner_sp_offset, basic_lock_sp_offset, oop_maps); - if (nm != NULL) native_nmethod_stats.note_native_nmethod(nm); + NOT_PRODUCT(if (nm != NULL) note_java_nmethod(nm)); if (PrintAssembly && nm != NULL) { Disassembler::decode(nm); } @@ -593,7 +594,7 @@ nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size); - if (nm != NULL) note_java_nmethod(nm); + NOT_PRODUCT(if (nm != NULL) note_java_nmethod(nm)); if (PrintAssembly && nm != NULL) { Disassembler::decode(nm); } @@ -677,21 +678,18 @@ // record this nmethod as dependent on this klass InstanceKlass::cast(klass)->add_dependent_nmethod(nm); } - } - if (nm != NULL) note_java_nmethod(nm); - if (PrintAssembly && nm != NULL) { - Disassembler::decode(nm); + NOT_PRODUCT(if (nm != NULL) note_java_nmethod(nm)); + if (PrintAssembly) { + Disassembler::decode(nm); + } } } - - // verify nmethod - debug_only(if (nm) nm->verify();) // might block - + // Do verification and logging outside CodeCache_lock. if (nm != NULL) { + // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet. + DEBUG_ONLY(nm->verify();) nm->log_new_nmethod(); } - - // done return nm; } @@ -1369,7 +1367,7 @@ set_osr_link(NULL); //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods - NMethodSweeper::notify(); + NMethodSweeper::report_state_change(this); } void nmethod::invalidate_osr_method() { @@ -1403,7 +1401,9 @@ } } -// Common functionality for both make_not_entrant and make_zombie +/** + * Common functionality for both make_not_entrant and make_zombie + */ bool nmethod::make_not_entrant_or_zombie(unsigned int state) { assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); assert(!is_zombie(), "should not already be a zombie"); @@ -1534,9 +1534,7 @@ tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", this, this->method()->name_and_sig_as_C_string(), (state == not_entrant) ? "not entrant" : "zombie"); } - // Make sweeper aware that there is a zombie method that needs to be removed - NMethodSweeper::notify(); - + NMethodSweeper::report_state_change(this); return true; } @@ -2533,20 +2531,23 @@ void nmethod::verify_interrupt_point(address call_site) { - // This code does not work in release mode since - // owns_lock only is available in debug mode. - CompiledIC* ic = NULL; - Thread *cur = Thread::current(); - if (CompiledIC_lock->owner() == cur || - ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) && - SafepointSynchronize::is_at_safepoint())) { - ic = CompiledIC_at(this, call_site); - CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); - } else { - MutexLocker ml_verify (CompiledIC_lock); - ic = CompiledIC_at(this, call_site); + // Verify IC only when nmethod installation is finished. 
+ bool is_installed = (method()->code() == this) // nmethod is in state 'alive' and installed + || !this->is_in_use(); // nmethod is installed, but not in 'alive' state + if (is_installed) { + Thread *cur = Thread::current(); + if (CompiledIC_lock->owner() == cur || + ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) && + SafepointSynchronize::is_at_safepoint())) { + CompiledIC_at(this, call_site); + CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); + } else { + MutexLocker ml_verify (CompiledIC_lock); + CompiledIC_at(this, call_site); + } } - PcDesc* pd = pc_desc_at(ic->end_of_call()); + + PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address()); assert(pd != NULL, "PcDesc must exist"); for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(), @@ -3076,8 +3077,6 @@ ImplicitExceptionTable(this).print(code_begin()); } -#endif // PRODUCT - void nmethod::print_statistics() { ttyLocker ttyl; if (xtty != NULL) xtty->head("statistics type='nmethod'"); @@ -3097,3 +3096,4 @@ Dependencies::print_statistics(); if (xtty != NULL) xtty->tail("statistics"); } +#endif // PRODUCT diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/code/nmethod.hpp --- a/src/share/vm/code/nmethod.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/code/nmethod.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -696,7 +696,7 @@ // Prints a comment for one native instruction (reloc info, pc desc) void print_code_comment_on(outputStream* st, int column, address begin, address end); - static void print_statistics(); + static void print_statistics() PRODUCT_RETURN; // Compiler task identification. Note that all OSR methods // are numbered in an independent sequence if CICountOSR is true, diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/compiler/abstractCompiler.cpp --- a/src/share/vm/compiler/abstractCompiler.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/compiler/abstractCompiler.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -24,41 +24,42 @@ #include "precompiled.hpp" #include "compiler/abstractCompiler.hpp" +#include "compiler/compileBroker.hpp" #include "runtime/mutexLocker.hpp" -void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) { - if (*state != initialized) { + +bool AbstractCompiler::should_perform_init() { + if (_compiler_state != initialized) { + MutexLocker only_one(CompileThread_lock); - // We are thread in native here... - CompilerThread* thread = CompilerThread::current(); - bool do_initialization = false; - { - ThreadInVMfromNative tv(thread); - ResetNoHandleMark rnhm; - MutexLocker only_one(CompileThread_lock, thread); - if ( *state == uninitialized) { - do_initialization = true; - *state = initializing; - } else { - while (*state == initializing ) { - CompileThread_lock->wait(); - } + if (_compiler_state == uninitialized) { + _compiler_state = initializing; + return true; + } else { + while (_compiler_state == initializing) { + CompileThread_lock->wait(); } } - if (do_initialization) { - // We can not hold any locks here since JVMTI events may call agents + } + return false; +} - // Compiler(s) run as native - - (*f)(); - - // To in_vm so we can use the lock +bool AbstractCompiler::should_perform_shutdown() { + // Since this method can be called by multiple threads, the lock ensures atomicity of + // decrementing '_num_compiler_threads' and the following operations. 
+ MutexLocker only_one(CompileThread_lock); + _num_compiler_threads--; + assert (CompileBroker::is_compilation_disabled_forever(), "Must be set, otherwise thread waits forever"); - ThreadInVMfromNative tv(thread); - ResetNoHandleMark rnhm; - MutexLocker only_one(CompileThread_lock, thread); - assert(*state == initializing, "wrong state"); - *state = initialized; - CompileThread_lock->notify_all(); - } + // Only the last thread will perform shutdown operations + if (_num_compiler_threads == 0) { + return true; } + return false; } + +void AbstractCompiler::set_state(int state) { + // Ensure that state is only set by one thread at a time + MutexLocker only_one(CompileThread_lock); + _compiler_state = state; + CompileThread_lock->notify_all(); +} diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/compiler/abstractCompiler.hpp --- a/src/share/vm/compiler/abstractCompiler.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/compiler/abstractCompiler.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -30,33 +30,59 @@ typedef void (*initializer)(void); #ifdef GRAAL -class CompilerStatistics { +// Per-compiler statistics +class CompilerStatistics VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; + + class Data VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; + public: + elapsedTimer _time; // time spent compiling + int _bytes; // number of bytecodes compiled, including inlined bytecodes + int _count; // number of compilations + Data() : _bytes(0), _count(0) {} + void update(elapsedTimer time, int bytes) { + _time.add(time); + _bytes += bytes; + _count++; + } + }; + public: - elapsedTimer _t_osr_compilation; - elapsedTimer _t_standard_compilation; - int _sum_osr_bytes_compiled; - int _sum_standard_bytes_compiled; - CompilerStatistics() : _sum_osr_bytes_compiled(0), _sum_standard_bytes_compiled(0) {} + Data _standard; // stats for non-OSR compilations + Data _osr; // stats for OSR compilations + int _nmethods_size; // + int _nmethods_code_size; + int bytes_per_second() { + int bytes = _standard._bytes + _osr._bytes; + if (bytes == 0) { + return 0; + } + double seconds = _standard._time.seconds() + _osr._time.seconds(); + return seconds == 0.0 ? 0 : (int) (bytes / seconds); + } + CompilerStatistics() : _nmethods_size(0), _nmethods_code_size(0) {} }; #endif class AbstractCompiler : public CHeapObj<mtCompiler> { private: - bool _is_initialized; // Mark whether compiler object is initialized + volatile int _num_compiler_threads; protected: + volatile int _compiler_state; // Used for tracking global state of compiler runtime initialization - enum { uninitialized, initializing, initialized }; + enum { uninitialized, initializing, initialized, failed, shut_down }; + + // This method returns true for the first compiler thread that reaches this method. + // This thread will initialize the compiler runtime. + bool should_perform_init(); // The (closed set) of concrete compiler classes. Using a tag like this // avoids a confusing use of macros around the definition of the // 'is_' methods.
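Taken together, should_perform_init(), set_state(), and should_perform_shutdown() form a small handshake that serializes one-time compiler-runtime setup and teardown across compiler threads. The sketch below is illustrative only and is not part of this patch; MyCompiler and init_runtime_stubs() are hypothetical names standing in for a concrete AbstractCompiler subclass and its one-time setup work.

    // Illustrative sketch, assuming a hypothetical subclass of AbstractCompiler.
    void MyCompiler::initialize() {
      // Exactly one thread sees 'true' here; the others block inside
      // should_perform_init() until the state leaves 'initializing'.
      if (should_perform_init()) {
        bool ok = init_runtime_stubs();        // hypothetical one-time setup
        // set_state() takes CompileThread_lock and notifies all waiters.
        set_state(ok ? initialized : failed);
      }
    }

On the teardown side, every compiler thread eventually calls should_perform_shutdown(); it decrements _num_compiler_threads under the same lock, and only the last thread gets true back and performs the actual cleanup (see shutdown_compiler_runtime() in compileBroker.cpp below).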
enum Type { c1, c2, shark, graal }; - // This method will call the initialization method "f" once (per compiler class/subclass) - // and do so without holding any locks - void initialize_runtimes(initializer f, volatile int* state); - private: Type _type; @@ -65,7 +91,11 @@ #endif public: - AbstractCompiler(Type type) : _is_initialized(false), _type(type) {} + AbstractCompiler(Type type) : _type(type), _compiler_state(uninitialized), _num_compiler_threads(0) {} + + // This function determines the compiler thread that will perform the + // shutdown of the corresponding compiler runtime. + bool should_perform_shutdown(); // Name of this compiler virtual const char* name() = 0; @@ -82,17 +112,18 @@ bool is_graal() { return _type == graal; } // Customization - virtual bool needs_stubs () = 0; + virtual void initialize () = 0; - void mark_initialized() { _is_initialized = true; } - bool is_initialized() { return _is_initialized; } + void set_num_compiler_threads(int num) { _num_compiler_threads = num; } + int num_compiler_threads() { return _num_compiler_threads; } - virtual void initialize() = 0; - + // Get/set state of compiler objects + bool is_initialized() { return _compiler_state == initialized; } + bool is_failed () { return _compiler_state == failed; } + void set_state (int state); + void set_shut_down () { set_state(shut_down); } // Compilation entry point for methods - virtual void compile_method(ciEnv* env, - ciMethod* target, - int entry_bci) { + virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci) { ShouldNotReachHere(); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/compiler/compileBroker.cpp --- a/src/share/vm/compiler/compileBroker.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/compiler/compileBroker.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -129,6 +129,7 @@ bool CompileBroker::_initialized = false; volatile bool CompileBroker::_should_block = false; +volatile jint CompileBroker::_print_compilation_warning = 0; volatile jint CompileBroker::_should_compile_new_jobs = run_compilation; // The installed compiler(s) @@ -189,7 +190,7 @@ CompileQueue* CompileBroker::_c1_method_queue = NULL; CompileTask* CompileBroker::_task_free_list = NULL; -GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL; +GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL; class CompilationLog : public StringEventLog { @@ -590,9 +591,6 @@ -// ------------------------------------------------------------------ -// CompileQueue::add -// // Add a CompileTask to a CompileQueue void CompileQueue::add(CompileTask* task) { assert(lock()->owned_by_self(), "must own lock"); @@ -629,6 +627,16 @@ lock()->notify_all(); } +void CompileQueue::delete_all() { + assert(lock()->owned_by_self(), "must own lock"); + CompileTask* task = _first; + while (task != NULL) { + CompileTask* next = task->next(); // read the link before deleting the task + delete task; + task = next; + } + _first = NULL; +} + // ------------------------------------------------------------------ // CompileQueue::get // @@ -643,6 +651,11 @@ // case we perform code cache sweeps to free memory such that we can re-enable // compilation. while (_first == NULL) { + // Exit loop if compilation is disabled forever + if (CompileBroker::is_compilation_disabled_forever()) { + return NULL; + } + if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) { // Wait a certain amount of time to possibly do another sweep. // We must wait until stack scanning has happened so that we can @@ -667,9 +680,17 @@ // remains unchanged.
This behavior is desired, since we want to keep // the stable state, i.e., we do not want to evict methods from the // code cache if it is unnecessary. - lock()->wait(); + // We need a timed wait here, since compiler threads can exit if compilation + // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads + // is not critical and we do not want idle compiler threads to wake up too often. + lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000); } } + + if (CompileBroker::is_compilation_disabled_forever()) { + return NULL; + } + CompileTask* task = CompilationPolicy::policy()->select_task(this); remove(task); return task; @@ -763,6 +784,10 @@ void CompileBroker::compilation_init() { _last_method_compiled[0] = '\0'; + // No need to initialize compilation system if we do not use it. + if (!UseCompiler) { + return; + } #ifndef SHARK // Set the interface to the current compiler(s). int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple); @@ -902,10 +927,8 @@ } - -// ------------------------------------------------------------------ -// CompileBroker::make_compiler_thread -CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS) { +CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, + AbstractCompiler* comp, TRAPS) { CompilerThread* compiler_thread = NULL; Klass* k = @@ -972,6 +995,7 @@ java_lang_Thread::set_daemon(thread_oop()); compiler_thread->set_threadObj(thread_oop()); + compiler_thread->set_compiler(comp); Threads::add(compiler_thread); Thread::start(compiler_thread); } @@ -983,25 +1007,24 @@ } -// ------------------------------------------------------------------ -// CompileBroker::init_compiler_threads -// -// Initialize the compilation queue void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) { EXCEPTION_MARK; #if !defined(ZERO) && !defined(SHARK) && !defined(GRAALVM) assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?"); #endif // !ZERO && !SHARK && !GRAALVM + // Initialize the compilation queue if (c2_compiler_count > 0) { _c2_method_queue = new CompileQueue("C2MethodQueue", MethodCompileQueue_lock); + _compilers[1]->set_num_compiler_threads(c2_compiler_count); } if (c1_compiler_count > 0) { _c1_method_queue = new CompileQueue("C1MethodQueue", MethodCompileQueue_lock); + _compilers[0]->set_num_compiler_threads(c1_compiler_count); } int compiler_count = c1_compiler_count + c2_compiler_count; - _method_threads = + _compiler_threads = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true); char name_buffer[256]; @@ -1009,21 +1032,22 @@ // Create a name for our thread. sprintf(name_buffer, "C2 CompilerThread%d", i); CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK); - CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK); - _method_threads->append(new_thread); + // Shark and C2 + CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK); + _compiler_threads->append(new_thread); } for (int i = c2_compiler_count; i < compiler_count; i++) { // Create a name for our thread.
sprintf(name_buffer, "C1 CompilerThread%d", i); CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK); - CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK); - _method_threads->append(new_thread); + // C1 + CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK); + _compiler_threads->append(new_thread); } if (UsePerfData) { - PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, - compiler_count, CHECK); + PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK); } } @@ -1040,27 +1064,6 @@ } // ------------------------------------------------------------------ -// CompileBroker::is_idle -bool CompileBroker::is_idle() { - if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) { - return false; - } else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) { - return false; - } else { - int num_threads = _method_threads->length(); - for (int i=0; i<num_threads; i++) { - if (_method_threads->at(i)->task() != NULL) { - return false; - } - } - - // No pending or active compilations. - return true; - } -} - - -// ------------------------------------------------------------------ // CompileBroker::compile_method // // Request compilation of a method. @@ -1140,7 +1143,7 @@ return; } #ifdef GRAALVM - if (!JavaThread::current()->is_compiling()) { + if (!JavaThread::current()->is_graal_compiling()) { GraalCompiler::instance()->compile_method(method, osr_bci, is_compile_blocking(method, osr_bci)); } else { // Recursive compile request => ignore. @@ -1318,13 +1321,6 @@ method->jmethod_id(); } - // If the compiler is shut off due to code cache getting full - // fail out now so blocking compiles dont hang the java thread - if (!should_compile_new_jobs()) { - CompilationPolicy::policy()->delay_compilation(method()); - return NULL; - } - // do the compilation if (method->is_native()) { if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) { @@ -1334,11 +1330,22 @@ MutexLocker locker(MethodCompileQueue_lock, THREAD); compile_id = assign_compile_id(method, standard_entry_bci); } + // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that + // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime). + // + // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter + // in this case. If we can't generate one and use it we can not execute the out-of-line method handle calls. (void) AdapterHandlerLibrary::create_native_wrapper(method, compile_id); } else { return NULL; } } else { + // If the compiler is shut off due to code cache getting full + // fail out now so blocking compiles don't hang the java thread + if (!should_compile_new_jobs()) { + CompilationPolicy::policy()->delay_compilation(method()); + return NULL; + } compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD); } @@ -1570,6 +1577,101 @@ free_task(task); } +// Initialize compiler thread(s) + compiler object(s). The postcondition +// of this function is that the compiler runtimes are initialized and that +// compiler threads can start compiling.
+bool CompileBroker::init_compiler_runtime() { + CompilerThread* thread = CompilerThread::current(); + AbstractCompiler* comp = thread->compiler(); + // Final sanity check - the compiler object must exist + guarantee(comp != NULL, "Compiler object must exist"); + + int system_dictionary_modification_counter; + { + MutexLocker locker(Compile_lock, thread); + system_dictionary_modification_counter = SystemDictionary::number_of_modifications(); + } + + { + // Must switch to native to allocate ci_env + ThreadToNativeFromVM ttn(thread); + ciEnv ci_env(NULL, system_dictionary_modification_counter); + // Cache Jvmti state + ci_env.cache_jvmti_state(); + // Cache DTrace flags + ci_env.cache_dtrace_flags(); + + // Switch back to VM state to do compiler initialization + ThreadInVMfromNative tv(thread); + ResetNoHandleMark rnhm; + + + if (!comp->is_shark()) { + // Perform per-thread and global initializations + comp->initialize(); + } + } + + if (comp->is_failed()) { + disable_compilation_forever(); + // If compiler initialization failed, no compiler thread that is specific to a + // particular compiler runtime will ever start to compile methods. + + shutdown_compiler_runtime(comp, thread); + return false; + } + + // C1 specific check + if (comp->is_c1() && (thread->get_buffer_blob() == NULL)) { + warning("Initialization of %s thread failed (no space to run compilers)", thread->name()); + return false; + } + + return true; +} + +// If C1 and/or C2 initialization failed, we shut down all compilation. +// We do this to keep things simple. This can be changed if it ever turns out to be +// a problem. +void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { + // Free buffer blob, if allocated + if (thread->get_buffer_blob() != NULL) { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + CodeCache::free(thread->get_buffer_blob()); + } + + if (comp->should_perform_shutdown()) { + // There are two reasons for shutting down the compiler + // 1) compiler runtime initialization failed + // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing + warning("Shutting down compiler %s (no space to run compilers)", comp->name()); + + // Only one thread per compiler runtime object enters here + // Set state to shut down + comp->set_shut_down(); + + MutexLocker mu(MethodCompileQueue_lock, thread); + CompileQueue* queue; + if (_c1_method_queue != NULL) { + _c1_method_queue->delete_all(); + queue = _c1_method_queue; + _c1_method_queue = NULL; + delete queue; + } + + if (_c2_method_queue != NULL) { + _c2_method_queue->delete_all(); + queue = _c2_method_queue; + _c2_method_queue = NULL; + delete queue; + } + + // We could delete compiler runtimes also. However, there are references to + // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then + // fail. This can be done later if necessary. + } +} + // ------------------------------------------------------------------ // CompileBroker::compiler_thread_loop // @@ -1577,7 +1679,6 @@ void CompileBroker::compiler_thread_loop() { CompilerThread* thread = CompilerThread::current(); CompileQueue* queue = thread->queue(); - // For the thread that initializes the ciObjectFactory // this resource mark holds all the shared objects ResourceMark rm;
- HandleMark hm(thread); + // If compiler thread/runtime initialization fails, exit the compiler thread + if (!init_compiler_runtime()) { + return; + } - if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) { - // the code cache is really full - handle_full_code_cache(); - } + // Poll for new compilation tasks as long as the JVM runs. Compilation + // should only be disabled if something went wrong while initializing the + // compiler runtimes. This, in turn, should not happen. The only known case + // when compiler runtime initialization fails is if there is not enough free + // space in the code cache to generate the necessary stubs, etc. + while (!is_compilation_disabled_forever()) { + // We need this HandleMark to avoid leaking VM handles. + HandleMark hm(thread); - CompileTask* task = queue->get(); + if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) { + // the code cache is really full + handle_full_code_cache(); + } - // Give compiler threads an extra quanta. They tend to be bursty and - // this helps the compiler to finish up the job. - if( CompilerThreadHintNoPreempt ) - os::hint_no_preempt(); + CompileTask* task = queue->get(); + if (task == NULL) { + continue; + } - // trace per thread time and compile statistics - CompilerCounters* counters = ((CompilerThread*)thread)->counters(); - PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter()); + // Give compiler threads an extra quanta. They tend to be bursty and + // this helps the compiler to finish up the job. + if( CompilerThreadHintNoPreempt ) + os::hint_no_preempt(); - // Assign the task to the current thread. Mark this compilation - // thread as active for the profiler. - CompileTaskWrapper ctw(task); - nmethodLocker result_handle; // (handle for the nmethod produced by this task) - task->set_code_handle(&result_handle); - methodHandle method(thread, task->method()); + // trace per thread time and compile statistics + CompilerCounters* counters = ((CompilerThread*)thread)->counters(); + PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter()); - // Never compile a method if breakpoints are present in it - if (method()->number_of_breakpoints() == 0) { - // Compile the method. - if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { + // Assign the task to the current thread. Mark this compilation + // thread as active for the profiler. + CompileTaskWrapper ctw(task); + nmethodLocker result_handle; // (handle for the nmethod produced by this task) + task->set_code_handle(&result_handle); + methodHandle method(thread, task->method()); + + // Never compile a method if breakpoints are present in it + if (method()->number_of_breakpoints() == 0) { + // Compile the method. + if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { #ifdef COMPILER1 - // Allow repeating compilations for the purpose of benchmarking - // compile speed. This is not useful for customers. - if (CompilationRepeat != 0) { - int compile_count = CompilationRepeat; - while (compile_count > 0) { - invoke_compiler_on_method(task); - nmethod* nm = method->code(); - if (nm != NULL) { - nm->make_zombie(); - method->clear_code(); - } - compile_count--; + // Allow repeating compilations for the purpose of benchmarking + // compile speed. This is not useful for customers. 
+ if (CompilationRepeat != 0) { + int compile_count = CompilationRepeat; + while (compile_count > 0) { + invoke_compiler_on_method(task); + nmethod* nm = method->code(); + if (nm != NULL) { + nm->make_zombie(); + method->clear_code(); } + compile_count--; } + } #endif /* COMPILER1 */ - invoke_compiler_on_method(task); + invoke_compiler_on_method(task); } else { + // After compilation is disabled, remove remaining methods from queue + method->clear_queued_for_compilation(); } } } + + // Shut down compiler runtime + shutdown_compiler_runtime(thread->compiler(), thread); } - // ------------------------------------------------------------------ // CompileBroker::init_compiler_thread_log // @@ -1933,11 +2047,10 @@ #endif } -// ------------------------------------------------------------------ -// CompileBroker::handle_full_code_cache -// -// The CodeCache is full. Print out warning and disable compilation or -// try code cache cleaning so compilation can continue later. +/** + * The CodeCache is full. Print out a warning and disable compilation, + * or try code cache cleaning so compilation can continue later. + */ void CompileBroker::handle_full_code_cache() { UseInterpreter = true; if (UseCompiler || AlwaysCompileLoopMethods ) { @@ -1954,12 +2067,9 @@ xtty->stamp(); xtty->end_elem(); } - warning("CodeCache is full. Compiler has been disabled."); - warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); CodeCache::report_codemem_full(); - #ifndef PRODUCT if (CompileTheWorld || ExitOnFullCodeCache) { codecache_print(/* detailed= */ true); @@ -1972,14 +2082,22 @@ // Since code cache is full, immediately stop new compiles if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { NMethodSweeper::log_sweep("disable_compiler"); - NMethodSweeper::possibly_sweep(); } + // Switch to 'vm_state'. This ensures that possibly_sweep() can be called + // regardless of the state the current thread is in. + ThreadInVMfromUnknown in_vm; + NMethodSweeper::possibly_sweep(); } else { - UseCompiler = false; - AlwaysCompileLoopMethods = false; + disable_compilation_forever(); + } + + // Print the warning only once + if (should_print_compiler_warning()) { + warning("CodeCache is full. Compiler has been disabled."); + warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); + codecache_print(/* detailed= */ true); } } - codecache_print(/* detailed= */ true); } // ------------------------------------------------------------------ @@ -2125,21 +2243,19 @@ _peak_compilation_time = time.milliseconds() > _peak_compilation_time ?
time.milliseconds() : _peak_compilation_time; if (CITime) { + int bytes_compiled = method->code_size() + task->num_inlined_bytecodes(); + GRAAL_ONLY(CompilerStatistics* stats = compiler(task->comp_level())->stats();) if (is_osr) { _t_osr_compilation.add(time); - _sum_osr_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); -#ifdef GRAAL - compiler(task->comp_level())->stats()->_t_osr_compilation.add(time); - compiler(task->comp_level())->stats()->_sum_osr_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); -#endif + _sum_osr_bytes_compiled += bytes_compiled; + GRAAL_ONLY(stats->_osr.update(time, bytes_compiled);) } else { _t_standard_compilation.add(time); _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); -#ifdef GRAAL - compiler(task->comp_level())->stats()->_t_standard_compilation.add(time); - compiler(task->comp_level())->stats()->_sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); -#endif + GRAAL_ONLY(stats->_standard.update(time, bytes_compiled);) } + GRAAL_ONLY(stats->_nmethods_size += code->total_size();) + GRAAL_ONLY(stats->_nmethods_code_size += code->insts_size();) } if (UsePerfData) { @@ -2196,16 +2312,82 @@ } void CompileBroker::print_times() { +#ifdef GRAAL + elapsedTimer standard_compilation; + elapsedTimer total_compilation; + elapsedTimer osr_compilation; + + int standard_bytes_compiled = 0; + int osr_bytes_compiled = 0; + + int standard_compile_count = 0; + int osr_compile_count = 0; + int total_compile_count = 0; + + int nmethods_size = 0; + int nmethods_code_size = 0; + bool printedHeader = false; + + for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) { + AbstractCompiler* comp = _compilers[i]; + if (comp != NULL) { + if (!printedHeader) { + printedHeader = true; + tty->cr(); + tty->print_cr("Individual compiler times (for compiled methods only)"); + tty->print_cr("------------------------------------------------"); + tty->cr(); + } + CompilerStatistics* stats = comp->stats(); + + standard_compilation.add(stats->_standard._time); + osr_compilation.add(stats->_osr._time); + + standard_bytes_compiled += stats->_standard._bytes; + osr_bytes_compiled += stats->_osr._bytes; + + standard_compile_count += stats->_standard._count; + osr_compile_count += stats->_osr._count; + + nmethods_size += stats->_nmethods_size; + nmethods_code_size += stats->_nmethods_code_size; + + tty->print_cr(" %s { speed: %d bytes/s; standard: %6.3f s, %d bytes, %d methods; osr: %6.3f s, %d bytes, %d methods; nmethods_size: %d bytes; nmethods_code_size: %d bytes}", + comp->name(), stats->bytes_per_second(), + stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count, + stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count, + stats->_nmethods_size, stats->_nmethods_code_size); + } + } + total_compile_count = osr_compile_count + standard_compile_count; + total_compilation.add(osr_compilation); + total_compilation.add(standard_compilation); +#else + elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation; + elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation; + elapsedTimer total_compilation = CompileBroker::_t_total_compilation; + + int standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled; + int osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled; + + int standard_compile_count = CompileBroker::_total_standard_compile_count; + int osr_compile_count = 
CompileBroker::_total_osr_compile_count; + int total_compile_count = CompileBroker::_total_compile_count; + + int nmethods_size = CompileBroker::_sum_nmethod_size; + int nmethods_code_size = CompileBroker::_sum_nmethod_code_size; +#endif + tty->cr(); tty->print_cr("Accumulated compiler times (for compiled methods only)"); tty->print_cr("------------------------------------------------"); //0000000000111111111122222222223333333333444444444455555555556666666666 //0123456789012345678901234567890123456789012345678901234567890123456789 - tty->print_cr(" Total compilation time : %6.3f s", CompileBroker::_t_total_compilation.seconds()); + tty->print_cr(" Total compilation time : %6.3f s", total_compilation.seconds()); tty->print_cr(" Standard compilation : %6.3f s, Average : %2.3f", - CompileBroker::_t_standard_compilation.seconds(), - CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count); - tty->print_cr(" On stack replacement : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count); + standard_compilation.seconds(), + standard_compilation.seconds() / standard_compile_count); + tty->print_cr(" On stack replacement : %6.3f s, Average : %2.3f", osr_compilation.seconds(), osr_compilation.seconds() / osr_compile_count); AbstractCompiler *comp = compiler(CompLevel_simple); if (comp != NULL) { @@ -2216,35 +2398,19 @@ comp->print_timers(); } tty->cr(); - tty->print_cr(" Total compiled methods : %6d methods", CompileBroker::_total_compile_count); - tty->print_cr(" Standard compilation : %6d methods", CompileBroker::_total_standard_compile_count); - tty->print_cr(" On stack replacement : %6d methods", CompileBroker::_total_osr_compile_count); - int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled; + tty->print_cr(" Total compiled methods : %6d methods", total_compile_count); + tty->print_cr(" Standard compilation : %6d methods", standard_compile_count); + tty->print_cr(" On stack replacement : %6d methods", osr_compile_count); + int tcb = osr_bytes_compiled + standard_bytes_compiled; tty->print_cr(" Total compiled bytecodes : %6d bytes", tcb); - tty->print_cr(" Standard compilation : %6d bytes", CompileBroker::_sum_standard_bytes_compiled); - tty->print_cr(" On stack replacement : %6d bytes", CompileBroker::_sum_osr_bytes_compiled); - double tcs = CompileBroker::_t_total_compilation.seconds(); + tty->print_cr(" Standard compilation : %6d bytes", standard_bytes_compiled); + tty->print_cr(" On stack replacement : %6d bytes", osr_bytes_compiled); + double tcs = total_compilation.seconds(); int bps = tcs == 0.0 ? 0 : (int)(tcb / tcs); tty->print_cr(" Average compilation speed: %6d bytes/s", bps); -#ifdef GRAAL - for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) { - AbstractCompiler* comp = _compilers[i]; - if (comp != NULL) { - CompilerStatistics* stats = comp->stats(); - int bytecodes = stats->_sum_osr_bytes_compiled + stats->_sum_standard_bytes_compiled; - if (bytecodes != 0) { - double seconds = stats->_t_osr_compilation.seconds() + stats->_t_standard_compilation.seconds(); - int bps = seconds == 0.0 ?
0 : (int) (bytecodes / seconds); - tty->print_cr(" %7s compilation speed: %6d bytes/s {standard: %6.3f s, %6d bytes; osr: %6.3f s, %6d bytes}", - comp->name(), bps, stats->_t_standard_compilation.seconds(), stats->_sum_standard_bytes_compiled, - stats->_t_osr_compilation.seconds(), stats->_sum_osr_bytes_compiled); - } - } - } -#endif tty->cr(); - tty->print_cr(" nmethod code size : %6d bytes", CompileBroker::_sum_nmethod_code_size); - tty->print_cr(" nmethod total size : %6d bytes", CompileBroker::_sum_nmethod_size); + tty->print_cr(" nmethod code size : %6d bytes", nmethods_code_size); + tty->print_cr(" nmethod total size : %6d bytes", nmethods_size); } // Debugging output for failure diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/compiler/compileBroker.hpp --- a/src/share/vm/compiler/compileBroker.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/compiler/compileBroker.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -213,8 +213,12 @@ // Redefine Classes support void mark_on_stack(); + void delete_all(); + void print(); - void print(); + ~CompileQueue() { + assert (is_empty(), "Compile Queue must be empty"); + } }; // CompileTaskWrapper @@ -266,7 +270,7 @@ static CompileQueue* _c1_method_queue; static CompileTask* _task_free_list; - static GrowableArray<CompilerThread*>* _method_threads; + static GrowableArray<CompilerThread*>* _compiler_threads; // performance counters static PerfCounter* _perf_total_compilation; @@ -311,7 +315,9 @@ static int _sum_nmethod_code_size; static long _peak_compilation_time; - static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS); + static volatile jint _print_compilation_warning; + + static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS); static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count); static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level); static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level); @@ -351,6 +357,9 @@ if (is_c1_compile(comp_level)) return _c1_method_queue; return NULL; } + static bool init_compiler_runtime(); + static void shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread); + public: enum { // The entry bci used for non-OSR compilations. @@ -378,9 +387,7 @@ const char* comment, Thread* thread); static void compiler_thread_loop(); - static uint get_compilation_id() { return _compilation_id; } - static bool is_idle(); // Set _should_block. // Call this from the VM, with Threads_lock held and a safepoint requested. @@ -391,8 +398,9 @@ enum { // Flags for toggling compiler activity - stop_compilation = 0, - run_compilation = 1 + stop_compilation = 0, + run_compilation = 1, + shutdown_compilation = 2 }; static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); } @@ -401,8 +409,22 @@ jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state); return (old == (1-new_state)); } + + static void disable_compilation_forever() { + UseCompiler = false; + AlwaysCompileLoopMethods = false; + Atomic::xchg(shutdown_compilation, &_should_compile_new_jobs); + } + + static bool is_compilation_disabled_forever() { + return _should_compile_new_jobs == shutdown_compilation; + } static void handle_full_code_cache(); - + // Ensures that the warning is only printed once.
+ static bool should_print_compiler_warning() { + jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0); + return old == 0; + } // Return total compilation ticks static jlong total_compilation_ticks() { return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -47,8 +47,9 @@ // ConcurrentMarkSweepPolicy methods // -ConcurrentMarkSweepPolicy::ConcurrentMarkSweepPolicy() { - initialize_all(); +void ConcurrentMarkSweepPolicy::initialize_alignments() { + _space_alignment = _gen_alignment = (uintx)Generation::GenGrain; + _heap_alignment = compute_heap_alignment(); } void ConcurrentMarkSweepPolicy::initialize_generations() { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -29,10 +29,11 @@ class ConcurrentMarkSweepPolicy : public TwoGenerationCollectorPolicy { protected: + void initialize_alignments(); void initialize_generations(); public: - ConcurrentMarkSweepPolicy(); + ConcurrentMarkSweepPolicy() {} ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -594,9 +594,9 @@ _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"), _completed_initialization(false), _collector_policy(cp), - _should_unload_classes(false), + _should_unload_classes(CMSClassUnloadingEnabled), _concurrent_cycles_since_last_unload(0), - _roots_scanning_options(0), + _roots_scanning_options(SharedHeap::SO_None), _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding), _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding), _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()), @@ -788,14 +788,6 @@ && _survivor_chunk_index == 0), "Error"); - // Choose what strong roots should be scanned depending on verification options - if (!CMSClassUnloadingEnabled) { - // If class unloading is disabled we want to include all classes into the root set. - add_root_scanning_option(SharedHeap::SO_AllClasses); - } else { - add_root_scanning_option(SharedHeap::SO_SystemClasses); - } - NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;) _gc_counters = new CollectorCounters("CMS", 1); _completed_initialization = true; @@ -2532,6 +2524,9 @@ // Snapshot the soft reference policy to be used in this collection cycle. 
ref_processor()->setup_policy(clear_all_soft_refs); + // Decide if class unloading should be done + update_should_unload_classes(); + bool init_mark_was_synchronous = false; // until proven otherwise while (_collectorState != Idling) { if (TraceCMSState) { @@ -3310,7 +3305,10 @@ || VerifyBeforeExit; const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + // We set the proper root for this CMS cycle here. if (should_unload_classes()) { // Should unload classes this cycle + remove_root_scanning_option(SharedHeap::SO_AllClasses); + add_root_scanning_option(SharedHeap::SO_SystemClasses); remove_root_scanning_option(rso); // Shrink the root set appropriately set_verifying(should_verify); // Set verification state for this cycle return; // Nothing else needs to be done at this time @@ -3318,6 +3316,9 @@ // Not unloading classes this cycle assert(!should_unload_classes(), "Inconsistency!"); + remove_root_scanning_option(SharedHeap::SO_SystemClasses); + add_root_scanning_option(SharedHeap::SO_AllClasses); + if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { // Include symbols, strings and code cache elements to prevent their resurrection. add_root_scanning_option(rso); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/g1AllocRegion.hpp --- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -55,7 +55,7 @@ // then _alloc_region is NULL and this object should not be used to // satisfy allocation requests (it was done this way to force the // correct use of init() and release()). - HeapRegion* _alloc_region; + HeapRegion* volatile _alloc_region; // It keeps track of the distinct number of regions that are used // for allocation in the active interval of this object, i.e., @@ -132,8 +132,9 @@ static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region); HeapRegion* get() const { + HeapRegion* hr = _alloc_region; // Make sure that the dummy region does not escape this class. - return (_alloc_region == _dummy_region) ? NULL : _alloc_region; + return (hr == _dummy_region) ? NULL : hr; } uint count() { return _count; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -2008,7 +2008,7 @@ size_t init_byte_size = collector_policy()->initial_heap_byte_size(); size_t max_byte_size = collector_policy()->max_heap_byte_size(); - size_t heap_alignment = collector_policy()->max_alignment(); + size_t heap_alignment = collector_policy()->heap_alignment(); // Ensure that the sizes are properly aligned. Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); @@ -6045,7 +6045,11 @@ // is dirty.
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); MemRegion mr(hr->bottom(), hr->pre_dummy_top()); - ct_bs->verify_dirty_region(mr); + if (hr->is_young()) { + ct_bs->verify_g1_young_region(mr); + } else { + ct_bs->verify_dirty_region(mr); + } } void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { @@ -6662,13 +6666,18 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); HeapRegion* hr = _g1h->heap_region_containing(obj); - assert(!hr->isHumongous(), "code root in humongous region?"); + assert(!hr->continuesHumongous(), + err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT + " starting at "HR_FORMAT, + _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); // HeapRegion::add_strong_code_root() avoids adding duplicate // entries but having duplicates is OK since we "mark" nmethods // as visited when we scan the strong code root lists during the GC. hr->add_strong_code_root(_nm); - assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?"); + assert(hr->rem_set()->strong_code_roots_list_contains(_nm), + err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT, + _nm, HR_FORMAT_PARAMS(hr))); } } @@ -6689,9 +6698,15 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); HeapRegion* hr = _g1h->heap_region_containing(obj); - assert(!hr->isHumongous(), "code root in humongous region?"); + assert(!hr->continuesHumongous(), + err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT + " starting at "HR_FORMAT, + _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); + hr->remove_strong_code_root(_nm); - assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?"); + assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), + err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT, + _nm, HR_FORMAT_PARAMS(hr))); } } @@ -6722,7 +6737,9 @@ class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure { public: bool doHeapRegion(HeapRegion *hr) { - assert(!hr->isHumongous(), "humongous region in collection set?"); + assert(!hr->isHumongous(), + err_msg("humongous region "HR_FORMAT" should not have been added to collection set", + HR_FORMAT_PARAMS(hr))); hr->migrate_strong_code_roots(); return false; } @@ -6802,9 +6819,13 @@ bool doHeapRegion(HeapRegion *hr) { HeapRegionRemSet* hrrs = hr->rem_set(); - if (hr->isHumongous()) { - // Code roots should never be attached to a humongous region - assert(hrrs->strong_code_roots_list_length() == 0, "sanity"); + if (hr->continuesHumongous()) { + // Code roots should never be attached to a continuation of a humongous region + assert(hrrs->strong_code_roots_list_length() == 0, + err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT + " starting at "HR_FORMAT", but has "INT32_FORMAT, + HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()), + hrrs->strong_code_roots_list_length())); return false; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -29,6 +29,7 @@ #include "gc_implementation/g1/g1CollectedHeap.hpp" #include "gc_implementation/g1/g1AllocRegion.inline.hpp" #include 
"gc_implementation/g1/g1CollectorPolicy.hpp" +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "utilities/taskqueue.hpp" @@ -134,7 +135,7 @@ assert(containing_hr->is_in(end - 1), "it should also contain end - 1"); MemRegion mr(start, end); - g1_barrier_set()->dirty(mr); + g1_barrier_set()->g1_mark_as_young(mr); } inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -313,27 +313,38 @@ // for the first time during initialization. _reserve_regions = 0; - initialize_all(); _collectionSetChooser = new CollectionSetChooser(); - _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags +} + +void G1CollectorPolicy::initialize_alignments() { + _space_alignment = HeapRegion::GrainBytes; + size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable); + size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); + _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size); } void G1CollectorPolicy::initialize_flags() { - set_min_alignment(HeapRegion::GrainBytes); - size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name()); - size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); - set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size)); + if (G1HeapRegionSize != HeapRegion::GrainBytes) { + FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes); + } + if (SurvivorRatio < 1) { vm_exit_during_initialization("Invalid survivor ratio specified"); } CollectorPolicy::initialize_flags(); + _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags } -G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) { - assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max"); - assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds"); - assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds"); +void G1CollectorPolicy::post_heap_initialize() { + uintx max_regions = G1CollectedHeap::heap()->max_regions(); + size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes; + if (max_young_size != MaxNewSize) { + FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size); + } +} +G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true), + _min_desired_young_length(0), _max_desired_young_length(0) { if (FLAG_IS_CMDLINE(NewRatio)) { if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio"); @@ -344,6 +355,15 @@ } } + if (NewSize > MaxNewSize) { + if (FLAG_IS_CMDLINE(MaxNewSize)) { + warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). 
" + "A new max generation size of " SIZE_FORMAT "k will be used.", + NewSize/K, MaxNewSize/K, NewSize/K); + } + MaxNewSize = NewSize; + } + if (FLAG_IS_CMDLINE(NewSize)) { _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), 1U); @@ -374,34 +394,48 @@ return MAX2(1U, default_value); } -void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { - assert(new_number_of_heap_regions > 0, "Heap must be initialized"); +void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) { + assert(number_of_heap_regions > 0, "Heap must be initialized"); switch (_sizer_kind) { case SizerDefaults: - _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions); - _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions); + *min_young_length = calculate_default_min_length(number_of_heap_regions); + *max_young_length = calculate_default_max_length(number_of_heap_regions); break; case SizerNewSizeOnly: - _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions); - _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length); + *max_young_length = calculate_default_max_length(number_of_heap_regions); + *max_young_length = MAX2(*min_young_length, *max_young_length); break; case SizerMaxNewSizeOnly: - _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions); - _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length); + *min_young_length = calculate_default_min_length(number_of_heap_regions); + *min_young_length = MIN2(*min_young_length, *max_young_length); break; case SizerMaxAndNewSize: // Do nothing. Values set on the command line, don't update them at runtime. break; case SizerNewRatio: - _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1); - _max_desired_young_length = _min_desired_young_length; + *min_young_length = number_of_heap_regions / (NewRatio + 1); + *max_young_length = *min_young_length; break; default: ShouldNotReachHere(); } - assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values"); + assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values"); +} + +uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) { + // We need to pass the desired values because recalculation may not update these + // values in some cases. + uint temp = _min_desired_young_length; + uint result = _max_desired_young_length; + recalculate_min_max_young_length(number_of_heap_regions, &temp, &result); + return result; +} + +void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { + recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length, + &_max_desired_young_length); } void G1CollectorPolicy::init() { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -136,8 +136,16 @@ uint calculate_default_min_length(uint new_number_of_heap_regions); uint calculate_default_max_length(uint new_number_of_heap_regions); + // Update the given values for minimum and maximum young gen length in regions + // given the number of heap regions depending on the kind of sizing algorithm. 
+ void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length); + public: G1YoungGenSizer(); + // Calculate the maximum length of the young gen given the number of regions + // depending on the sizing algorithm. + uint max_young_length(uint number_of_heap_regions); + void heap_size_changed(uint new_number_of_heap_regions); uint min_desired_young_length() { return _min_desired_young_length; @@ -165,13 +173,9 @@ G1MMUTracker* _mmu_tracker; + void initialize_alignments(); void initialize_flags(); - void initialize_all() { - initialize_flags(); - initialize_size_info(); - } - CollectionSetChooser* _collectionSetChooser; double _full_collection_start_sec; @@ -217,7 +221,6 @@ return _during_marking; } -private: enum PredictionConstants { TruncatedSeqLength = 10 }; @@ -665,8 +668,6 @@ BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; } - GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; } - bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); // Record the start and end of an evacuation pause. @@ -934,6 +935,7 @@ // Calculates survivor space parameters. void update_survivors_policy(); + virtual void post_heap_initialize(); }; // This should move to some place more general... diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/g1RemSet.cpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -377,11 +377,6 @@ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); dcqs.concatenate_logs(); - if (G1CollectedHeap::use_parallel_gc_threads()) { - // Don't set the number of workers here. It will be set - // when the task is run - // _seq_task->set_n_termination((int)n_workers()); - } guarantee( _cards_scanned == NULL, "invariant" ); _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC); for (uint i = 0; i < n_workers(); ++i) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp --- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -187,19 +187,23 @@ size_t code_root_elems() const { return _code_root_elems; } void print_rs_mem_info_on(outputStream * out, size_t total) { - out->print_cr(" %8dK (%5.1f%%) by %zd %s regions", round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name); + out->print_cr(" "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions", + round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name); } void print_cards_occupied_info_on(outputStream * out, size_t total) { - out->print_cr(" %8d (%5.1f%%) entries by %zd %s regions", cards_occupied(), cards_occupied_percent_of(total), amount(), _name); + out->print_cr(" "SIZE_FORMAT_W(8)" (%5.1f%%) entries by "SIZE_FORMAT" %s regions", + cards_occupied(), cards_occupied_percent_of(total), amount(), _name); } void print_code_root_mem_info_on(outputStream * out, size_t total) { - out->print_cr(" %8dK (%5.1f%%) by %zd %s regions", round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name); + out->print_cr(" "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions", + round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name); } void print_code_root_elems_info_on(outputStream * out, size_t total) { - out->print_cr(" %8d 
(%5.1f%%) elements by %zd %s regions", code_root_elems(), code_root_elems_percent_of(total), amount(), _name); + out->print_cr(" "SIZE_FORMAT_W(8)" (%5.1f%%) elements by "SIZE_FORMAT" %s regions", + code_root_elems(), code_root_elems_percent_of(total), amount(), _name); } }; @@ -327,14 +331,14 @@ out->print_cr("\n Recent concurrent refinement statistics"); out->print_cr(" Processed "SIZE_FORMAT" cards", num_concurrent_refined_cards()); - out->print_cr(" Of %d completed buffers:", num_processed_buf_total()); - out->print_cr(" %8d (%5.1f%%) by concurrent RS threads.", + out->print_cr(" Of "SIZE_FORMAT" completed buffers:", num_processed_buf_total()); + out->print_cr(" "SIZE_FORMAT_W(8)" (%5.1f%%) by concurrent RS threads.", num_processed_buf_total(), percent_of(num_processed_buf_rs_threads(), num_processed_buf_total())); - out->print_cr(" %8d (%5.1f%%) by mutator threads.", + out->print_cr(" "SIZE_FORMAT_W(8)" (%5.1f%%) by mutator threads.", num_processed_buf_mutator(), percent_of(num_processed_buf_mutator(), num_processed_buf_total())); - out->print_cr(" Did %d coarsenings.", num_coarsenings()); + out->print_cr(" Did "SIZE_FORMAT" coarsenings.", num_coarsenings()); out->print_cr(" Concurrent RS threads times (s)"); out->print(" "); for (uint i = 0; i < _num_vtimes; i++) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -70,6 +70,12 @@ if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { return false; } + + if (val == g1_young_gen) { + // the card is for a young gen region. We don't need to keep track of all pointers into young + return false; + } + // Cached bit can be installed either on a clean card or on a claimed card. jbyte new_val = val; if (val == clean_card_val()) { @@ -85,6 +91,19 @@ return true; } +void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) { + jbyte *const first = byte_for(mr.start()); + jbyte *const last = byte_after(mr.last()); + + memset(first, g1_young_gen, last - first); +} + +#ifndef PRODUCT +void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) { + verify_region(mr, g1_young_gen, true); +} +#endif + G1SATBCardTableLoggingModRefBS:: G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, int max_covered_regions) : @@ -97,7 +116,11 @@ void G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field, oop new_val) { - jbyte* byte = byte_for(field); + volatile jbyte* byte = byte_for(field); + if (*byte == g1_young_gen) { + return; + } + OrderAccess::storeload(); if (*byte != dirty_card) { *byte = dirty_card; Thread* thr = Thread::current(); @@ -129,7 +152,7 @@ void G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) { - jbyte* byte = byte_for(mr.start()); + volatile jbyte* byte = byte_for(mr.start()); jbyte* last_byte = byte_for(mr.last()); Thread* thr = Thread::current(); if (whole_heap) { @@ -138,25 +161,35 @@ byte++; } } else { - // Enqueue if necessary. - if (thr->is_Java_thread()) { - JavaThread* jt = (JavaThread*)thr; - while (byte <= last_byte) { - if (*byte != dirty_card) { - *byte = dirty_card; - jt->dirty_card_queue().enqueue(byte); + // skip all consecutive young cards + for (; byte <= last_byte && *byte == g1_young_gen; byte++); + + if (byte <= last_byte) { + OrderAccess::storeload(); + // Enqueue if necessary. 
+ if (thr->is_Java_thread()) { + JavaThread* jt = (JavaThread*)thr; + for (; byte <= last_byte; byte++) { + if (*byte == g1_young_gen) { + continue; + } + if (*byte != dirty_card) { + *byte = dirty_card; + jt->dirty_card_queue().enqueue(byte); + } } - byte++; - } - } else { - MutexLockerEx x(Shared_DirtyCardQ_lock, - Mutex::_no_safepoint_check_flag); - while (byte <= last_byte) { - if (*byte != dirty_card) { - *byte = dirty_card; - _dcqs.shared_dirty_card_queue()->enqueue(byte); + } else { + MutexLockerEx x(Shared_DirtyCardQ_lock, + Mutex::_no_safepoint_check_flag); + for (; byte <= last_byte; byte++) { + if (*byte == g1_young_gen) { + continue; + } + if (*byte != dirty_card) { + *byte = dirty_card; + _dcqs.shared_dirty_card_queue()->enqueue(byte); + } } - byte++; } } } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -38,7 +38,14 @@ // snapshot-at-the-beginning marking. class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS { +protected: + enum G1CardValues { + g1_young_gen = CT_MR_BS_last_reserved << 1 + }; + public: + static int g1_young_card_val() { return g1_young_gen; } + // Add "pre_val" to a set of objects that may have been disconnected from the // pre-marking object graph. static void enqueue(oop pre_val); @@ -118,6 +125,9 @@ _byte_map[card_index] = val; } + void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN; + void g1_mark_as_young(const MemRegion& mr); + bool mark_card_deferred(size_t card_index); bool is_card_deferred(size_t card_index) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/heapRegion.cpp --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -174,11 +174,6 @@ region_size = MAX_REGION_SIZE; } - if (region_size != G1HeapRegionSize) { - // Update the flag to make sure that PrintFlagsFinal logs the correct value - FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size); - } - // And recalculate the log. 
region_size_log = log2_long((jlong) region_size); @@ -606,7 +601,9 @@ void HeapRegion::migrate_strong_code_roots() { assert(in_collection_set(), "only collection set regions"); - assert(!isHumongous(), "not humongous regions"); + assert(!isHumongous(), + err_msg("humongous region "HR_FORMAT" should not have been added to collection set", + HR_FORMAT_PARAMS(this))); HeapRegionRemSet* hrrs = rem_set(); hrrs->migrate_strong_code_roots(); @@ -727,12 +724,11 @@ return; } - // An H-region should have an empty strong code root list - if (isHumongous()) { + if (continuesHumongous()) { if (strong_code_roots_length > 0) { - gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous " - "but has "INT32_FORMAT" code root entries", - bottom(), end(), strong_code_roots_length); + gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous " + "region but has "INT32_FORMAT" code root entries", + HR_FORMAT_PARAMS(this), strong_code_roots_length); *failures = true; } return; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1004,7 +1004,9 @@ void HeapRegionRemSet::migrate_strong_code_roots() { assert(hr()->in_collection_set(), "only collection set regions"); - assert(!hr()->isHumongous(), "not humongous regions"); + assert(!hr()->isHumongous(), + err_msg("humongous region "HR_FORMAT" should not have been added to the collection set", + HR_FORMAT_PARAMS(hr()))); ResourceMark rm; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/g1/ptrQueue.hpp --- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -80,6 +80,10 @@ void reset() { if (_buf != NULL) _index = _sz; } + void enqueue(volatile void* ptr) { + enqueue((void*)(ptr)); + } + // Enqueues the given "obj". void enqueue(void* ptr) { if (!_active) return; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp" #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp" +#include "gc_implementation/parallelScavenge/generationSizer.hpp" #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" // If boundary moving is being used, create the young gen and old @@ -32,15 +33,17 @@ // the old behavior otherwise (with PSYoungGen and PSOldGen). 
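
// With the policy-based constructor below, the call site added later in
// this patch (parallelScavengeHeap.cpp) reduces to a single expression
// instead of threading six separate generation sizes through:
//
//   _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
//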
AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs, - size_t init_low_byte_size, - size_t min_low_byte_size, - size_t max_low_byte_size, - size_t init_high_byte_size, - size_t min_high_byte_size, - size_t max_high_byte_size, + GenerationSizer* policy, size_t alignment) : - _virtual_spaces(old_young_rs, min_low_byte_size, - min_high_byte_size, alignment) { + _virtual_spaces(old_young_rs, policy->min_gen1_size(), + policy->min_gen0_size(), alignment) { + size_t init_low_byte_size = policy->initial_gen1_size(); + size_t min_low_byte_size = policy->min_gen1_size(); + size_t max_low_byte_size = policy->max_gen1_size(); + size_t init_high_byte_size = policy->initial_gen0_size(); + size_t min_high_byte_size = policy->min_gen0_size(); + size_t max_high_byte_size = policy->max_gen0_size(); + assert(min_low_byte_size <= init_low_byte_size && init_low_byte_size <= max_low_byte_size, "Parameter check"); assert(min_high_byte_size <= init_high_byte_size && diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -28,6 +28,7 @@ #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp" #include "gc_implementation/parallelScavenge/asPSOldGen.hpp" #include "gc_implementation/parallelScavenge/asPSYoungGen.hpp" +#include "gc_implementation/parallelScavenge/generationSizer.hpp" // Contains two generations that both use an AdjoiningVirtualSpaces. @@ -56,14 +57,7 @@ bool request_young_gen_expansion(size_t desired_change_in_bytes); public: - AdjoiningGenerations(ReservedSpace rs, - size_t init_low_byte_size, - size_t min_low_byte_size, - size_t max_low_byte_size, - size_t init_high_byte_size, - size_t min_high_byte_size, - size_t max_high_bytes_size, - size_t alignment); + AdjoiningGenerations(ReservedSpace rs, GenerationSizer* policy, size_t alignment); // Accessors PSYoungGen* young_gen() { return _young_gen; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -54,7 +54,6 @@ int level) : PSOldGen(initial_size, min_size, size_limit, gen_name, level), _gen_size_limit(size_limit) - {} ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs, @@ -65,13 +64,11 @@ int level) : PSOldGen(initial_size, min_size, size_limit, gen_name, level), _gen_size_limit(size_limit) - { _virtual_space = vs; } void ASPSOldGen::initialize_work(const char* perf_data_name, int level) { - PSOldGen::initialize_work(perf_data_name, level); // The old gen can grow to gen_size_limit(). 
_reserve reflects only @@ -94,7 +91,7 @@ ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); size_t result = gen_size_limit() - virtual_space()->committed_size(); - size_t result_aligned = align_size_down(result, heap->old_gen_alignment()); + size_t result_aligned = align_size_down(result, heap->generation_alignment()); return result_aligned; } @@ -105,7 +102,7 @@ } ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t gen_alignment = heap->old_gen_alignment(); + const size_t gen_alignment = heap->generation_alignment(); PSAdaptiveSizePolicy* policy = heap->size_policy(); const size_t working_size = used_in_bytes() + (size_t) policy->avg_promoted()->padded_average(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -70,13 +70,12 @@ } size_t ASPSYoungGen::available_for_expansion() { - size_t current_committed_size = virtual_space()->committed_size(); assert((gen_size_limit() >= current_committed_size), "generation size limit is wrong"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); size_t result = gen_size_limit() - current_committed_size; - size_t result_aligned = align_size_down(result, heap->young_gen_alignment()); + size_t result_aligned = align_size_down(result, heap->generation_alignment()); return result_aligned; } @@ -85,7 +84,6 @@ // Future implementations could check the survivors and if to_space is in the // right place (below from_space), take a chunk from to_space. size_t ASPSYoungGen::available_for_contraction() { - size_t uncommitted_bytes = virtual_space()->uncommitted_size(); if (uncommitted_bytes != 0) { return uncommitted_bytes; @@ -94,8 +92,8 @@ if (eden_space()->is_empty()) { // Respect the minimum size for eden and for the young gen as a whole. ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t eden_alignment = heap->intra_heap_alignment(); - const size_t gen_alignment = heap->young_gen_alignment(); + const size_t eden_alignment = heap->space_alignment(); + const size_t gen_alignment = heap->generation_alignment(); assert(eden_space()->capacity_in_bytes() >= eden_alignment, "Alignment is wrong"); @@ -121,7 +119,6 @@ gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K); } return result_aligned; - } return 0; @@ -132,7 +129,7 @@ // to_space can be. size_t ASPSYoungGen::available_to_live() { ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_heap_alignment(); + const size_t alignment = heap->space_alignment(); // Include any space that is committed but is not in eden. 
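
// In outline, available_for_contraction() above answers "how many bytes can
// this generation give back?": a still-uncommitted tail can be returned
// immediately; otherwise committed space beyond the generation minimum can
// be released, rounded down to the generation alignment. A simplified
// standalone sketch (illustrative signature, not the HotSpot one):

#include <cstddef>

static size_t align_down(size_t value, size_t alignment) {
  return value & ~(alignment - 1);   // alignment must be a power of two
}

size_t available_for_contraction_sketch(size_t uncommitted, size_t committed,
                                        size_t min_gen_size, size_t gen_alignment) {
  if (uncommitted != 0) {
    return uncommitted;              // easy case: shrink the uncommitted tail
  }
  size_t shrinkable = committed > min_gen_size ? committed - min_gen_size : 0;
  return align_down(shrinkable, gen_alignment);  // stay aligned, never dip below the minimum
}

// available_to_live() then starts from the committed-but-not-eden space: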
 size_t available = pointer_delta(eden_space()->bottom(),
@@ -296,7 +293,7 @@
   assert(eden_start < from_start, "Cannot push into from_space");
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t alignment = heap->intra_heap_alignment();
+  const size_t alignment = heap->space_alignment();
   const bool maintain_minimum =
     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp	Thu Nov 21 15:04:54 2013 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/parallelScavenge/generationSizer.hpp"
+#include "memory/collectorPolicy.hpp"
+
+void GenerationSizer::trace_gen_sizes(const char* const str) {
+  if (TracePageSizes) {
+    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
+                  SIZE_FORMAT "," SIZE_FORMAT " "
+                  SIZE_FORMAT,
+                  str,
+                  _min_gen1_size / K, _max_gen1_size / K,
+                  _min_gen0_size / K, _max_gen0_size / K,
+                  _max_heap_byte_size / K);
+  }
+}
+
+void GenerationSizer::initialize_alignments() {
+  _space_alignment = _gen_alignment = default_gen_alignment();
+  _heap_alignment = compute_heap_alignment();
+}
+
+void GenerationSizer::initialize_flags() {
+  // Do basic sizing work
+  TwoGenerationCollectorPolicy::initialize_flags();
+
+  assert(UseSerialGC ||
+         !FLAG_IS_DEFAULT(ParallelGCThreads) ||
+         (ParallelGCThreads > 0),
+         "ParallelGCThreads should be set before flag initialization");
+
+  // The survivor ratios are calculated "raw", unlike the
+  // default gc, which adds 2 to the ratio value. We need to
+  // make sure the values are valid before using them.
+  if (MinSurvivorRatio < 3) {
+    FLAG_SET_ERGO(uintx, MinSurvivorRatio, 3);
+  }
+
+  if (InitialSurvivorRatio < 3) {
+    FLAG_SET_ERGO(uintx, InitialSurvivorRatio, 3);
+  }
+}
+
+void GenerationSizer::initialize_size_info() {
+  trace_gen_sizes("ps heap raw");
+  const size_t page_sz = os::page_size_for_region(_min_heap_byte_size,
+                                                  _max_heap_byte_size,
+                                                  8);
+
+  // Can a page size be anything other than a power of two?
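
// The flow of initialize_size_info() above: pick a page size for the
// min/max heap range, round the generation alignment up to it, and if the
// alignment changed, redo the flag calculations before running the generic
// TwoGenerationCollectorPolicy pass. A simplified sketch; round_to_pow2
// stands in for HotSpot's round_to:

#include <cstddef>

static size_t round_to_pow2(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);  // alignment must be a power of two
}

void recompute_alignments(size_t page_sz, size_t& gen_alignment,
                          size_t& space_alignment, void (*redo_flags)()) {
  size_t new_alignment = round_to_pow2(page_sz, gen_alignment);
  if (new_alignment != gen_alignment) {
    gen_alignment   = new_alignment;
    space_alignment = new_alignment;
    redo_flags();  // the alignments feed the flag math, so redo it from the start
  }
}

// The power-of-two assumption is asserted next: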
+ assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2"); + size_t new_alignment = round_to(page_sz, _gen_alignment); + if (new_alignment != _gen_alignment) { + _gen_alignment = new_alignment; + _space_alignment = new_alignment; + // Redo everything from the start + initialize_flags(); + } + TwoGenerationCollectorPolicy::initialize_size_info(); + + trace_gen_sizes("ps heap rnd"); +} diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -31,41 +31,17 @@ // TwoGenerationCollectorPolicy. Lets reuse it! class GenerationSizer : public TwoGenerationCollectorPolicy { - public: - GenerationSizer() { - // Partial init only! - initialize_flags(); - initialize_size_info(); - } + private: - void initialize_flags() { - // Do basic sizing work - TwoGenerationCollectorPolicy::initialize_flags(); + void trace_gen_sizes(const char* const str); - assert(UseSerialGC || - !FLAG_IS_DEFAULT(ParallelGCThreads) || - (ParallelGCThreads > 0), - "ParallelGCThreads should be set before flag initialization"); + // The alignment used for boundary between young gen and old gen + static size_t default_gen_alignment() { return 64 * K * HeapWordSize; } - // The survivor ratio's are calculated "raw", unlike the - // default gc, which adds 2 to the ratio value. We need to - // make sure the values are valid before using them. - if (MinSurvivorRatio < 3) { - MinSurvivorRatio = 3; - } + protected: - if (InitialSurvivorRatio < 3) { - InitialSurvivorRatio = 3; - } - } - - size_t min_young_gen_size() { return _min_gen0_size; } - size_t young_gen_size() { return _initial_gen0_size; } - size_t max_young_gen_size() { return _max_gen0_size; } - - size_t min_old_gen_size() { return _min_gen1_size; } - size_t old_gen_size() { return _initial_gen1_size; } - size_t max_old_gen_size() { return _max_gen1_size; } + void initialize_alignments(); + void initialize_flags(); + void initialize_size_info(); }; - #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -52,76 +52,20 @@ ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL; GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL; -static void trace_gen_sizes(const char* const str, - size_t og_min, size_t og_max, - size_t yg_min, size_t yg_max) -{ - if (TracePageSizes) { - tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " " - SIZE_FORMAT "," SIZE_FORMAT " " - SIZE_FORMAT, - str, - og_min / K, og_max / K, - yg_min / K, yg_max / K, - (og_max + yg_max) / K); - } -} - jint ParallelScavengeHeap::initialize() { CollectedHeap::pre_initialize(); - // Cannot be initialized until after the flags are parsed - // GenerationSizer flag_parser; + // Initialize collector policy _collector_policy = new GenerationSizer(); - - size_t yg_min_size = _collector_policy->min_young_gen_size(); - size_t yg_max_size = _collector_policy->max_young_gen_size(); - size_t og_min_size = _collector_policy->min_old_gen_size(); - size_t og_max_size = _collector_policy->max_old_gen_size(); - - 
trace_gen_sizes("ps heap raw", - og_min_size, og_max_size, - yg_min_size, yg_max_size); - - const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size, - yg_max_size + og_max_size, - 8); - - const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz); - const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz); + _collector_policy->initialize_all(); - // Update sizes to reflect the selected page size(s). - // - // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it - // should check UseAdaptiveSizePolicy. Changes from generationSizer could - // move to the common code. - yg_min_size = align_size_up(yg_min_size, yg_align); - yg_max_size = align_size_up(yg_max_size, yg_align); - size_t yg_cur_size = - align_size_up(_collector_policy->young_gen_size(), yg_align); - yg_cur_size = MAX2(yg_cur_size, yg_min_size); + const size_t heap_size = _collector_policy->max_heap_byte_size(); - og_min_size = align_size_up(og_min_size, og_align); - // Align old gen size down to preserve specified heap size. - assert(og_align == yg_align, "sanity"); - og_max_size = align_size_down(og_max_size, og_align); - og_max_size = MAX2(og_max_size, og_min_size); - size_t og_cur_size = - align_size_down(_collector_policy->old_gen_size(), og_align); - og_cur_size = MAX2(og_cur_size, og_min_size); - - trace_gen_sizes("ps heap rnd", - og_min_size, og_max_size, - yg_min_size, yg_max_size); - - const size_t heap_size = og_max_size + yg_max_size; - - ReservedSpace heap_rs = Universe::reserve_heap(heap_size, og_align); - + ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment()); MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap); - os::trace_page_sizes("ps main", og_min_size + yg_min_size, - og_max_size + yg_max_size, og_page_sz, + os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(), + heap_size, generation_alignment(), heap_rs.base(), heap_rs.size()); if (!heap_rs.is_reserved()) { @@ -142,12 +86,6 @@ return JNI_ENOMEM; } - // Initial young gen size is 4 Mb - // - // XXX - what about flag_parser.young_gen_size()? - const size_t init_young_size = align_size_up(4 * M, yg_align); - yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size); - // Make up the generations // Calculate the maximum size that a generation can grow. This // includes growth into the other generation. 
Note that the @@ -157,14 +95,7 @@ double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; - _gens = new AdjoiningGenerations(heap_rs, - og_cur_size, - og_min_size, - og_max_size, - yg_cur_size, - yg_min_size, - yg_max_size, - yg_align); + _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment()); _old_gen = _gens->old_gen(); _young_gen = _gens->young_gen(); @@ -176,7 +107,7 @@ new PSAdaptiveSizePolicy(eden_capacity, initial_promo_size, young_gen()->to_space()->capacity_in_bytes(), - intra_heap_alignment(), + _collector_policy->gen_alignment(), max_gc_pause_sec, max_gc_minor_pause_sec, GCTimeRatio diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP +#include "gc_implementation/parallelScavenge/generationSizer.hpp" #include "gc_implementation/parallelScavenge/objectStartArray.hpp" #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp" #include "gc_implementation/parallelScavenge/psOldGen.hpp" @@ -32,14 +33,12 @@ #include "gc_implementation/shared/gcPolicyCounters.hpp" #include "gc_implementation/shared/gcWhen.hpp" #include "gc_interface/collectedHeap.inline.hpp" +#include "memory/collectorPolicy.hpp" #include "utilities/ostream.hpp" class AdjoiningGenerations; -class CollectorPolicy; class GCHeapSummary; class GCTaskManager; -class GenerationSizer; -class CollectorPolicy; class PSAdaptiveSizePolicy; class PSHeapSummary; @@ -50,24 +49,20 @@ static PSOldGen* _old_gen; // Sizing policy for entire heap - static PSAdaptiveSizePolicy* _size_policy; - static PSGCAdaptivePolicyCounters* _gc_policy_counters; + static PSAdaptiveSizePolicy* _size_policy; + static PSGCAdaptivePolicyCounters* _gc_policy_counters; static ParallelScavengeHeap* _psh; - size_t _young_gen_alignment; - size_t _old_gen_alignment; - GenerationSizer* _collector_policy; - inline size_t set_alignment(size_t& var, size_t val); - // Collection of generations that are adjacent in the // space reserved for the heap. AdjoiningGenerations* _gens; unsigned int _death_march_count; - static GCTaskManager* _gc_task_manager; // The task manager. 
+ // The task manager + static GCTaskManager* _gc_task_manager; void trace_heap(GCWhen::Type when, GCTracer* tracer); @@ -80,16 +75,7 @@ HeapWord* mem_allocate_old_gen(size_t size); public: - ParallelScavengeHeap() : CollectedHeap() { - _death_march_count = 0; - set_alignment(_young_gen_alignment, intra_heap_alignment()); - set_alignment(_old_gen_alignment, intra_heap_alignment()); - } - - // Return the (conservative) maximum heap alignment - static size_t conservative_max_heap_alignment() { - return intra_heap_alignment(); - } + ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { } // For use by VM operations enum CollectionType { @@ -103,8 +89,8 @@ virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; } - static PSYoungGen* young_gen() { return _young_gen; } - static PSOldGen* old_gen() { return _old_gen; } + static PSYoungGen* young_gen() { return _young_gen; } + static PSOldGen* old_gen() { return _old_gen; } virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; } @@ -121,13 +107,15 @@ void post_initialize(); void update_counters(); - // The alignment used for the various generations. - size_t young_gen_alignment() const { return _young_gen_alignment; } - size_t old_gen_alignment() const { return _old_gen_alignment; } + + // The alignment used for the various areas + size_t space_alignment() { return _collector_policy->space_alignment(); } + size_t generation_alignment() { return _collector_policy->gen_alignment(); } - // The alignment used for eden and survivors within the young gen - // and for boundary between young gen and old gen. - static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; } + // Return the (conservative) maximum heap alignment + static size_t conservative_max_heap_alignment() { + return CollectorPolicy::compute_heap_alignment(); + } size_t capacity() const; size_t used() const; @@ -157,16 +145,15 @@ virtual bool is_in_partial_collection(const void *p); #endif - bool is_in_young(oop p); // reserved part - bool is_in_old(oop p); // reserved part + bool is_in_young(oop p); // reserved part + bool is_in_old(oop p); // reserved part // Memory allocation. "gc_time_limit_was_exceeded" will // be set to true if the adaptive size policy determine that // an excessive amount of time is being spent doing collections // and caused a NULL to be returned. If a NULL is not returned, // "gc_time_limit_was_exceeded" has an undefined meaning. - HeapWord* mem_allocate(size_t size, - bool* gc_overhead_limit_was_exceeded); + HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded); // Allocation attempt(s) during a safepoint. It should never be called // to allocate a new TLAB as this allocation might be satisfied out @@ -257,17 +244,10 @@ // Call these in sequential code around the processing of strong roots. 
class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope { - public: + public: ParStrongRootsScope(); ~ParStrongRootsScope(); }; }; -inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val) -{ - assert(is_power_of_2((intptr_t)val), "must be a power of 2"); - var = round_to(val, intra_heap_alignment()); - return var; -} - #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -37,7 +37,7 @@ PSAdaptiveSizePolicy::PSAdaptiveSizePolicy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size, - size_t intra_generation_alignment, + size_t space_alignment, double gc_pause_goal_sec, double gc_minor_pause_goal_sec, uint gc_cost_ratio) : @@ -46,9 +46,8 @@ init_survivor_size, gc_pause_goal_sec, gc_cost_ratio), - _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin/ - 100.0), - _intra_generation_alignment(intra_generation_alignment), + _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin / 100.0), + _space_alignment(space_alignment), _live_at_last_full_gc(init_promo_size), _gc_minor_pause_goal_sec(gc_minor_pause_goal_sec), _latest_major_mutator_interval_seconds(0), @@ -353,11 +352,10 @@ } // Align everything and make a final limit check - const size_t alignment = _intra_generation_alignment; - desired_eden_size = align_size_up(desired_eden_size, alignment); - desired_eden_size = MAX2(desired_eden_size, alignment); + desired_eden_size = align_size_up(desired_eden_size, _space_alignment); + desired_eden_size = MAX2(desired_eden_size, _space_alignment); - eden_limit = align_size_down(eden_limit, alignment); + eden_limit = align_size_down(eden_limit, _space_alignment); // And one last limit check, now that we've aligned things. if (desired_eden_size > eden_limit) { @@ -561,11 +559,10 @@ } // Align everything and make a final limit check - const size_t alignment = _intra_generation_alignment; - desired_promo_size = align_size_up(desired_promo_size, alignment); - desired_promo_size = MAX2(desired_promo_size, alignment); + desired_promo_size = align_size_up(desired_promo_size, _space_alignment); + desired_promo_size = MAX2(desired_promo_size, _space_alignment); - promo_limit = align_size_down(promo_limit, alignment); + promo_limit = align_size_down(promo_limit, _space_alignment); // And one last limit check, now that we've aligned things. desired_promo_size = MIN2(desired_promo_size, promo_limit); @@ -650,7 +647,7 @@ } // If the desired eden size is as small as it will get, // try to adjust the old gen size. - if (*desired_eden_size_ptr <= _intra_generation_alignment) { + if (*desired_eden_size_ptr <= _space_alignment) { // Vary the old gen size to reduce the young gen pause. This // may not be a good idea. This is just a test. if (minor_pause_old_estimator()->decrement_will_decrease()) { @@ -755,7 +752,7 @@ // If the promo size is at the minimum (i.e., the old gen // size will not actually decrease), consider changing the // young gen size. - if (*desired_promo_size_ptr < _intra_generation_alignment) { + if (*desired_promo_size_ptr < _space_alignment) { // If increasing the young generation will decrease the old gen // pause, do it. 
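
// The recurring pattern in psAdaptiveSizePolicy.cpp after this rename:
// desired sizes are aligned up to _space_alignment (and kept at least one
// alignment unit), limits are aligned down, and the desired size is then
// clamped to the limit. As a standalone sketch (alignment assumed to be a
// power of two):

#include <cstddef>

static size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }
static size_t align_dn(size_t v, size_t a) { return v & ~(a - 1); }

size_t clamp_to_aligned_limit(size_t desired, size_t limit, size_t space_alignment) {
  desired = align_up(desired, space_alignment);
  if (desired < space_alignment) {
    desired = space_alignment;   // never shrink below one alignment unit
  }
  limit = align_dn(limit, space_alignment);
  return desired < limit ? desired : limit;   // final limit check
}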
// During startup there is noise in the statistics for deciding @@ -1066,24 +1063,24 @@ size_t PSAdaptiveSizePolicy::eden_increment_aligned_up(size_t cur_eden) { size_t result = eden_increment(cur_eden, YoungGenerationSizeIncrement); - return align_size_up(result, _intra_generation_alignment); + return align_size_up(result, _space_alignment); } size_t PSAdaptiveSizePolicy::eden_increment_aligned_down(size_t cur_eden) { size_t result = eden_increment(cur_eden); - return align_size_down(result, _intra_generation_alignment); + return align_size_down(result, _space_alignment); } size_t PSAdaptiveSizePolicy::eden_increment_with_supplement_aligned_up( size_t cur_eden) { size_t result = eden_increment(cur_eden, YoungGenerationSizeIncrement + _young_gen_size_increment_supplement); - return align_size_up(result, _intra_generation_alignment); + return align_size_up(result, _space_alignment); } size_t PSAdaptiveSizePolicy::eden_decrement_aligned_down(size_t cur_eden) { size_t eden_heap_delta = eden_decrement(cur_eden); - return align_size_down(eden_heap_delta, _intra_generation_alignment); + return align_size_down(eden_heap_delta, _space_alignment); } size_t PSAdaptiveSizePolicy::eden_decrement(size_t cur_eden) { @@ -1105,24 +1102,24 @@ size_t PSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) { size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement); - return align_size_up(result, _intra_generation_alignment); + return align_size_up(result, _space_alignment); } size_t PSAdaptiveSizePolicy::promo_increment_aligned_down(size_t cur_promo) { size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement); - return align_size_down(result, _intra_generation_alignment); + return align_size_down(result, _space_alignment); } size_t PSAdaptiveSizePolicy::promo_increment_with_supplement_aligned_up( size_t cur_promo) { size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement + _old_gen_size_increment_supplement); - return align_size_up(result, _intra_generation_alignment); + return align_size_up(result, _space_alignment); } size_t PSAdaptiveSizePolicy::promo_decrement_aligned_down(size_t cur_promo) { size_t promo_heap_delta = promo_decrement(cur_promo); - return align_size_down(promo_heap_delta, _intra_generation_alignment); + return align_size_down(promo_heap_delta, _space_alignment); } size_t PSAdaptiveSizePolicy::promo_decrement(size_t cur_promo) { @@ -1135,9 +1132,9 @@ bool is_survivor_overflow, uint tenuring_threshold, size_t survivor_limit) { - assert(survivor_limit >= _intra_generation_alignment, + assert(survivor_limit >= _space_alignment, "survivor_limit too small"); - assert((size_t)align_size_down(survivor_limit, _intra_generation_alignment) + assert((size_t)align_size_down(survivor_limit, _space_alignment) == survivor_limit, "survivor_limit not aligned"); // This method is called even if the tenuring threshold and survivor @@ -1201,8 +1198,8 @@ // We're trying to pad the survivor size as little as possible without // overflowing the survivor spaces. size_t target_size = align_size_up((size_t)_avg_survived->padded_average(), - _intra_generation_alignment); - target_size = MAX2(target_size, _intra_generation_alignment); + _space_alignment); + target_size = MAX2(target_size, _space_alignment); if (target_size > survivor_limit) { // Target size is bigger than we can handle. 
Let's also reduce diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -91,7 +91,7 @@ // for making ergonomic decisions. double _latest_major_mutator_interval_seconds; - const size_t _intra_generation_alignment; // alignment for eden, survivors + const size_t _space_alignment; // alignment for eden, survivors const double _gc_minor_pause_goal_sec; // goal for maximum minor gc pause @@ -229,7 +229,7 @@ PSAdaptiveSizePolicy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size, - size_t intra_generation_alignment, + size_t space_alignment, double gc_pause_goal_sec, double gc_minor_pause_goal_sec, uint gc_time_ratio); @@ -378,7 +378,7 @@ // remain almost full anyway (top() will be near end(), but there will be a // large filler object at the bottom). const size_t sz = gen_size / MinSurvivorRatio; - const size_t alignment = _intra_generation_alignment; + const size_t alignment = _space_alignment; return sz > alignment ? align_size_down(sz, alignment) : alignment; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -103,7 +103,7 @@ // Compute maximum space sizes for performance counters ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - size_t alignment = heap->intra_heap_alignment(); + size_t alignment = heap->space_alignment(); size_t size = virtual_space()->reserved_size(); size_t max_survivor_size; @@ -156,8 +156,9 @@ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); // Compute sizes - size_t alignment = heap->intra_heap_alignment(); + size_t alignment = heap->space_alignment(); size_t size = virtual_space()->committed_size(); + assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors"); size_t survivor_size = size / InitialSurvivorRatio; survivor_size = align_size_down(survivor_size, alignment); @@ -207,7 +208,7 @@ #ifndef PRODUCT void PSYoungGen::space_invariants() { ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_heap_alignment(); + const size_t alignment = heap->space_alignment(); // Currently, our eden size cannot shrink to zero guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small"); @@ -491,7 +492,7 @@ char* to_end = (char*)to_space()->end(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_heap_alignment(); + const size_t alignment = heap->space_alignment(); const bool maintain_minimum = (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size(); @@ -840,8 +841,8 @@ size_t PSYoungGen::available_to_live() { size_t delta_in_survivor = 0; ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t space_alignment = heap->intra_heap_alignment(); - const size_t gen_alignment = heap->young_gen_alignment(); + const size_t space_alignment = heap->space_alignment(); + const size_t gen_alignment = heap->generation_alignment(); MutableSpace* space_shrinking = NULL; if (from_space()->end() > to_space()->end()) 
{ diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/shared/vmGCOperations.cpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -211,7 +211,7 @@ // a GC that freed space for the allocation. if (!MetadataAllocationFailALot) { _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); - } + } if (_result == NULL) { if (UseConcMarkSweepGC) { @@ -223,9 +223,7 @@ _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype); } if (_result == NULL) { - // Don't clear the soft refs. This GC is for reclaiming metadata - // and is unrelated to the fullness of the Java heap which should - // be the criteria for clearing SoftReferences. + // Don't clear the soft refs yet. if (Verbose && PrintGCDetails && UseConcMarkSweepGC) { gclog_or_tty->print_cr("\nCMS full GC for Metaspace"); } @@ -235,7 +233,7 @@ _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); } - if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) { + if (_result == NULL) { // If still failing, allow the Metaspace to expand. // See delta_capacity_until_GC() for explanation of the // amount of the expansion. @@ -243,7 +241,16 @@ // or a MaxMetaspaceSize has been specified on the command line. _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype); - + if (_result == NULL) { + // If expansion failed, do a last-ditch collection and try allocating + // again. A last-ditch collection will clear softrefs. This + // behavior is similar to the last-ditch collection done for perm + // gen when it was full and a collection for failed allocation + // did not free perm gen space. 
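
// Taken together, the allocation path in doit() now forms a retry ladder,
// sketched below. The functions are declarations standing in for the
// Metaspace and VM-thread GC entry points, the CMS-specific expansion step
// is omitted, and kRegularCollection is an illustrative cause name; only
// GCCause::_last_ditch_collection is named in the patch itself.

#include <cstddef>

typedef void* MetaWordPtr;
enum GCCauseSketch { kRegularCollection, kLastDitchCollection };

MetaWordPtr try_allocate(size_t size);         // Metaspace allocation stand-in
MetaWordPtr expand_and_allocate(size_t size);  // grow Metaspace, then allocate
void collect(GCCauseSketch cause);             // collect_as_vm_thread stand-in

MetaWordPtr metadata_alloc_retry(size_t size) {
  if (MetaWordPtr r = try_allocate(size)) return r;
  collect(kRegularCollection);                 // GC that keeps soft refs alive
  if (MetaWordPtr r = try_allocate(size)) return r;
  if (MetaWordPtr r = expand_and_allocate(size)) return r;
  collect(kLastDitchCollection);               // last resort: clears soft refs
  return try_allocate(size);
}

// The last-ditch collection and final retry are the lines added below: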
+ heap->collect_as_vm_thread(GCCause::_last_ditch_collection); + _result = + _loader_data->metaspace_non_null()->allocate(_size, _mdtype); + } } if (Verbose && PrintGCDetails && _result == NULL) { gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size " diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_implementation/shared/vmGCOperations.hpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -214,9 +214,6 @@ : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true), _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) { } - ~VM_CollectForMetadataAllocation() { - MetaspaceGC::set_expand_after_GC(false); - } virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; } virtual void doit(); MetaWord* result() const { return _result; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_interface/collectedHeap.cpp --- a/src/share/vm/gc_interface/collectedHeap.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -202,12 +202,6 @@ ShouldNotReachHere(); // Unexpected use of this function } } -MetaWord* CollectedHeap::satisfy_failed_metadata_allocation( - ClassLoaderData* loader_data, - size_t size, Metaspace::MetadataType mdtype) { - return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype); -} - void CollectedHeap::pre_initialize() { // Used for ReduceInitialCardMarks (when COMPILER2 is used); @@ -478,6 +472,10 @@ fill_with_object_impl(start, words, zap); } +void CollectedHeap::post_initialize() { + collector_policy()->post_heap_initialize(); +} + HeapWord* CollectedHeap::allocate_new_tlab(size_t size) { guarantee(false, "thread-local allocation buffers not supported"); return NULL; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/gc_interface/collectedHeap.hpp --- a/src/share/vm/gc_interface/collectedHeap.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -475,11 +475,6 @@ // the context of the vm thread. virtual void collect_as_vm_thread(GCCause::Cause cause); - // Callback from VM_CollectForMetadataAllocation operation. 
- MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, - size_t size, - Metaspace::MetadataType mdtype); - // Returns the barrier set for this heap BarrierSet* barrier_set() { return _barrier_set; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/graal/graalCompiler.cpp --- a/src/share/vm/graal/graalCompiler.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/graal/graalCompiler.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -82,6 +82,9 @@ graal_compute_offsets(); + // Ensure _non_oop_bits is initialized + Universe::non_oop_word(); + { GRAAL_VM_ENTRY_MARK; HandleMark hm; @@ -99,7 +102,8 @@ if (UseCompiler) { bool bootstrap = GRAALVM_ONLY(BootstrapGraal) NOT_GRAALVM(false); - VMToCompiler::startCompiler(bootstrap); + jlong compilerStatisticsAddress = (jlong) ((address) (stats())); + VMToCompiler::startCompiler(bootstrap, compilerStatisticsAddress); _initialized = true; CompilationPolicy::completed_vm_startup(); if (bootstrap) { @@ -179,10 +183,10 @@ assert(_initialized, "must already be initialized"); ResourceMark rm; - JavaThread::current()->set_is_compiling(true); + thread->set_is_graal_compiling(true); Handle holder = GraalCompiler::createHotSpotResolvedObjectType(method, CHECK); VMToCompiler::compileMethod(method(), holder, entry_bci, blocking); - JavaThread::current()->set_is_compiling(false); + thread->set_is_graal_compiling(false); } // Compilation entry point for methods @@ -233,8 +237,7 @@ // We have to lock the cpool to keep the oop from being resolved // while we are accessing it. But we must release the lock before // calling up into Java. - oop cplock = cp->lock(); - ObjectLocker ol(cplock, THREAD, cplock != NULL); + MonitorLockerEx ml(cp->lock()); constantTag tag = cp->tag_at(index); if (tag.is_klass()) { // The klass has been inserted into the constant pool diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/graal/graalCompilerToVM.cpp --- a/src/share/vm/graal/graalCompilerToVM.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/graal/graalCompilerToVM.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -732,6 +732,8 @@ set_int("instanceKlassVtableStartOffset", InstanceKlass::vtable_start_offset() * HeapWordSize); + set_long("elapsedTimerFrequency", os::elapsed_frequency()); + //------------------------------------------------------------------------------------------------ set_address("handleDeoptStub", SharedRuntime::deopt_blob()->unpack()); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/graal/graalEnv.cpp --- a/src/share/vm/graal/graalEnv.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/graal/graalEnv.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -172,8 +172,7 @@ { // We have to lock the cpool to keep the oop from being resolved // while we are accessing it. - oop cplock = cpool->lock(); - ObjectLocker ol(cplock, THREAD, cplock != NULL); + MonitorLockerEx ml(cpool->lock()); constantTag tag = cpool->tag_at(index); if (tag.is_klass()) { @@ -503,16 +502,6 @@ // Free codeBlobs //code_buffer->free_blob(); - // stress test 6243940 by immediately making the method - // non-entrant behind the system's back. This has serious - // side effects on the code cache and is not meant for - // general stress testing - if (nm != NULL && StressNonEntrant) { - MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); - NativeJump::patch_verified_entry(nm->entry_point(), nm->verified_entry_point(), - SharedRuntime::get_handle_wrong_method_stub()); - } - if (nm == NULL) { // The CodeCache is full. Print out warning and disable compilation. 
{ diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/graal/graalVMToCompiler.cpp --- a/src/share/vm/graal/graalVMToCompiler.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/graal/graalVMToCompiler.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -146,13 +146,14 @@ } } -void VMToCompiler::startCompiler(jboolean bootstrap_enabled) { +void VMToCompiler::startCompiler(jboolean bootstrap_enabled, jlong compilerStatisticsAddress) { JavaThread* THREAD = JavaThread::current(); JavaValue result(T_VOID); JavaCallArguments args; args.push_oop(instance()); args.push_int(bootstrap_enabled); - JavaCalls::call_interface(&result, vmToCompilerKlass(), vmSymbols::startCompiler_name(), vmSymbols::bool_void_signature(), &args, THREAD); + args.push_long(compilerStatisticsAddress); + JavaCalls::call_interface(&result, vmToCompilerKlass(), vmSymbols::startCompiler_name(), vmSymbols::boolean_long_void_signature(), &args, THREAD); check_pending_exception("Error while calling startCompiler"); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/graal/graalVMToCompiler.hpp --- a/src/share/vm/graal/graalVMToCompiler.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/graal/graalVMToCompiler.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -67,7 +67,7 @@ static void shutdownCompiler(); // public abstract void startCompiler(boolean bootstrapEnabled); - static void startCompiler(jboolean bootstrap_enabled); + static void startCompiler(jboolean bootstrap_enabled, jlong compilerStatisticsAddress); // public abstract void bootstrap(); static void bootstrap(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/graal/vmStructs_graal.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/graal/vmStructs_graal.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GRAAL_VMSTRUCTS_GRAAL_HPP +#define SHARE_VM_GRAAL_VMSTRUCTS_GRAAL_HPP + +#include "compiler/abstractCompiler.hpp" + +#define VM_STRUCTS_GRAAL(nonstatic_field, static_field) \ + \ + static_field(java_lang_Class, _graal_mirror_offset, int) \ + \ + nonstatic_field(CompilerStatistics, _standard, CompilerStatistics::Data) \ + nonstatic_field(CompilerStatistics, _osr, CompilerStatistics::Data) \ + nonstatic_field(CompilerStatistics, _nmethods_size, int) \ + nonstatic_field(CompilerStatistics, _nmethods_code_size, int) \ + nonstatic_field(CompilerStatistics::Data, _bytes, int) \ + nonstatic_field(CompilerStatistics::Data, _count, int) \ + nonstatic_field(CompilerStatistics::Data, _time, elapsedTimer) \ + + +#define VM_TYPES_GRAAL(declare_type, declare_toplevel_type) \ + \ + declare_toplevel_type(CompilerStatistics) \ + declare_toplevel_type(CompilerStatistics::Data) \ + + +#endif // SHARE_VM_GRAAL_VMSTRUCTS_GRAAL_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/abstractInterpreter.hpp --- a/src/share/vm/interpreter/abstractInterpreter.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -30,11 +30,8 @@ #include "runtime/thread.inline.hpp" #include "runtime/vmThread.hpp" #include "utilities/top.hpp" -#ifdef TARGET_ARCH_MODEL_x86_32 -# include "interp_masm_x86_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_x86_64 -# include "interp_masm_x86_64.hpp" +#ifdef TARGET_ARCH_x86 +# include "interp_masm_x86.hpp" #endif #ifdef TARGET_ARCH_MODEL_sparc # include "interp_masm_sparc.hpp" @@ -164,8 +161,8 @@ // Runtime support // length = invoke bytecode length (to advance to next bytecode) - static address deopt_entry (TosState state, int length) { ShouldNotReachHere(); return NULL; } - static address return_entry (TosState state, int length) { ShouldNotReachHere(); return NULL; } + static address deopt_entry(TosState state, int length) { ShouldNotReachHere(); return NULL; } + static address return_entry(TosState state, int length, Bytecodes::Code code) { ShouldNotReachHere(); return NULL; } static address rethrow_exception_entry() { return _rethrow_exception_entry; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/cppInterpreter.hpp --- a/src/share/vm/interpreter/cppInterpreter.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/cppInterpreter.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -78,7 +78,7 @@ static address stack_result_to_stack(int index) { return _stack_to_stack[index]; } static address stack_result_to_native(int index) { return _stack_to_native_abi[index]; } - static address return_entry (TosState state, int length); + static address return_entry (TosState state, int length, Bytecodes::Code code); static address deopt_entry (TosState state, int length); #ifdef TARGET_ARCH_x86 diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/interpreter.cpp --- a/src/share/vm/interpreter/interpreter.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/interpreter.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -339,15 +339,21 @@ //------------------------------------------------------------------------------------------------------------------------ // Deoptimization support -// If deoptimization happens, this function returns the point of next bytecode to continue execution +/** + * If a deoptimization happens, this function returns the point of next bytecode to continue execution. 
+ */ address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) { assert(method->contains(bcp), "just checkin'"); - Bytecodes::Code code = Bytecodes::java_code_at(method, bcp); + + // Get the original and rewritten bytecode. + Bytecodes::Code code = Bytecodes::java_code_at(method, bcp); assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute"); - int bci = method->bci_from(bcp); - int length = -1; // initial value for debugging + + const int bci = method->bci_from(bcp); + // compute continuation length - length = Bytecodes::length_at(method, bcp); + const int length = Bytecodes::length_at(method, bcp); + // compute result type BasicType type = T_ILLEGAL; @@ -403,7 +409,7 @@ return is_top_frame ? Interpreter::deopt_entry (as_TosState(type), length) - : Interpreter::return_entry(as_TosState(type), length); + : Interpreter::return_entry(as_TosState(type), length, code); } // If deoptimization happens, this function returns the point where the interpreter reexecutes diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/linkResolver.cpp --- a/src/share/vm/interpreter/linkResolver.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/linkResolver.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,4 +1,5 @@ /* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -151,11 +152,29 @@ // Could be an Object method inherited into an interface, but still a vtable call. kind = CallInfo::vtable_call; } else if (!resolved_klass->is_interface()) { - // A miranda method. Compute the vtable index. + // A default or miranda method. Compute the vtable index. ResourceMark rm; klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable(); - index = vt->index_of_miranda(resolved_method->name(), - resolved_method->signature()); + index = LinkResolver::vtable_index_of_interface_method(resolved_klass, + resolved_method); + assert(index >= 0 , "we should have valid vtable index at this point"); + + kind = CallInfo::vtable_call; + } else if (resolved_method->has_vtable_index()) { + // Can occur if an interface redeclares a method of Object. + +#ifdef ASSERT + // Ensure that this is really the case. + KlassHandle object_klass = SystemDictionary::Object_klass(); + Method * object_resolved_method = object_klass()->vtable()->method_at(index); + assert(object_resolved_method->name() == resolved_method->name(), + err_msg("Object and interface method names should match at vtable index %d, %s != %s", + index, object_resolved_method->name()->as_C_string(), resolved_method->name()->as_C_string())); + assert(object_resolved_method->signature() == resolved_method->signature(), + err_msg("Object and interface method signatures should match at vtable index %d, %s != %s", + index, object_resolved_method->signature()->as_C_string(), resolved_method->signature()->as_C_string())); +#endif // ASSERT + kind = CallInfo::vtable_call; } else { // A regular interface call. @@ -221,8 +240,17 @@ // // According to JVM spec. 
$5.4.3c & $5.4.3d +// Look up method in klasses, including static methods +// Then look up local default methods void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) { Method* result_oop = klass->uncached_lookup_method(name, signature); + if (result_oop == NULL) { + Array* default_methods = InstanceKlass::cast(klass())->default_methods(); + if (default_methods != NULL) { + result_oop = InstanceKlass::find_method(default_methods, name, signature); + } + } + if (EnableInvokeDynamic && result_oop != NULL) { vmIntrinsics::ID iid = result_oop->intrinsic_id(); if (MethodHandles::is_signature_polymorphic(iid)) { @@ -234,20 +262,46 @@ } // returns first instance method +// Looks up method in classes, then looks up local default methods void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) { Method* result_oop = klass->uncached_lookup_method(name, signature); result = methodHandle(THREAD, result_oop); - while (!result.is_null() && result->is_static()) { + while (!result.is_null() && result->is_static() && result->method_holder()->super() != NULL) { klass = KlassHandle(THREAD, result->method_holder()->super()); result = methodHandle(THREAD, klass->uncached_lookup_method(name, signature)); } + + if (result.is_null()) { + Array* default_methods = InstanceKlass::cast(klass())->default_methods(); + if (default_methods != NULL) { + result = methodHandle(InstanceKlass::find_method(default_methods, name, signature)); + assert(result.is_null() || !result->is_static(), "static defaults not allowed"); + } + } } +int LinkResolver::vtable_index_of_interface_method(KlassHandle klass, + methodHandle resolved_method) { -int LinkResolver::vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) { - ResourceMark rm(THREAD); - klassVtable *vt = InstanceKlass::cast(klass())->vtable(); - return vt->index_of_miranda(name, signature); + int vtable_index = Method::invalid_vtable_index; + Symbol* name = resolved_method->name(); + Symbol* signature = resolved_method->signature(); + + // First check in default method array + if (!resolved_method->is_abstract() && + (InstanceKlass::cast(klass())->default_methods() != NULL)) { + int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature); + if (index >= 0 ) { + vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index); + } + } + if (vtable_index == Method::invalid_vtable_index) { + // get vtable_index for miranda methods + ResourceMark rm; + klassVtable *vt = InstanceKlass::cast(klass())->vtable(); + vtable_index = vt->index_of_miranda(name, signature); + } + return vtable_index; } void LinkResolver::lookup_method_in_interfaces(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) { @@ -366,18 +420,28 @@ AccessFlags flags = sel_method->access_flags(); - // Special case: arrays always override "clone". JVMS 2.15. + // Special case #1: arrays always override "clone". JVMS 2.15. // If the resolved klass is an array class, and the declaring class // is java.lang.Object and the method is "clone", set the flags // to public. + // Special case #2: If the resolved klass is an interface, and + // the declaring class is java.lang.Object, and the method is + // "clone" or "finalize", set the flags to public. 
If the
+  // resolved interface does not contain "clone" or "finalize"
+  // methods, the method/interface method resolution looks to
+  // the interface's super class, java.lang.Object.  With JDK 8
+  // interface accessibility check requirement, special casing
+  // this scenario is necessary to avoid an IAE.
   //
-  // We'll check for the method name first, as that's most likely
-  // to be false (so we'll short-circuit out of these tests).
-  if (sel_method->name() == vmSymbols::clone_name() &&
-      sel_klass() == SystemDictionary::Object_klass() &&
-      resolved_klass->oop_is_array()) {
+  // We'll check for each method name first and then java.lang.Object
+  // to best short-circuit out of these tests.
+  if (((sel_method->name() == vmSymbols::clone_name() &&
+        (resolved_klass->oop_is_array() || resolved_klass->is_interface())) ||
+       (sel_method->name() == vmSymbols::finalize_method_name() &&
+        resolved_klass->is_interface())) &&
+      sel_klass() == SystemDictionary::Object_klass()) {
     // We need to change "protected" to "public".
-    assert(flags.is_protected(), "clone not protected?");
+    assert(flags.is_protected(), "clone or finalize not protected?");
     jint new_flags = flags.as_int();
     new_flags = new_flags & (~JVM_ACC_PROTECTED);
     new_flags = new_flags | JVM_ACC_PUBLIC;
@@ -418,7 +482,7 @@
   Symbol* method_name = vmSymbols::invoke_name();
   Symbol* method_signature = pool->signature_ref_at(index);
   KlassHandle current_klass(THREAD, pool->pool_holder());
-  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
   return;
 }
@@ -440,22 +504,34 @@
   if (code == Bytecodes::_invokeinterface) {
     resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+  } else if (code == Bytecodes::_invokevirtual) {
+    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK);
   } else {
-    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
   }
 }
 
 void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass,
                                   Symbol* method_name, Symbol* method_signature,
-                                  KlassHandle current_klass, bool check_access, TRAPS) {
+                                  KlassHandle current_klass, bool check_access,
+                                  bool require_methodref, TRAPS) {
 
   Handle nested_exception;
 
-  // 1. lookup method in resolved klass and its super klasses
+  // 1. check if methodref required, that resolved_klass is not interfacemethodref
+  if (require_methodref && resolved_klass->is_interface()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
+                 resolved_klass()->external_name());
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
+  // 2. lookup method in resolved klass and its super klasses
   lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
   if (resolved_method.is_null()) { // not found in the class hierarchy
-    // 2. lookup method in all the interfaces implemented by the resolved klass
+    // 3.
     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
     if (resolved_method.is_null()) {
@@ -469,7 +545,7 @@
     }
 
     if (resolved_method.is_null()) {
-      // 3. method lookup failed
+      // 4. method lookup failed
       ResourceMark rm(THREAD);
       THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
                       Method::name_and_sig_as_C_string(resolved_klass(),
@@ -479,15 +555,6 @@
     }
   }
 
-  // 4. check if klass is not interface
-  if (resolved_klass->is_interface() && resolved_method->is_abstract()) {
-    ResourceMark rm(THREAD);
-    char buf[200];
-    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
-                 resolved_klass()->external_name());
-    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
-  }
-
   // 5. check if method is concrete
   if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
     ResourceMark rm(THREAD);
@@ -625,6 +692,12 @@
                 resolved_method->method_holder()->internal_name()
     );
     resolved_method->access_flags().print_on(tty);
+    if (resolved_method->is_default_method()) {
+      tty->print("default ");
+    }
+    if (resolved_method->is_overpass()) {
+      tty->print("overpass");
+    }
     tty->cr();
   }
 }
@@ -795,7 +868,7 @@
                                                   Symbol* method_name, Symbol* method_signature,
                                                   KlassHandle current_klass, bool check_access, TRAPS) {
 
-  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);
   assert(resolved_method->name() != vmSymbols::class_initializer_name(), "should have been checked in verifier");
 
   // check if static
@@ -829,7 +902,7 @@
   // and the selected method is recalculated relative to the direct superclass
   // superinterface.method, which explicitly does not check shadowing
 
-  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);
 
   // check if method name is <init>, that it is found in same klass as static type
   if (resolved_method->name() == vmSymbols::object_initializer_name() &&
@@ -857,6 +930,7 @@
                                                       resolved_method->signature()));
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
   }
+
   if (TraceItables && Verbose) {
     ResourceMark rm(THREAD);
     tty->print("invokespecial resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
@@ -868,9 +942,8 @@
                 resolved_method->method_holder()->internal_name()
     );
     resolved_method->access_flags().print_on(tty);
-    if (resolved_method->method_holder()->is_interface() &&
-        !resolved_method->is_abstract()) {
-      tty->print("default");
+    if (resolved_method->is_default_method()) {
+      tty->print("default ");
     }
     if (resolved_method->is_overpass()) {
       tty->print("overpass");
     }
     tty->cr();
   }
@@ -949,9 +1022,11 @@
                 sel_method->method_holder()->internal_name()
     );
     sel_method->access_flags().print_on(tty);
-    if (sel_method->method_holder()->is_interface() &&
-        !sel_method->is_abstract()) {
-      tty->print("default");
+    if (sel_method->is_default_method()) {
+      tty->print("default ");
+    }
+    if (sel_method->is_overpass()) {
+      tty->print("overpass");
     }
     tty->cr();
   }
@@ -973,7 +1048,7 @@
                                                    Symbol* method_name, Symbol* method_signature,
                                                    KlassHandle current_klass, bool check_access, TRAPS) {
   // normal method resolution
-  resolve_method(resolved_method, resolved_klass,
method_name, method_signature, current_klass, check_access, CHECK); + resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, true, CHECK); assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier"); assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier"); @@ -1000,26 +1075,25 @@ THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); } - if (PrintVtables && Verbose) { - ResourceMark rm(THREAD); - tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ", - (current_klass.is_null() ? "" : current_klass->internal_name()), - (resolved_klass.is_null() ? "" : resolved_klass->internal_name()), - Method::name_and_sig_as_C_string(resolved_klass(), - resolved_method->name(), - resolved_method->signature()), - resolved_method->method_holder()->internal_name() - ); - resolved_method->access_flags().print_on(tty); - if (resolved_method->method_holder()->is_interface() && - !resolved_method->is_abstract()) { - tty->print("default"); - } - if (resolved_method->is_overpass()) { - tty->print("overpass"); - } - tty->cr(); - } + if (PrintVtables && Verbose) { + ResourceMark rm(THREAD); + tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ", + (current_klass.is_null() ? "" : current_klass->internal_name()), + (resolved_klass.is_null() ? "" : resolved_klass->internal_name()), + Method::name_and_sig_as_C_string(resolved_klass(), + resolved_method->name(), + resolved_method->signature()), + resolved_method->method_holder()->internal_name() + ); + resolved_method->access_flags().print_on(tty); + if (resolved_method->is_default_method()) { + tty->print("default "); + } + if (resolved_method->is_overpass()) { + tty->print("overpass"); + } + tty->cr(); + } } // throws runtime exceptions @@ -1049,10 +1123,8 @@ // do lookup based on receiver klass using the vtable index if (resolved_method->method_holder()->is_interface()) { // miranda method - vtable_index = vtable_index_of_miranda_method(resolved_klass, - resolved_method->name(), - resolved_method->signature(), CHECK); - + vtable_index = vtable_index_of_interface_method(resolved_klass, + resolved_method); assert(vtable_index >= 0 , "we should have valid vtable index at this point"); InstanceKlass* inst = InstanceKlass::cast(recv_klass()); @@ -1108,11 +1180,10 @@ vtable_index ); selected_method->access_flags().print_on(tty); - if (selected_method->method_holder()->is_interface() && - !selected_method->is_abstract()) { - tty->print("default"); + if (selected_method->is_default_method()) { + tty->print("default "); } - if (resolved_method->is_overpass()) { + if (selected_method->is_overpass()) { tty->print("overpass"); } tty->cr(); @@ -1195,7 +1266,6 @@ sel_method->name(), sel_method->signature())); } - // check if abstract if (check_null_and_abstract && sel_method->is_abstract()) { ResourceMark rm(THREAD); @@ -1204,14 +1274,6 @@ sel_method->name(), sel_method->signature())); } - // setup result - if (!resolved_method->has_itable_index()) { - int vtable_index = resolved_method->vtable_index(); - assert(vtable_index == sel_method->vtable_index(), "sanity check"); - result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK); - return; - } - int itable_index = resolved_method()->itable_index(); if (TraceItables && 
Verbose) { ResourceMark rm(THREAD); @@ -1224,16 +1286,23 @@ sel_method->method_holder()->internal_name() ); sel_method->access_flags().print_on(tty); - if (sel_method->method_holder()->is_interface() && - !sel_method->is_abstract()) { - tty->print("default"); + if (sel_method->is_default_method()) { + tty->print("default "); } - if (resolved_method->is_overpass()) { + if (sel_method->is_overpass()) { tty->print("overpass"); } tty->cr(); } - result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK); + // setup result + if (!resolved_method->has_itable_index()) { + int vtable_index = resolved_method->vtable_index(); + assert(vtable_index == sel_method->vtable_index(), "sanity check"); + result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK); + } else { + int itable_index = resolved_method()->itable_index(); + result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK); + } } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/linkResolver.hpp --- a/src/share/vm/interpreter/linkResolver.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/linkResolver.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -130,14 +130,12 @@ static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS); - static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); - static void resolve_klass (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); static void resolve_pool (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS); static void resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); - static void resolve_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); + static void resolve_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool require_methodref, TRAPS); static void linktime_resolve_static_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); static void linktime_resolve_special_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); @@ -188,6 +186,7 @@ static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass); static methodHandle resolve_static_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass); static methodHandle resolve_special_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass); + static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method); // same as above for compile-time resolution; returns vtable_index if current_klass if 
linked static int resolve_virtual_vtable_index (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/rewriter.cpp --- a/src/share/vm/interpreter/rewriter.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/rewriter.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -70,21 +70,21 @@ } // Unrewrite the bytecodes if an error occurs. -void Rewriter::restore_bytecodes() { +void Rewriter::restore_bytecodes(TRAPS) { int len = _methods->length(); for (int i = len-1; i >= 0; i--) { Method* method = _methods->at(i); - scan_method(method, true); + scan_method(method, true, CHECK); } } // Creates a constant pool cache given a CPC map void Rewriter::make_constant_pool_cache(TRAPS) { - const int length = _cp_cache_map.length(); ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data(); ConstantPoolCache* cache = - ConstantPoolCache::allocate(loader_data, length, _cp_cache_map, + ConstantPoolCache::allocate(loader_data, _cp_cache_map, + _invokedynamic_cp_cache_map, _invokedynamic_references_map, CHECK); // initialize object cache in constant pool @@ -154,6 +154,31 @@ } } +// If the constant pool entry for invokespecial is InterfaceMethodref, +// we need to add a separate cpCache entry for its resolution, because it is +// different than the resolution for invokeinterface with InterfaceMethodref. +// These cannot share cpCache entries. It's unclear if all invokespecial to +// InterfaceMethodrefs would resolve to the same thing so a new cpCache entry +// is created for each one. This was added with lambda. +void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS) { + static int count = 0; + address p = bcp + offset; + if (!reverse) { + int cp_index = Bytes::get_Java_u2(p); + int cache_index = add_invokespecial_cp_cache_entry(cp_index); + if (cache_index != (int)(jushort) cache_index) { + THROW_MSG(vmSymbols::java_lang_InternalError(), + "This classfile overflows invokespecial for interfaces " + "and cannot be loaded"); + } + Bytes::put_native_u2(p, cache_index); + } else { + int cache_index = Bytes::get_native_u2(p); + int cp_index = cp_cache_entry_pool_index(cache_index); + Bytes::put_Java_u2(p, cp_index); + } +} + // Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.) void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) { @@ -203,7 +228,7 @@ if (!reverse) { int cp_index = Bytes::get_Java_u2(p); int cache_index = add_invokedynamic_cp_cache_entry(cp_index); - add_invokedynamic_resolved_references_entries(cp_index, cache_index); + int resolved_index = add_invokedynamic_resolved_references_entries(cp_index, cache_index); // Replace the trailing four bytes with a CPC index for the dynamic // call site. Unlike other CPC entries, there is one per bytecode, // not just one per distinct CP entry. In other words, the @@ -212,13 +237,20 @@ // all these entries. That is the main reason invokedynamic // must have a five-byte instruction format. (Of course, other JVM // implementations can use the bytes for other purposes.) + // Note: We use native_u4 format exclusively for 4-byte indexes. Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index)); - // Note: We use native_u4 format exclusively for 4-byte indexes. 
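For illustration, a minimal standalone sketch of the two-way index swap that rewrite_invokespecial performs (hypothetical names, plain std::vector instead of Rewriter state, little-endian host assumed for the "native" byte order; not part of this changeset):

#include <cstdint>
#include <vector>

// Forward: the big-endian (Java order) u2 constant-pool index at p is replaced
// by a native-order u2 cpCache index, failing if it would overflow the u2
// operand. Reverse: the cpCache index is mapped back to the cp index.
static bool swap_u2_index(uint8_t* p, bool reverse,
                          std::vector<int>& cache_to_cp) {
  if (!reverse) {
    int cp_index = (p[0] << 8) | p[1];               // like Bytes::get_Java_u2
    int cache_index = (int)cache_to_cp.size();
    if (cache_index != (int)(uint16_t)cache_index) { // mirrors the (jushort) check
      return false;
    }
    cache_to_cp.push_back(cp_index);
    p[0] = (uint8_t)(cache_index & 0xff);            // like Bytes::put_native_u2
    p[1] = (uint8_t)((cache_index >> 8) & 0xff);
  } else {
    int cache_index = p[0] | (p[1] << 8);            // like Bytes::get_native_u2
    int cp_index = cache_to_cp[cache_index];
    p[0] = (uint8_t)((cp_index >> 8) & 0xff);        // like Bytes::put_Java_u2
    p[1] = (uint8_t)(cp_index & 0xff);
  }
  return true;
}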
+      // add the bcp in case we need to patch this bytecode if we also find an
+      // invokespecial/InterfaceMethodref in the bytecode stream
+      _patch_invokedynamic_bcps->push(p);
+      _patch_invokedynamic_refs->push(resolved_index);
   } else {
-    // callsite index
     int cache_index = ConstantPool::decode_invokedynamic_index(
                         Bytes::get_native_u4(p));
-    int cp_index = cp_cache_entry_pool_index(cache_index);
+    // We will reverse the bytecode rewriting _after_ adjusting them.
+    // Adjust the cache index by offset to the invokedynamic entries in the
+    // cpCache plus the delta if the invokedynamic bytecodes were adjusted.
+    cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
+    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
     assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
     // zero out 4 bytes
     Bytes::put_Java_u4(p, 0);
@@ -226,6 +258,34 @@
   }
 }
 
+void Rewriter::patch_invokedynamic_bytecodes() {
+  // If the end of the cp_cache is the same as after initializing with the
+  // cpool, nothing needs to be done. Invokedynamic bytecodes are at the
+  // correct offsets, i.e. no invokespecials added.
+  int delta = cp_cache_delta();
+  if (delta > 0) {
+    int length = _patch_invokedynamic_bcps->length();
+    assert(length == _patch_invokedynamic_refs->length(),
+           "lengths should match");
+    for (int i = 0; i < length; i++) {
+      address p = _patch_invokedynamic_bcps->at(i);
+      int cache_index = ConstantPool::decode_invokedynamic_index(
+                          Bytes::get_native_u4(p));
+      Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index + delta));
+
+      // invokedynamic resolved references map also points to cp cache and must
+      // add delta to each.
+      int resolved_index = _patch_invokedynamic_refs->at(i);
+      for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
+        assert(_invokedynamic_references_map[resolved_index+entry] == cache_index,
+               "should be the same index");
+        _invokedynamic_references_map.at_put(resolved_index+entry,
+                                             cache_index + delta);
+      }
+    }
+  }
+}
+
 // Rewrite some ldc bytecodes to _fast_aldc
 void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
@@ -269,7 +329,7 @@
 
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(Method* method, bool reverse) {
+void Rewriter::scan_method(Method* method, bool reverse, TRAPS) {
 
   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
@@ -329,12 +389,25 @@
 #endif
           break;
         }
+
+        case Bytecodes::_invokespecial  : {
+          int offset = prefix_length + 1;
+          address p = bcp + offset;
+          int cp_index = Bytes::get_Java_u2(p);
+          // InterfaceMethodref
+          if (_pool->tag_at(cp_index).is_interface_method()) {
+            rewrite_invokespecial(bcp, offset, reverse, CHECK);
+          } else {
+            rewrite_member_reference(bcp, offset, reverse);
+          }
+          break;
+        }
+
         case Bytecodes::_getstatic      : // fall through
         case Bytecodes::_putstatic      : // fall through
         case Bytecodes::_getfield       : // fall through
         case Bytecodes::_putfield       : // fall through
         case Bytecodes::_invokevirtual  : // fall through
-        case Bytecodes::_invokespecial  : // fall through
         case Bytecodes::_invokestatic   :
         case Bytecodes::_invokeinterface:
         case Bytecodes::_invokehandle   : // if reverse=true
@@ -426,16 +499,21 @@
 
   for (int i = len-1; i >= 0; i--) {
     Method* method = _methods->at(i);
-    scan_method(method);
+    scan_method(method, false, CHECK);  // If you get an error here,
+                                        // there is no reversing bytecodes
   }
+
+  // May have to fix invokedynamic bytecodes if invokespecial/InterfaceMethodref
+  // entries had to be added.
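The patching pass above is plain index arithmetic. A hedged sketch of the invariant, with hypothetical numbers (not from this changeset):

// recorded_index was handed out during scanning as indy_slot + first_pass_limit;
// appending `delta` invokespecial entries slides the whole indy region up.
static int final_indy_cache_index(int recorded_index, int delta) {
  return recorded_index + delta;
}
// e.g. if the first pass created 10 cpCache entries (limit = 10) and 2
// invokespecial/InterfaceMethodref entries were appended (delta = 2), an
// invokedynamic recorded at index 10 (slot 0) ends up at 12, slot 1 at 13.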
+  patch_invokedynamic_bytecodes();
+
   // allocate constant pool cache, now that we've seen all the bytecodes
   make_constant_pool_cache(THREAD);
 
   // Restore bytecodes to their unrewritten state if there are exceptions
   // rewriting bytecodes or allocating the cpCache
   if (HAS_PENDING_EXCEPTION) {
-    restore_bytecodes();
+    restore_bytecodes(CATCH);
     return;
   }
@@ -452,7 +530,7 @@
   // relocating bytecodes. If some are relocated, that is ok because that
   // doesn't affect constant pool to cpCache rewriting.
   if (HAS_PENDING_EXCEPTION) {
-    restore_bytecodes();
+    restore_bytecodes(CATCH);
     return;
   }
   // Method might have gotten rewritten.
diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/rewriter.hpp
--- a/src/share/vm/interpreter/rewriter.hpp	Thu Nov 21 15:04:26 2013 +0100
+++ b/src/share/vm/interpreter/rewriter.hpp	Thu Nov 21 15:04:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -46,55 +46,102 @@
   intArray            _method_handle_invokers;
   int                 _resolved_reference_limit;
 
+  // For mapping invokedynamic bytecodes, which are discovered during method
+  // scanning. The invokedynamic entries are added at the end of the cpCache.
+  // If there are any invokespecial/InterfaceMethodref special case bytecodes,
+  // these entries are added before invokedynamic entries so that the
+  // invokespecial bytecode 16 bit index doesn't overflow.
+  intStack            _invokedynamic_cp_cache_map;
+
+  // For patching.
+  GrowableArray<address>* _patch_invokedynamic_bcps;
+  GrowableArray<int>*     _patch_invokedynamic_refs;
+
   void init_maps(int length) {
     _cp_map.initialize(length, -1);
     // Choose an initial value large enough that we don't get frequent
     // calls to grow().
-    _cp_cache_map.initialize(length / 2);
+    _cp_cache_map.initialize(length/2);
     // Also cache resolved objects, in another different cache.
     _reference_map.initialize(length, -1);
-    _resolved_references_map.initialize(length / 2);
-    _invokedynamic_references_map.initialize(length / 2);
+    _resolved_references_map.initialize(length/2);
+    _invokedynamic_references_map.initialize(length/2);
     _resolved_reference_limit = -1;
-    DEBUG_ONLY(_cp_cache_index_limit = -1);
+    _first_iteration_cp_cache_limit = -1;
+
+    // invokedynamic specific fields
+    _invokedynamic_cp_cache_map.initialize(length/4);
+    _patch_invokedynamic_bcps = new GrowableArray<address>(length/4);
+    _patch_invokedynamic_refs = new GrowableArray<int>(length/4);
   }
 
-  int _cp_cache_index_limit;
+  int _first_iteration_cp_cache_limit;
   void record_map_limits() {
-#ifdef ASSERT
-    // Record initial size of the two arrays generated for the CP cache:
-    _cp_cache_index_limit = _cp_cache_map.length();
-#endif //ASSERT
+    // Record initial size of the two arrays generated for the CP cache
+    // relative to walking the constant pool.
+    _first_iteration_cp_cache_limit = _cp_cache_map.length();
    _resolved_reference_limit = _resolved_references_map.length();
   }
 
+  int cp_cache_delta() {
+    // How many cp cache entries were added since recording map limits after
+    // cp cache initialization?
+    assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
+    return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
+  }
+
   int  cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; }
   bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
 
+  int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) {
+    assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
+    int cache_index = cp_cache_map->append(cp_index);
+    cp_map->at_put(cp_index, cache_index);
+    return cache_index;
+  }
+
   int add_cp_cache_entry(int cp_index) {
     assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
-    assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
-    assert(_cp_cache_index_limit == -1, "do not add cache entries after first iteration");
-    int cache_index = _cp_cache_map.append(cp_index);
-    _cp_map.at_put(cp_index, cache_index);
+    assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
+    int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
     assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
     assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
     return cache_index;
   }
 
-  // add a new CP cache entry beyond the normal cache (for invokedynamic only)
   int add_invokedynamic_cp_cache_entry(int cp_index) {
     assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version");
-    assert(_cp_map[cp_index] == -1, "do not map from cp_index");
-    assert(_cp_cache_index_limit >= 0, "add indy cache entries after first iteration");
+    assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration");
+    // add to the invokedynamic index map.
+    int cache_index = _invokedynamic_cp_cache_map.append(cp_index);
+    // do not update _cp_map, since the mapping is one-to-many
+    assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, "");
+    // this index starts at one but in the bytecode it's appended to the end.
+    return cache_index + _first_iteration_cp_cache_limit;
+  }
+
+  int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
+    int cp_index = _invokedynamic_cp_cache_map[cache_index];
+    return cp_index;
+  }
+
+  // add a new CP cache entry beyond the normal cache for the special case of
+  // invokespecial with InterfaceMethodref as cpool operand.
+  int add_invokespecial_cp_cache_entry(int cp_index) {
+    assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
+    // Don't add InterfaceMethodref if it already exists at the end.
+ for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) { + if (cp_cache_entry_pool_index(i) == cp_index) { + return i; + } + } int cache_index = _cp_cache_map.append(cp_index); - assert(cache_index >= _cp_cache_index_limit, ""); + assert(cache_index >= _first_iteration_cp_cache_limit, ""); // do not update _cp_map, since the mapping is one-to-many assert(cp_cache_entry_pool_index(cache_index) == cp_index, ""); return cache_index; } - // fix duplicated code later int cp_entry_to_resolved_references(int cp_index) const { assert(has_entry_in_resolved_references(cp_index), "oob"); return _reference_map[cp_index]; @@ -105,10 +152,7 @@ // add a new entry to the resolved_references map int add_resolved_references_entry(int cp_index) { - assert(_reference_map[cp_index] == -1, "not twice on same cp_index"); - assert(_resolved_reference_limit == -1, "do not add CP refs after first iteration"); - int ref_index = _resolved_references_map.append(cp_index); - _reference_map.at_put(cp_index, ref_index); + int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map); assert(cp_entry_to_resolved_references(cp_index) == ref_index, ""); return ref_index; } @@ -137,7 +181,7 @@ // Access the contents of _cp_cache_map to determine CP cache layout. int cp_cache_entry_pool_index(int cache_index) { int cp_index = _cp_cache_map[cache_index]; - return cp_index; + return cp_index; } // All the work goes in here: @@ -145,14 +189,18 @@ void compute_index_maps(); void make_constant_pool_cache(TRAPS); - void scan_method(Method* m, bool reverse = false); + void scan_method(Method* m, bool reverse, TRAPS); void rewrite_Object_init(methodHandle m, TRAPS); - void rewrite_member_reference(address bcp, int offset, bool reverse = false); - void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse = false); - void rewrite_invokedynamic(address bcp, int offset, bool reverse = false); - void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false); + void rewrite_member_reference(address bcp, int offset, bool reverse); + void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse); + void rewrite_invokedynamic(address bcp, int offset, bool reverse); + void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse); + void rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS); + + void patch_invokedynamic_bytecodes(); + // Revert bytecodes in case of an exception. 
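After rewriting, the cpCache has three regions: the first-iteration entries from walking the constant pool, then any appended invokespecial/InterfaceMethodref entries, then the invokedynamic entries. A small sketch of the dedup-append used for the middle region (plain std::vector instead of intStack; hypothetical helper name, not part of this changeset):

#include <vector>

// Append cp_index to the tail region [limit, end) unless it is already there,
// mirroring the linear scan in add_invokespecial_cp_cache_entry.
static int append_unique_tail(std::vector<int>& cache_map, int limit, int cp_index) {
  for (int i = limit; i < (int)cache_map.size(); i++) {
    if (cache_map[i] == cp_index) return i;  // reuse the existing entry
  }
  cache_map.push_back(cp_index);             // one-to-many: no cp_map update
  return (int)cache_map.size() - 1;
}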
- void restore_bytecodes(); + void restore_bytecodes(TRAPS); static methodHandle rewrite_jsrs(methodHandle m, TRAPS); public: diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/templateInterpreter.cpp --- a/src/share/vm/interpreter/templateInterpreter.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/templateInterpreter.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -184,8 +184,9 @@ EntryPoint TemplateInterpreter::_continuation_entry; EntryPoint TemplateInterpreter::_safept_entry; -address TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs]; -address TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs]; +address TemplateInterpreter::_invoke_return_entry[TemplateInterpreter::number_of_return_addrs]; +address TemplateInterpreter::_invokeinterface_return_entry[TemplateInterpreter::number_of_return_addrs]; +address TemplateInterpreter::_invokedynamic_return_entry[TemplateInterpreter::number_of_return_addrs]; DispatchTable TemplateInterpreter::_active_table; DispatchTable TemplateInterpreter::_normal_table; @@ -237,22 +238,37 @@ #endif // !PRODUCT { CodeletMark cm(_masm, "return entry points"); + const int index_size = sizeof(u2); for (int i = 0; i < Interpreter::number_of_return_entries; i++) { Interpreter::_return_entry[i] = EntryPoint( - generate_return_entry_for(itos, i), - generate_return_entry_for(itos, i), - generate_return_entry_for(itos, i), - generate_return_entry_for(atos, i), - generate_return_entry_for(itos, i), - generate_return_entry_for(ltos, i), - generate_return_entry_for(ftos, i), - generate_return_entry_for(dtos, i), - generate_return_entry_for(vtos, i) + generate_return_entry_for(itos, i, index_size), + generate_return_entry_for(itos, i, index_size), + generate_return_entry_for(itos, i, index_size), + generate_return_entry_for(atos, i, index_size), + generate_return_entry_for(itos, i, index_size), + generate_return_entry_for(ltos, i, index_size), + generate_return_entry_for(ftos, i, index_size), + generate_return_entry_for(dtos, i, index_size), + generate_return_entry_for(vtos, i, index_size) ); } } + { CodeletMark cm(_masm, "invoke return entry points"); + const TosState states[] = {itos, itos, itos, itos, ltos, ftos, dtos, atos, vtos}; + const int invoke_length = Bytecodes::length_for(Bytecodes::_invokestatic); + const int invokeinterface_length = Bytecodes::length_for(Bytecodes::_invokeinterface); + const int invokedynamic_length = Bytecodes::length_for(Bytecodes::_invokedynamic); + + for (int i = 0; i < Interpreter::number_of_return_addrs; i++) { + TosState state = states[i]; + Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2)); + Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2)); + Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4)); + } + } + { CodeletMark cm(_masm, "earlyret entry points"); Interpreter::_earlyret_entry = EntryPoint( @@ -298,13 +314,6 @@ } } - for (int j = 0; j < number_of_states; j++) { - const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos}; - int index = Interpreter::TosState_as_index(states[j]); - Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3); - Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5); - } - { CodeletMark cm(_masm, "continuation entry points"); 
Interpreter::_continuation_entry = EntryPoint( @@ -537,9 +546,46 @@ //------------------------------------------------------------------------------------------------------------------------ // Entry points -address TemplateInterpreter::return_entry(TosState state, int length) { +/** + * Returns the return entry table for the given invoke bytecode. + */ +address* TemplateInterpreter::invoke_return_entry_table_for(Bytecodes::Code code) { + switch (code) { + case Bytecodes::_invokestatic: + case Bytecodes::_invokespecial: + case Bytecodes::_invokevirtual: + case Bytecodes::_invokehandle: + return Interpreter::invoke_return_entry_table(); + case Bytecodes::_invokeinterface: + return Interpreter::invokeinterface_return_entry_table(); + case Bytecodes::_invokedynamic: + return Interpreter::invokedynamic_return_entry_table(); + default: + fatal(err_msg("invalid bytecode: %s", Bytecodes::name(code))); + return NULL; + } +} + +/** + * Returns the return entry address for the given top-of-stack state and bytecode. + */ +address TemplateInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) { guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length"); - return _return_entry[length].entry(state); + const int index = TosState_as_index(state); + switch (code) { + case Bytecodes::_invokestatic: + case Bytecodes::_invokespecial: + case Bytecodes::_invokevirtual: + case Bytecodes::_invokehandle: + return _invoke_return_entry[index]; + case Bytecodes::_invokeinterface: + return _invokeinterface_return_entry[index]; + case Bytecodes::_invokedynamic: + return _invokedynamic_return_entry[index]; + default: + assert(!Bytecodes::is_invoke(code), err_msg("invoke instructions should be handled separately: %s", Bytecodes::name(code))); + return _return_entry[length].entry(state); + } } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/templateInterpreter.hpp --- a/src/share/vm/interpreter/templateInterpreter.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/templateInterpreter.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -120,8 +120,9 @@ static EntryPoint _continuation_entry; static EntryPoint _safept_entry; - static address _return_3_addrs_by_index[number_of_return_addrs]; // for invokevirtual return entries - static address _return_5_addrs_by_index[number_of_return_addrs]; // for invokeinterface return entries + static address _invoke_return_entry[number_of_return_addrs]; // for invokestatic, invokespecial, invokevirtual return entries + static address _invokeinterface_return_entry[number_of_return_addrs]; // for invokeinterface return entries + static address _invokedynamic_return_entry[number_of_return_addrs]; // for invokedynamic return entries static DispatchTable _active_table; // the active dispatch table (used by the interpreter for dispatch) static DispatchTable _normal_table; // the normal dispatch table (used to set the active table in normal mode) @@ -161,12 +162,15 @@ static address* normal_table() { return _normal_table.table_for(); } // Support for invokes - static address* return_3_addrs_by_index_table() { return _return_3_addrs_by_index; } - static address* return_5_addrs_by_index_table() { return _return_5_addrs_by_index; } - static int TosState_as_index(TosState state); // computes index into return_3_entry_by_index table + static address* invoke_return_entry_table() { return _invoke_return_entry; } + static address* invokeinterface_return_entry_table() { return _invokeinterface_return_entry; } + static address* 
invokedynamic_return_entry_table() { return _invokedynamic_return_entry; } + static int TosState_as_index(TosState state); - static address return_entry (TosState state, int length); - static address deopt_entry (TosState state, int length); + static address* invoke_return_entry_table_for(Bytecodes::Code code); + + static address deopt_entry(TosState state, int length); + static address return_entry(TosState state, int length, Bytecodes::Code code); // Safepoint support static void notice_safepoints(); // stops the thread when reaching a safepoint diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/templateInterpreterGenerator.hpp --- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -53,7 +53,7 @@ address generate_ClassCastException_handler(); address generate_ArrayIndexOutOfBounds_handler(const char* name); address generate_continuation_for(TosState state); - address generate_return_entry_for(TosState state, int step); + address generate_return_entry_for(TosState state, int step, size_t index_size); address generate_earlyret_entry_for(TosState state); address generate_safept_entry_for(TosState state, address runtime_entry); void generate_throw_exception(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/interpreter/templateTable.hpp --- a/src/share/vm/interpreter/templateTable.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/interpreter/templateTable.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -28,11 +28,8 @@ #include "interpreter/bytecodes.hpp" #include "memory/allocation.hpp" #include "runtime/frame.hpp" -#ifdef TARGET_ARCH_MODEL_x86_32 -# include "interp_masm_x86_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_x86_64 -# include "interp_masm_x86_64.hpp" +#ifdef TARGET_ARCH_x86 +# include "interp_masm_x86.hpp" #endif #ifdef TARGET_ARCH_MODEL_sparc # include "interp_masm_sparc.hpp" diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/binaryTreeDictionary.cpp --- a/src/share/vm/memory/binaryTreeDictionary.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/binaryTreeDictionary.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -28,7 +28,6 @@ #include "memory/binaryTreeDictionary.hpp" #include "memory/freeList.hpp" #include "memory/freeBlockDictionary.hpp" -#include "memory/metablock.hpp" #include "memory/metachunk.hpp" #include "runtime/globals.hpp" #include "utilities/ostream.hpp" diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/collectorPolicy.cpp --- a/src/share/vm/memory/collectorPolicy.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/collectorPolicy.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -47,86 +47,107 @@ // CollectorPolicy methods. -// Align down. If the aligning result in 0, return 'alignment'. 
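The removed helper's contract (align down, but never below one alignment unit) is easy to state with numbers. A minimal sketch, assuming power-of-two alignments as elsewhere in this code (not part of this changeset):

#include <cstddef>

// Round size down to a multiple of alignment, with a floor of alignment itself.
static size_t restricted_align_down_sketch(size_t size, size_t alignment) {
  size_t down = size & ~(alignment - 1);
  return down < alignment ? alignment : down;
}
// e.g. alignment = 1M: 2500K -> 2M, 1500K -> 1M, 300K -> 1M (floor kicks in).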
-static size_t restricted_align_down(size_t size, size_t alignment) { - return MAX2(alignment, align_size_down_(size, alignment)); +CollectorPolicy::CollectorPolicy() : + _space_alignment(0), + _heap_alignment(0), + _initial_heap_byte_size(InitialHeapSize), + _max_heap_byte_size(MaxHeapSize), + _min_heap_byte_size(Arguments::min_heap_size()), + _max_heap_size_cmdline(false), + _size_policy(NULL), + _should_clear_all_soft_refs(false), + _all_soft_refs_clear(false) +{} + +#ifdef ASSERT +void CollectorPolicy::assert_flags() { + assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes"); + assert(InitialHeapSize % _heap_alignment == 0, "InitialHeapSize alignment"); + assert(MaxHeapSize % _heap_alignment == 0, "MaxHeapSize alignment"); } +void CollectorPolicy::assert_size_info() { + assert(InitialHeapSize == _initial_heap_byte_size, "Discrepancy between InitialHeapSize flag and local storage"); + assert(MaxHeapSize == _max_heap_byte_size, "Discrepancy between MaxHeapSize flag and local storage"); + assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes"); + assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes"); + assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes"); + assert(_min_heap_byte_size % _heap_alignment == 0, "min_heap_byte_size alignment"); + assert(_initial_heap_byte_size % _heap_alignment == 0, "initial_heap_byte_size alignment"); + assert(_max_heap_byte_size % _heap_alignment == 0, "max_heap_byte_size alignment"); +} +#endif // ASSERT + void CollectorPolicy::initialize_flags() { - assert(max_alignment() >= min_alignment(), - err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT, - max_alignment(), min_alignment())); - assert(max_alignment() % min_alignment() == 0, - err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT, - max_alignment(), min_alignment())); + assert(_space_alignment != 0, "Space alignment not set up properly"); + assert(_heap_alignment != 0, "Heap alignment not set up properly"); + assert(_heap_alignment >= _space_alignment, + err_msg("heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT, + _heap_alignment, _space_alignment)); + assert(_heap_alignment % _space_alignment == 0, + err_msg("heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT, + _heap_alignment, _space_alignment)); - if (MaxHeapSize < InitialHeapSize) { - vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); + if (FLAG_IS_CMDLINE(MaxHeapSize)) { + if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { + vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size"); + } + if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) { + vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); + } + _max_heap_size_cmdline = true; } - // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will - // override if MaxMetaspaceSize was set on the command line or not. - // This information is needed later to conform to the specification of the - // java.lang.management.MemoryUsage API. 
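The new assert_flags/assert_size_info pattern encodes a simple invariant chain. A hedged sketch of those checks as a standalone function, with hypothetical values in mind (space 64K, heap 2M: each coarser alignment must be a multiple of the finer one; not part of this changeset):

#include <cassert>
#include <cstddef>

static void check_alignment_chain(size_t space_alignment, size_t heap_alignment,
                                  size_t initial_heap, size_t max_heap) {
  assert(heap_alignment >= space_alignment && heap_alignment % space_alignment == 0
         && "coarser alignment must be a multiple of the finer one");
  assert(initial_heap <= max_heap && "initial heap larger than max heap");
  assert(initial_heap % heap_alignment == 0 && max_heap % heap_alignment == 0
         && "heap sizes must be heap-aligned");
}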
- // - // Ideally, we would be able to set the default value of MaxMetaspaceSize in - // globals.hpp to the aligned value, but this is not possible, since the - // alignment depends on other flags being parsed. - MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment()); - - if (MetaspaceSize > MaxMetaspaceSize) { - MetaspaceSize = MaxMetaspaceSize; + // Check heap parameter properties + if (InitialHeapSize < M) { + vm_exit_during_initialization("Too small initial heap"); + } + if (_min_heap_byte_size < M) { + vm_exit_during_initialization("Too small minimum heap"); } - MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment()); + // User inputs from -Xmx and -Xms must be aligned + _min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment); + uintx aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment); + uintx aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment); - assert(MetaspaceSize <= MaxMetaspaceSize, "Must be"); - - MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment()); - MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment()); + // Write back to flags if the values changed + if (aligned_initial_heap_size != InitialHeapSize) { + FLAG_SET_ERGO(uintx, InitialHeapSize, aligned_initial_heap_size); + } + if (aligned_max_heap_size != MaxHeapSize) { + FLAG_SET_ERGO(uintx, MaxHeapSize, aligned_max_heap_size); + } - MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment()); + if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 && + InitialHeapSize < _min_heap_byte_size) { + vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); + } + if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { + FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize); + } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) { + FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize); + if (InitialHeapSize < _min_heap_byte_size) { + _min_heap_byte_size = InitialHeapSize; + } + } - assert(MetaspaceSize % min_alignment() == 0, "metapace alignment"); - assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment"); - if (MetaspaceSize < 256*K) { - vm_exit_during_initialization("Too small initial Metaspace size"); - } + _initial_heap_byte_size = InitialHeapSize; + _max_heap_byte_size = MaxHeapSize; + + FLAG_SET_ERGO(uintx, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment)); + + DEBUG_ONLY(CollectorPolicy::assert_flags();) } void CollectorPolicy::initialize_size_info() { - // User inputs from -mx and ms must be aligned - set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment())); - set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment())); - set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment())); - - // Check heap parameter properties - if (initial_heap_byte_size() < M) { - vm_exit_during_initialization("Too small initial heap"); - } - // Check heap parameter properties - if (min_heap_byte_size() < M) { - vm_exit_during_initialization("Too small minimum heap"); - } - if (initial_heap_byte_size() <= NewSize) { - // make sure there is at least some room in old space - vm_exit_during_initialization("Too small initial heap for new size specified"); - } - if (max_heap_byte_size() < min_heap_byte_size()) { - vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); - } - if 
(initial_heap_byte_size() < min_heap_byte_size()) { - vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); - } - if (max_heap_byte_size() < initial_heap_byte_size()) { - vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); - } - if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap " SIZE_FORMAT " Maximum heap " SIZE_FORMAT, - min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size()); + _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size); } + + DEBUG_ONLY(CollectorPolicy::assert_size_info();) } bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) { @@ -137,7 +158,6 @@ GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap, int max_covered_regions) { - assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name"); return new CardTableRS(whole_heap, max_covered_regions); } @@ -151,7 +171,7 @@ _all_soft_refs_clear = true; } -size_t CollectorPolicy::compute_max_alignment() { +size_t CollectorPolicy::compute_heap_alignment() { // The card marking array and the offset arrays for old generations are // committed in os pages as well. Make sure they are entirely full (to // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1 @@ -178,18 +198,21 @@ // GenCollectorPolicy methods. +GenCollectorPolicy::GenCollectorPolicy() : + _min_gen0_size(0), + _initial_gen0_size(0), + _max_gen0_size(0), + _gen_alignment(0), + _generations(NULL) +{} + size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { - size_t x = base_size / (NewRatio+1); - size_t new_gen_size = x > min_alignment() ? - align_size_down(x, min_alignment()) : - min_alignment(); - return new_gen_size; + return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment); } size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size, size_t maximum_size) { - size_t alignment = min_alignment(); - size_t max_minus = maximum_size - alignment; + size_t max_minus = maximum_size - _gen_alignment; return desired_size < max_minus ? 
desired_size : max_minus; } @@ -197,7 +220,7 @@ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size) { - const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; + const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0; _size_policy = new AdaptiveSizePolicy(init_eden_size, init_promo_size, init_survivor_size, @@ -205,100 +228,181 @@ GCTimeRatio); } +size_t GenCollectorPolicy::young_gen_size_lower_bound() { + // The young generation must be aligned and have room for eden + two survivors + return align_size_up(3 * _space_alignment, _gen_alignment); +} + +#ifdef ASSERT +void GenCollectorPolicy::assert_flags() { + CollectorPolicy::assert_flags(); + assert(NewSize >= _min_gen0_size, "Ergonomics decided on a too small young gen size"); + assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); + assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes"); + assert(NewSize % _gen_alignment == 0, "NewSize alignment"); + assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _gen_alignment == 0, "MaxNewSize alignment"); +} + +void TwoGenerationCollectorPolicy::assert_flags() { + GenCollectorPolicy::assert_flags(); + assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes"); + assert(OldSize % _gen_alignment == 0, "OldSize alignment"); +} + +void GenCollectorPolicy::assert_size_info() { + CollectorPolicy::assert_size_info(); + // GenCollectorPolicy::initialize_size_info may update the MaxNewSize + assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes"); + assert(NewSize == _initial_gen0_size, "Discrepancy between NewSize flag and local storage"); + assert(MaxNewSize == _max_gen0_size, "Discrepancy between MaxNewSize flag and local storage"); + assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes"); + assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes"); + assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment"); + assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment"); + assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment"); +} + +void TwoGenerationCollectorPolicy::assert_size_info() { + GenCollectorPolicy::assert_size_info(); + assert(OldSize == _initial_gen1_size, "Discrepancy between OldSize flag and local storage"); + assert(_min_gen1_size <= _initial_gen1_size, "Ergonomics decided on incompatible minimum and initial old gen sizes"); + assert(_initial_gen1_size <= _max_gen1_size, "Ergonomics decided on incompatible initial and maximum old gen sizes"); + assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment"); + assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment"); + assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes"); +} +#endif // ASSERT + void GenCollectorPolicy::initialize_flags() { - // All sizes must be multiples of the generation granularity. - set_min_alignment((uintx) Generation::GenGrain); - set_max_alignment(compute_max_alignment()); - CollectorPolicy::initialize_flags(); - // All generational heaps have a youngest gen; handle those flags here. 
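young_gen_size_lower_bound above reserves room for an eden plus two survivor spaces. A worked sketch with hypothetical alignments (space 64K, gen 256K: 3 * 64K = 192K, rounded up to the 256K generation alignment gives 256K; power-of-two alignments assumed, not part of this changeset):

#include <cstddef>

static size_t young_lower_bound_sketch(size_t space_alignment, size_t gen_alignment) {
  size_t need = 3 * space_alignment;                        // eden + two survivors
  return (need + gen_alignment - 1) & ~(gen_alignment - 1); // align_size_up
}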
+  assert(_gen_alignment != 0, "Generation alignment not set up properly");
+  assert(_heap_alignment >= _gen_alignment,
+         err_msg("heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
+                 _heap_alignment, _gen_alignment));
+  assert(_gen_alignment % _space_alignment == 0,
+         err_msg("gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
+                 _gen_alignment, _space_alignment));
+  assert(_heap_alignment % _gen_alignment == 0,
+         err_msg("heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
+                 _heap_alignment, _gen_alignment));
+
+  // All generational heaps have a youngest gen; handle those flags here
 
-  // Adjust max size parameters
-  if (NewSize > MaxNewSize) {
-    MaxNewSize = NewSize;
+  // Make sure the heap is large enough for two generations
+  uintx smallest_new_size = young_gen_size_lower_bound();
+  uintx smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment),
+                                           _heap_alignment);
+  if (MaxHeapSize < smallest_heap_size) {
+    FLAG_SET_ERGO(uintx, MaxHeapSize, smallest_heap_size);
+    _max_heap_byte_size = MaxHeapSize;
   }
-  NewSize = align_size_down(NewSize, min_alignment());
-  MaxNewSize = align_size_down(MaxNewSize, min_alignment());
+  // If needed, synchronize _min_heap_byte_size and _initial_heap_byte_size
+  if (_min_heap_byte_size < smallest_heap_size) {
+    _min_heap_byte_size = smallest_heap_size;
+    if (InitialHeapSize < _min_heap_byte_size) {
+      FLAG_SET_ERGO(uintx, InitialHeapSize, smallest_heap_size);
+      _initial_heap_byte_size = smallest_heap_size;
+    }
+  }
+
+  // Now take the actual NewSize into account. We will silently increase NewSize
+  // if the user specified a smaller value.
+  smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
+  if (smallest_new_size != NewSize) {
+    FLAG_SET_ERGO(uintx, NewSize, smallest_new_size);
+  }
+  _initial_gen0_size = NewSize;
 
-  // Check validity of heap flags
-  assert(NewSize % min_alignment() == 0, "eden space alignment");
-  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");
+  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
+    uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size);
 
-  if (NewSize < 3*min_alignment()) {
-    // make sure there room for eden and two survivor spaces
-    vm_exit_during_initialization("Too small new size specified");
+    if (MaxNewSize >= MaxHeapSize) {
+      // Make sure there is room for an old generation
+      uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
+      if (FLAG_IS_CMDLINE(MaxNewSize)) {
+        warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
+                "heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.",
+                MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
+      }
+      FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size);
+      if (NewSize > MaxNewSize) {
+        FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
+        _initial_gen0_size = NewSize;
+      }
+    } else if (MaxNewSize < min_new_size) {
+      FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size);
+    } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
+      FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
+    }
+    _max_gen0_size = MaxNewSize;
+  }
+
+  if (NewSize > MaxNewSize) {
+    // At this point this should only happen if the user specifies a large NewSize and/or
+    // a small (but not too small) MaxNewSize.
+    if (FLAG_IS_CMDLINE(MaxNewSize)) {
+      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k).
" + "A new max generation size of " SIZE_FORMAT "k will be used.", + NewSize/K, MaxNewSize/K, NewSize/K); + } + FLAG_SET_ERGO(uintx, MaxNewSize, NewSize); + _max_gen0_size = MaxNewSize; + } + if (SurvivorRatio < 1 || NewRatio < 1) { - vm_exit_during_initialization("Invalid heap ratio specified"); + vm_exit_during_initialization("Invalid young gen ratio specified"); } + + DEBUG_ONLY(GenCollectorPolicy::assert_flags();) } void TwoGenerationCollectorPolicy::initialize_flags() { GenCollectorPolicy::initialize_flags(); - OldSize = align_size_down(OldSize, min_alignment()); + if (!is_size_aligned(OldSize, _gen_alignment)) { + FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment)); + } - if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) { + if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) { // NewRatio will be used later to set the young generation size so we use // it to calculate how big the heap should be based on the requested OldSize // and NewRatio. assert(NewRatio > 0, "NewRatio should have been set up earlier"); size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1); - calculated_heapsize = align_size_up(calculated_heapsize, max_alignment()); - MaxHeapSize = calculated_heapsize; - InitialHeapSize = calculated_heapsize; + calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment); + FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize); + _max_heap_byte_size = MaxHeapSize; + FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize); + _initial_heap_byte_size = InitialHeapSize; } - MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); // adjust max heap size if necessary if (NewSize + OldSize > MaxHeapSize) { - if (FLAG_IS_CMDLINE(MaxHeapSize)) { + if (_max_heap_size_cmdline) { // somebody set a maximum heap size with the intention that we should not // exceed it. Adjust New/OldSize as necessary. uintx calculated_size = NewSize + OldSize; double shrink_factor = (double) MaxHeapSize / calculated_size; - // align - NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment()); + uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment); + FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size)); + _initial_gen0_size = NewSize; + // OldSize is already aligned because above we aligned MaxHeapSize to - // max_alignment(), and we just made sure that NewSize is aligned to - // min_alignment(). In initialize_flags() we verified that max_alignment() - // is a multiple of min_alignment(). - OldSize = MaxHeapSize - NewSize; + // _heap_alignment, and we just made sure that NewSize is aligned to + // _gen_alignment. In initialize_flags() we verified that _heap_alignment + // is a multiple of _gen_alignment. + FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize); } else { - MaxHeapSize = NewSize + OldSize; + FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment)); + _max_heap_byte_size = MaxHeapSize; } } - // need to do this again - MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); - - // adjust max heap size if necessary - if (NewSize + OldSize > MaxHeapSize) { - if (FLAG_IS_CMDLINE(MaxHeapSize)) { - // somebody set a maximum heap size with the intention that we should not - // exceed it. Adjust New/OldSize as necessary. 
- uintx calculated_size = NewSize + OldSize; - double shrink_factor = (double) MaxHeapSize / calculated_size; - // align - NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment()); - // OldSize is already aligned because above we aligned MaxHeapSize to - // max_alignment(), and we just made sure that NewSize is aligned to - // min_alignment(). In initialize_flags() we verified that max_alignment() - // is a multiple of min_alignment(). - OldSize = MaxHeapSize - NewSize; - } else { - MaxHeapSize = NewSize + OldSize; - } - } - // need to do this again - MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); always_do_update_barrier = UseConcMarkSweepGC; - // Check validity of heap flags - assert(OldSize % min_alignment() == 0, "old space alignment"); - assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment"); + DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();) } // Values set on the command line win over any ergonomically @@ -313,7 +417,7 @@ void GenCollectorPolicy::initialize_size_info() { CollectorPolicy::initialize_size_info(); - // min_alignment() is used for alignment within a generation. + // _space_alignment is used for alignment within a generation. // There is additional alignment done down stream for some // collectors that sometimes causes unwanted rounding up of // generations sizes. @@ -321,37 +425,10 @@ // Determine maximum size of gen0 size_t max_new_size = 0; - if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) { - if (MaxNewSize < min_alignment()) { - max_new_size = min_alignment(); - } - if (MaxNewSize >= max_heap_byte_size()) { - max_new_size = align_size_down(max_heap_byte_size() - min_alignment(), - min_alignment()); - warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or " - "greater than the entire heap (" SIZE_FORMAT "k). A " - "new generation size of " SIZE_FORMAT "k will be used.", - MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K); - } else { - max_new_size = align_size_down(MaxNewSize, min_alignment()); - } - - // The case for FLAG_IS_ERGO(MaxNewSize) could be treated - // specially at this point to just use an ergonomically set - // MaxNewSize to set max_new_size. For cases with small - // heaps such a policy often did not work because the MaxNewSize - // was larger than the entire heap. The interpretation given - // to ergonomically set flags is that the flags are set - // by different collectors for their own special needs but - // are not allowed to badly shape the heap. This allows the - // different collectors to decide what's best for themselves - // without having to factor in the overall heap shape. It - // can be the case in the future that the collectors would - // only make "wise" ergonomics choices and this policy could - // just accept those choices. The choices currently made are - // not always "wise". + if (!FLAG_IS_DEFAULT(MaxNewSize)) { + max_new_size = MaxNewSize; } else { - max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size()); + max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size); // Bound the maximum size by NewSize below (since it historically // would have been NewSize and because the NewRatio calculation could // yield a size that is too small) and bound it by MaxNewSize above. @@ -364,13 +441,13 @@ // Given the maximum gen0 size, determine the initial and // minimum gen0 sizes. 
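The gen0 sizes below are derived with scale_by_NewRatio_aligned, i.e. heap / (NewRatio + 1) aligned down with a floor of one generation unit. A worked sketch (hypothetical numbers: heap = 1024M, NewRatio = 2 gives about 341M, aligned down to the generation alignment; not part of this changeset):

#include <cstddef>

static size_t scale_by_new_ratio_sketch(size_t base_size, unsigned new_ratio,
                                        size_t gen_alignment) {
  size_t x = base_size / (new_ratio + 1);
  size_t down = x & ~(gen_alignment - 1);              // align_size_down
  return down < gen_alignment ? gen_alignment : down;  // bounded below
}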
- if (max_heap_byte_size() == min_heap_byte_size()) { + if (_max_heap_byte_size == _min_heap_byte_size) { // The maximum and minimum heap sizes are the same so // the generations minimum and initial must be the // same as its maximum. - set_min_gen0_size(max_new_size); - set_initial_gen0_size(max_new_size); - set_max_gen0_size(max_new_size); + _min_gen0_size = max_new_size; + _initial_gen0_size = max_new_size; + _max_gen0_size = max_new_size; } else { size_t desired_new_size = 0; if (!FLAG_IS_DEFAULT(NewSize)) { @@ -391,44 +468,49 @@ // Use the default NewSize as the floor for these values. If // NewRatio is overly large, the resulting sizes can be too // small. - _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()), - NewSize); + _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize); desired_new_size = - MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()), - NewSize); + MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize); } assert(_min_gen0_size > 0, "Sanity check"); - set_initial_gen0_size(desired_new_size); - set_max_gen0_size(max_new_size); + _initial_gen0_size = desired_new_size; + _max_gen0_size = max_new_size; // At this point the desirable initial and minimum sizes have been // determined without regard to the maximum sizes. // Bound the sizes by the corresponding overall heap sizes. - set_min_gen0_size( - bound_minus_alignment(_min_gen0_size, min_heap_byte_size())); - set_initial_gen0_size( - bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size())); - set_max_gen0_size( - bound_minus_alignment(_max_gen0_size, max_heap_byte_size())); + _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size); + _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size); + _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size); // At this point all three sizes have been checked against the // maximum sizes but have not been checked for consistency // among the three. // Final check min <= initial <= max - set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size)); - set_initial_gen0_size( - MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size)); - set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size)); + _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size); + _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size); + _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size); + } + + // Write back to flags if necessary + if (NewSize != _initial_gen0_size) { + FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size); + } + + if (MaxNewSize != _max_gen0_size) { + FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size); } if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, - min_gen0_size(), initial_gen0_size(), max_gen0_size()); + _min_gen0_size, _initial_gen0_size, _max_gen0_size); } + + DEBUG_ONLY(GenCollectorPolicy::assert_size_info();) } // Call this method during the sizing of the gen1 to make @@ -441,25 +523,18 @@ // keeping it simple also seems a worthwhile goal. 
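// An invented call of the simplified adjust_gen0_sizes(), whose updated
// definition follows:
//   heap_size = 96M, *gen0_size_ptr = 64M, *gen1_size_ptr = 48M,
//   _min_gen1_size = 48M, with the young lower bound well below 48M.
// Since 64M + 48M > 96M and 96M >= 48M + smallest_new_size, gen0 shrinks:
//   *gen0_size_ptr = align_size_down_bounded(96M - 48M, _gen_alignment) = 48M
// and the method returns true to report that gen0 was adjusted.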
 bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                      size_t* gen1_size_ptr,
-                                                     const size_t heap_size,
-                                                     const size_t min_gen1_size) {
+                                                     const size_t heap_size) {
   bool result = false;
 
-  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
-    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
-        (heap_size >= min_gen1_size + min_alignment())) {
-      // Adjust gen0 down to accommodate min_gen1_size
-      *gen0_size_ptr = heap_size - min_gen1_size;
-      *gen0_size_ptr =
-        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
-             min_alignment());
-      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
+  if ((*gen0_size_ptr + *gen1_size_ptr) > heap_size) {
+    uintx smallest_new_size = young_gen_size_lower_bound();
+    if ((heap_size < (*gen0_size_ptr + _min_gen1_size)) &&
+        (heap_size >= _min_gen1_size + smallest_new_size)) {
+      // Adjust gen0 down to accommodate _min_gen1_size
+      *gen0_size_ptr = align_size_down_bounded(heap_size - _min_gen1_size, _gen_alignment);
       result = true;
     } else {
-      *gen1_size_ptr = heap_size - *gen0_size_ptr;
-      *gen1_size_ptr =
-        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
-             min_alignment());
+      *gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _gen_alignment);
     }
   }
   return result;
@@ -480,83 +555,87 @@
   // The maximum gen1 size can be determined from the maximum gen0
   // and maximum heap size since no explicit flags exist
   // for setting the gen1 maximum.
-  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
-  _max_gen1_size =
-    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
-         min_alignment());
+  _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment);
+
   // If no explicit command line flag has been set for the
   // gen1 size, use what is left for gen1.
-  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
-    // The user has not specified any value or ergonomics
-    // has chosen a value (which may or may not be consistent
+  if (!FLAG_IS_CMDLINE(OldSize)) {
+    // The user has not specified any value but the ergonomics
+    // may have chosen a value (which may or may not be consistent
     // with the overall heap size).  In either case make
     // the minimum, maximum and initial sizes consistent
     // with the gen0 sizes and the overall heap sizes.
-    assert(min_heap_byte_size() > _min_gen0_size,
-           "gen0 has an unexpected minimum size");
-    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
-    set_min_gen1_size(
-      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
-           min_alignment()));
-    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
-    set_initial_gen1_size(
-      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
-           min_alignment()));
-
+    _min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment);
+    _initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment);
+    // _max_gen1_size has already been made consistent above
+    FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
   } else {
     // It's been explicitly set on the command line.  Use the
     // OldSize and then determine the consequences.
-    set_min_gen1_size(OldSize);
-    set_initial_gen1_size(OldSize);
+    _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
+    _initial_gen1_size = OldSize;
 
     // If the user has explicitly set an OldSize that is inconsistent
     // with other command line flags, issue a warning.
     // The generation minimums and the overall heap minimum should
-    // be within one heap alignment.
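// Invented numbers for the default-OldSize branch above: with a minimum
// heap of 64M, an initial heap of 128M, _min_gen0_size = 20M and
// _initial_gen0_size = 40M, the old generation gets
//   _min_gen1_size     = MAX2(64M - 20M, _gen_alignment)  = 44M
//   _initial_gen1_size = MAX2(128M - 40M, _gen_alignment) = 88M
// and OldSize is written back ergonomically as 88M.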
- if ((_min_gen1_size + _min_gen0_size + min_alignment()) < - min_heap_byte_size()) { + // be within one generation alignment. + if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) { warning("Inconsistency between minimum heap size and minimum " - "generation sizes: using minimum heap = " SIZE_FORMAT, - min_heap_byte_size()); + "generation sizes: using minimum heap = " SIZE_FORMAT, + _min_heap_byte_size); } - if ((OldSize > _max_gen1_size)) { + if (OldSize > _max_gen1_size) { warning("Inconsistency between maximum heap size and maximum " - "generation sizes: using maximum heap = " SIZE_FORMAT - " -XX:OldSize flag is being ignored", - max_heap_byte_size()); + "generation sizes: using maximum heap = " SIZE_FORMAT + " -XX:OldSize flag is being ignored", + _max_heap_byte_size); } // If there is an inconsistency between the OldSize and the minimum and/or // initial size of gen0, since OldSize was explicitly set, OldSize wins. - if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, - min_heap_byte_size(), OldSize)) { + if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, _min_heap_byte_size)) { if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, - min_gen0_size(), initial_gen0_size(), max_gen0_size()); + _min_gen0_size, _initial_gen0_size, _max_gen0_size); } } // Initial size if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, - initial_heap_byte_size(), OldSize)) { + _initial_heap_byte_size)) { if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 " SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, - min_gen0_size(), initial_gen0_size(), max_gen0_size()); + _min_gen0_size, _initial_gen0_size, _max_gen0_size); } } } // Enforce the maximum gen1 size. 
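// For example (invented flags), -XX:MaxHeapSize=512m -XX:OldSize=1g leaves
// OldSize larger than _max_gen1_size, so the second warning above fires and
// the explicit -XX:OldSize is effectively ignored in favor of sizes derived
// from the maximum heap.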
-  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));
+  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
 
   // Check that min gen1 <= initial gen1 <= max gen1
-  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
-  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));
+  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
+  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
+
+  // Write back to flags if necessary
+  if (NewSize != _initial_gen0_size) {
+    FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
+  }
+
+  if (MaxNewSize != _max_gen0_size) {
+    FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size);
+  }
+
+  if (OldSize != _initial_gen1_size) {
+    FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
+  }
 
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 " SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
-      min_gen1_size(), initial_gen1_size(), max_gen1_size());
+      _min_gen1_size, _initial_gen1_size, _max_gen1_size);
   }
+
+  DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_size_info();)
 }
 
 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
@@ -650,9 +729,7 @@
       gc_count_before = Universe::heap()->total_collections();
     }
 
-    VM_GenCollectForAllocation op(size,
-                                  is_tlab,
-                                  gc_count_before);
+    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
     VMThread::execute(&op);
     if (op.prologue_succeeded()) {
       result = op.result();
@@ -881,14 +958,16 @@
 // MarkSweepPolicy methods
 //
 
-MarkSweepPolicy::MarkSweepPolicy() {
-  initialize_all();
+void MarkSweepPolicy::initialize_alignments() {
+  _space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
+  _heap_alignment = compute_heap_alignment();
 }
 
 void MarkSweepPolicy::initialize_generations() {
   _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
-  if (_generations == NULL)
+  if (_generations == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
+  }
 
   if (UseParNewGC) {
     _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
@@ -897,8 +976,9 @@
   }
   _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);
 
-  if (_generations[0] == NULL || _generations[1] == NULL)
+  if (_generations[0] == NULL || _generations[1] == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
+  }
 }
 
 void MarkSweepPolicy::initialize_gc_policy_counters() {
diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/collectorPolicy.hpp
--- a/src/share/vm/memory/collectorPolicy.hpp	Thu Nov 21 15:04:26 2013 +0100
+++ b/src/share/vm/memory/collectorPolicy.hpp	Thu Nov 21 15:04:54 2013 +0100
@@ -61,17 +61,23 @@
 protected:
   GCPolicyCounters* _gc_policy_counters;
 
-  // Requires that the concrete subclass sets the alignment constraints
-  // before calling.
+  virtual void initialize_alignments() = 0;
   virtual void initialize_flags();
   virtual void initialize_size_info();
+
+  DEBUG_ONLY(virtual void assert_flags();)
+  DEBUG_ONLY(virtual void assert_size_info();)
+
   size_t _initial_heap_byte_size;
   size_t _max_heap_byte_size;
   size_t _min_heap_byte_size;
 
-  size_t _min_alignment;
-  size_t _max_alignment;
+  size_t _space_alignment;
+  size_t _heap_alignment;
+
+  // Needed to keep track of whether MaxHeapSize was set on the command line
+  // when the flag value is aligned etc. by ergonomics
+  bool _max_heap_size_cmdline;
 
   // The sizing of the heap is controlled by a sizing policy.
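// Why the cached boolean matters (assuming HotSpot's usual flag-origin
// bookkeeping): once ergonomics re-aligns the value with
//   FLAG_SET_ERGO(uintx, MaxHeapSize, aligned_value);
// the flag's origin becomes ERGONOMIC, so a later FLAG_IS_CMDLINE(MaxHeapSize)
// returns false even though the user did pass -Xmx; _max_heap_size_cmdline
// preserves that fact for initialize_flags() above.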
AdaptiveSizePolicy* _size_policy; @@ -79,6 +85,7 @@ // Set to true when policy wants soft refs cleared. // Reset to false by gc after it clears all soft refs. bool _should_clear_all_soft_refs; + // Set to true by the GC if the just-completed gc cleared all // softrefs. This is set to true whenever a gc clears all softrefs, and // set to false each time gc returns to the mutator. For example, in the @@ -86,32 +93,24 @@ // mem_allocate() where it returns op.result() bool _all_soft_refs_clear; - CollectorPolicy() : - _min_alignment(1), - _max_alignment(1), - _initial_heap_byte_size(0), - _max_heap_byte_size(0), - _min_heap_byte_size(0), - _size_policy(NULL), - _should_clear_all_soft_refs(false), - _all_soft_refs_clear(false) - {} + CollectorPolicy(); public: - // Return maximum heap alignment that may be imposed by the policy - static size_t compute_max_alignment(); + virtual void initialize_all() { + initialize_alignments(); + initialize_flags(); + initialize_size_info(); + } - void set_min_alignment(size_t align) { _min_alignment = align; } - size_t min_alignment() { return _min_alignment; } - void set_max_alignment(size_t align) { _max_alignment = align; } - size_t max_alignment() { return _max_alignment; } + // Return maximum heap alignment that may be imposed by the policy + static size_t compute_heap_alignment(); + + size_t space_alignment() { return _space_alignment; } + size_t heap_alignment() { return _heap_alignment; } size_t initial_heap_byte_size() { return _initial_heap_byte_size; } - void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; } size_t max_heap_byte_size() { return _max_heap_byte_size; } - void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; } size_t min_heap_byte_size() { return _min_heap_byte_size; } - void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; } enum Name { CollectorPolicyKind, @@ -156,7 +155,6 @@ virtual BarrierSet::Name barrier_set_name() = 0; - virtual GenRemSet::Name rem_set_name() = 0; // Create the remembered set (to cover the given reserved region, // allowing breaking up into at most "max_covered_regions"). @@ -200,6 +198,9 @@ return false; } + // Do any updates required to global flags that are due to heap initialization + // changes + virtual void post_heap_initialize() = 0; }; class ClearedAllSoftRefs : public StackObj { @@ -224,6 +225,10 @@ size_t _initial_gen0_size; size_t _max_gen0_size; + // _gen_alignment and _space_alignment will have the same value most of the + // time. When using large pages they can differ. + size_t _gen_alignment; + GenerationSpec **_generations; // Return true if an allocation should be attempted in the older @@ -234,44 +239,50 @@ void initialize_flags(); void initialize_size_info(); + DEBUG_ONLY(void assert_flags();) + DEBUG_ONLY(void assert_size_info();) + // Try to allocate space by expanding the heap. virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab); - // Scale the base_size by NewRation according to + // Compute max heap alignment + size_t compute_max_alignment(); + + // Scale the base_size by NewRatio according to // result = base_size / (NewRatio + 1) // and align by min_alignment() size_t scale_by_NewRatio_aligned(size_t base_size); - // Bound the value by the given maximum minus the - // min_alignment. 
+ // Bound the value by the given maximum minus the min_alignment size_t bound_minus_alignment(size_t desired_size, size_t maximum_size); public: + GenCollectorPolicy(); + // Accessors - size_t min_gen0_size() { return _min_gen0_size; } - void set_min_gen0_size(size_t v) { _min_gen0_size = v; } + size_t min_gen0_size() { return _min_gen0_size; } size_t initial_gen0_size() { return _initial_gen0_size; } - void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; } - size_t max_gen0_size() { return _max_gen0_size; } - void set_max_gen0_size(size_t v) { _max_gen0_size = v; } + size_t max_gen0_size() { return _max_gen0_size; } + size_t gen_alignment() { return _gen_alignment; } virtual int number_of_generations() = 0; - virtual GenerationSpec **generations() { + virtual GenerationSpec **generations() { assert(_generations != NULL, "Sanity check"); return _generations; } virtual GenCollectorPolicy* as_generation_policy() { return this; } - virtual void initialize_generations() = 0; + virtual void initialize_generations() { }; virtual void initialize_all() { - initialize_flags(); - initialize_size_info(); + CollectorPolicy::initialize_all(); initialize_generations(); } + size_t young_gen_size_lower_bound(); + HeapWord* mem_allocate_work(size_t size, bool is_tlab, bool* gc_overhead_limit_was_exceeded); @@ -282,6 +293,10 @@ virtual void initialize_size_policy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size); + + virtual void post_heap_initialize() { + assert(_max_gen0_size == MaxNewSize, "Should be taken care of by initialize_size_info"); + } }; // All of hotspot's current collectors are subtypes of this @@ -298,39 +313,41 @@ void initialize_flags(); void initialize_size_info(); - void initialize_generations() { ShouldNotReachHere(); } + + DEBUG_ONLY(void assert_flags();) + DEBUG_ONLY(void assert_size_info();) public: + TwoGenerationCollectorPolicy() : GenCollectorPolicy(), _min_gen1_size(0), + _initial_gen1_size(0), _max_gen1_size(0) {} + // Accessors - size_t min_gen1_size() { return _min_gen1_size; } - void set_min_gen1_size(size_t v) { _min_gen1_size = v; } + size_t min_gen1_size() { return _min_gen1_size; } size_t initial_gen1_size() { return _initial_gen1_size; } - void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; } - size_t max_gen1_size() { return _max_gen1_size; } - void set_max_gen1_size(size_t v) { _max_gen1_size = v; } + size_t max_gen1_size() { return _max_gen1_size; } // Inherited methods TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; } - int number_of_generations() { return 2; } - BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; } - GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; } + int number_of_generations() { return 2; } + BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; } virtual CollectorPolicy::Name kind() { return CollectorPolicy::TwoGenerationCollectorPolicyKind; } - // Returns true is gen0 sizes were adjusted + // Returns true if gen0 sizes were adjusted bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr, - const size_t heap_size, const size_t min_gen1_size); + const size_t heap_size); }; class MarkSweepPolicy : public TwoGenerationCollectorPolicy { protected: + void initialize_alignments(); void initialize_generations(); public: - MarkSweepPolicy(); + MarkSweepPolicy() {} MarkSweepPolicy* as_mark_sweep_policy() { return this; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/defNewGeneration.cpp --- 
a/src/share/vm/memory/defNewGeneration.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/defNewGeneration.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -204,7 +204,7 @@ // Compute the maximum eden and survivor space sizes. These sizes // are computed assuming the entire reserved space is committed. // These values are exported as performance counters. - uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); + uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment(); uintx size = _virtual_space.reserved_size(); _max_survivor_size = compute_survivor_size(size, alignment); _max_eden_size = size - (2*_max_survivor_size); @@ -235,7 +235,7 @@ bool clear_space, bool mangle_space) { uintx alignment = - GenCollectedHeap::heap()->collector_policy()->min_alignment(); + GenCollectedHeap::heap()->collector_policy()->space_alignment(); // If the spaces are being cleared (only done at heap initialization // currently), the survivor spaces need not be empty. @@ -473,7 +473,7 @@ } size_t DefNewGeneration::max_capacity() const { - const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); + const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment(); const size_t reserved_bytes = reserved().byte_size(); return reserved_bytes - compute_survivor_size(reserved_bytes, alignment); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/filemap.hpp --- a/src/share/vm/memory/filemap.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/filemap.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -26,6 +26,7 @@ #define SHARE_VM_MEMORY_FILEMAP_HPP #include "memory/metaspaceShared.hpp" +#include "memory/metaspace.hpp" // Layout of the file: // header: dump of archive instance plus versioning info, datestamp, etc. 
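// Rough numbers for the DefNewGeneration survivor/eden split in the hunks
// above, assuming the usual definition compute_survivor_size(size, alignment)
// = align_size_down(size / (SurvivorRatio + 2), alignment): a 64M reserved
// young generation with the default SurvivorRatio=8 yields
//   _max_survivor_size ~= 6.4M (aligned down)
//   _max_eden_size      = 64M - 2 * _max_survivor_size ~= 51M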
diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/freeBlockDictionary.cpp --- a/src/share/vm/memory/freeBlockDictionary.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/freeBlockDictionary.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -28,7 +28,6 @@ #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp" #endif // INCLUDE_ALL_GCS #include "memory/freeBlockDictionary.hpp" -#include "memory/metablock.hpp" #include "memory/metachunk.hpp" #include "runtime/thread.inline.hpp" #include "utilities/macros.hpp" diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/freeList.cpp --- a/src/share/vm/memory/freeList.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/freeList.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -25,7 +25,6 @@ #include "precompiled.hpp" #include "memory/freeBlockDictionary.hpp" #include "memory/freeList.hpp" -#include "memory/metablock.hpp" #include "memory/metachunk.hpp" #include "memory/sharedHeap.hpp" #include "runtime/globals.hpp" diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/genCollectedHeap.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -111,7 +111,7 @@ int n_covered_regions = 0; ReservedSpace heap_rs; - size_t heap_alignment = collector_policy()->max_alignment(); + size_t heap_alignment = collector_policy()->heap_alignment(); heap_address = allocate(heap_alignment, &total_reserved, &n_covered_regions, &heap_rs); @@ -1053,12 +1053,6 @@ } } -void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) { - for (int i = 0; i <= collectedGen; i++) { - _gens[i]->compute_new_size(); - } -} - GenCollectedHeap* GenCollectedHeap::heap() { assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()"); assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap"); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/genCollectedHeap.hpp --- a/src/share/vm/memory/genCollectedHeap.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/genCollectedHeap.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -86,10 +86,6 @@ NOT_PRODUCT(static size_t _skip_header_HeapWords;) protected: - // Directs each generation up to and including "collectedGen" to recompute - // its desired size. 
- void compute_new_generation_sizes(int collectedGen); - // Helper functions for allocation HeapWord* attempt_allocation(size_t size, bool is_tlab, diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/heapInspection.hpp --- a/src/share/vm/memory/heapInspection.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/heapInspection.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -73,6 +73,10 @@ "Number of bytes used by the InstanceKlass::methods() array") \ f(method_ordering_bytes, IK_method_ordering, \ "Number of bytes used by the InstanceKlass::method_ordering() array") \ + f(default_methods_array_bytes, IK_default_methods, \ + "Number of bytes used by the InstanceKlass::default_methods() array") \ + f(default_vtable_indices_bytes, IK_default_vtable_indices, \ + "Number of bytes used by the InstanceKlass::default_vtable_indices() array") \ f(local_interfaces_bytes, IK_local_interfaces, \ "Number of bytes used by the InstanceKlass::local_interfaces() array") \ f(transitive_interfaces_bytes, IK_transitive_interfaces, \ diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/metablock.cpp --- a/src/share/vm/memory/metablock.cpp Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "memory/allocation.hpp" -#include "memory/metablock.hpp" -#include "utilities/copy.hpp" -#include "utilities/debug.hpp" - -// Blocks of space for metadata are allocated out of Metachunks. -// -// Metachunk are allocated out of MetadataVirtualspaces and once -// allocated there is no explicit link between a Metachunk and -// the MetadataVirtualspaces from which it was allocated. -// -// Each SpaceManager maintains a -// list of the chunks it is using and the current chunk. The current -// chunk is the chunk from which allocations are done. Space freed in -// a chunk is placed on the free list of blocks (BlockFreelist) and -// reused from there. -// -// Future modification -// -// The Metachunk can conceivable be replaced by the Chunk in -// allocation.hpp. Note that the latter Chunk is the space for -// allocation (allocations from the chunk are out of the space in -// the Chunk after the header for the Chunk) where as Metachunks -// point to space in a VirtualSpace. To replace Metachunks with -// Chunks, change Chunks so that they can be allocated out of a VirtualSpace. 
-size_t Metablock::_min_block_byte_size = sizeof(Metablock); - -// New blocks returned by the Metaspace are zero initialized. -// We should fix the constructors to not assume this instead. -Metablock* Metablock::initialize(MetaWord* p, size_t word_size) { - if (p == NULL) { - return NULL; - } - - Metablock* result = (Metablock*) p; - - // Clear the memory - Copy::fill_to_aligned_words((HeapWord*)result, word_size); -#ifdef ASSERT - result->set_word_size(word_size); -#endif - return result; -} diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/metablock.hpp --- a/src/share/vm/memory/metablock.hpp Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,101 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ -#ifndef SHARE_VM_MEMORY_METABLOCK_HPP -#define SHARE_VM_MEMORY_METABLOCK_HPP - -// Metablock are the unit of allocation from a Chunk. It is initialized -// with the size of the requested allocation. That size is overwritten -// once the allocation returns. -// -// A Metablock may be reused by its SpaceManager but are never moved between -// SpaceManagers. There is no explicit link to the Metachunk -// from which it was allocated. Metablock may be deallocated and -// put on a freelist but the space is never freed, rather -// the Metachunk it is a part of will be deallocated when it's -// associated class loader is collected. - -class Metablock VALUE_OBJ_CLASS_SPEC { - friend class VMStructs; - private: - // Used to align the allocation (see below). - union block_t { - void* _data[3]; - struct header_t { - size_t _word_size; - Metablock* _next; - Metablock* _prev; - } _header; - } _block; - static size_t _min_block_byte_size; - - typedef union block_t Block; - typedef struct header_t Header; - const Block* block() const { return &_block; } - const Block::header_t* header() const { return &(block()->_header); } - public: - - static Metablock* initialize(MetaWord* p, size_t word_size); - - // This places the body of the block at a 2 word boundary - // because every block starts on a 2 word boundary. Work out - // how to make the body on a 2 word boundary if the block - // starts on a arbitrary boundary. 
JJJ - - size_t word_size() const { return header()->_word_size; } - void set_word_size(size_t v) { _block._header._word_size = v; } - size_t size() const volatile { return _block._header._word_size; } - void set_size(size_t v) { _block._header._word_size = v; } - Metablock* next() const { return header()->_next; } - void set_next(Metablock* v) { _block._header._next = v; } - Metablock* prev() const { return header()->_prev; } - void set_prev(Metablock* v) { _block._header._prev = v; } - - static size_t min_block_byte_size() { return _min_block_byte_size; } - - bool is_free() { return header()->_word_size != 0; } - void clear_next() { set_next(NULL); } - void link_prev(Metablock* ptr) { set_prev(ptr); } - uintptr_t* end() { return ((uintptr_t*) this) + size(); } - bool cantCoalesce() const { return false; } - void link_next(Metablock* ptr) { set_next(ptr); } - void link_after(Metablock* ptr){ - link_next(ptr); - if (ptr != NULL) ptr->link_prev(this); - } - - // Should not be needed in a free list of Metablocks - void markNotFree() { ShouldNotReachHere(); } - - // Debug support -#ifdef ASSERT - void* prev_addr() const { return (void*)&_block._header._prev; } - void* next_addr() const { return (void*)&_block._header._next; } - void* size_addr() const { return (void*)&_block._header._word_size; } -#endif - bool verify_chunk_in_free_list(Metablock* tc) const { return true; } - bool verify_par_locked() { return true; } - - void assert_is_mangled() const {/* Don't check "\*/} -}; -#endif // SHARE_VM_MEMORY_METABLOCK_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/metachunk.cpp --- a/src/share/vm/memory/metachunk.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/metachunk.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,42 +29,39 @@ #include "utilities/debug.hpp" class VirtualSpaceNode; -// -// Future modification -// -// The Metachunk can conceivable be replaced by the Chunk in -// allocation.hpp. Note that the latter Chunk is the space for -// allocation (allocations from the chunk are out of the space in -// the Chunk after the header for the Chunk) where as Metachunks -// point to space in a VirtualSpace. To replace Metachunks with -// Chunks, change Chunks so that they can be allocated out of a VirtualSpace. const size_t metadata_chunk_initialize = 0xf7f7f7f7; -size_t Metachunk::_overhead = - Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord; +size_t Metachunk::object_alignment() { + // Must align pointers and sizes to 8, + // so that 64 bit types get correctly aligned. + const size_t alignment = 8; + + // Make sure that the Klass alignment also agree. 
+ STATIC_ASSERT(alignment == (size_t)KlassAlignmentInBytes); + + return alignment; +} + +size_t Metachunk::overhead() { + return align_size_up(sizeof(Metachunk), object_alignment()) / BytesPerWord; +} // Metachunk methods Metachunk::Metachunk(size_t word_size, - VirtualSpaceNode* container) : - _word_size(word_size), - _bottom(NULL), - _end(NULL), + VirtualSpaceNode* container) + : Metabase(word_size), _top(NULL), - _next(NULL), - _prev(NULL), _container(container) { - _bottom = (MetaWord*)this; - _top = (MetaWord*)this + _overhead; - _end = (MetaWord*)this + word_size; + _top = initial_top(); #ifdef ASSERT - set_is_free(false); + set_is_tagged_free(false); size_t data_word_size = pointer_delta(end(), - top(), + _top, sizeof(MetaWord)); - Copy::fill_to_words((HeapWord*) top(), + Copy::fill_to_words((HeapWord*)_top, data_word_size, metadata_chunk_initialize); #endif @@ -82,22 +79,18 @@ // _bottom points to the start of the chunk including the overhead. size_t Metachunk::used_word_size() const { - return pointer_delta(_top, _bottom, sizeof(MetaWord)); + return pointer_delta(_top, bottom(), sizeof(MetaWord)); } size_t Metachunk::free_word_size() const { - return pointer_delta(_end, _top, sizeof(MetaWord)); -} - -size_t Metachunk::capacity_word_size() const { - return pointer_delta(_end, _bottom, sizeof(MetaWord)); + return pointer_delta(end(), _top, sizeof(MetaWord)); } void Metachunk::print_on(outputStream* st) const { st->print_cr("Metachunk:" " bottom " PTR_FORMAT " top " PTR_FORMAT " end " PTR_FORMAT " size " SIZE_FORMAT, - bottom(), top(), end(), word_size()); + bottom(), _top, end(), word_size()); if (Verbose) { st->print_cr(" used " SIZE_FORMAT " free " SIZE_FORMAT, used_word_size(), free_word_size()); @@ -109,8 +102,8 @@ // Mangle the payload of the chunk and not the links that // maintain list of chunks. HeapWord* start = (HeapWord*)(bottom() + overhead()); - size_t word_size = capacity_word_size() - overhead(); - Copy::fill_to_words(start, word_size, metadata_chunk_initialize); + size_t size = word_size() - overhead(); + Copy::fill_to_words(start, size, metadata_chunk_initialize); } #endif // PRODUCT @@ -118,9 +111,68 @@ #ifdef ASSERT // Cannot walk through the blocks unless the blocks have // headers with sizes. 
- assert(_bottom <= _top && - _top <= _end, + assert(bottom() <= _top && + _top <= (MetaWord*)end(), "Chunk has been smashed"); #endif return; } + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +class TestMetachunk { + public: + static void test() { + size_t size = 2 * 1024 * 1024; + void* memory = malloc(size); + assert(memory != NULL, "Failed to malloc 2MB"); + + Metachunk* metachunk = ::new (memory) Metachunk(size / BytesPerWord, NULL); + + assert(metachunk->bottom() == (MetaWord*)metachunk, "assert"); + assert(metachunk->end() == (uintptr_t*)metachunk + metachunk->size(), "assert"); + + // Check sizes + assert(metachunk->size() == metachunk->word_size(), "assert"); + assert(metachunk->word_size() == pointer_delta(metachunk->end(), metachunk->bottom(), + sizeof(MetaWord*)), "assert"); + + // Check usage + assert(metachunk->used_word_size() == metachunk->overhead(), "assert"); + assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert"); + assert(metachunk->top() == metachunk->initial_top(), "assert"); + assert(metachunk->is_empty(), "assert"); + + // Allocate + size_t alloc_size = 64; // Words + assert(is_size_aligned(alloc_size, Metachunk::object_alignment()), "assert"); + + MetaWord* mem = metachunk->allocate(alloc_size); + + // Check post alloc + assert(mem == metachunk->initial_top(), "assert"); + assert(mem + alloc_size == metachunk->top(), "assert"); + assert(metachunk->used_word_size() == metachunk->overhead() + alloc_size, "assert"); + assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert"); + assert(!metachunk->is_empty(), "assert"); + + // Clear chunk + metachunk->reset_empty(); + + // Check post clear + assert(metachunk->used_word_size() == metachunk->overhead(), "assert"); + assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert"); + assert(metachunk->top() == metachunk->initial_top(), "assert"); + assert(metachunk->is_empty(), "assert"); + + free(memory); + } +}; + +void TestMetachunk_test() { + TestMetachunk::test(); +} + +#endif diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/metachunk.hpp --- a/src/share/vm/memory/metachunk.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/metachunk.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,89 +24,44 @@ #ifndef SHARE_VM_MEMORY_METACHUNK_HPP #define SHARE_VM_MEMORY_METACHUNK_HPP -// Metachunk - Quantum of allocation from a Virtualspace -// Metachunks are reused (when freed are put on a global freelist) and -// have no permanent association to a SpaceManager. 
- -// +--------------+ <- end -// | | --+ ---+ -// | | | free | -// | | | | -// | | | | capacity -// | | | | -// | | <- top --+ | -// | | ---+ | -// | | | used | -// | | | | -// | | | | -// +--------------+ <- bottom ---+ ---+ +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" class VirtualSpaceNode; -class Metachunk VALUE_OBJ_CLASS_SPEC { - // link to support lists of chunks - Metachunk* _next; - Metachunk* _prev; - VirtualSpaceNode* _container; - - MetaWord* _bottom; - MetaWord* _end; - MetaWord* _top; +// Super class of Metablock and Metachunk to allow them to +// be put on the FreeList and in the BinaryTreeDictionary. +template +class Metabase VALUE_OBJ_CLASS_SPEC { size_t _word_size; - // Used in a guarantee() so included in the Product builds - // even through it is only for debugging. - bool _is_free; + T* _next; + T* _prev; - // Metachunks are allocated out of a MetadataVirtualSpace and - // and use some of its space to describe itself (plus alignment - // considerations). Metadata is allocated in the rest of the chunk. - // This size is the overhead of maintaining the Metachunk within - // the space. - static size_t _overhead; + protected: + Metabase(size_t word_size) : _word_size(word_size), _next(NULL), _prev(NULL) {} public: - Metachunk(size_t word_size , VirtualSpaceNode* container); - - // Used to add a Metachunk to a list of Metachunks - void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");} - void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");} - void set_container(VirtualSpaceNode* v) { _container = v; } - - MetaWord* allocate(size_t word_size); + T* next() const { return _next; } + T* prev() const { return _prev; } + void set_next(T* v) { _next = v; assert(v != this, "Boom");} + void set_prev(T* v) { _prev = v; assert(v != this, "Boom");} + void clear_next() { set_next(NULL); } + void clear_prev() { set_prev(NULL); } - // Accessors - Metachunk* next() const { return _next; } - Metachunk* prev() const { return _prev; } - VirtualSpaceNode* container() const { return _container; } - MetaWord* bottom() const { return _bottom; } - MetaWord* end() const { return _end; } - MetaWord* top() const { return _top; } - size_t word_size() const { return _word_size; } size_t size() const volatile { return _word_size; } void set_size(size_t v) { _word_size = v; } - bool is_free() { return _is_free; } - void set_is_free(bool v) { _is_free = v; } - static size_t overhead() { return _overhead; } - void clear_next() { set_next(NULL); } - void link_prev(Metachunk* ptr) { set_prev(ptr); } - uintptr_t* end() { return ((uintptr_t*) this) + size(); } - bool cantCoalesce() const { return false; } - void link_next(Metachunk* ptr) { set_next(ptr); } - void link_after(Metachunk* ptr){ + + void link_next(T* ptr) { set_next(ptr); } + void link_prev(T* ptr) { set_prev(ptr); } + void link_after(T* ptr) { link_next(ptr); - if (ptr != NULL) ptr->link_prev(this); + if (ptr != NULL) ptr->link_prev((T*)this); } - // Reset top to bottom so chunk can be reused. 
-  void reset_empty() { _top = (_bottom + _overhead); _next = NULL; _prev = NULL; }
-  bool is_empty() { return _top == (_bottom + _overhead); }
+  uintptr_t* end() const { return ((uintptr_t*) this) + size(); }
 
-  // used (has been allocated)
-  // free (available for future allocations)
-  // capacity (total size of chunk)
-  size_t used_word_size() const;
-  size_t free_word_size() const;
-  size_t capacity_word_size() const;
+  bool cantCoalesce() const { return false; }
 
   // Debug support
 #ifdef ASSERT
@@ -114,14 +69,99 @@
   void* next_addr() const { return (void*)&_next; }
   void* size_addr() const { return (void*)&_word_size; }
 #endif
-  bool verify_chunk_in_free_list(Metachunk* tc) const { return true; }
+  bool verify_chunk_in_free_list(T* tc) const { return true; }
   bool verify_par_locked() { return true; }
 
   void assert_is_mangled() const {/* Don't check "\*/}
+
+  bool is_free() { return true; }
+};
+
+// Metachunk - Quantum of allocation from a Virtualspace
+// Metachunks are reused (when freed are put on a global freelist) and
+// have no permanent association to a SpaceManager.
+
+//            +--------------+ <- end     --+        --+
+//            |              |              |          |
+//            |              |              | free     |
+//            |              |              |          |
+//            |              |              |          | size | capacity
+//            |              |              |          |
+//            |              | <- top      --+         |
+//            |              |               |         |
+//            |              |               | used    |
+//            |              |               |         |
+//            |              |               |         |
+//            +--------------+ <- bottom   --+        --+
+
+class Metachunk : public Metabase<Metachunk> {
+  friend class TestMetachunk;
+  // The VirtualSpaceNode containing this chunk.
+  VirtualSpaceNode* _container;
+
+  // Current allocation top.
+  MetaWord* _top;
+
+  DEBUG_ONLY(bool _is_tagged_free;)
+
+  MetaWord* initial_top() const { return (MetaWord*)this + overhead(); }
+  MetaWord* top() const         { return _top; }
+
+ public:
+  // Metachunks are allocated out of a MetadataVirtualSpace and
+  // use some of its space to describe itself (plus alignment
+  // considerations). Metadata is allocated in the rest of the chunk.
+  // This size is the overhead of maintaining the Metachunk within
+  // the space.
+
+  // Alignment of each allocation in the chunks.
+  static size_t object_alignment();
+
+  // Size of the Metachunk header, including alignment.
+  static size_t overhead();
+
+  Metachunk(size_t word_size, VirtualSpaceNode* container);
+
+  MetaWord* allocate(size_t word_size);
+
+  VirtualSpaceNode* container() const { return _container; }
+
+  MetaWord* bottom() const { return (MetaWord*) this; }
+
+  // Reset top to bottom so chunk can be reused.
+  void reset_empty() { _top = initial_top(); clear_next(); clear_prev(); }
+  bool is_empty() { return _top == initial_top(); }
+
+  // used (has been allocated)
+  // free (available for future allocations)
+  size_t word_size() const { return size(); }
+  size_t used_word_size() const;
+  size_t free_word_size() const;
+
+#ifdef ASSERT
+  bool is_tagged_free() { return _is_tagged_free; }
+  void set_is_tagged_free(bool v) { _is_tagged_free = v; }
+#endif
+
   NOT_PRODUCT(void mangle();)
   void print_on(outputStream* st) const;
   void verify();
 };
+
+// Metablock is the unit of allocation from a Chunk.
+//
+// A Metablock may be reused by its SpaceManager but is never moved between
+// SpaceManagers. There is no explicit link to the Metachunk
+// from which it was allocated. A Metablock may be deallocated and
+// put on a freelist but the space is never freed, rather
+// the Metachunk it is a part of will be deallocated when its
+// associated class loader is collected.
+ +class Metablock : public Metabase { + friend class VMStructs; + public: + Metablock(size_t word_size) : Metabase(word_size) {} +}; + #endif // SHARE_VM_MEMORY_METACHUNK_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/metadataFactory.hpp --- a/src/share/vm/memory/metadataFactory.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/metadataFactory.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,6 +65,7 @@ static void free_array(ClassLoaderData* loader_data, Array* data) { if (data != NULL) { assert(loader_data != NULL, "shouldn't pass null"); + assert(!data->is_shared(), "cannot deallocate array in shared spaces"); int size = data->size(); if (DumpSharedSpaces) { loader_data->ro_metaspace()->deallocate((MetaWord*)data, size, false); @@ -83,6 +84,7 @@ // Call metadata's deallocate function which will call deallocate fields assert(!DumpSharedSpaces, "cannot deallocate metadata when dumping CDS archive"); assert(!md->on_stack(), "can't deallocate things on stack"); + assert(!md->is_shared(), "cannot deallocate if in shared spaces"); md->deallocate_contents(loader_data); loader_data->metaspace_non_null()->deallocate((MetaWord*)md, size, md->is_klass()); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/metaspace.cpp --- a/src/share/vm/memory/metaspace.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/metaspace.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -29,34 +29,34 @@ #include "memory/collectorPolicy.hpp" #include "memory/filemap.hpp" #include "memory/freeList.hpp" -#include "memory/metablock.hpp" +#include "memory/gcLocker.hpp" #include "memory/metachunk.hpp" #include "memory/metaspace.hpp" #include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/globals.hpp" +#include "runtime/init.hpp" #include "runtime/java.hpp" #include "runtime/mutex.hpp" #include "runtime/orderAccess.hpp" #include "services/memTracker.hpp" +#include "services/memoryService.hpp" #include "utilities/copy.hpp" #include "utilities/debug.hpp" typedef BinaryTreeDictionary BlockTreeDictionary; typedef BinaryTreeDictionary ChunkTreeDictionary; -// Define this macro to enable slow integrity checking of -// the free chunk lists + +// Set this constant to enable slow integrity checking of the free chunk lists const bool metaspace_slow_verify = false; -// Parameters for stress mode testing -const uint metadata_deallocate_a_lot_block = 10; -const uint metadata_deallocate_a_lock_chunk = 3; size_t const allocation_from_dictionary_limit = 4 * K; MetaWord* last_allocated = 0; -size_t Metaspace::_class_metaspace_size; +size_t Metaspace::_compressed_class_space_size; // Used in declarations in SpaceManager and ChunkManager enum ChunkIndex { @@ -75,8 +75,7 @@ ClassSmallChunk = 256, SmallChunk = 512, ClassMediumChunk = 4 * K, - MediumChunk = 8 * K, - HumongousChunkGranularity = 8 + MediumChunk = 8 * K }; static ChunkIndex next_chunk_index(ChunkIndex i) { @@ -84,35 +83,15 @@ return (ChunkIndex) (i+1); } -// Originally _capacity_until_GC was set to MetaspaceSize here but -// the default MetaspaceSize before argument processing was being -// used which was not the desired value. 
See the code -// in should_expand() to see how the initialization is handled -// now. -size_t MetaspaceGC::_capacity_until_GC = 0; -bool MetaspaceGC::_expand_after_GC = false; +volatile intptr_t MetaspaceGC::_capacity_until_GC = 0; uint MetaspaceGC::_shrink_factor = 0; bool MetaspaceGC::_should_concurrent_collect = false; -// Blocks of space for metadata are allocated out of Metachunks. -// -// Metachunk are allocated out of MetadataVirtualspaces and once -// allocated there is no explicit link between a Metachunk and -// the MetadataVirtualspaces from which it was allocated. -// -// Each SpaceManager maintains a -// list of the chunks it is using and the current chunk. The current -// chunk is the chunk from which allocations are done. Space freed in -// a chunk is placed on the free list of blocks (BlockFreelist) and -// reused from there. - typedef class FreeList ChunkList; // Manages the global free lists of chunks. -// Has three lists of free chunks, and a total size and -// count that includes all three - class ChunkManager : public CHeapObj { + friend class TestVirtualSpaceNodeTest; // Free list of chunks of different sizes. // SpecializedChunk @@ -121,7 +100,6 @@ // HumongousChunk ChunkList _free_chunks[NumberOfFreeLists]; - // HumongousChunk ChunkTreeDictionary _humongous_dictionary; @@ -168,7 +146,6 @@ // add or delete (return) a chunk to the global freelist. Metachunk* chunk_freelist_allocate(size_t word_size); - void chunk_freelist_deallocate(Metachunk* chunk); // Map a size to a list index assuming that there are lists // for special, small, medium, and humongous chunks. @@ -202,9 +179,7 @@ // Returns the list for the given chunk word size. ChunkList* find_free_chunks_list(size_t word_size); - // Add and remove from a list by size. Selects - // list based on size of chunk. - void free_chunks_put(Metachunk* chuck); + // Remove from a list by size. Selects list based on size of chunk. Metachunk* free_chunks_get(size_t chunk_word_size); // Debug support @@ -232,7 +207,6 @@ // to the allocation of a quantum of metadata). class BlockFreelist VALUE_OBJ_CLASS_SPEC { BlockTreeDictionary* _dictionary; - static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size); // Only allocate and split from freelist if the size of the allocation // is at least 1/4th the size of the available block. @@ -260,6 +234,7 @@ void print_on(outputStream* st) const; }; +// A VirtualSpaceList node. 
class VirtualSpaceNode : public CHeapObj { friend class VirtualSpaceList; @@ -282,6 +257,8 @@ // VirtualSpace Metachunk* first_chunk() { return (Metachunk*) bottom(); } + // Committed but unused space in the virtual space + size_t free_words_in_vs() const; public: VirtualSpaceNode(size_t byte_size); @@ -293,9 +270,10 @@ MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; } - size_t expanded_words() const { return _virtual_space.committed_size() / BytesPerWord; } size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; } + bool is_pre_committed() const { return _virtual_space.special(); } + // address of next available space in _virtual_space; // Accessors VirtualSpaceNode* next() { return _next; } @@ -325,7 +303,6 @@ // used and capacity in this single entry in the list size_t used_words_in_vs() const; size_t capacity_words_in_vs() const; - size_t free_words_in_vs() const; bool initialize(); @@ -337,12 +314,19 @@ // Expands/shrinks the committed space in a virtual space. Delegates // to Virtualspace - bool expand_by(size_t words, bool pre_touch = false); + bool expand_by(size_t min_words, size_t preferred_words); // In preparation for deleting this node, remove all the chunks // in the node from any freelist. void purge(ChunkManager* chunk_manager); + // If an allocation doesn't fit in the current node a new node is created. + // Allocate chunks out of the remaining committed space in this node + // to avoid wasting that memory. + // This always adds up because all the chunk sizes are multiples of + // the smallest chunk size. + void retire(ChunkManager* chunk_manager); + #ifdef ASSERT // Debug support void mangle(); @@ -351,42 +335,77 @@ void print_on(outputStream* st) const; }; +#define assert_is_ptr_aligned(ptr, alignment) \ + assert(is_ptr_aligned(ptr, alignment), \ + err_msg(PTR_FORMAT " is not aligned to " \ + SIZE_FORMAT, ptr, alignment)) + +#define assert_is_size_aligned(size, alignment) \ + assert(is_size_aligned(size, alignment), \ + err_msg(SIZE_FORMAT " is not aligned to " \ + SIZE_FORMAT, size, alignment)) + + +// Decide if large pages should be committed when the memory is reserved. +static bool should_commit_large_pages_when_reserving(size_t bytes) { + if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) { + size_t words = bytes / BytesPerWord; + bool is_class = false; // We never reserve large pages for the class space. + if (MetaspaceGC::can_expand(words, is_class) && + MetaspaceGC::allowed_expansion() >= words) { + return true; + } + } + + return false; +} + // byte_size is the size of the associated virtualspace. -VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) { - // align up to vm allocation granularity - byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); +VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) { + assert_is_size_aligned(bytes, Metaspace::reserve_alignment()); // This allocates memory with mmap. For DumpSharedspaces, try to reserve // configurable address, generally at the top of the Java heap so other // memory addresses don't conflict. if (DumpSharedSpaces) { - char* shared_base = (char*)SharedBaseAddress; - _rs = ReservedSpace(byte_size, 0, false, shared_base, 0); + bool large_pages = false; // No large pages when dumping the CDS archive. 
+ char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment()); + + _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0); if (_rs.is_reserved()) { assert(shared_base == 0 || _rs.base() == shared_base, "should match"); } else { // Get a mmap region anywhere if the SharedBaseAddress fails. - _rs = ReservedSpace(byte_size); + _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); } MetaspaceShared::set_shared_rs(&_rs); } else { - _rs = ReservedSpace(byte_size); + bool large_pages = should_commit_large_pages_when_reserving(bytes); + + _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); } - MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); + if (_rs.is_reserved()) { + assert(_rs.base() != NULL, "Catch if we get a NULL address"); + assert(_rs.size() != 0, "Catch if we get a 0 size"); + assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment()); + assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment()); + + MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); + } } void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { Metachunk* chunk = first_chunk(); Metachunk* invalid_chunk = (Metachunk*) top(); while (chunk < invalid_chunk ) { - assert(chunk->is_free(), "Should be marked free"); - MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); - chunk_manager->remove_chunk(chunk); - assert(chunk->next() == NULL && - chunk->prev() == NULL, - "Was not removed from its list"); - chunk = (Metachunk*) next; + assert(chunk->is_tagged_free(), "Should be tagged free"); + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + chunk_manager->remove_chunk(chunk); + assert(chunk->next() == NULL && + chunk->prev() == NULL, + "Was not removed from its list"); + chunk = (Metachunk*) next; } } @@ -400,7 +419,7 @@ // Don't count the chunks on the free lists. Those are // still part of the VirtualSpaceNode but not currently // counted. - if (!chunk->is_free()) { + if (!chunk->is_tagged_free()) { count++; } chunk = (Metachunk*) next; @@ -410,8 +429,6 @@ #endif // List of VirtualSpaces for metadata allocation. -// It has a _next link for singly linked list and a MemRegion -// for total space in the VirtualSpace. class VirtualSpaceList : public CHeapObj { friend class VirtualSpaceNode; @@ -419,16 +436,13 @@ VirtualSpaceSize = 256 * K }; - // Global list of virtual spaces // Head of the list VirtualSpaceNode* _virtual_space_list; // virtual space currently being used for allocations VirtualSpaceNode* _current_virtual_space; - // Can this virtual list allocate >1 spaces? Also, used to determine - // whether to allocate unlimited small chunks in this virtual space + // Is this VirtualSpaceList used for the compressed class space bool _is_class; - bool can_grow() const { return !is_class() || !UseCompressedClassPointers; } // Sum of reserved and committed memory in the virtual spaces size_t _reserved_words; @@ -453,7 +467,11 @@ // Get another virtual space and add it to the list. This // is typically prompted by a failed attempt to allocate a chunk // and is typically followed by the allocation of a chunk. - bool grow_vs(size_t vs_word_size); + bool create_new_virtual_space(size_t vs_word_size); + + // Chunk up the unused committed space in the current + // virtual space and add the chunks to the free list. 
+ void retire_current_virtual_space(); public: VirtualSpaceList(size_t word_size); @@ -465,12 +483,12 @@ size_t grow_chunks_by_words, size_t medium_chunk_bunch); - bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false); - - // Get the first chunk for a Metaspace. Used for - // special cases such as the boot class loader, reflection - // class loader and anonymous class loader. - Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch); + bool expand_node_by(VirtualSpaceNode* node, + size_t min_words, + size_t preferred_words); + + bool expand_by(size_t min_words, + size_t preferred_words); VirtualSpaceNode* current_virtual_space() { return _current_virtual_space; @@ -478,8 +496,7 @@ bool is_class() const { return _is_class; } - // Allocate the first virtualspace. - void initialize(size_t word_size); + bool initialization_succeeded() { return _virtual_space_list != NULL; } size_t reserved_words() { return _reserved_words; } size_t reserved_bytes() { return reserved_words() * BytesPerWord; } @@ -522,44 +539,16 @@ class Metadebug : AllStatic { // Debugging support for Metaspaces - static int _deallocate_block_a_lot_count; - static int _deallocate_chunk_a_lot_count; static int _allocation_fail_alot_count; public: - static int deallocate_block_a_lot_count() { - return _deallocate_block_a_lot_count; - } - static void set_deallocate_block_a_lot_count(int v) { - _deallocate_block_a_lot_count = v; - } - static void inc_deallocate_block_a_lot_count() { - _deallocate_block_a_lot_count++; - } - static int deallocate_chunk_a_lot_count() { - return _deallocate_chunk_a_lot_count; - } - static void reset_deallocate_chunk_a_lot_count() { - _deallocate_chunk_a_lot_count = 1; - } - static void inc_deallocate_chunk_a_lot_count() { - _deallocate_chunk_a_lot_count++; - } static void init_allocation_fail_alot_count(); #ifdef ASSERT static bool test_metadata_failure(); #endif - - static void deallocate_chunk_a_lot(SpaceManager* sm, - size_t chunk_word_size); - static void deallocate_block_a_lot(SpaceManager* sm, - size_t chunk_word_size); - }; -int Metadebug::_deallocate_block_a_lot_count = 0; -int Metadebug::_deallocate_chunk_a_lot_count = 0; int Metadebug::_allocation_fail_alot_count = 0; // SpaceManager - used by Metaspace to handle allocations @@ -647,10 +636,12 @@ bool is_class() { return _mdtype == Metaspace::ClassType; } // Accessors - size_t specialized_chunk_size() { return SpecializedChunk; } - size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; } - size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; } - size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } + size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; } + size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; } + size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; } + size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } + + size_t smallest_chunk_size() { return specialized_chunk_size(); } size_t allocated_blocks_words() const { return _allocated_blocks_words; } size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; } @@ -708,6 +699,9 @@ // and allocates from that chunk. MetaWord* grow_and_allocate(size_t word_size); + // Notify memory usage to MemoryService. + void track_metaspace_memory_usage(); + // debugging support. 
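
The expand_node_by()/expand_by() pair declared above replaces the old single word_size/pre_touch parameter with an explicit floor and goal. A usage sketch of how a caller derives the two bounds, mirroring what get_new_chunk() does later in this patch (request_words and bunch_words are illustrative stand-ins for the chunk sizes involved):

// Both bounds are rounded to the commit granularity; a humongous request
// can push the floor above the goal, in which case the goal is raised.
size_t min_words  = align_size_up(request_words, Metaspace::commit_alignment_words());
size_t pref_words = align_size_up(bunch_words, Metaspace::commit_alignment_words());
if (min_words >= pref_words) {
  pref_words = min_words;  // humongous allocation
}
bool expanded = vs_list->expand_by(min_words, pref_words);
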
void dump(outputStream* const out) const; @@ -722,14 +716,11 @@ #endif size_t get_raw_word_size(size_t word_size) { - // If only the dictionary is going to be used (i.e., no - // indexed free list), then there is a minimum size requirement. - // MinChunkSize is a placeholder for the real minimum size JJJ size_t byte_size = word_size * BytesPerWord; - size_t raw_bytes_size = MAX2(byte_size, - Metablock::min_block_byte_size()); - raw_bytes_size = ARENA_ALIGN(raw_bytes_size); + size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock)); + raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment()); + size_t raw_word_size = raw_bytes_size / BytesPerWord; assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem"); @@ -782,17 +773,8 @@ } } -Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) { - Metablock* block = (Metablock*) p; - block->set_word_size(word_size); - block->set_prev(NULL); - block->set_next(NULL); - - return block; -} - void BlockFreelist::return_block(MetaWord* p, size_t word_size) { - Metablock* free_chunk = initialize_free_chunk(p, word_size); + Metablock* free_chunk = ::new (p) Metablock(word_size); if (dictionary() == NULL) { _dictionary = new BlockTreeDictionary(); } @@ -869,6 +851,12 @@ MetaWord* chunk_limit = top(); assert(chunk_limit != NULL, "Not safe to call this method"); + // The virtual spaces are always expanded by the + // commit granularity to enforce the following condition. + // Without this the is_available check will not work correctly. + assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(), + "The committed memory doesn't match the expanded memory."); + if (!is_available(chunk_word_size)) { if (TraceMetadataChunkAllocation) { gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size); @@ -888,14 +876,21 @@ // Expand the virtual space (commit more of the reserved space) -bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) { - size_t bytes = words * BytesPerWord; - bool result = virtual_space()->expand_by(bytes, pre_touch); - if (TraceMetavirtualspaceAllocation && !result) { - gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed " - "for byte size " SIZE_FORMAT, bytes); - virtual_space()->print_on(gclog_or_tty); +bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) { + size_t min_bytes = min_words * BytesPerWord; + size_t preferred_bytes = preferred_words * BytesPerWord; + + size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size(); + + if (uncommitted < min_bytes) { + return false; } + + size_t commit = MIN2(preferred_bytes, uncommitted); + bool result = virtual_space()->expand_by(commit, false); + + assert(result, "Failed to commit memory"); + return result; } @@ -914,12 +909,23 @@ return false; } - // An allocation out of this Virtualspace that is larger - // than an initial commit size can waste that initial committed - // space. - size_t committed_byte_size = 0; - bool result = virtual_space()->initialize(_rs, committed_byte_size); + // These are necessary restriction to make sure that the virtual space always + // grows in steps of Metaspace::commit_alignment(). If both base and size are + // aligned only the middle alignment of the VirtualSpace is used. 
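
The get_raw_word_size() rounding above guarantees that any block handed back later is big enough to be reused in place as a free-list Metablock. A worked standalone sketch, assuming 8-byte words, a 16-byte Metablock and 8-byte object alignment (illustrative values, not taken from the real headers):

#include <cstddef>

static size_t raw_word_size(size_t word_size) {
  const size_t bytes_per_word = 8, min_block_bytes = 16, alignment = 8;
  size_t bytes = word_size * bytes_per_word;
  if (bytes < min_block_bytes) bytes = min_block_bytes;  // room for a Metablock
  bytes = (bytes + alignment - 1) & ~(alignment - 1);    // align_size_up
  return bytes / bytes_per_word;                         // 1 -> 2 words, 5 -> 5 words
}
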
+ assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment()); + assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment()); + + // ReservedSpaces marked as special will have the entire memory + // pre-committed. Setting a committed size will make sure that + // committed_size and actual_committed_size agrees. + size_t pre_committed_size = _rs.special() ? _rs.size() : 0; + + bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size, + Metaspace::commit_alignment()); if (result) { + assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(), + "Checking that the pre-committed memory was registered by the VirtualSpace"); + set_top((MetaWord*)virtual_space()->low()); set_reserved(MemRegion((HeapWord*)_rs.base(), (HeapWord*)(_rs.base() + _rs.size()))); @@ -976,13 +982,23 @@ _reserved_words = _reserved_words - v; } +#define assert_committed_below_limit() \ + assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \ + err_msg("Too much committed memory. Committed: " SIZE_FORMAT \ + " limit (MaxMetaspaceSize): " SIZE_FORMAT, \ + MetaspaceAux::committed_bytes(), MaxMetaspaceSize)); + void VirtualSpaceList::inc_committed_words(size_t v) { assert_lock_strong(SpaceManager::expand_lock()); _committed_words = _committed_words + v; + + assert_committed_below_limit(); } void VirtualSpaceList::dec_committed_words(size_t v) { assert_lock_strong(SpaceManager::expand_lock()); _committed_words = _committed_words - v; + + assert_committed_below_limit(); } void VirtualSpaceList::inc_virtual_space_count() { @@ -1004,7 +1020,7 @@ } // Chunk is being removed from the chunks free list. - dec_free_chunks_total(chunk->capacity_word_size()); + dec_free_chunks_total(chunk->word_size()); } // Walk the list of VirtualSpaceNodes and delete @@ -1025,8 +1041,8 @@ if (vsl->container_count() == 0 && vsl != current_virtual_space()) { // Unlink it from the list if (prev_vsl == vsl) { - // This is the case of the current note being the first note. - assert(vsl == virtual_space_list(), "Expected to be the first note"); + // This is the case of the current node being the first node. + assert(vsl == virtual_space_list(), "Expected to be the first node"); set_virtual_space_list(vsl->next()); } else { prev_vsl->set_next(vsl->next()); @@ -1054,7 +1070,36 @@ #endif } -VirtualSpaceList::VirtualSpaceList(size_t word_size ) : +void VirtualSpaceList::retire_current_virtual_space() { + assert_lock_strong(SpaceManager::expand_lock()); + + VirtualSpaceNode* vsn = current_virtual_space(); + + ChunkManager* cm = is_class() ? 
Metaspace::chunk_manager_class() : + Metaspace::chunk_manager_metadata(); + + vsn->retire(cm); +} + +void VirtualSpaceNode::retire(ChunkManager* chunk_manager) { + for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) { + ChunkIndex index = (ChunkIndex)i; + size_t chunk_size = chunk_manager->free_chunks(index)->size(); + + while (free_words_in_vs() >= chunk_size) { + DEBUG_ONLY(verify_container_count();) + Metachunk* chunk = get_chunk_vs(chunk_size); + assert(chunk != NULL, "allocation should have been successful"); + + chunk_manager->return_chunks(index, chunk); + chunk_manager->inc_free_chunks_total(chunk_size); + DEBUG_ONLY(verify_container_count();) + } + } + assert(free_words_in_vs() == 0, "should be empty now"); +} + +VirtualSpaceList::VirtualSpaceList(size_t word_size) : _is_class(false), _virtual_space_list(NULL), _current_virtual_space(NULL), @@ -1063,9 +1108,7 @@ _virtual_space_count(0) { MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); - bool initialization_succeeded = grow_vs(word_size); - assert(initialization_succeeded, - " VirtualSpaceList initialization should not fail"); + create_new_virtual_space(word_size); } VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : @@ -1079,8 +1122,9 @@ Mutex::_no_safepoint_check_flag); VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); bool succeeded = class_entry->initialize(); - assert(succeeded, " VirtualSpaceList initialization should not fail"); - link_vs(class_entry); + if (succeeded) { + link_vs(class_entry); + } } size_t VirtualSpaceList::free_bytes() { @@ -1088,14 +1132,24 @@ } // Allocate another meta virtual space and add it to the list. -bool VirtualSpaceList::grow_vs(size_t vs_word_size) { +bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) { assert_lock_strong(SpaceManager::expand_lock()); - if (vs_word_size == 0) { + + if (is_class()) { + assert(false, "We currently don't support more than one VirtualSpace for" + " the compressed class space. The initialization of the" + " CCS uses another code path and should not hit this path."); return false; } + + if (vs_word_size == 0) { + assert(false, "vs_word_size should always be at least _reserve_alignment large."); + return false; + } + // Reserve the space size_t vs_byte_size = vs_word_size * BytesPerWord; - assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned"); + assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment()); // Allocate the meta virtual space and initialize it. VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); @@ -1103,7 +1157,8 @@ delete new_entry; return false; } else { - assert(new_entry->reserved_words() == vs_word_size, "Must be"); + assert(new_entry->reserved_words() == vs_word_size, + "Reserved memory size differs from requested memory size"); // ensure lock-free iteration sees fully initialized node OrderAccess::storestore(); link_vs(new_entry); @@ -1130,20 +1185,68 @@ } } -bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) { +bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node, + size_t min_words, + size_t preferred_words) { size_t before = node->committed_words(); - bool result = node->expand_by(word_size, pre_touch); + bool result = node->expand_by(min_words, preferred_words); size_t after = node->committed_words(); // after and before can be the same if the memory was pre-committed. 
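
VirtualSpaceNode::retire() above carves the committed-but-unused tail into free chunks, trying the largest chunk size first. The loop always ends with zero words left because every chunk size is a multiple of the next smaller one. A standalone demonstration with made-up sizes:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t sizes[3] = {64, 8, 2};  // medium > small > specialized (hypothetical)
  size_t left = 78;                    // committed-but-unused words
  for (int i = 0; i < 3; i++) {
    while (left >= sizes[i]) {
      left -= sizes[i];                // one more chunk onto the free list
      printf("freed a %zu-word chunk\n", sizes[i]);
    }
  }
  return (int)left;                    // 0, since 78 = 64 + 8 + 2 + 2 + 2
}
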
- assert(after >= before, "Must be");
+ assert(after >= before, "Inconsistency");
 inc_committed_words(after - before);
 return result;
 }
+bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
+ assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
+ assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
+ assert(min_words <= preferred_words, "Invalid arguments");
+
+ if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
+ return false;
+ }
+
+ size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
+ if (allowed_expansion_words < min_words) {
+ return false;
+ }
+
+ size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
+
+ // Commit more memory from the current virtual space.
+ bool vs_expanded = expand_node_by(current_virtual_space(),
+ min_words,
+ max_expansion_words);
+ if (vs_expanded) {
+ return true;
+ }
+ retire_current_virtual_space();
+
+ // Get another virtual space.
+ size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
+ grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
+
+ if (create_new_virtual_space(grow_vs_words)) {
+ if (current_virtual_space()->is_pre_committed()) {
+ // The memory was pre-committed, so we are done here.
+ assert(min_words <= current_virtual_space()->committed_words(),
+ "The new VirtualSpace was pre-committed, so it "
+ "should be large enough to fit the alloc request.");
+ return true;
+ }
+
+ return expand_node_by(current_virtual_space(),
+ min_words,
+ max_expansion_words);
+ }
+
+ return false;
+}
+
 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
 size_t grow_chunks_by_words,
 size_t medium_chunk_bunch) {
@@ -1151,63 +1254,27 @@
 // Allocate a chunk out of the current virtual space.
 Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
- if (next == NULL) {
- // Not enough room in current virtual space. Try to commit
- // more space.
- size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
- grow_chunks_by_words);
- size_t page_size_words = os::vm_page_size() / BytesPerWord;
- size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
- page_size_words);
- bool vs_expanded =
- expand_by(current_virtual_space(), aligned_expand_vs_by_words);
- if (!vs_expanded) {
- // Should the capacity of the metaspaces be expanded for
- // this allocation? If it's the virtual space for classes and is
- // being used for CompressedHeaders, don't allocate a new virtualspace.
- if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
- // Get another virtual space.
- size_t allocation_aligned_expand_words =
- align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
- size_t grow_vs_words =
- MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
- if (grow_vs(grow_vs_words)) {
- // Got it. It's on the list now. Get a chunk from it.
- assert(current_virtual_space()->expanded_words() == 0,
- "New virtual space nodes should not have expanded");
-
- size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
- page_size_words);
- // We probably want to expand by aligned_expand_vs_by_words here. 
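
VirtualSpaceList::expand_by() above never commits past the GC budget: it fails outright when even the floor does not fit, and otherwise caps the goal at the allowance. A small sketch of that capping, with illustrative word counts:

// grant_words(256, 2048, 1024) == 1024: ask big, settle for the budget;
// grant_words(256, 2048, 128) == 0: the caller must fall back to a GC.
static size_t grant_words(size_t min_words, size_t preferred_words, size_t allowed) {
  if (allowed < min_words) return 0;
  return preferred_words < allowed ? preferred_words : allowed;  // MIN2
}
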
- expand_by(current_virtual_space(), grow_chunks_by_words_aligned); - next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); - } - } else { - // Allocation will fail and induce a GC - if (TraceMetadataChunkAllocation && Verbose) { - gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():" - " Fail instead of expand the metaspace"); - } - } - } else { - // The virtual space expanded, get a new chunk - next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); - assert(next != NULL, "Just expanded, should succeed"); - } + if (next != NULL) { + return next; } - assert(next == NULL || (next->next() == NULL && next->prev() == NULL), - "New chunk is still on some list"); - return next; -} - -Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size, - size_t chunk_bunch) { - // Get a chunk from the chunk freelist - Metachunk* new_chunk = get_new_chunk(chunk_word_size, - chunk_word_size, - chunk_bunch); - return new_chunk; + // The expand amount is currently only determined by the requested sizes + // and not how much committed memory is left in the current virtual space. + + size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words()); + size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words()); + if (min_word_size >= preferred_word_size) { + // Can happen when humongous chunks are allocated. + preferred_word_size = min_word_size; + } + + bool expanded = expand_by(min_word_size, preferred_word_size); + if (expanded) { + next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); + assert(next != NULL, "The allocation was expected to succeed after the expansion"); + } + + return next; } void VirtualSpaceList::print_on(outputStream* st) const { @@ -1256,96 +1323,96 @@ // Calculate the amount to increase the high water mark (HWM). // Increase by a minimum amount (MinMetaspaceExpansion) so that // another expansion is not requested too soon. If that is not -// enough to satisfy the allocation (i.e. big enough for a word_size -// allocation), increase by MaxMetaspaceExpansion. If that is still -// not enough, expand by the size of the allocation (word_size) plus -// some. -size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) { - size_t before_inc = MetaspaceGC::capacity_until_GC(); - size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord; - size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord; - size_t page_size_words = os::vm_page_size() / BytesPerWord; - size_t size_delta_words = align_size_up(word_size, page_size_words); - size_t delta_words = MAX2(size_delta_words, min_delta_words); - if (delta_words > min_delta_words) { +// enough to satisfy the allocation, increase by MaxMetaspaceExpansion. +// If that is still not enough, expand by the size of the allocation +// plus some. +size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) { + size_t min_delta = MinMetaspaceExpansion; + size_t max_delta = MaxMetaspaceExpansion; + size_t delta = align_size_up(bytes, Metaspace::commit_alignment()); + + if (delta <= min_delta) { + delta = min_delta; + } else if (delta <= max_delta) { // Don't want to hit the high water mark on the next // allocation so make the delta greater than just enough // for this allocation. - delta_words = MAX2(delta_words, max_delta_words); - if (delta_words > max_delta_words) { - // This allocation is large but the next ones are probably not - // so increase by the minimum. 
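
The byte-based delta_capacity_until_GC() introduced in this hunk keeps three tiers. A standalone restatement with assumed flag values (MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M, 4K commit alignment; the real values are flag- and platform-dependent):

static size_t delta_for(size_t bytes) {
  const size_t min_d = 256 * 1024, max_d = 4 * 1024 * 1024, align = 4096;
  size_t delta = (bytes + align - 1) & ~(align - 1);   // align_size_up
  if (delta <= min_d) return min_d;                    // e.g. 100K -> 256K
  if (delta <= max_d) return max_d;                    // e.g. 1M -> 4M, extra headroom
  return delta + min_d;                                // e.g. 16M -> 16M + 256K
}
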
- delta_words = delta_words + min_delta_words;
- }
+ delta = max_delta;
+ } else {
+ // This allocation is large but the next ones are probably not
+ // so increase by the minimum.
+ delta = delta + min_delta;
 }
- return delta_words;
+
+ assert_is_size_aligned(delta, Metaspace::commit_alignment());
+
+ return delta;
+}
+
+size_t MetaspaceGC::capacity_until_GC() {
+ size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
+ assert(value >= MetaspaceSize, "Not initialized properly?");
+ return value;
}

-bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
-
- // If the user wants a limit, impose one.
- // The reason for someone using this flag is to limit reserved space. So
- // for non-class virtual space, compare against virtual spaces that are reserved.
- // For class virtual space, we only compare against the committed space, not
- // reserved space, because this is a larger space prereserved for compressed
- // class pointers.
- if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
- size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
- size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
- size_t real_allocated = nonclass_allocated + class_allocated;
- if (real_allocated >= MaxMetaspaceSize) {
+size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
+ assert_is_size_aligned(v, Metaspace::commit_alignment());
+
+ return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
+}
+
+size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
+ assert_is_size_aligned(v, Metaspace::commit_alignment());
+
+ return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
+}
+
+bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
+ // Check if the compressed class space is full.
+ if (is_class && Metaspace::using_class_space()) {
+ size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+ if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
 return false;
 }
 }

- // Class virtual space should always be expanded. Call GC for the other
- // metadata virtual space.
- if (Metaspace::using_class_space() &&
- (vsl == Metaspace::class_space_list())) return true;
-
- // If this is part of an allocation after a GC, expand
- // unconditionally.
- if (MetaspaceGC::expand_after_GC()) {
- return true;
+ // Check if the user has imposed a limit on the metaspace memory.
+ size_t committed_bytes = MetaspaceAux::committed_bytes();
+ if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
+ return false;
 }
-
- // If the capacity is below the minimum capacity, allow the
- // expansion. Also set the high-water-mark (capacity_until_GC)
- // to that minimum capacity so that a GC will not be induced
- // until that minimum capacity is exceeded. 
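
inc/dec_capacity_until_GC() above update the high-water mark lock-free through Atomic::add_ptr, which returns the updated value. The equivalent shape with std::atomic, whose fetch_add/fetch_sub return the previous value instead (hence the correction term):

#include <atomic>

static std::atomic<size_t> capacity_until_gc(0);

static size_t inc_capacity(size_t v) { return capacity_until_gc.fetch_add(v) + v; }
static size_t dec_capacity(size_t v) { return capacity_until_gc.fetch_sub(v) - v; }
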
- size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); - size_t metaspace_size_bytes = MetaspaceSize; - if (committed_capacity_bytes < metaspace_size_bytes || - capacity_until_GC() == 0) { - set_capacity_until_GC(metaspace_size_bytes); - return true; - } else { - if (committed_capacity_bytes < capacity_until_GC()) { - return true; - } else { - if (TraceMetadataChunkAllocation && Verbose) { - gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT - " capacity_until_GC " SIZE_FORMAT - " allocated_capacity_bytes " SIZE_FORMAT, - word_size, - capacity_until_GC(), - MetaspaceAux::allocated_capacity_bytes()); - } - return false; - } + return true; +} + +size_t MetaspaceGC::allowed_expansion() { + size_t committed_bytes = MetaspaceAux::committed_bytes(); + + size_t left_until_max = MaxMetaspaceSize - committed_bytes; + + // Always grant expansion if we are initiating the JVM, + // or if the GC_locker is preventing GCs. + if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) { + return left_until_max / BytesPerWord; } + + size_t capacity_until_gc = capacity_until_GC(); + + if (capacity_until_gc <= committed_bytes) { + return 0; + } + + size_t left_until_GC = capacity_until_gc - committed_bytes; + size_t left_to_commit = MIN2(left_until_GC, left_until_max); + + return left_to_commit / BytesPerWord; } - - void MetaspaceGC::compute_new_size() { assert(_shrink_factor <= 100, "invalid shrink factor"); uint current_shrink_factor = _shrink_factor; _shrink_factor = 0; - // Until a faster way of calculating the "used" quantity is implemented, - // use "capacity". const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes(); const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); @@ -1377,9 +1444,10 @@ // If we have less capacity below the metaspace HWM, then // increment the HWM. size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; + expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment()); // Don't expand unless it's significant if (expand_bytes >= MinMetaspaceExpansion) { - MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes); + MetaspaceGC::inc_capacity_until_GC(expand_bytes); } if (PrintGCDetails && Verbose) { size_t new_capacity_until_GC = capacity_until_GC; @@ -1436,6 +1504,9 @@ // on the third call, and 100% by the fourth call. But if we recompute // size without shrinking, it goes back to 0%. 
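
Once initialization is done, allowed_expansion() above grants the smaller of the headroom up to the GC high-water mark and up to MaxMetaspaceSize. Worked numbers, assuming 8-byte words and illustrative sizes:

const size_t M = 1024 * 1024, bytes_per_word = 8;
const size_t committed = 40 * M, hwm = 48 * M, max_meta = 64 * M;
const size_t to_hwm = hwm - committed;                                     // 8M
const size_t to_max = max_meta - committed;                                // 24M
const size_t grant = (to_hwm < to_max ? to_hwm : to_max) / bytes_per_word; // 1M words
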
shrink_bytes = shrink_bytes / 100 * current_shrink_factor; + + shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); + assert(shrink_bytes <= max_shrink_bytes, err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, shrink_bytes, max_shrink_bytes)); @@ -1467,60 +1538,12 @@ // Don't shrink unless it's significant if (shrink_bytes >= MinMetaspaceExpansion && ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { - MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes); + MetaspaceGC::dec_capacity_until_GC(shrink_bytes); } } // Metadebug methods -void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm, - size_t chunk_word_size){ -#ifdef ASSERT - VirtualSpaceList* vsl = sm->vs_list(); - if (MetaDataDeallocateALot && - Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) { - Metadebug::reset_deallocate_chunk_a_lot_count(); - for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) { - Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size); - if (dummy_chunk == NULL) { - break; - } - sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk); - - if (TraceMetadataChunkAllocation && Verbose) { - gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ", - sm->sum_count_in_chunks_in_use()); - dummy_chunk->print_on(gclog_or_tty); - gclog_or_tty->print_cr(" Free chunks total %d count %d", - sm->chunk_manager()->free_chunks_total_words(), - sm->chunk_manager()->free_chunks_count()); - } - } - } else { - Metadebug::inc_deallocate_chunk_a_lot_count(); - } -#endif -} - -void Metadebug::deallocate_block_a_lot(SpaceManager* sm, - size_t raw_word_size){ -#ifdef ASSERT - if (MetaDataDeallocateALot && - Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) { - Metadebug::set_deallocate_block_a_lot_count(0); - for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) { - MetaWord* dummy_block = sm->allocate_work(raw_word_size); - if (dummy_block == 0) { - break; - } - sm->deallocate(dummy_block, raw_word_size); - } - } else { - Metadebug::inc_deallocate_block_a_lot_count(); - } -#endif -} - void Metadebug::init_allocation_fail_alot_count() { if (MetadataAllocationFailALot) { _allocation_fail_alot_count = @@ -1664,31 +1687,6 @@ return free_chunks(index); } -void ChunkManager::free_chunks_put(Metachunk* chunk) { - assert_lock_strong(SpaceManager::expand_lock()); - ChunkList* free_list = find_free_chunks_list(chunk->word_size()); - chunk->set_next(free_list->head()); - free_list->set_head(chunk); - // chunk is being returned to the chunk free list - inc_free_chunks_total(chunk->capacity_word_size()); - slow_locked_verify(); -} - -void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) { - // The deallocation of a chunk originates in the freelist - // manangement code for a Metaspace and does not hold the - // lock. 
- assert(chunk != NULL, "Deallocating NULL");
- assert_lock_strong(SpaceManager::expand_lock());
- slow_locked_verify();
- if (TraceMetadataChunkAllocation) {
- gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
- PTR_FORMAT " size " SIZE_FORMAT,
- chunk, chunk->word_size());
- }
- free_chunks_put(chunk);
-}
-
 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
 assert_lock_strong(SpaceManager::expand_lock());

@@ -1700,7 +1698,6 @@
 assert(free_list != NULL, "Sanity check");

 chunk = free_list->head();
- debug_only(Metachunk* debug_head = chunk;)

 if (chunk == NULL) {
 return NULL;
@@ -1709,9 +1706,6 @@
 // Remove the chunk as the head of the list.
 free_list->remove_chunk(chunk);

- // Chunk is being removed from the chunks free list.
- dec_free_chunks_total(chunk->capacity_word_size());
-
 if (TraceMetadataChunkAllocation && Verbose) {
 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
@@ -1722,28 +1716,29 @@
 word_size,
 FreeBlockDictionary::atLeast);

- if (chunk != NULL) {
- if (TraceMetadataHumongousAllocation) {
- size_t waste = chunk->word_size() - word_size;
- gclog_or_tty->print_cr("Free list allocate humongous chunk size "
- SIZE_FORMAT " for requested size " SIZE_FORMAT
- " waste " SIZE_FORMAT,
- chunk->word_size(), word_size, waste);
- }
- // Chunk is being removed from the chunks free list.
- dec_free_chunks_total(chunk->capacity_word_size());
- } else {
+ if (chunk == NULL) {
 return NULL;
 }
+
+ if (TraceMetadataHumongousAllocation) {
+ size_t waste = chunk->word_size() - word_size;
+ gclog_or_tty->print_cr("Free list allocate humongous chunk size "
+ SIZE_FORMAT " for requested size " SIZE_FORMAT
+ " waste " SIZE_FORMAT,
+ chunk->word_size(), word_size, waste);
+ }
 }

+ // Chunk is being removed from the chunks free list.
+ dec_free_chunks_total(chunk->word_size());
+
 // Remove it from the links to this freelist
 chunk->set_next(NULL);
 chunk->set_prev(NULL);
#ifdef ASSERT
 // Chunk is no longer on any freelist. Setting to false make container_count_slow()
 // work.
- chunk->set_is_free(false);
+ chunk->set_is_tagged_free(false);
#endif
 chunk->container()->inc_container_count();
@@ -1875,7 +1870,7 @@
 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 Metachunk* chunk = chunks_in_use(i);
 while (chunk != NULL) {
- sum += chunk->capacity_word_size();
+ sum += chunk->word_size();
 chunk = chunk->next();
 }
 }
@@ -1951,12 +1946,12 @@
 chunk_word_size = medium_chunk_size();
 }

- // Might still need a humongous chunk. Enforce an
- // eight word granularity to facilitate reuse (some
- // wastage but better chance of reuse).
+ // Might still need a humongous chunk. Enforce
+ // humongous allocation sizes to be aligned up to
+ // the smallest chunk size. 
size_t if_humongous_sized_chunk = align_size_up(word_size + Metachunk::overhead(), - HumongousChunkGranularity); + smallest_chunk_size()); chunk_word_size = MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); @@ -1977,6 +1972,15 @@ return chunk_word_size; } +void SpaceManager::track_metaspace_memory_usage() { + if (is_init_completed()) { + if (is_class()) { + MemoryService::track_compressed_class_memory_usage(); + } + MemoryService::track_metaspace_memory_usage(); + } +} + MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { assert(vs_list()->current_virtual_space() != NULL, "Should have been set"); @@ -2002,15 +2006,20 @@ size_t grow_chunks_by_words = calc_chunk_size(word_size); Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); + MetaWord* mem = NULL; + // If a chunk was available, add it to the in-use chunk list // and do an allocation from it. if (next != NULL) { - Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words); // Add to this manager's list of chunks in use. add_chunk(next, false); - return next->allocate(word_size); + mem = next->allocate(word_size); } - return NULL; + + // Track metaspace memory usage statistic. + track_metaspace_memory_usage(); + + return mem; } void SpaceManager::print_on(outputStream* st) const { @@ -2105,7 +2114,7 @@ // Capture the next link before it is changed // by the call to return_chunk_at_head(); Metachunk* next = cur->next(); - cur->set_is_free(true); + DEBUG_ONLY(cur->set_is_tagged_free(true);) list->return_chunk_at_head(cur); cur = next; } @@ -2177,7 +2186,7 @@ while (humongous_chunks != NULL) { #ifdef ASSERT - humongous_chunks->set_is_free(true); + humongous_chunks->set_is_tagged_free(true); #endif if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", @@ -2186,10 +2195,10 @@ } assert(humongous_chunks->word_size() == (size_t) align_size_up(humongous_chunks->word_size(), - HumongousChunkGranularity), + smallest_chunk_size()), err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT " granularity %d", - humongous_chunks->word_size(), HumongousChunkGranularity)); + humongous_chunks->word_size(), smallest_chunk_size())); Metachunk* next_humongous_chunks = humongous_chunks->next(); humongous_chunks->container()->dec_container_count(); chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks); @@ -2341,7 +2350,6 @@ if (p == NULL) { p = allocate_work(raw_word_size); } - Metadebug::deallocate_block_a_lot(this, raw_word_size); return p; } @@ -2366,6 +2374,7 @@ inc_used_metrics(word_size); return current_chunk()->allocate(word_size); // caller handles null result } + if (current_chunk() != NULL) { result = current_chunk()->allocate(word_size); } @@ -2373,7 +2382,8 @@ if (result == NULL) { result = grow_and_allocate(word_size); } - if (result != 0) { + + if (result != NULL) { inc_used_metrics(word_size); assert(result != (MetaWord*) chunks_in_use(MediumIndex), "Head of the list is being allocated"); @@ -2438,7 +2448,7 @@ curr->print_on(out); curr_total += curr->word_size(); used += curr->used_word_size(); - capacity += curr->capacity_word_size(); + capacity += curr->word_size(); waste += curr->free_word_size() + curr->overhead();; } } @@ -2639,24 +2649,26 @@ void MetaspaceAux::print_on(outputStream* out) { Metaspace::MetadataType nct = Metaspace::NonClassType; - out->print_cr(" Metaspace total " - SIZE_FORMAT "K, used " SIZE_FORMAT "K," - " reserved " SIZE_FORMAT "K", - allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K); - - 
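
Rounding humongous requests up to the smallest chunk size (above) means a retired humongous chunk can always be re-split into whole specialized chunks. Worked example, assuming a 128-word specialized chunk and a hypothetical 4-word Metachunk header overhead:

static size_t humongous_words(size_t word_size) {
  const size_t overhead = 4, granule = 128;           // assumed values
  size_t need = word_size + overhead;                 // 1000 -> 1004
  return (need + granule - 1) / granule * granule;    // 1004 -> 1024
}
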
out->print_cr(" data space " - SIZE_FORMAT "K, used " SIZE_FORMAT "K," - " reserved " SIZE_FORMAT "K", - allocated_capacity_bytes(nct)/K, - allocated_used_bytes(nct)/K, - reserved_bytes(nct)/K); + out->print_cr(" Metaspace " + "used " SIZE_FORMAT "K, " + "capacity " SIZE_FORMAT "K, " + "committed " SIZE_FORMAT "K, " + "reserved " SIZE_FORMAT "K", + allocated_used_bytes()/K, + allocated_capacity_bytes()/K, + committed_bytes()/K, + reserved_bytes()/K); + if (Metaspace::using_class_space()) { Metaspace::MetadataType ct = Metaspace::ClassType; out->print_cr(" class space " - SIZE_FORMAT "K, used " SIZE_FORMAT "K," - " reserved " SIZE_FORMAT "K", + "used " SIZE_FORMAT "K, " + "capacity " SIZE_FORMAT "K, " + "committed " SIZE_FORMAT "K, " + "reserved " SIZE_FORMAT "K", + allocated_used_bytes(ct)/K, allocated_capacity_bytes(ct)/K, - allocated_used_bytes(ct)/K, + committed_bytes(ct)/K, reserved_bytes(ct)/K); } } @@ -2808,6 +2820,9 @@ size_t Metaspace::_first_chunk_word_size = 0; size_t Metaspace::_first_class_chunk_word_size = 0; +size_t Metaspace::_commit_alignment = 0; +size_t Metaspace::_reserve_alignment = 0; + Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { initialize(lock, type); } @@ -2828,6 +2843,8 @@ #define VIRTUALSPACEMULTIPLIER 2 #ifdef _LP64 +static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); + void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { // Figure out the narrow_klass_base and the narrow_klass_shift. The // narrow_klass_base is the lower of the metaspace base and the cds base @@ -2837,14 +2854,22 @@ address higher_address; if (UseSharedSpaces) { higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), - (address)(metaspace_base + class_metaspace_size())); + (address)(metaspace_base + compressed_class_space_size())); lower_base = MIN2(metaspace_base, cds_base); } else { - higher_address = metaspace_base + class_metaspace_size(); + higher_address = metaspace_base + compressed_class_space_size(); lower_base = metaspace_base; + + uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; + // If compressed class space fits in lower 32G, we don't need a base. + if (higher_address <= (address)klass_encoding_max) { + lower_base = 0; // effectively lower base is zero. + } } + Universe::set_narrow_klass_base(lower_base); - if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) { + + if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { Universe::set_narrow_klass_shift(0); } else { assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); @@ -2859,31 +2884,40 @@ assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); address lower_base = MIN2((address)metaspace_base, cds_base); address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), - (address)(metaspace_base + class_metaspace_size())); - return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint); + (address)(metaspace_base + compressed_class_space_size())); + return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); } // Try to allocate the metaspace at the requested addr. 
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { assert(using_class_space(), "called improperly"); assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); - assert(class_metaspace_size() < KlassEncodingMetaspaceMax, + assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, "Metaspace size is too big"); - - ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(), - os::vm_allocation_granularity(), - false, requested_addr, 0); + assert_is_ptr_aligned(requested_addr, _reserve_alignment); + assert_is_ptr_aligned(cds_base, _reserve_alignment); + assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); + + // Don't use large pages for the class space. + bool large_pages = false; + + ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), + _reserve_alignment, + large_pages, + requested_addr, 0); if (!metaspace_rs.is_reserved()) { if (UseSharedSpaces) { + size_t increment = align_size_up(1*G, _reserve_alignment); + // Keep trying to allocate the metaspace, increasing the requested_addr // by 1GB each time, until we reach an address that will no longer allow // use of CDS with compressed klass pointers. char *addr = requested_addr; - while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) && - can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) { - addr = addr + 1*G; - metaspace_rs = ReservedSpace(class_metaspace_size(), - os::vm_allocation_granularity(), false, addr, 0); + while (!metaspace_rs.is_reserved() && (addr + increment > addr) && + can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { + addr = addr + increment; + metaspace_rs = ReservedSpace(compressed_class_space_size(), + _reserve_alignment, large_pages, addr, 0); } } @@ -2893,11 +2927,11 @@ // initialization has happened that depends on UseCompressedClassPointers. // So, UseCompressedClassPointers cannot be turned off at this point. if (!metaspace_rs.is_reserved()) { - metaspace_rs = ReservedSpace(class_metaspace_size(), - os::vm_allocation_granularity(), false); + metaspace_rs = ReservedSpace(compressed_class_space_size(), + _reserve_alignment, large_pages); if (!metaspace_rs.is_reserved()) { vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", - class_metaspace_size())); + compressed_class_space_size())); } } } @@ -2919,8 +2953,8 @@ if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT, Universe::narrow_klass_base(), Universe::narrow_klass_shift()); - gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, - class_metaspace_size(), metaspace_rs.base(), requested_addr); + gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, + compressed_class_space_size(), metaspace_rs.base(), requested_addr); } } @@ -2933,34 +2967,91 @@ assert(using_class_space(), "Must be using class space"); _class_space_list = new VirtualSpaceList(rs); _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); + + if (!_class_space_list->initialization_succeeded()) { + vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); + } } #endif +void Metaspace::ergo_initialize() { + if (DumpSharedSpaces) { + // Using large pages when dumping the shared archive is currently not implemented. 
+ FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); + } + + size_t page_size = os::vm_page_size(); + if (UseLargePages && UseLargePagesInMetaspace) { + page_size = os::large_page_size(); + } + + _commit_alignment = page_size; + _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); + + // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will + // override if MaxMetaspaceSize was set on the command line or not. + // This information is needed later to conform to the specification of the + // java.lang.management.MemoryUsage API. + // + // Ideally, we would be able to set the default value of MaxMetaspaceSize in + // globals.hpp to the aligned value, but this is not possible, since the + // alignment depends on other flags being parsed. + MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment); + + if (MetaspaceSize > MaxMetaspaceSize) { + MetaspaceSize = MaxMetaspaceSize; + } + + MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment); + + assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); + + if (MetaspaceSize < 256*K) { + vm_exit_during_initialization("Too small initial Metaspace size"); + } + + MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment); + MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment); + + CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment); + set_compressed_class_space_size(CompressedClassSpaceSize); +} + void Metaspace::global_initialize() { // Initialize the alignment for shared spaces. int max_alignment = os::vm_page_size(); size_t cds_total = 0; - set_class_metaspace_size(align_size_up(CompressedClassSpaceSize, - os::vm_allocation_granularity())); - MetaspaceShared::set_max_alignment(max_alignment); if (DumpSharedSpaces) { - SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); + SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); - SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); - SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); + SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); + SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); // Initialize with the sum of the shared space sizes. The read-only // and read write metaspace chunks will be allocated out of this and the // remainder is the misc code and data chunks. 
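
On a hypothetical Linux/x86_64 machine with 4K small pages, 2M large pages and 4K allocation granularity, the ergonomics above resolve as sketched below (the numbers are examples, not guaranteed defaults):

// UseLargePagesInMetaspace off: commit = 4K, reserve = max(4K, 4K) = 4K.
// UseLargePagesInMetaspace on:  commit = 2M, reserve = max(2M, 4K) = 2M, and
// MaxMetaspaceSize, MetaspaceSize and the expansion steps are all rounded
// down to those granules before first use.
const size_t page = 4096, large_page = 2 * 1024 * 1024, granularity = 4096;
const bool use_large = true;
const size_t commit_align  = use_large ? large_page : page;
const size_t reserve_align = commit_align > granularity ? commit_align : granularity;
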
cds_total = FileMapInfo::shared_spaces_size(); + cds_total = align_size_up(cds_total, _reserve_alignment); _space_list = new VirtualSpaceList(cds_total/wordSize); _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); + if (!_space_list->initialization_succeeded()) { + vm_exit_during_initialization("Unable to dump shared archive.", NULL); + } + #ifdef _LP64 + if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) { + vm_exit_during_initialization("Unable to dump shared archive.", + err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" + SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " + "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(), + cds_total + compressed_class_space_size(), UnscaledClassSpaceMax)); + } + // Set the compressed klass pointer base so that decoding of these pointers works // properly when creating the shared archive. assert(UseCompressedOops && UseCompressedClassPointers, @@ -2971,9 +3062,6 @@ _space_list->current_virtual_space()->bottom()); } - // Set the shift to zero. - assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total, - "CDS region is too large"); Universe::set_narrow_klass_shift(0); #endif @@ -2992,12 +3080,12 @@ // Map in spaces now also if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { FileMapInfo::set_current_info(mapinfo); + cds_total = FileMapInfo::shared_spaces_size(); + cds_address = (address)mapinfo->region_base(0); } else { assert(!mapinfo->is_open() && !UseSharedSpaces, "archive file not closed or shared spaces not disabled."); } - cds_total = FileMapInfo::shared_spaces_size(); - cds_address = (address)mapinfo->region_base(0); } #ifdef _LP64 @@ -3005,9 +3093,12 @@ // above the heap and above the CDS area (if it exists). if (using_class_space()) { if (UseSharedSpaces) { - allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address); + char* cds_end = (char*)(cds_address + cds_total); + cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); + allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); } else { - allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0); + char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment); + allocate_metaspace_compressed_klass_ptrs(base, 0); } } #endif @@ -3023,11 +3114,19 @@ _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); // Arbitrarily set the initial virtual space to a multiple // of the boot class loader size. - size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size(); + size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size; + word_size = align_size_up(word_size, Metaspace::reserve_alignment_words()); + // Initialize the list of virtual spaces. 
_space_list = new VirtualSpaceList(word_size); _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); + + if (!_space_list->initialization_succeeded()) { + vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); + } } + + MetaspaceGC::initialize(); } Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, @@ -3039,7 +3138,7 @@ return chunk; } - return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch); + return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch); } void Metaspace::initialize(Mutex* lock, MetaspaceType type) { @@ -3112,19 +3211,18 @@ } MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { - MetaWord* result; - MetaspaceGC::set_expand_after_GC(true); - size_t before_inc = MetaspaceGC::capacity_until_GC(); - size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord; - MetaspaceGC::inc_capacity_until_GC(delta_bytes); + size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); + assert(delta_bytes > 0, "Must be"); + + size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes); + size_t before_inc = after_inc - delta_bytes; + if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT - " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); + " to " SIZE_FORMAT, before_inc, after_inc); } - result = allocate(word_size, mdtype); - - return result; + return allocate(word_size, mdtype); } // Space allocated in the Metaspace. This may @@ -3206,69 +3304,108 @@ } } -Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, + +MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, bool read_only, MetaspaceObj::Type type, TRAPS) { if (HAS_PENDING_EXCEPTION) { assert(false, "Should not allocate with exception pending"); return NULL; // caller does a CHECK_NULL too } - MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; - - // SSS: Should we align the allocations and make sure the sizes are aligned. - MetaWord* result = NULL; - assert(loader_data != NULL, "Should never pass around a NULL loader_data. " "ClassLoaderData::the_null_class_loader_data() should have been used."); + // Allocate in metaspaces without taking out a lock, because it deadlocks // with the SymbolTable_lock. Dumping is single threaded for now. We'll have // to revisit this for application class data sharing. if (DumpSharedSpaces) { assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); - result = space->allocate(word_size, NonClassType); + MetaWord* result = space->allocate(word_size, NonClassType); if (result == NULL) { report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); - } else { - space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); } - return Metablock::initialize(result, word_size); + + space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); + + // Zero initialize. + Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); + + return result; } - result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); + MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; + + // Try to allocate metadata. 
+ MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); + + if (result == NULL) { + // Allocation failed. + if (is_init_completed()) { + // Only start a GC if the bootstrapping has completed. + + // Try to clean out some memory and retry. + result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( + loader_data, word_size, mdtype); + } + } if (result == NULL) { - // Try to clean out some memory and retry. - result = - Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( - loader_data, word_size, mdtype); - - // If result is still null, we are out of memory. - if (result == NULL) { - if (Verbose && TraceMetadataChunkAllocation) { - gclog_or_tty->print_cr("Metaspace allocation failed for size " - SIZE_FORMAT, word_size); - if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty); - MetaspaceAux::dump(gclog_or_tty); - } - // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support - const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" : - "Metadata space"; - report_java_out_of_memory(space_string); - - if (JvmtiExport::should_post_resource_exhausted()) { - JvmtiExport::post_resource_exhausted( - JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, - space_string); - } - if (is_class_space_allocation(mdtype)) { - THROW_OOP_0(Universe::out_of_memory_error_class_metaspace()); - } else { - THROW_OOP_0(Universe::out_of_memory_error_metaspace()); - } + report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL); + } + + // Zero initialize. + Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); + + return result; +} + +size_t Metaspace::class_chunk_size(size_t word_size) { + assert(using_class_space(), "Has to use class space"); + return class_vsm()->calc_chunk_size(word_size); +} + +void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) { + // If result is still null, we are out of memory. + if (Verbose && TraceMetadataChunkAllocation) { + gclog_or_tty->print_cr("Metaspace allocation failed for size " + SIZE_FORMAT, word_size); + if (loader_data->metaspace_or_null() != NULL) { + loader_data->dump(gclog_or_tty); } + MetaspaceAux::dump(gclog_or_tty); } - return Metablock::initialize(result, word_size); + + bool out_of_compressed_class_space = false; + if (is_class_space_allocation(mdtype)) { + Metaspace* metaspace = loader_data->metaspace_non_null(); + out_of_compressed_class_space = + MetaspaceAux::committed_bytes(Metaspace::ClassType) + + (metaspace->class_chunk_size(word_size) * BytesPerWord) > + CompressedClassSpaceSize; + } + + // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support + const char* space_string = out_of_compressed_class_space ? 
+ "Compressed class space" : "Metaspace"; + + report_java_out_of_memory(space_string); + + if (JvmtiExport::should_post_resource_exhausted()) { + JvmtiExport::post_resource_exhausted( + JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, + space_string); + } + + if (!is_init_completed()) { + vm_exit_during_initialization("OutOfMemoryError", space_string); + } + + if (out_of_compressed_class_space) { + THROW_OOP(Universe::out_of_memory_error_class_metaspace()); + } else { + THROW_OOP(Universe::out_of_memory_error_metaspace()); + } } void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { @@ -3420,4 +3557,94 @@ TestMetaspaceAuxTest::test(); } +class TestVirtualSpaceNodeTest { + static void chunk_up(size_t words_left, size_t& num_medium_chunks, + size_t& num_small_chunks, + size_t& num_specialized_chunks) { + num_medium_chunks = words_left / MediumChunk; + words_left = words_left % MediumChunk; + + num_small_chunks = words_left / SmallChunk; + words_left = words_left % SmallChunk; + // how many specialized chunks can we get? + num_specialized_chunks = words_left / SpecializedChunk; + assert(words_left % SpecializedChunk == 0, "should be nothing left"); + } + + public: + static void test() { + MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); + const size_t vsn_test_size_words = MediumChunk * 4; + const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord; + + // The chunk sizes must be multiples of eachother, or this will fail + STATIC_ASSERT(MediumChunk % SmallChunk == 0); + STATIC_ASSERT(SmallChunk % SpecializedChunk == 0); + + { // No committed memory in VSN + ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); + VirtualSpaceNode vsn(vsn_test_size_bytes); + vsn.initialize(); + vsn.retire(&cm); + assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN"); + } + + { // All of VSN is committed, half is used by chunks + ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); + VirtualSpaceNode vsn(vsn_test_size_bytes); + vsn.initialize(); + vsn.expand_by(vsn_test_size_words, vsn_test_size_words); + vsn.get_chunk_vs(MediumChunk); + vsn.get_chunk_vs(MediumChunk); + vsn.retire(&cm); + assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks"); + assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up"); + } + + { // 4 pages of VSN is committed, some is used by chunks + ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); + VirtualSpaceNode vsn(vsn_test_size_bytes); + const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord; + assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size"); + vsn.initialize(); + vsn.expand_by(page_chunks, page_chunks); + vsn.get_chunk_vs(SmallChunk); + vsn.get_chunk_vs(SpecializedChunk); + vsn.retire(&cm); + + // committed - used = words left to retire + const size_t words_left = page_chunks - SmallChunk - SpecializedChunk; + + size_t num_medium_chunks, num_small_chunks, num_spec_chunks; + chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); + + assert(num_medium_chunks == 0, "should not get any medium chunks"); + assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); + assert(cm.sum_free_chunks() == words_left, "sizes should add up"); + } + + { // Half of VSN is committed, a humongous chunk is used + ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); + VirtualSpaceNode vsn(vsn_test_size_bytes); + 
vsn.initialize(); + vsn.expand_by(MediumChunk * 2, MediumChunk * 2); + vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk + vsn.retire(&cm); + + const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk); + size_t num_medium_chunks, num_small_chunks, num_spec_chunks; + chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); + + assert(num_medium_chunks == 0, "should not get any medium chunks"); + assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); + assert(cm.sum_free_chunks() == words_left, "sizes should add up"); + } + + } +}; + +void TestVirtualSpaceNode_test() { + TestVirtualSpaceNodeTest::test(); +} + #endif diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/metaspace.hpp --- a/src/share/vm/memory/metaspace.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/metaspace.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -87,9 +87,10 @@ friend class MetaspaceAux; public: - enum MetadataType {ClassType = 0, - NonClassType = ClassType + 1, - MetadataTypeCount = ClassType + 2 + enum MetadataType { + ClassType, + NonClassType, + MetadataTypeCount }; enum MetaspaceType { StandardMetaspaceType, @@ -103,6 +104,9 @@ private: void initialize(Mutex* lock, MetaspaceType type); + // Get the first chunk for a Metaspace. Used for + // special cases such as the boot class loader, reflection + // class loader and anonymous class loader. Metachunk* get_initialization_chunk(MetadataType mdtype, size_t chunk_word_size, size_t chunk_bunch); @@ -111,18 +115,21 @@ static size_t align_word_size_up(size_t); // Aligned size of the metaspace. - static size_t _class_metaspace_size; + static size_t _compressed_class_space_size; - static size_t class_metaspace_size() { - return _class_metaspace_size; + static size_t compressed_class_space_size() { + return _compressed_class_space_size; } - static void set_class_metaspace_size(size_t metaspace_size) { - _class_metaspace_size = metaspace_size; + static void set_compressed_class_space_size(size_t size) { + _compressed_class_space_size = size; } static size_t _first_chunk_word_size; static size_t _first_class_chunk_word_size; + static size_t _commit_alignment; + static size_t _reserve_alignment; + SpaceManager* _vsm; SpaceManager* vsm() const { return _vsm; } @@ -132,7 +139,6 @@ // Allocate space for metadata of type mdtype. This is space // within a Metachunk and is used by // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS) - // which returns a Metablock. 
MetaWord* allocate(size_t word_size, MetadataType mdtype); // Virtual Space lists for both classes and other metadata @@ -186,17 +192,24 @@ AllocRecord * _alloc_record_head; AllocRecord * _alloc_record_tail; + size_t class_chunk_size(size_t word_size); + public: Metaspace(Mutex* lock, MetaspaceType type); ~Metaspace(); - // Initialize globals for Metaspace + static void ergo_initialize(); static void global_initialize(); static size_t first_chunk_word_size() { return _first_chunk_word_size; } static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } + static size_t reserve_alignment() { return _reserve_alignment; } + static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; } + static size_t commit_alignment() { return _commit_alignment; } + static size_t commit_alignment_words() { return _commit_alignment / BytesPerWord; } + char* bottom() const; size_t used_words_slow(MetadataType mdtype) const; size_t free_words_slow(MetadataType mdtype) const; @@ -205,8 +218,8 @@ size_t used_bytes_slow(MetadataType mdtype) const; size_t capacity_bytes_slow(MetadataType mdtype) const; - static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size, - bool read_only, MetaspaceObj::Type type, TRAPS); + static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size, + bool read_only, MetaspaceObj::Type type, TRAPS); void deallocate(MetaWord* ptr, size_t byte_size, bool is_class); MetaWord* expand_and_allocate(size_t size, @@ -219,6 +232,9 @@ static void purge(MetadataType mdtype); static void purge(); + static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, + MetadataType mdtype, TRAPS); + void print_on(outputStream* st) const; // Debugging support void verify(); @@ -238,6 +254,7 @@ static bool is_class_space_allocation(MetadataType mdType) { return mdType == ClassType && using_class_space(); } + }; class MetaspaceAux : AllStatic { @@ -352,17 +369,10 @@ class MetaspaceGC : AllStatic { - // The current high-water-mark for inducing a GC. When - // the capacity of all space in the virtual lists reaches this value, - // a GC is induced and the value is increased. This should be changed - // to the space actually used for allocations to avoid affects of - // fragmentation losses to partially used chunks. Size is in words. - static size_t _capacity_until_GC; - - // After a GC is done any allocation that fails should try to expand - // the capacity of the Metaspaces. This flag is set during attempts - // to allocate in the VMGCOperation that does the GC. - static bool _expand_after_GC; + // The current high-water-mark for inducing a GC. + // When committed memory of all metaspaces reaches this value, + // a GC is induced and the value is increased. Size is in bytes. + static volatile intptr_t _capacity_until_GC; // For a CMS collection, signal that a concurrent collection should // be started. @@ -370,20 +380,16 @@ static uint _shrink_factor; - static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; } - static size_t shrink_factor() { return _shrink_factor; } void set_shrink_factor(uint v) { _shrink_factor = v; } public: - static size_t capacity_until_GC() { return _capacity_until_GC; } - static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; } - static void dec_capacity_until_GC(size_t v) { - _capacity_until_GC = _capacity_until_GC > v ? 
_capacity_until_GC - v : 0; - } - static bool expand_after_GC() { return _expand_after_GC; } - static void set_expand_after_GC(bool v) { _expand_after_GC = v; } + static void initialize() { _capacity_until_GC = MetaspaceSize; } + + static size_t capacity_until_GC(); + static size_t inc_capacity_until_GC(size_t v); + static size_t dec_capacity_until_GC(size_t v); static bool should_concurrent_collect() { return _should_concurrent_collect; } static void set_should_concurrent_collect(bool v) { @@ -391,11 +397,14 @@ } // The amount to increase the high-water-mark (_capacity_until_GC) - static size_t delta_capacity_until_GC(size_t word_size); + static size_t delta_capacity_until_GC(size_t bytes); - // It is expected that this will be called when the current capacity - // has been used and a GC should be considered. - static bool should_expand(VirtualSpaceList* vsl, size_t word_size); + // Tells if we can expand metaspace without hitting set limits. + static bool can_expand(size_t words, bool is_class); + + // Returns the amount that we can expand without hitting a GC, + // measured in words. + static size_t allowed_expansion(); // Calculate the new high-water mark at which to induce // a GC. diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/sharedHeap.cpp --- a/src/share/vm/memory/sharedHeap.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/sharedHeap.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -247,6 +247,7 @@ } void SharedHeap::post_initialize() { + CollectedHeap::post_initialize(); ref_processing_init(); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/memory/universe.cpp --- a/src/share/vm/memory/universe.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/memory/universe.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -677,13 +677,13 @@ // HeapBased - Use compressed oops with heap base + encoding. // 4Gb -static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1); +static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1); // 32Gb -// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes; +// OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes; char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) { assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be"); - assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be"); + assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be"); assert(is_size_aligned(heap_size, alignment), "Must be"); uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment); @@ -702,20 +702,40 @@ // If the total size is small enough to allow UnscaledNarrowOop then // just use UnscaledNarrowOop. 
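One note before the universe.cpp hunks: the MetaspaceGC declarations above replace bare setters on _capacity_until_GC with inc/dec functions over a volatile value, so concurrent allocators can move the high-water mark safely. A sketch of the intended semantics, with std::atomic standing in for HotSpot's own Atomic primitives; the zero floor in dec mirrors the ternary in the deleted code, and the return values are an assumption of this sketch.

#include <atomic>
#include <cstddef>
#include <cstdint>

static std::atomic<intptr_t> capacity_until_GC{0};

size_t inc_capacity_until_GC(size_t v) {
  // atomic add; returns the new high-water mark
  return (size_t)(capacity_until_GC.fetch_add((intptr_t)v) + (intptr_t)v);
}

size_t dec_capacity_until_GC(size_t v) {
  // CAS loop so the mark never drops below zero under races
  intptr_t old_mark = capacity_until_GC.load();
  intptr_t new_mark;
  do {
    new_mark = old_mark > (intptr_t)v ? old_mark - (intptr_t)v : 0;
  } while (!capacity_until_GC.compare_exchange_weak(old_mark, new_mark));
  return (size_t)new_mark;
}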
} else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) { - if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) && + if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) && (Universe::narrow_oop_shift() == 0)) { // Use 32-bits oops without encoding and // place heap's top on the 4Gb boundary - base = (NarrowOopHeapMax - heap_size); + base = (UnscaledOopHeapMax - heap_size); } else { // Can't reserve with NarrowOopShift == 0 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); + if (mode == UnscaledNarrowOop || - mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) { + mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) { + // Use zero based compressed oops with encoding and // place heap's top on the 32Gb boundary in case // total_size > 4Gb or failed to reserve below 4Gb. - base = (OopEncodingHeapMax - heap_size); + uint64_t heap_top = OopEncodingHeapMax; + + // For small heaps, save some space for compressed class pointer + // space so it can be decoded with no base. + if (UseCompressedClassPointers && !UseSharedSpaces && + OopEncodingHeapMax <= 32*G) { + + uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment); + assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space, + alignment), "difference must be aligned too"); + uint64_t new_top = OopEncodingHeapMax-class_space; + + if (total_size <= new_top) { + heap_top = new_top; + } + } + + // Align base to the adjusted top of the heap + base = heap_top - heap_size; } } } else { @@ -737,7 +757,7 @@ // Set to a non-NULL value so the ReservedSpace ctor computes // the correct no-access prefix. // The final value will be set in initialize_heap() below. - Universe::set_narrow_oop_base((address)NarrowOopHeapMax); + Universe::set_narrow_oop_base((address)UnscaledOopHeapMax); #ifdef _WIN64 if (UseLargePages) { // Cannot allocate guard pages for implicit checks in indexed @@ -765,6 +785,7 @@ } else if (UseG1GC) { #if INCLUDE_ALL_GCS G1CollectorPolicy* g1p = new G1CollectorPolicy(); + g1p->initialize_all(); G1CollectedHeap* g1h = new G1CollectedHeap(g1p); Universe::_collectedHeap = g1h; #else // INCLUDE_ALL_GCS @@ -789,6 +810,7 @@ } else { // default old generation gc_policy = new MarkSweepPolicy(); } + gc_policy->initialize_all(); Universe::_collectedHeap = new GenCollectedHeap(gc_policy); } @@ -833,7 +855,7 @@ Universe::set_narrow_oop_use_implicit_null_checks(true); } #endif // _WIN64 - if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) { + if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) { // Can't reserve heap below 4Gb. Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); } else { @@ -1021,7 +1043,7 @@ Universe::_virtual_machine_error_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false); - Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false); + Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false); if (!DumpSharedSpaces) { // These are the only Java fields that are currently set during shared space dumping. 
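The arithmetic in the preferred_heap_base() hunk above is easiest to see with numbers. A worked example under assumed values (32 GB OopEncodingHeapMax, 1 GB CompressedClassSpaceSize, 20 GB heap, 1 GB alignment; none of these figures come from the patch): the heap top is pulled down to 31 GB so the class space can sit just below the 32 GB encoding limit and both decode with a zero base.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t G = 1024ULL * 1024 * 1024;
  const uint64_t oop_encoding_heap_max = 32 * G;  // assumed
  const uint64_t class_space_size = 1 * G;        // assumed, already aligned
  const uint64_t heap_size = 20 * G;              // assumed

  uint64_t heap_top = oop_encoding_heap_max;
  uint64_t new_top = oop_encoding_heap_max - class_space_size;  // 31 GB
  if (heap_size <= new_top) {
    heap_top = new_top;            // leave room for the class space
  }
  uint64_t base = heap_top - heap_size;                         // 11 GB
  printf("heap [%llu, %llu) GB, class space [%llu, 32) GB\n",
         (unsigned long long)(base / G),
         (unsigned long long)(heap_top / G),
         (unsigned long long)(heap_top / G));
  return 0;
}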
@@ -1029,7 +1051,7 @@ Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg()); - msg = java_lang_String::create_from_str("Metadata space", CHECK_false); + msg = java_lang_String::create_from_str("Metaspace", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg()); msg = java_lang_String::create_from_str("Compressed class space", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg()); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/constantPool.cpp --- a/src/share/vm/oops/constantPool.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/constantPool.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -40,7 +40,6 @@ #include "runtime/init.hpp" #include "runtime/javaCalls.hpp" #include "runtime/signature.hpp" -#include "runtime/synchronizer.hpp" #include "runtime/vframe.hpp" ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) { @@ -70,6 +69,7 @@ // only set to non-zero if constant pool is merged by RedefineClasses set_version(0); + set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock")); // initialize tag array int length = tags->length(); @@ -95,6 +95,9 @@ void ConstantPool::release_C_heap_structures() { // walk constant pool and decrement symbol reference counts unreference_symbols(); + + delete _lock; + set_lock(NULL); } objArrayOop ConstantPool::resolved_references() const { @@ -151,6 +154,9 @@ ClassLoaderData* loader_data = pool_holder()->class_loader_data(); set_resolved_references(loader_data->add_handle(refs_handle)); } + + // Also need to recreate the mutex. Make sure this matches the constructor + set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock")); } } @@ -161,23 +167,7 @@ set_resolved_reference_length( resolved_references() != NULL ? resolved_references()->length() : 0); set_resolved_references(NULL); -} - -oop ConstantPool::lock() { - if (_pool_holder) { - // We re-use the _pool_holder's init_lock to reduce footprint. - // Notes on deadlocks: - // [1] This lock is a Java oop, so it can be recursively locked by - // the same thread without self-deadlocks. - // [2] Deadlock will happen if there is circular dependency between - // the <clinit> of two Java classes. However, in this case, - // the deadlock would have happened long before we reach - // ConstantPool::lock(), so reusing init_lock does not - // increase the possibility of deadlock. - return _pool_holder->init_lock(); - } else { - return NULL; - } + set_lock(NULL); } int ConstantPool::cp_to_object_index(int cp_index) { @@ -211,9 +201,7 @@ Symbol* name = NULL; Handle loader; - { - oop cplock = this_oop->lock(); - ObjectLocker ol(cplock , THREAD, cplock != NULL); + { MonitorLockerEx ml(this_oop->lock()); if (this_oop->tag_at(which).is_unresolved_klass()) { if (this_oop->tag_at(which).is_unresolved_klass_in_error()) { @@ -260,8 +248,7 @@ bool throw_orig_error = false; { - oop cplock = this_oop->lock(); - ObjectLocker ol(cplock, THREAD, cplock != NULL); + MonitorLockerEx ml(this_oop->lock()); // some other thread has beaten us and has resolved the class. if (this_oop->tag_at(which).is_klass()) { @@ -329,8 +316,7 @@ } return k(); } else { - oop cplock = this_oop->lock(); - ObjectLocker ol(cplock, THREAD, cplock != NULL); + MonitorLockerEx ml(this_oop->lock()); // Only updated constant pool - if it is resolved. 
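The constantPool.cpp hunks above swap ObjectLocker on a Java oop (which could be NULL before the pool holder was registered) for MonitorLockerEx on a native Monitor the pool now owns outright, created in the constructor and deleted in release_C_heap_structures(). A generic sketch of that RAII shape, with std::mutex standing in for HotSpot's Monitor class and a hypothetical resolve_entry() as the critical section:

#include <mutex>

class ConstantPoolSketch {         // hypothetical stand-in type
  std::mutex* _lock;               // allocated with the pool ...
public:
  ConstantPoolSketch() : _lock(new std::mutex()) {}
  ~ConstantPoolSketch() { delete _lock; }   // ... and freed with it

  void resolve_entry() {
    std::lock_guard<std::mutex> ml(*_lock); // MonitorLockerEx equivalent
    // re-check the tag under the lock; resolve only if still unresolved
  }
};

Owning the lock natively removes the "lock may be NULL" special case the deleted comments had to document.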
do_resolve = this_oop->tag_at(which).is_unresolved_klass(); if (do_resolve) { @@ -600,8 +586,7 @@ int tag, TRAPS) { ResourceMark rm; Symbol* error = PENDING_EXCEPTION->klass()->name(); - oop cplock = this_oop->lock(); - ObjectLocker ol(cplock, THREAD, cplock != NULL); // lock cpool to change tag. + MonitorLockerEx ml(this_oop->lock()); // lock cpool to change tag. int error_tag = (tag == JVM_CONSTANT_MethodHandle) ? JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError; @@ -762,8 +747,7 @@ if (cache_index >= 0) { // Cache the oop here also. Handle result_handle(THREAD, result_oop); - oop cplock = this_oop->lock(); - ObjectLocker ol(cplock, THREAD, cplock != NULL); // don't know if we really need this + MonitorLockerEx ml(this_oop->lock()); // don't know if we really need this oop result = this_oop->resolved_references()->obj_at(cache_index); // Benign race condition: resolved_references may already be filled in while we were trying to lock. // The important thing here is that all threads pick up the same result. @@ -869,18 +853,9 @@ bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2, int index2, TRAPS) { - jbyte t1 = tag_at(index1).value(); - jbyte t2 = cp2->tag_at(index2).value(); - - - // JVM_CONSTANT_UnresolvedClassInError is equal to JVM_CONSTANT_UnresolvedClass - // when comparing - if (t1 == JVM_CONSTANT_UnresolvedClassInError) { - t1 = JVM_CONSTANT_UnresolvedClass; - } - if (t2 == JVM_CONSTANT_UnresolvedClassInError) { - t2 = JVM_CONSTANT_UnresolvedClass; - } + // The error tags are equivalent to non-error tags when comparing + jbyte t1 = tag_at(index1).non_error_value(); + jbyte t2 = cp2->tag_at(index2).non_error_value(); if (t1 != t2) { // Not the same entry type so there is nothing else to check. Note @@ -1001,8 +976,8 @@ case JVM_CONSTANT_MethodType: { - int k1 = method_type_index_at(index1); - int k2 = cp2->method_type_index_at(index2); + int k1 = method_type_index_at_error_ok(index1); + int k2 = cp2->method_type_index_at_error_ok(index2); bool match = compare_entry_to(k1, cp2, k2, CHECK_false); if (match) { return true; @@ -1011,11 +986,11 @@ case JVM_CONSTANT_MethodHandle: { - int k1 = method_handle_ref_kind_at(index1); - int k2 = cp2->method_handle_ref_kind_at(index2); + int k1 = method_handle_ref_kind_at_error_ok(index1); + int k2 = cp2->method_handle_ref_kind_at_error_ok(index2); if (k1 == k2) { - int i1 = method_handle_index_at(index1); - int i2 = cp2->method_handle_index_at(index2); + int i1 = method_handle_index_at_error_ok(index1); + int i2 = cp2->method_handle_index_at_error_ok(index2); bool match = compare_entry_to(i1, cp2, i2, CHECK_false); if (match) { return true; @@ -1329,14 +1304,6 @@ } } break; - case JVM_CONSTANT_UnresolvedClassInError: - { - Symbol* k = from_cp->unresolved_klass_at(from_i); - to_cp->unresolved_klass_at_put(to_i, k); - to_cp->tag_at_put(to_i, JVM_CONSTANT_UnresolvedClassInError); - } break; - - case JVM_CONSTANT_String: { Symbol* s = from_cp->unresolved_string_at(from_i); @@ -1352,15 +1319,17 @@ } break; case JVM_CONSTANT_MethodType: + case JVM_CONSTANT_MethodTypeInError: { - jint k = from_cp->method_type_index_at(from_i); + jint k = from_cp->method_type_index_at_error_ok(from_i); to_cp->method_type_index_at_put(to_i, k); } break; case JVM_CONSTANT_MethodHandle: + case JVM_CONSTANT_MethodHandleInError: { - int k1 = from_cp->method_handle_ref_kind_at(from_i); - int k2 = from_cp->method_handle_index_at(from_i); + int k1 = from_cp->method_handle_ref_kind_at_error_ok(from_i); + int k2 = 
from_cp->method_handle_index_at_error_ok(from_i); to_cp->method_handle_index_at_put(to_i, k1, k2); } break; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/constantPool.hpp --- a/src/share/vm/oops/constantPool.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/constantPool.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -111,6 +111,7 @@ int _version; } _saved; + Monitor* _lock; void set_tags(Array* tags) { _tags = tags; } void tag_at_put(int which, jbyte t) { tags()->at_put(which, t); } @@ -845,17 +846,8 @@ void set_resolved_reference_length(int length) { _saved._resolved_reference_length = length; } int resolved_reference_length() const { return _saved._resolved_reference_length; } - - // lock() may return null -- constant pool updates may happen before this lock is - // initialized, because the _pool_holder has not been fully initialized and - // has not been registered into the system dictionary. In this case, no other - // thread can be modifying this constantpool, so no synchronization is - // necessary. - // - // Use cplock() like this: - // oop cplock = cp->lock(); - // ObjectLocker ol(cplock , THREAD, cplock != NULL); - oop lock(); + void set_lock(Monitor* lock) { _lock = lock; } + Monitor* lock() { return _lock; } // Decrease ref counts of symbols that are in the constant pool // when the holder class is unloaded diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/cpCache.cpp --- a/src/share/vm/oops/cpCache.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/cpCache.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -284,8 +284,7 @@ // the lock, so that when the losing writer returns, he can use the linked // cache entry. - oop cplock = cpool->lock(); - ObjectLocker ol(cplock, Thread::current(), cplock != NULL); + MonitorLockerEx ml(cpool->lock()); if (!is_f1_null()) { return; } @@ -555,24 +554,37 @@ // Implementation of ConstantPoolCache ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data, - int length, const intStack& index_map, + const intStack& invokedynamic_index_map, const intStack& invokedynamic_map, TRAPS) { + + const int length = index_map.length() + invokedynamic_index_map.length(); int size = ConstantPoolCache::size(length); return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD) - ConstantPoolCache(length, index_map, invokedynamic_map); + ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map); } void ConstantPoolCache::initialize(const intArray& inverse_index_map, + const intArray& invokedynamic_inverse_index_map, const intArray& invokedynamic_references_map) { - assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache"); - for (int i = 0; i < length(); i++) { + for (int i = 0; i < inverse_index_map.length(); i++) { ConstantPoolCacheEntry* e = entry_at(i); int original_index = inverse_index_map[i]; e->initialize_entry(original_index); assert(entry_at(i) == e, "sanity"); } + + // Append invokedynamic entries at the end + int invokedynamic_offset = inverse_index_map.length(); + for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) { + int offset = i + invokedynamic_offset; + ConstantPoolCacheEntry* e = entry_at(offset); + int original_index = invokedynamic_inverse_index_map[i]; + e->initialize_entry(original_index); + assert(entry_at(offset) == e, "sanity"); + } + for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) { const int cpci = invokedynamic_references_map[ref]; if (cpci >= 0) { diff -r 790ebab62d23 -r f9f4503a4ab5 
src/share/vm/oops/cpCache.hpp --- a/src/share/vm/oops/cpCache.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/cpCache.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -31,6 +31,10 @@ class PSPromotionManager; +// The ConstantPoolCache is not a cache! It is the resolution table that the +// interpreter uses to avoid going into the runtime and a way to access resolved +// values. + // A ConstantPoolCacheEntry describes an individual entry of the constant // pool cache. There's 2 principal kinds of entries: field entries for in- // stance & static field access, and method entries for invokes. Some of @@ -392,26 +396,33 @@ friend class MetadataFactory; private: int _length; - ConstantPool* _constant_pool; // the corresponding constant pool + ConstantPool* _constant_pool; // the corresponding constant pool // Sizing debug_only(friend class ClassVerifier;) // Constructor - ConstantPoolCache(int length, const intStack& inverse_index_map, + ConstantPoolCache(int length, + const intStack& inverse_index_map, + const intStack& invokedynamic_inverse_index_map, const intStack& invokedynamic_references_map) : - _length(length), _constant_pool(NULL) { - initialize(inverse_index_map, invokedynamic_references_map); + _length(length), + _constant_pool(NULL) { + initialize(inverse_index_map, invokedynamic_inverse_index_map, + invokedynamic_references_map); for (int i = 0; i < length; i++) { assert(entry_at(i)->is_f1_null(), "Failed to clear?"); } } // Initialization - void initialize(const intArray& inverse_index_map, const intArray& invokedynamic_references_map); + void initialize(const intArray& inverse_index_map, + const intArray& invokedynamic_inverse_index_map, + const intArray& invokedynamic_references_map); public: - static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length, - const intStack& inverse_index_map, + static ConstantPoolCache* allocate(ClassLoaderData* loader_data, + const intStack& cp_cache_map, + const intStack& invokedynamic_cp_cache_map, const intStack& invokedynamic_references_map, TRAPS); bool is_constantPoolCache() const { return true; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/instanceKlass.cpp --- a/src/share/vm/oops/instanceKlass.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/instanceKlass.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -238,6 +238,13 @@ } } +// create a new array of vtable_indices for default methods +Array* InstanceKlass::create_new_default_vtable_indices(int len, TRAPS) { + Array* vtable_indices = MetadataFactory::new_array(class_loader_data(), len, CHECK_NULL); + assert(default_vtable_indices() == NULL, "only create once"); + set_default_vtable_indices(vtable_indices); + return vtable_indices; +} InstanceKlass::InstanceKlass(int vtable_len, int itable_len, @@ -263,6 +270,8 @@ set_array_klasses(NULL); set_methods(NULL); set_method_ordering(NULL); + set_default_methods(NULL); + set_default_vtable_indices(NULL); set_local_interfaces(NULL); set_transitive_interfaces(NULL); init_implementor(); @@ -311,7 +320,8 @@ void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data, Array* methods) { - if (methods != NULL && methods != Universe::the_empty_method_array()) { + if (methods != NULL && methods != Universe::the_empty_method_array() && + !methods->is_shared()) { for (int i = 0; i < methods->length(); i++) { Method* method = methods->at(i); if (method == NULL) continue; // maybe null if error processing @@ -335,13 +345,14 @@ // check that the interfaces don't come from super class Array* sti = (super_klass == NULL) ? 
NULL : InstanceKlass::cast(super_klass)->transitive_interfaces(); - if (ti != sti) { + if (ti != sti && ti != NULL && !ti->is_shared()) { MetadataFactory::free_array(loader_data, ti); } } // local interfaces can be empty - if (local_interfaces != Universe::the_empty_klass_array()) { + if (local_interfaces != Universe::the_empty_klass_array() && + local_interfaces != NULL && !local_interfaces->is_shared()) { MetadataFactory::free_array(loader_data, local_interfaces); } } @@ -371,16 +382,37 @@ deallocate_methods(loader_data, methods()); set_methods(NULL); - if (method_ordering() != Universe::the_empty_int_array()) { + if (method_ordering() != NULL && + method_ordering() != Universe::the_empty_int_array() && + !method_ordering()->is_shared()) { MetadataFactory::free_array(loader_data, method_ordering()); } set_method_ordering(NULL); + // default methods can be empty + if (default_methods() != NULL && + default_methods() != Universe::the_empty_method_array() && + !default_methods()->is_shared()) { + MetadataFactory::free_array(loader_data, default_methods()); + } + // Do NOT deallocate the default methods, they are owned by superinterfaces. + set_default_methods(NULL); + + // default methods vtable indices can be empty + if (default_vtable_indices() != NULL && + !default_vtable_indices()->is_shared()) { + MetadataFactory::free_array(loader_data, default_vtable_indices()); + } + set_default_vtable_indices(NULL); + + // This array is in Klass, but remove it with the InstanceKlass since // this place would be the only caller and it can share memory with transitive // interfaces. - if (secondary_supers() != Universe::the_empty_klass_array() && - secondary_supers() != transitive_interfaces()) { + if (secondary_supers() != NULL && + secondary_supers() != Universe::the_empty_klass_array() && + secondary_supers() != transitive_interfaces() && + !secondary_supers()->is_shared()) { MetadataFactory::free_array(loader_data, secondary_supers()); } set_secondary_supers(NULL); @@ -389,24 +421,32 @@ set_transitive_interfaces(NULL); set_local_interfaces(NULL); - MetadataFactory::free_array(loader_data, fields()); + if (fields() != NULL && !fields()->is_shared()) { + MetadataFactory::free_array(loader_data, fields()); + } set_fields(NULL, 0); // If a method from a redefined class is using this constant pool, don't // delete it, yet. The new class's previous version will point to this. if (constants() != NULL) { assert (!constants()->on_stack(), "shouldn't be called if anything is onstack"); - MetadataFactory::free_metadata(loader_data, constants()); + if (!constants()->is_shared()) { + MetadataFactory::free_metadata(loader_data, constants()); + } set_constants(NULL); } - if (inner_classes() != Universe::the_empty_short_array()) { + if (inner_classes() != NULL && + inner_classes() != Universe::the_empty_short_array() && + !inner_classes()->is_shared()) { MetadataFactory::free_array(loader_data, inner_classes()); } set_inner_classes(NULL); - // We should deallocate the Annotations instance - MetadataFactory::free_metadata(loader_data, annotations()); + // We should deallocate the Annotations instance if it's not in shared spaces. 
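The deallocation hunks above all repeat one guard: free a metadata array only if it is non-NULL, not one of the canonical empty arrays, and not mapped from the CDS shared archive. A condensed sketch of the pattern; MetaArray, the canonical-empty object, and free_metadata() are invented stand-ins:

#include <cstddef>

struct MetaArray {
  bool mapped_from_archive;
  bool is_shared() const { return mapped_from_archive; }
};

static MetaArray the_empty_array;   // canonical shared empty, never freed

void free_metadata(MetaArray* a) { delete a; }

void free_unless_shared(MetaArray* a) {
  // loader-local copies are freed; archived or canonical data is not
  if (a != NULL && a != &the_empty_array && !a->is_shared()) {
    free_metadata(a);
  }
}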
+ if (annotations() != NULL && !annotations()->is_shared()) { + MetadataFactory::free_metadata(loader_data, annotations()); + } set_annotations(NULL); } @@ -456,15 +496,29 @@ return java_lang_Class::signers(java_mirror()); } -volatile oop InstanceKlass::init_lock() const { +oop InstanceKlass::init_lock() const { // return the init lock from the mirror - return java_lang_Class::init_lock(java_mirror()); + oop lock = java_lang_Class::init_lock(java_mirror()); + assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state + "only fully initialized state can have a null lock"); + return lock; +} + +// Set the initialization lock to null so the object can be GC'ed. Any racing +// threads to get this lock will see a null lock and will not lock. +// That's okay because they all check for initialized state after getting +// the lock and return. +void InstanceKlass::fence_and_clear_init_lock() { + // make sure previous stores are all done, notably the init_state. + OrderAccess::storestore(); + java_lang_Class::set_init_lock(java_mirror(), NULL); + assert(!is_not_initialized(), "class must be initialized now"); } void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) { EXCEPTION_MARK; - volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD); + oop init_lock = this_oop->init_lock(); + ObjectLocker ol(init_lock, THREAD, init_lock != NULL); // abort if someone beat us to the initialization if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized() @@ -483,6 +537,7 @@ } else { // linking successfull, mark class as initialized this_oop->set_init_state (fully_initialized); + this_oop->fence_and_clear_init_lock(); // trace if (TraceClassInitialization) { ResourceMark rm(THREAD); @@ -608,8 +663,8 @@ // verification & rewriting { - volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD); + oop init_lock = this_oop->init_lock(); + ObjectLocker ol(init_lock, THREAD, init_lock != NULL); // rewritten will have been set if loader constraint error found // on an earlier link attempt // don't verify or rewrite if already rewritten @@ -731,8 +786,8 @@ // refer to the JVM book page 47 for description of steps // Step 1 { - volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD); + oop init_lock = this_oop->init_lock(); + ObjectLocker ol(init_lock, THREAD, init_lock != NULL); Thread *self = THREAD; // it's passed the current thread @@ -879,9 +934,10 @@ } void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) { - volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD); + oop init_lock = this_oop->init_lock(); + ObjectLocker ol(init_lock, THREAD, init_lock != NULL); this_oop->set_init_state(state); + this_oop->fence_and_clear_init_lock(); ol.notify_all(CHECK); } @@ -1354,32 +1410,44 @@ return -1; } +// find_method looks up the name/signature in the local methods array Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const { return InstanceKlass::find_method(methods(), name, signature); } +// find_method looks up the name/signature in the local methods array Method* InstanceKlass::find_method( Array* methods, Symbol* name, Symbol* signature) { + int hit = find_method_index(methods, name, signature); + return hit >= 0 ? 
methods->at(hit): NULL; +} + +// Used directly for default_methods to find the index into the +// default_vtable_indices, and indirectly by find_method +// find_method_index looks in the local methods array to return the index +// of the matching name/signature +int InstanceKlass::find_method_index( + Array* methods, Symbol* name, Symbol* signature) { int hit = binary_search(methods, name); if (hit != -1) { Method* m = methods->at(hit); // Do linear search to find matching signature. First, quick check // for common case - if (m->signature() == signature) return m; + if (m->signature() == signature) return hit; // search downwards through overloaded methods int i; for (i = hit - 1; i >= 0; --i) { Method* m = methods->at(i); assert(m->is_method(), "must be method"); if (m->name() != name) break; - if (m->signature() == signature) return m; + if (m->signature() == signature) return i; } // search upwards for (i = hit + 1; i < methods->length(); ++i) { Method* m = methods->at(i); assert(m->is_method(), "must be method"); if (m->name() != name) break; - if (m->signature() == signature) return m; + if (m->signature() == signature) return i; } // not found #ifdef ASSERT @@ -1387,9 +1455,8 @@ assert(index == -1, err_msg("binary search should have found entry %d", index)); #endif } - return NULL; + return -1; } - int InstanceKlass::find_method_by_name(Symbol* name, int* end) { return find_method_by_name(methods(), name, end); } @@ -1408,6 +1475,7 @@ return -1; } +// lookup_method searches both the local methods array and all superclasses methods arrays Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const { Klass* klass = const_cast(this); while (klass != NULL) { @@ -1418,6 +1486,21 @@ return NULL; } +// lookup a method in the default methods list then in all transitive interfaces +// Do NOT return private or static methods +Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name, + Symbol* signature) const { + Method* m = NULL; + if (default_methods() != NULL) { + m = find_method(default_methods(), name, signature); + } + // Look up interfaces + if (m == NULL) { + m = lookup_method_in_all_interfaces(name, signature); + } + return m; +} + // lookup a method in all the interfaces that this class implements // Do NOT return private or static methods, new in JDK8 which are not externally visible // They should only be found in the initial InterfaceMethodRef @@ -2128,6 +2211,10 @@ data = mdo->next_data(data)) { data->clean_weak_klass_links(is_alive); } + ParametersTypeData* parameters = mdo->parameters_type_data(); + if (parameters != NULL) { + parameters->clean_weak_klass_links(is_alive); + } } } } @@ -2310,15 +2397,38 @@ const char* InstanceKlass::signature_name() const { + int hash_len = 0; + char hash_buf[40]; + + // If this is an anonymous class, append a hash to make the name unique + if (is_anonymous()) { + assert(EnableInvokeDynamic, "EnableInvokeDynamic was not set."); + intptr_t hash = (java_mirror() != NULL) ? 
java_mirror()->identity_hash() : 0; + sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash); + hash_len = (int)strlen(hash_buf); + } + + // Get the internal name as a c string const char* src = (const char*) (name()->as_C_string()); const int src_length = (int)strlen(src); - char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3); - int src_index = 0; + + char* dest = NEW_RESOURCE_ARRAY(char, src_length + hash_len + 3); + + // Add L as type indicator int dest_index = 0; dest[dest_index++] = 'L'; - while (src_index < src_length) { + + // Add the actual class name + for (int src_index = 0; src_index < src_length; ) { dest[dest_index++] = src[src_index++]; } + + // If we have a hash, append it + for (int hash_index = 0; hash_index < hash_len; ) { + dest[dest_index++] = hash_buf[hash_index++]; + } + + // Add the semicolon and the NULL dest[dest_index++] = ';'; dest[dest_index] = '\0'; return dest; @@ -2548,6 +2658,42 @@ return m; } + +#if INCLUDE_JVMTI +// update default_methods for redefineclasses for methods that are +// not yet in the vtable due to concurrent subclass define and superinterface +// redefinition +// Note: those in the vtable, should have been updated via adjust_method_entries +void InstanceKlass::adjust_default_methods(Method** old_methods, Method** new_methods, + int methods_length, bool* trace_name_printed) { + // search the default_methods for uses of either obsolete or EMCP methods + if (default_methods() != NULL) { + for (int j = 0; j < methods_length; j++) { + Method* old_method = old_methods[j]; + Method* new_method = new_methods[j]; + + for (int index = 0; index < default_methods()->length(); index ++) { + if (default_methods()->at(index) == old_method) { + default_methods()->at_put(index, new_method); + if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { + if (!(*trace_name_printed)) { + // RC_TRACE_MESG macro has an embedded ResourceMark + RC_TRACE_MESG(("adjust: klassname=%s default methods from name=%s", + external_name(), + old_method->method_holder()->external_name())); + *trace_name_printed = true; + } + RC_TRACE(0x00100000, ("default method update: %s(%s) ", + new_method->name()->as_C_string(), + new_method->signature()->as_C_string())); + } + } + } + } + } +} +#endif // INCLUDE_JVMTI + // On-stack replacement stuff void InstanceKlass::add_osr_nmethod(nmethod* n) { // only one compilation can be active @@ -2742,11 +2888,21 @@ st->print(BULLET"methods: "); methods()->print_value_on(st); st->cr(); if (Verbose || WizardMode) { Array* method_array = methods(); - for(int i = 0; i < method_array->length(); i++) { + for (int i = 0; i < method_array->length(); i++) { st->print("%d : ", i); method_array->at(i)->print_value(); st->cr(); } } - st->print(BULLET"method ordering: "); method_ordering()->print_value_on(st); st->cr(); + st->print(BULLET"method ordering: "); method_ordering()->print_value_on(st); st->cr(); + st->print(BULLET"default_methods: "); default_methods()->print_value_on(st); st->cr(); + if (Verbose && default_methods() != NULL) { + Array* method_array = default_methods(); + for (int i = 0; i < method_array->length(); i++) { + st->print("%d : ", i); method_array->at(i)->print_value(); st->cr(); + } + } + if (default_vtable_indices() != NULL) { + st->print(BULLET"default vtable indices: "); default_vtable_indices()->print_value_on(st); st->cr(); + } st->print(BULLET"local interfaces: "); local_interfaces()->print_value_on(st); st->cr(); st->print(BULLET"trans. 
interfaces: "); transitive_interfaces()->print_value_on(st); st->cr(); st->print(BULLET"constants: "); constants()->print_value_on(st); st->cr(); @@ -3099,6 +3255,19 @@ } } + // Verify default methods + if (default_methods() != NULL) { + Array* methods = this->default_methods(); + for (int j = 0; j < methods->length(); j++) { + guarantee(methods->at(j)->is_method(), "non-method in methods array"); + } + for (int j = 0; j < methods->length() - 1; j++) { + Method* m1 = methods->at(j); + Method* m2 = methods->at(j + 1); + guarantee(m1->name()->fast_compare(m2->name()) <= 0, "methods not sorted correctly"); + } + } + // Verify JNI static field identifiers if (jni_ids() != NULL) { jni_ids()->verify(this); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/instanceKlass.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -269,12 +269,18 @@ // Method array. Array* _methods; + // Default Method Array, concrete methods inherited from interfaces + Array* _default_methods; // Interface (Klass*s) this class declares locally to implement. Array* _local_interfaces; // Interface (Klass*s) this class implements transitively. Array* _transitive_interfaces; // Int array containing the original order of method in the class file (for JVMTI). Array* _method_ordering; + // Int array containing the vtable_indices for default_methods + // offset matches _default_methods offset + Array* _default_vtable_indices; + // Instance and static variable information, starts with 6-tuples of shorts // [access, name index, sig index, initval index, low_offset, high_offset] // for all fields, followed by the generic signature data at the end of @@ -356,6 +362,15 @@ void set_method_ordering(Array* m) { _method_ordering = m; } void copy_method_ordering(intArray* m, TRAPS); + // default_methods + Array* default_methods() const { return _default_methods; } + void set_default_methods(Array* a) { _default_methods = a; } + + // default method vtable_indices + Array* default_vtable_indices() const { return _default_vtable_indices; } + void set_default_vtable_indices(Array* v) { _default_vtable_indices = v; } + Array* create_new_default_vtable_indices(int len, TRAPS); + // interfaces Array* local_interfaces() const { return _local_interfaces; } void set_local_interfaces(Array* a) { @@ -501,12 +516,18 @@ Method* find_method(Symbol* name, Symbol* signature) const; static Method* find_method(Array* methods, Symbol* name, Symbol* signature); + // find a local method index in default_methods (returns -1 if not found) + static int find_method_index(Array* methods, Symbol* name, Symbol* signature); + // lookup operation (returns NULL if not found) Method* uncached_lookup_method(Symbol* name, Symbol* signature) const; // lookup a method in all the interfaces that this class implements // (returns NULL if not found) Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature) const; + // lookup a method in local defaults then in all interfaces + // (returns NULL if not found) + Method* lookup_method_in_ordered_interfaces(Symbol* name, Symbol* signature) const; // Find method indices by name. 
If a method with the specified name is // found the index to the first method is returned, and 'end' is filled in @@ -910,6 +931,11 @@ klassItable* itable() const; // return new klassItable wrapper Method* method_at_itable(Klass* holder, int index, TRAPS); +#if INCLUDE_JVMTI + void adjust_default_methods(Method** old_methods, Method** new_methods, + int methods_length, bool* trace_name_printed); +#endif // INCLUDE_JVMTI + // Garbage collection void oop_follow_contents(oop obj); int oop_adjust_pointers(oop obj); @@ -995,8 +1021,9 @@ // Must be one per class and it has to be a VM internal object so java code // cannot lock it (like the mirror). // It has to be an object not a Mutex because it's held through java calls. - volatile oop init_lock() const; + oop init_lock() const; private: + void fence_and_clear_init_lock(); // Static methods that are used to implement member methods where an exposed this pointer // is needed due to possible GCs diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/klassVtable.cpp --- a/src/share/vm/oops/klassVtable.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/klassVtable.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -83,10 +83,14 @@ GrowableArray new_mirandas(20); // compute the number of mirandas methods that must be added to the end - get_mirandas(&new_mirandas, all_mirandas, super, methods, local_interfaces); + get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces); *num_new_mirandas = new_mirandas.length(); - vtable_length += *num_new_mirandas * vtableEntry::size(); + // Interfaces do not need interface methods in their vtables + // This includes miranda methods and during later processing, default methods + if (!class_flags.is_interface()) { + vtable_length += *num_new_mirandas * vtableEntry::size(); + } if (Universe::is_bootstrapping() && vtable_length == 0) { // array classes don't have their superclass set correctly during @@ -186,7 +190,7 @@ assert(methods->at(i)->is_method(), "must be a Method*"); methodHandle mh(THREAD, methods->at(i)); - bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, checkconstraints, CHECK); + bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, -1, checkconstraints, CHECK); if (needs_new_entry) { put_method_at(mh(), initialized); @@ -195,8 +199,40 @@ } } - // add miranda methods to end of vtable. 
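The sizing change above (and the matching guard in initialize_vtable() just below) means miranda and, later, default methods consume vtable slots only in classes; interfaces are numbered through the itable instead. A toy illustration with invented numbers:

#include <cstdio>

int main() {
  const int vtable_entry_words = 1;  // per-entry size, illustrative only
  int super_vtable_len = 5;          // inherited entries, assumed
  int num_new_mirandas = 2;          // as computed by get_mirandas(), assumed
  bool is_interface = false;

  int vtable_length = super_vtable_len * vtable_entry_words;
  if (!is_interface) {               // interfaces skip this step
    vtable_length += num_new_mirandas * vtable_entry_words;
  }
  printf("vtable_length = %d\n", vtable_length);  // 7 for a class, 5 for an interface
  return 0;
}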
- initialized = fill_in_mirandas(initialized); + // update vtable with default_methods + Array* default_methods = ik()->default_methods(); + if (default_methods != NULL) { + len = default_methods->length(); + if (len > 0) { + Array* def_vtable_indices = NULL; + if ((def_vtable_indices = ik()->default_vtable_indices()) == NULL) { + def_vtable_indices = ik()->create_new_default_vtable_indices(len, CHECK); + } else { + assert(def_vtable_indices->length() == len, "reinit vtable len?"); + } + for (int i = 0; i < len; i++) { + HandleMark hm(THREAD); + assert(default_methods->at(i)->is_method(), "must be a Method*"); + methodHandle mh(THREAD, default_methods->at(i)); + + bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, i, checkconstraints, CHECK); + + // needs new entry + if (needs_new_entry) { + put_method_at(mh(), initialized); + def_vtable_indices->at_put(i, initialized); //set vtable index + initialized++; + } + } + } + } + + // add miranda methods; it will also return the updated initialized + // Interfaces do not need interface methods in their vtables + // This includes miranda methods and during later processing, default methods + if (!ik()->is_interface()) { + initialized = fill_in_mirandas(initialized); + } // In class hierarchies where the accessibility is not increasing (i.e., going from private -> // package_private -> public/protected), the vtable might actually be smaller than our initial @@ -230,14 +266,19 @@ #ifndef PRODUCT if (PrintVtables && Verbose) { ResourceMark rm(THREAD); + char* sig = target_method()->name_and_sig_as_C_string(); tty->print("transitive overriding superclass %s with %s::%s index %d, original flags: ", supersuperklass->internal_name(), - _klass->internal_name(), (target_method() != NULL) ? - target_method()->name()->as_C_string() : "", vtable_index); + _klass->internal_name(), sig, vtable_index); super_method->access_flags().print_on(tty); + if (super_method->is_default_method()) { + tty->print("default "); + } tty->print("overriders flags: "); target_method->access_flags().print_on(tty); - tty->cr(); + if (target_method->is_default_method()) { + tty->print("default "); + } } #endif /*PRODUCT*/ break; // return found superk @@ -258,16 +299,31 @@ // OR return true if a new vtable entry is required. // Only called for InstanceKlass's, i.e. not for arrays // If that changed, could not use _klass as handle for klass -bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, - bool checkconstraints, TRAPS) { +bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, + int super_vtable_len, int default_index, + bool checkconstraints, TRAPS) { ResourceMark rm; bool allocate_new = true; assert(klass->oop_is_instance(), "must be InstanceKlass"); - assert(klass == target_method()->method_holder(), "caller resp."); - // Initialize the method's vtable index to "nonvirtual". - // If we allocate a vtable entry, we will update it to a non-negative number. 
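The default-method loop above records, for each entry in default_methods(), which vtable slot it received. The reason for the side array: a default method's Method* is owned by its declaring superinterface and shared by every inheritor, so its single vtable/itable index field cannot hold a per-class slot. A toy sketch of the parallel-array bookkeeping, with invented types:

#include <vector>

struct Method { int shared_index; }; // one per interface, shared by inheritors

struct KlassSketch {
  std::vector<Method*> default_methods;    // shared Method*s, in order
  std::vector<int> default_vtable_indices; // this class's slot for each

  void record_slot(int i, int vtable_slot) {
    // entry i of one array corresponds to entry i of the other
    default_vtable_indices[i] = vtable_slot;
  }
  int slot_of_default(int i) const { return default_vtable_indices[i]; }
};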
- target_method()->set_vtable_index(Method::nonvirtual_vtable_index); + Array<int>* def_vtable_indices = NULL; + bool is_default = false; + // default methods are concrete methods in superinterfaces which are added to the vtable + // with their real method_holder + // Since vtable and itable indices share the same storage, don't touch + // the default method's real vtable/itable index + // default_vtable_indices stores the vtable value relative to this inheritor + if (default_index >= 0 ) { + is_default = true; + def_vtable_indices = klass->default_vtable_indices(); + assert(def_vtable_indices != NULL, "def vtable alloc?"); + assert(default_index <= def_vtable_indices->length(), "def vtable len?"); + } else { + assert(klass == target_method()->method_holder(), "caller resp."); + // Initialize the method's vtable index to "nonvirtual". + // If we allocate a vtable entry, we will update it to a non-negative number. + target_method()->set_vtable_index(Method::nonvirtual_vtable_index); + } // Static and <init> methods are never in if (target_method()->is_static() || target_method()->name() == vmSymbols::object_initializer_name()) { @@ -284,7 +340,15 @@ // An interface never allocates new vtable slots, only inherits old ones. // This method will either be assigned its own itable index later, // or be assigned an inherited vtable index in the loop below. - target_method()->set_vtable_index(Method::pending_itable_index); + // default methods inherited by classes store their vtable indices + // in the inheritor's default_vtable_indices + // default methods inherited by interfaces may already have a + // valid itable index, if so, don't change it + // overpass methods in an interface will be assigned an itable index later + // by an inheriting class + if (!is_default || !target_method()->has_itable_index()) { + target_method()->set_vtable_index(Method::pending_itable_index); + } } // we need a new entry if there is no superclass if (super == NULL) { return true; } @@ -307,8 +371,15 @@ Symbol* name = target_method()->name(); Symbol* signature = target_method()->signature(); - Handle target_loader(THREAD, _klass()->class_loader()); - Symbol* target_classname = _klass->name(); + + KlassHandle target_klass(THREAD, target_method()->method_holder()); + if (target_klass == NULL) { + target_klass = _klass; + } + + Handle target_loader(THREAD, target_klass->class_loader()); + + Symbol* target_classname = target_klass->name(); for(int i = 0; i < super_vtable_len; i++) { Method* super_method = method_at(i); // Check if method name matches @@ -317,10 +388,14 @@ // get super_klass for method_holder for the found method InstanceKlass* super_klass = super_method->method_holder(); - if ((super_klass->is_override(super_method, target_loader, target_classname, THREAD)) || - ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION) - && ((super_klass = find_transitive_override(super_klass, target_method, i, target_loader, - target_classname, THREAD)) != (InstanceKlass*)NULL))) { + if (is_default + || ((super_klass->is_override(super_method, target_loader, target_classname, THREAD)) + || ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION) + && ((super_klass = find_transitive_override(super_klass, + target_method, i, target_loader, + target_classname, THREAD)) + != (InstanceKlass*)NULL)))) + { // overriding, so no new entry allocate_new = false; @@ -347,7 +422,7 @@ "%s used in the signature"; char* sig = target_method()->name_and_sig_as_C_string(); const char* loader1 = SystemDictionary::loader_name(target_loader()); - char* current = 
_klass->name()->as_C_string(); + char* current = target_klass->name()->as_C_string(); const char* loader2 = SystemDictionary::loader_name(super_loader()); char* failed_type_name = failed_type_symbol->as_C_string(); size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) + @@ -360,16 +435,39 @@ } } - put_method_at(target_method(), i); - target_method()->set_vtable_index(i); + put_method_at(target_method(), i); + if (!is_default) { + target_method()->set_vtable_index(i); + } else { + if (def_vtable_indices != NULL) { + def_vtable_indices->at_put(default_index, i); + } + assert(super_method->is_default_method() || super_method->is_overpass() + || super_method->is_abstract(), "default override error"); + } + + #ifndef PRODUCT if (PrintVtables && Verbose) { + ResourceMark rm(THREAD); + char* sig = target_method()->name_and_sig_as_C_string(); tty->print("overriding with %s::%s index %d, original flags: ", - _klass->internal_name(), (target_method() != NULL) ? - target_method()->name()->as_C_string() : "", i); + target_klass->internal_name(), sig, i); super_method->access_flags().print_on(tty); + if (super_method->is_default_method()) { + tty->print("default "); + } + if (super_method->is_overpass()) { + tty->print("overpass"); + } tty->print("overriders flags: "); target_method->access_flags().print_on(tty); + if (target_method->is_default_method()) { + tty->print("default "); + } + if (target_method->is_overpass()) { + tty->print("overpass"); + } tty->cr(); } #endif /*PRODUCT*/ @@ -378,12 +476,25 @@ // but not override another. Once we override one, not need new #ifndef PRODUCT if (PrintVtables && Verbose) { + ResourceMark rm(THREAD); + char* sig = target_method()->name_and_sig_as_C_string(); tty->print("NOT overriding with %s::%s index %d, original flags: ", - _klass->internal_name(), (target_method() != NULL) ? - target_method()->name()->as_C_string() : "", i); + target_klass->internal_name(), sig,i); super_method->access_flags().print_on(tty); + if (super_method->is_default_method()) { + tty->print("default "); + } + if (super_method->is_overpass()) { + tty->print("overpass"); + } tty->print("overriders flags: "); target_method->access_flags().print_on(tty); + if (target_method->is_default_method()) { + tty->print("default "); + } + if (target_method->is_overpass()) { + tty->print("overpass"); + } tty->cr(); } #endif /*PRODUCT*/ @@ -397,8 +508,18 @@ #ifndef PRODUCT if (PrintVtables && Verbose) { ResourceMark rm; - tty->print_cr("adding %s::%s at index %d", _klass->internal_name(), - (m != NULL) ? m->name()->as_C_string() : "", index); + const char* sig = (m != NULL) ? 
m->name_and_sig_as_C_string() : ""; + tty->print("adding %s at index %d, flags: ", sig, index); + if (m != NULL) { + m->access_flags().print_on(tty); + if (m->is_default_method()) { + tty->print("default "); + } + if (m->is_overpass()) { + tty->print("overpass"); + } + } + tty->cr(); } #endif table()[index].set(m); @@ -438,6 +559,14 @@ return false; } + // Concrete interface methods do not need new entries, they override + // abstract method entries using default inheritance rules + if (target_method()->method_holder() != NULL && + target_method()->method_holder()->is_interface() && + !target_method()->is_abstract() ) { + return false; + } + // we need a new entry if there is no superclass if (super == NULL) { return true; @@ -446,7 +575,7 @@ // private methods in classes always have a new entry in the vtable // specification interpretation since classic has // private methods not overriding - // JDK8 adds private methods in interfaces which require invokespecial + // JDK8 adds private methods in interfaces which require invokespecial if (target_method()->is_private()) { return true; } @@ -526,35 +655,43 @@ if (mhk->is_interface()) { assert(m->is_public(), "should be public"); assert(ik()->implements_interface(method_holder) , "this class should implement the interface"); - assert(is_miranda(m, ik()->methods(), ik()->super()), "should be a miranda_method"); - return true; + // the search could find a miranda or a default method + if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) { + return true; + } } return false; } -// check if a method is a miranda method, given a class's methods table and its super -// "miranda" means not static, not defined by this class, and not defined -// in super unless it is private and therefore inaccessible to this class. +// check if a method is a miranda method, given a class's methods table, +// its default_method table and its super +// "miranda" means not static, not defined by this class. +// private methods in interfaces do not belong in the miranda list. 
// the caller must make sure that the method belongs to an interface implemented by the class // Miranda methods only include public interface instance methods -// Not private methods, not static methods, not default = concrete abstract -bool klassVtable::is_miranda(Method* m, Array* class_methods, Klass* super) { - if (m->is_static()) { +// Not private methods, not static methods, not default == concrete abstract +// Miranda methods also do not include overpass methods in interfaces +bool klassVtable::is_miranda(Method* m, Array* class_methods, + Array* default_methods, Klass* super) { + if (m->is_static() || m->is_private() || m->is_overpass()) { return false; } Symbol* name = m->name(); Symbol* signature = m->signature(); if (InstanceKlass::find_method(class_methods, name, signature) == NULL) { // did not find it in the method table of the current class - if (super == NULL) { - // super doesn't exist - return true; - } + if ((default_methods == NULL) || + InstanceKlass::find_method(default_methods, name, signature) == NULL) { + if (super == NULL) { + // super doesn't exist + return true; + } - Method* mo = InstanceKlass::cast(super)->lookup_method(name, signature); - if (mo == NULL || mo->access_flags().is_private() ) { - // super class hierarchy does not implement it or protection is different - return true; + Method* mo = InstanceKlass::cast(super)->lookup_method(name, signature); + if (mo == NULL || mo->access_flags().is_private() ) { + // super class hierarchy does not implement it or protection is different + return true; + } } } @@ -562,7 +699,7 @@ } // Scans current_interface_methods for miranda methods that do not -// already appear in new_mirandas and are also not defined-and-non-private +// already appear in new_mirandas, or default methods, and are also not defined-and-non-private // in super (superclass). These mirandas are added to all_mirandas if it is // not null; in addition, those that are not duplicates of miranda methods // inherited by super from its interfaces are added to new_mirandas. @@ -572,7 +709,8 @@ void klassVtable::add_new_mirandas_to_lists( GrowableArray* new_mirandas, GrowableArray* all_mirandas, Array* current_interface_methods, Array* class_methods, - Klass* super) { + Array* default_methods, Klass* super) { + // iterate thru the current interface's method to see if it a miranda int num_methods = current_interface_methods->length(); for (int i = 0; i < num_methods; i++) { @@ -590,7 +728,7 @@ } if (!is_duplicate) { // we don't want duplicate miranda entries in the vtable - if (is_miranda(im, class_methods, super)) { // is it a miranda at all? + if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all? 
InstanceKlass *sk = InstanceKlass::cast(super); // check if it is a duplicate of a super's miranda if (sk->lookup_method_in_all_interfaces(im->name(), im->signature()) == NULL) { @@ -607,6 +745,7 @@ void klassVtable::get_mirandas(GrowableArray* new_mirandas, GrowableArray* all_mirandas, Klass* super, Array* class_methods, + Array* default_methods, Array* local_interfaces) { assert((new_mirandas->length() == 0) , "current mirandas must be 0"); @@ -615,14 +754,16 @@ for (int i = 0; i < num_local_ifs; i++) { InstanceKlass *ik = InstanceKlass::cast(local_interfaces->at(i)); add_new_mirandas_to_lists(new_mirandas, all_mirandas, - ik->methods(), class_methods, super); + ik->methods(), class_methods, + default_methods, super); // iterate thru each local's super interfaces Array* super_ifs = ik->transitive_interfaces(); int num_super_ifs = super_ifs->length(); for (int j = 0; j < num_super_ifs; j++) { InstanceKlass *sik = InstanceKlass::cast(super_ifs->at(j)); add_new_mirandas_to_lists(new_mirandas, all_mirandas, - sik->methods(), class_methods, super); + sik->methods(), class_methods, + default_methods, super); } } } @@ -630,11 +771,27 @@ // Discover miranda methods ("miranda" = "interface abstract, no binding"), // and append them into the vtable starting at index initialized, // return the new value of initialized. +// Miranda methods use vtable entries, but do not get assigned a vtable_index +// The vtable_index is discovered by searching from the end of the vtable int klassVtable::fill_in_mirandas(int initialized) { GrowableArray mirandas(20); get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(), - ik()->local_interfaces()); + ik()->default_methods(), ik()->local_interfaces()); for (int i = 0; i < mirandas.length(); i++) { + if (PrintVtables && Verbose) { + Method* meth = mirandas.at(i); + ResourceMark rm(Thread::current()); + if (meth != NULL) { + char* sig = meth->name_and_sig_as_C_string(); + tty->print("fill in mirandas with %s index %d, flags: ", + sig, initialized); + meth->access_flags().print_on(tty); + if (meth->is_default_method()) { + tty->print("default "); + } + tty->cr(); + } + } put_method_at(mirandas.at(i), initialized); ++initialized; } @@ -648,6 +805,26 @@ } #if INCLUDE_JVMTI +bool klassVtable::adjust_default_method(int vtable_index, Method* old_method, Method* new_method) { + // If old_method is default, find this vtable index in default_vtable_indices + // and replace that method in the _default_methods list + bool updated = false; + + Array* default_methods = ik()->default_methods(); + if (default_methods != NULL) { + int len = default_methods->length(); + for (int idx = 0; idx < len; idx++) { + if (vtable_index == ik()->default_vtable_indices()->at(idx)) { + if (default_methods->at(idx) == old_method) { + default_methods->at_put(idx, new_method); + updated = true; + } + break; + } + } + } + return updated; +} void klassVtable::adjust_method_entries(Method** old_methods, Method** new_methods, int methods_length, bool * trace_name_printed) { // search the vtable for uses of either obsolete or EMCP methods @@ -663,18 +840,26 @@ for (int index = 0; index < length(); index++) { if (unchecked_method_at(index) == old_method) { put_method_at(new_method, index); + // For default methods, need to update the _default_methods array + // which can only have one method entry for a given signature + bool updated_default = false; + if (old_method->is_default_method()) { + updated_default = adjust_default_method(index, old_method, new_method); + } if 
(RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { if (!(*trace_name_printed)) { // RC_TRACE_MESG macro has an embedded ResourceMark - RC_TRACE_MESG(("adjust: name=%s", + RC_TRACE_MESG(("adjust: klassname=%s for methods from name=%s", + klass()->external_name(), old_method->method_holder()->external_name())); *trace_name_printed = true; } // RC_TRACE macro has an embedded ResourceMark - RC_TRACE(0x00100000, ("vtable method update: %s(%s)", + RC_TRACE(0x00100000, ("vtable method update: %s(%s), updated default = %s", new_method->name()->as_C_string(), - new_method->signature()->as_C_string())); + new_method->signature()->as_C_string(), + updated_default ? "true" : "false")); } // cannot 'break' here; see for-loop comment above. } @@ -701,6 +886,12 @@ if (m != NULL) { tty->print(" (%5d) ", i); m->access_flags().print_on(tty); + if (m->is_default_method()) { + tty->print("default "); + } + if (m->is_overpass()) { + tty->print("overpass"); + } tty->print(" -- "); m->print_name(tty); tty->cr(); @@ -757,9 +948,9 @@ // Initialization void klassItable::initialize_itable(bool checkconstraints, TRAPS) { if (_klass->is_interface()) { - // This needs to go after vtable indexes are assigned but - // before implementors need to know the number of itable indexes. - assign_itable_indexes_for_interface(_klass()); + // This needs to go after vtable indices are assigned but + // before implementors need to know the number of itable indices. + assign_itable_indices_for_interface(_klass()); } // Cannot be setup doing bootstrapping, interfaces don't have @@ -803,7 +994,7 @@ return true; } -int klassItable::assign_itable_indexes_for_interface(Klass* klass) { +int klassItable::assign_itable_indices_for_interface(Klass* klass) { // an interface does not have an itable, but its methods need to be numbered if (TraceItables) tty->print_cr("%3d: Initializing itable for interface %s", ++initialize_count, klass->name()->as_C_string()); @@ -815,6 +1006,25 @@ if (interface_method_needs_itable_index(m)) { assert(!m->is_final_method(), "no final interface methods"); // If m is already assigned a vtable index, do not disturb it. + if (TraceItables && Verbose) { + ResourceMark rm; + const char* sig = (m != NULL) ? 
m->name_and_sig_as_C_string() : ""; + if (m->has_vtable_index()) { + tty->print("itable index %d for method: %s, flags: ", m->vtable_index(), sig); + } else { + tty->print("itable index %d for method: %s, flags: ", ime_num, sig); + } + if (m != NULL) { + m->access_flags().print_on(tty); + if (m->is_default_method()) { + tty->print("default "); + } + if (m->is_overpass()) { + tty->print("overpass"); + } + } + tty->cr(); + } if (!m->has_vtable_index()) { assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable"); m->set_itable_index(ime_num); @@ -846,7 +1056,7 @@ } nof_methods -= 1; } - // no methods have itable indexes + // no methods have itable indices return 0; } @@ -907,6 +1117,21 @@ int ime_num = m->itable_index(); assert(ime_num < ime_count, "oob"); itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target()); + if (TraceItables && Verbose) { + ResourceMark rm(THREAD); + if (target() != NULL) { + char* sig = target()->name_and_sig_as_C_string(); + tty->print("interface: %s, ime_num: %d, target: %s, method_holder: %s ", + interf_h()->internal_name(), ime_num, sig, + target()->method_holder()->internal_name()); + tty->print("target_method flags: "); + target()->access_flags().print_on(tty); + if (target()->is_default_method()) { + tty->print("default "); + } + tty->cr(); + } + } } } } @@ -980,6 +1205,9 @@ if (m != NULL) { tty->print(" (%5d) ", i); m->access_flags().print_on(tty); + if (m->is_default_method()) { + tty->print("default "); + } tty->print(" -- "); m->print_name(tty); tty->cr(); @@ -1116,7 +1344,7 @@ Array* methods = InstanceKlass::cast(intf)->methods(); if (itable_index < 0 || itable_index >= method_count_for_interface(intf)) - return NULL; // help caller defend against bad indexes + return NULL; // help caller defend against bad indices int index = itable_index; Method* m = methods->at(index); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/klassVtable.hpp --- a/src/share/vm/oops/klassVtable.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/klassVtable.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -97,6 +97,7 @@ // trace_name_printed is set to true if the current call has // printed the klass name so that other routines in the adjust_* // group don't print the klass name. 
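// A compact picture of the itable numbering above (sketch; interface and
// method names are invented): an interface's methods are numbered in one
// pass, and every implementor lays out its itable block for that
// interface in the same order, so a single index per interface method is
// enough for dispatch:
//
//   interface I { void a(); void b(); }   // a -> ime 0, b -> ime 1
//   invokeinterface I.b on any receiver:
//     itableOffsetEntry for I -> method_entry[1] -> the receiver's b()
//
// A method that already carries a vtable index keeps it and is skipped,
// which is what the has_vtable_index() check above guards.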
+ bool adjust_default_method(int vtable_index, Method* old_method, Method* new_method); void adjust_method_entries(Method** old_methods, Method** new_methods, int methods_length, bool * trace_name_printed); bool check_no_old_or_obsolete_entries(); @@ -118,24 +119,28 @@ void put_method_at(Method* m, int index); static bool needs_new_vtable_entry(methodHandle m, Klass* super, Handle classloader, Symbol* classname, AccessFlags access_flags, TRAPS); - bool update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, bool checkconstraints, TRAPS); + bool update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, int default_index, bool checkconstraints, TRAPS); InstanceKlass* find_transitive_override(InstanceKlass* initialsuper, methodHandle target_method, int vtable_index, Handle target_loader, Symbol* target_classname, Thread* THREAD); // support for miranda methods bool is_miranda_entry_at(int i); int fill_in_mirandas(int initialized); - static bool is_miranda(Method* m, Array<Method*>* class_methods, Klass* super); + static bool is_miranda(Method* m, Array<Method*>* class_methods, + Array<Method*>* default_methods, Klass* super); static void add_new_mirandas_to_lists( GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas, - Array<Method*>* current_interface_methods, Array<Method*>* class_methods, + Array<Method*>* current_interface_methods, + Array<Method*>* class_methods, + Array<Method*>* default_methods, Klass* super); static void get_mirandas( GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas, Klass* super, - Array<Method*>* class_methods, Array<Klass*>* local_interfaces); - + Array<Method*>* class_methods, + Array<Method*>* default_methods, + Array<Klass*>* local_interfaces); void verify_against(outputStream* st, klassVtable* vt, int index); inline InstanceKlass* ik() const; }; @@ -290,7 +295,7 @@ #endif // INCLUDE_JVMTI // Setup of itable - static int assign_itable_indexes_for_interface(Klass* klass); + static int assign_itable_indices_for_interface(Klass* klass); static int method_count_for_interface(Klass* klass); static int compute_itable_size(Array<Klass*>* transitive_interfaces); static void setup_itable_offset_table(instanceKlassHandle klass); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/method.cpp --- a/src/share/vm/oops/method.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/method.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -511,9 +511,9 @@ bool Method::is_final_method(AccessFlags class_access_flags) const { // or "does_not_require_vtable_entry" - // overpass can occur, is not final (reuses vtable entry) + // default method or overpass can occur, is not final (reuses vtable entry) // private methods get vtable entries for backward class compatibility.
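// Why the default-method case below matters (sketch, hypothetical types):
// a default method fills a vtable slot in every implementor, and any
// subclass may override that slot, so the method can never be final or
// statically bound even though the interface supplies a body:
//
//   interface I { default void m() { } }
//   class C implements I { }                   // C's slot holds I.m
//   class D extends C { public void m() { } }  // D's slot now holds D.m
//
// Hence is_final_method() returns false for default methods exactly as
// it does for overpasses.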
- if (is_overpass()) return false; + if (is_overpass() || is_default_method()) return false; return is_final() || class_access_flags.is_final(); } @@ -521,11 +521,24 @@ return is_final_method(method_holder()->access_flags()); } +bool Method::is_default_method() const { + if (method_holder() != NULL && + method_holder()->is_interface() && + !is_abstract()) { + return true; + } else { + return false; + } +} + bool Method::can_be_statically_bound(AccessFlags class_access_flags) const { if (is_final_method(class_access_flags)) return true; #ifdef ASSERT + ResourceMark rm; bool is_nonv = (vtable_index() == nonvirtual_vtable_index); - if (class_access_flags.is_interface()) assert(is_nonv == is_static(), err_msg("is_nonv=%s", is_nonv)); + if (class_access_flags.is_interface()) { + assert(is_nonv == is_static(), err_msg("is_nonv=%s", name_and_sig_as_C_string())); + } #endif assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question"); return vtable_index() == nonvirtual_vtable_index; @@ -1382,7 +1395,8 @@ } // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array -void Method::sort_methods(Array* methods, bool idempotent) { +// default_methods also uses this without the ordering for fast find_method +void Method::sort_methods(Array* methods, bool idempotent, bool set_idnums) { int length = methods->length(); if (length > 1) { { @@ -1390,14 +1404,15 @@ QuickSort::sort(methods->data(), length, method_comparator, idempotent); } // Reset method ordering - for (int i = 0; i < length; i++) { - Method* m = methods->at(i); - m->set_method_idnum(i); + if (set_idnums) { + for (int i = 0; i < length; i++) { + Method* m = methods->at(i); + m->set_method_idnum(i); + } } } } - //----------------------------------------------------------------------------------- // Non-product code unless JVM/TI needs it @@ -1511,7 +1526,10 @@ return bp->orig_bytecode(); } } - ShouldNotReachHere(); + { + ResourceMark rm; + fatal(err_msg("no original bytecode found in %s at bci %d", name_and_sig_as_C_string(), bci)); + } return Bytecodes::_shouldnotreachhere; } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/method.hpp --- a/src/share/vm/oops/method.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/method.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -567,6 +567,7 @@ // checks method and its method holder bool is_final_method() const; bool is_final_method(AccessFlags class_access_flags) const; + bool is_default_method() const; // true if method needs no dynamic dispatch (final and/or no vtable entry) bool can_be_statically_bound() const; @@ -804,6 +805,7 @@ private: void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason); + public: MethodCounters* get_method_counters(TRAPS) { if (_method_counters == NULL) { build_method_counters(this, CHECK_AND_CLEAR_NULL); @@ -811,7 +813,6 @@ return _method_counters; } - public: bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); } void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); } void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); } @@ -846,7 +847,7 @@ #endif // Helper routine used for method sorting - static void sort_methods(Array* methods, bool idempotent = false); + static void sort_methods(Array* methods, bool idempotent = false, bool set_idnums = true); // Deallocation function for redefine classes or if an error occurs void deallocate_contents(ClassLoaderData* loader_data); diff 
-r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/methodData.cpp --- a/src/share/vm/oops/methodData.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/methodData.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -41,7 +41,7 @@ // Some types of data layouts need a length field. bool DataLayout::needs_array_len(u1 tag) { - return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag); + return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag); } // Perform generic initialization of the data. More specific @@ -56,6 +56,11 @@ if (needs_array_len(tag)) { set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header. } + if (tag == call_type_data_tag) { + CallTypeData::initialize(this, cell_count); + } else if (tag == virtual_call_type_data_tag) { + VirtualCallTypeData::initialize(this, cell_count); + } } void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) { @@ -76,7 +81,7 @@ } #ifndef PRODUCT -void ProfileData::print_shared(outputStream* st, const char* name) { +void ProfileData::print_shared(outputStream* st, const char* name) const { st->print("bci: %d", bci()); st->fill_to(tab_width_one); st->print("%s", name); @@ -91,8 +96,8 @@ st->print("flags(%d) ", flags); } -void ProfileData::tab(outputStream* st) { - st->fill_to(tab_width_two); +void ProfileData::tab(outputStream* st, bool first) const { + st->fill_to(first ? tab_width_one : tab_width_two); } #endif // !PRODUCT @@ -104,7 +109,7 @@ #ifndef PRODUCT -void BitData::print_data_on(outputStream* st) { +void BitData::print_data_on(outputStream* st) const { print_shared(st, "BitData"); } #endif // !PRODUCT @@ -115,7 +120,7 @@ // A CounterData corresponds to a simple counter. #ifndef PRODUCT -void CounterData::print_data_on(outputStream* st) { +void CounterData::print_data_on(outputStream* st) const { print_shared(st, "CounterData"); st->print_cr("count(%u)", count()); } @@ -145,12 +150,217 @@ } #ifndef PRODUCT -void JumpData::print_data_on(outputStream* st) { +void JumpData::print_data_on(outputStream* st) const { print_shared(st, "JumpData"); st->print_cr("taken(%u) displacement(%d)", taken(), displacement()); } #endif // !PRODUCT +int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) { + // Parameter profiling include the receiver + int args_count = include_receiver ? 
1 : 0; + ResourceMark rm; + SignatureStream ss(signature); + args_count += ss.reference_parameter_count(); + args_count = MIN2(args_count, max); + return args_count * per_arg_cell_count; +} + +int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) { + assert(Bytecodes::is_invoke(stream->code()), "should be invoke"); + assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken"); + Bytecode_invoke inv(stream->method(), stream->bci()); + int args_cell = 0; + if (arguments_profiling_enabled()) { + args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit); + } + int ret_cell = 0; + if (return_profiling_enabled() && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) { + ret_cell = ReturnTypeEntry::static_cell_count(); + } + int header_cell = 0; + if (args_cell + ret_cell > 0) { + header_cell = header_cell_count(); + } + + return header_cell + args_cell + ret_cell; +} + +class ArgumentOffsetComputer : public SignatureInfo { +private: + int _max; + GrowableArray _offsets; + + void set(int size, BasicType type) { _size += size; } + void do_object(int begin, int end) { + if (_offsets.length() < _max) { + _offsets.push(_size); + } + SignatureInfo::do_object(begin, end); + } + void do_array (int begin, int end) { + if (_offsets.length() < _max) { + _offsets.push(_size); + } + SignatureInfo::do_array(begin, end); + } + +public: + ArgumentOffsetComputer(Symbol* signature, int max) + : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) { + } + + int total() { lazy_iterate_parameters(); return _size; } + + int off_at(int i) const { return _offsets.at(i); } +}; + +void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) { + ResourceMark rm; + int start = 0; + // Parameter profiling include the receiver + if (include_receiver && has_receiver) { + set_stack_slot(0, 0); + set_type(0, type_none()); + start += 1; + } + ArgumentOffsetComputer aos(signature, _number_of_entries-start); + aos.total(); + for (int i = start; i < _number_of_entries; i++) { + set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 
1 : 0)); + set_type(i, type_none()); + } +} + +void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) { + assert(Bytecodes::is_invoke(stream->code()), "should be invoke"); + Bytecode_invoke inv(stream->method(), stream->bci()); + + SignatureStream ss(inv.signature()); + if (has_arguments()) { +#ifdef ASSERT + ResourceMark rm; + int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit); + assert(count > 0, "room for args type but none found?"); + check_number_of_arguments(count); +#endif + _args.post_initialize(inv.signature(), inv.has_receiver(), false); + } + + if (has_return()) { + assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?"); + _ret.post_initialize(); + } +} + +void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) { + assert(Bytecodes::is_invoke(stream->code()), "should be invoke"); + Bytecode_invoke inv(stream->method(), stream->bci()); + + if (has_arguments()) { +#ifdef ASSERT + ResourceMark rm; + SignatureStream ss(inv.signature()); + int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit); + assert(count > 0, "room for args type but none found?"); + check_number_of_arguments(count); +#endif + _args.post_initialize(inv.signature(), inv.has_receiver(), false); + } + + if (has_return()) { + assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?"); + _ret.post_initialize(); + } +} + +bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) { + Klass* k = (Klass*)klass_part(p); + return k != NULL && k->is_loader_alive(is_alive_cl); +} + +void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) { + for (int i = 0; i < _number_of_entries; i++) { + intptr_t p = type(i); + if (!is_loader_alive(is_alive_cl, p)) { + set_type(i, with_status((Klass*)NULL, p)); + } + } +} + +void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) { + intptr_t p = type(); + if (!is_loader_alive(is_alive_cl, p)) { + set_type(with_status((Klass*)NULL, p)); + } +} + +bool TypeEntriesAtCall::return_profiling_enabled() { + return MethodData::profile_return(); +} + +bool TypeEntriesAtCall::arguments_profiling_enabled() { + return MethodData::profile_arguments(); +} + +#ifndef PRODUCT +void TypeEntries::print_klass(outputStream* st, intptr_t k) { + if (is_type_none(k)) { + st->print("none"); + } else if (is_type_unknown(k)) { + st->print("unknown"); + } else { + valid_klass(k)->print_value_on(st); + } + if (was_null_seen(k)) { + st->print(" (null seen)"); + } +} + +void TypeStackSlotEntries::print_data_on(outputStream* st) const { + for (int i = 0; i < _number_of_entries; i++) { + _pd->tab(st); + st->print("%d: stack(%u) ", i, stack_slot(i)); + print_klass(st, type(i)); + st->cr(); + } +} + +void ReturnTypeEntry::print_data_on(outputStream* st) const { + _pd->tab(st); + print_klass(st, type()); + st->cr(); +} + +void CallTypeData::print_data_on(outputStream* st) const { + CounterData::print_data_on(st); + if (has_arguments()) { + tab(st, true); + st->print("argument types"); + _args.print_data_on(st); + } + if (has_return()) { + tab(st, true); + st->print("return type"); + _ret.print_data_on(st); + } +} + +void VirtualCallTypeData::print_data_on(outputStream* st) const { + VirtualCallData::print_data_on(st); + if (has_arguments()) { + tab(st, true); + st->print("argument types"); + _args.print_data_on(st); + } + if (has_return()) { + 
tab(st, true); + st->print("return type"); + _ret.print_data_on(st); + } +} +#endif + // ================================================================== // ReceiverTypeData // @@ -181,7 +391,7 @@ #endif // GRAAL #ifndef PRODUCT -void ReceiverTypeData::print_receiver_data_on(outputStream* st) { +void ReceiverTypeData::print_receiver_data_on(outputStream* st) const { uint row; int entries = 0; for (row = 0; row < row_limit(); row++) { @@ -202,11 +412,11 @@ } } } -void ReceiverTypeData::print_data_on(outputStream* st) { +void ReceiverTypeData::print_data_on(outputStream* st) const { print_shared(st, "ReceiverTypeData"); print_receiver_data_on(st); } -void VirtualCallData::print_data_on(outputStream* st) { +void VirtualCallData::print_data_on(outputStream* st) const { print_shared(st, "VirtualCallData"); print_receiver_data_on(st); } @@ -258,7 +468,7 @@ #ifndef PRODUCT -void RetData::print_data_on(outputStream* st) { +void RetData::print_data_on(outputStream* st) const { print_shared(st, "RetData"); uint row; int entries = 0; @@ -293,7 +503,7 @@ } #ifndef PRODUCT -void BranchData::print_data_on(outputStream* st) { +void BranchData::print_data_on(outputStream* st) const { print_shared(st, "BranchData"); st->print_cr("taken(%u) displacement(%d)", taken(), displacement()); @@ -367,7 +577,7 @@ } #ifndef PRODUCT -void MultiBranchData::print_data_on(outputStream* st) { +void MultiBranchData::print_data_on(outputStream* st) const { print_shared(st, "MultiBranchData"); st->print_cr("default_count(%u) displacement(%d)", default_count(), default_displacement()); @@ -381,7 +591,7 @@ #endif #ifndef PRODUCT -void ArgInfoData::print_data_on(outputStream* st) { +void ArgInfoData::print_data_on(outputStream* st) const { print_shared(st, "ArgInfoData"); int nargs = number_of_args(); for (int i = 0; i < nargs; i++) { @@ -391,6 +601,34 @@ } #endif + +int ParametersTypeData::compute_cell_count(Method* m) { + if (!MethodData::profile_parameters_for_method(m)) { + return 0; + } + int max = TypeProfileParmsLimit == -1 ? 
INT_MAX : TypeProfileParmsLimit; + int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max); + if (obj_args > 0) { + return obj_args + 1; // 1 cell for array len + } + return 0; +} + +void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) { + _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true); +} + +bool ParametersTypeData::profiling_enabled() { + return MethodData::profile_parameters(); +} + +#ifndef PRODUCT +void ParametersTypeData::print_data_on(outputStream* st) const { + st->print("parameter types"); + _parameters.print_data_on(st); +} +#endif + // ================================================================== // MethodData* // @@ -419,7 +657,11 @@ } case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: - return CounterData::static_cell_count(); + if (MethodData::profile_arguments() || MethodData::profile_return()) { + return variable_cell_count; + } else { + return CounterData::static_cell_count(); + } case Bytecodes::_goto: case Bytecodes::_goto_w: case Bytecodes::_jsr: @@ -427,9 +669,17 @@ return JumpData::static_cell_count(); case Bytecodes::_invokevirtual: case Bytecodes::_invokeinterface: - return VirtualCallData::static_cell_count(); + if (MethodData::profile_arguments() || MethodData::profile_return()) { + return variable_cell_count; + } else { + return VirtualCallData::static_cell_count(); + } case Bytecodes::_invokedynamic: - return CounterData::static_cell_count(); + if (MethodData::profile_arguments() || MethodData::profile_return()) { + return variable_cell_count; + } else { + return CounterData::static_cell_count(); + } case Bytecodes::_ret: return RetData::static_cell_count(); case Bytecodes::_ifeq: @@ -465,7 +715,36 @@ return 0; } if (cell_count == variable_cell_count) { - cell_count = MultiBranchData::compute_cell_count(stream); + switch (stream->code()) { + case Bytecodes::_lookupswitch: + case Bytecodes::_tableswitch: + cell_count = MultiBranchData::compute_cell_count(stream); + break; + case Bytecodes::_invokespecial: + case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: + assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile"); + if (profile_arguments_for_invoke(stream->method(), stream->bci()) || + profile_return_for_invoke(stream->method(), stream->bci())) { + cell_count = CallTypeData::compute_cell_count(stream); + } else { + cell_count = CounterData::static_cell_count(); + } + break; + case Bytecodes::_invokevirtual: + case Bytecodes::_invokeinterface: { + assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile"); + if (profile_arguments_for_invoke(stream->method(), stream->bci()) || + profile_return_for_invoke(stream->method(), stream->bci())) { + cell_count = VirtualCallTypeData::compute_cell_count(stream); + } else { + cell_count = VirtualCallData::static_cell_count(); + } + break; + } + default: + fatal("unexpected bytecode for var length profile data"); + } } // Note: cell_count might be zero, meaning that there is just // a DataLayout header, with no extra cells. @@ -522,6 +801,13 @@ // Add a cell to record information about modified arguments. 
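// Worked size example for the parameter-profiling area reserved just
// below (invented signature; cell math follows the two-cells-per-entry
// layout of TypeStackSlotEntries and assumes TypeProfileParmsLimit is
// not hit): for a virtual method
//   void m(String s, int i, Object o)
// the profilable parameters are the receiver, s and o (the int is not a
// reference), so TypeStackSlotEntries::compute_cell_count returns
// 3 * 2 = 6 and ParametersTypeData::compute_cell_count adds one cell for
// the array length, reserving 7 extra cells in the MDO.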
int arg_size = method->size_of_parameters(); object_size += DataLayout::compute_size_in_bytes(arg_size+1); + + // Reserve room for an area of the MDO dedicated to profiling of + // parameters + int args_cell = ParametersTypeData::compute_cell_count(method()); + if (args_cell > 0) { + object_size += DataLayout::compute_size_in_bytes(args_cell); + } #endif return object_size; } @@ -558,10 +844,21 @@ } break; case Bytecodes::_invokespecial: - case Bytecodes::_invokestatic: - cell_count = CounterData::static_cell_count(); - tag = DataLayout::counter_data_tag; + case Bytecodes::_invokestatic: { + int counter_data_cell_count = CounterData::static_cell_count(); + if (profile_arguments_for_invoke(stream->method(), stream->bci()) || + profile_return_for_invoke(stream->method(), stream->bci())) { + cell_count = CallTypeData::compute_cell_count(stream); + } else { + cell_count = counter_data_cell_count; + } + if (cell_count > counter_data_cell_count) { + tag = DataLayout::call_type_data_tag; + } else { + tag = DataLayout::counter_data_tag; + } break; + } case Bytecodes::_goto: case Bytecodes::_goto_w: case Bytecodes::_jsr: @@ -570,15 +867,37 @@ tag = DataLayout::jump_data_tag; break; case Bytecodes::_invokevirtual: - case Bytecodes::_invokeinterface: - cell_count = VirtualCallData::static_cell_count(); - tag = DataLayout::virtual_call_data_tag; + case Bytecodes::_invokeinterface: { + int virtual_call_data_cell_count = VirtualCallData::static_cell_count(); + if (profile_arguments_for_invoke(stream->method(), stream->bci()) || + profile_return_for_invoke(stream->method(), stream->bci())) { + cell_count = VirtualCallTypeData::compute_cell_count(stream); + } else { + cell_count = virtual_call_data_cell_count; + } + if (cell_count > virtual_call_data_cell_count) { + tag = DataLayout::virtual_call_type_data_tag; + } else { + tag = DataLayout::virtual_call_data_tag; + } break; - case Bytecodes::_invokedynamic: + } + case Bytecodes::_invokedynamic: { // %%% should make a type profile for any invokedynamic that takes a ref argument - cell_count = CounterData::static_cell_count(); - tag = DataLayout::counter_data_tag; + int counter_data_cell_count = CounterData::static_cell_count(); + if (profile_arguments_for_invoke(stream->method(), stream->bci()) || + profile_return_for_invoke(stream->method(), stream->bci())) { + cell_count = CallTypeData::compute_cell_count(stream); + } else { + cell_count = counter_data_cell_count; + } + if (cell_count > counter_data_cell_count) { + tag = DataLayout::call_type_data_tag; + } else { + tag = DataLayout::counter_data_tag; + } break; + } case Bytecodes::_ret: cell_count = RetData::static_cell_count(); tag = DataLayout::ret_data_tag; @@ -609,6 +928,11 @@ break; } assert(tag == DataLayout::multi_branch_data_tag || + ((MethodData::profile_arguments() || MethodData::profile_return()) && + (tag == DataLayout::call_type_data_tag || + tag == DataLayout::counter_data_tag || + tag == DataLayout::virtual_call_type_data_tag || + tag == DataLayout::virtual_call_data_tag)) || cell_count == bytecode_cell_count(c), "cell counts must agree"); if (cell_count >= 0) { assert(tag != DataLayout::no_tag, "bad tag"); @@ -655,6 +979,12 @@ return new MultiBranchData(this); case DataLayout::arg_info_data_tag: return new ArgInfoData(this); + case DataLayout::call_type_data_tag: + return new CallTypeData(this); + case DataLayout::virtual_call_type_data_tag: + return new VirtualCallTypeData(this); + case DataLayout::parameters_type_data_tag: + return new ParametersTypeData(this); }; } @@ -676,6 +1006,9 
@@ stream->next(); data->post_initialize(stream, this); } + if (_parameters_type_data_di != -1) { + parameters_type_data()->post_initialize(NULL, this); + } } // Initialize the MethodData* corresponding to a given method. @@ -724,7 +1057,25 @@ int arg_size = method()->size_of_parameters(); dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1); - object_size += DataLayout::compute_size_in_bytes(arg_size+1); + int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1); + object_size += extra_size + arg_data_size; + + int args_cell = ParametersTypeData::compute_cell_count(method()); + // If we are profiling parameters, we reserver an area near the end + // of the MDO after the slots for bytecodes (because there's no bci + // for method entry so they don't fit with the framework for the + // profiling of bytecodes). We store the offset within the MDO of + // this area (or -1 if no parameter is profiled) + if (args_cell > 0) { + object_size += DataLayout::compute_size_in_bytes(args_cell); + _parameters_type_data_di = data_size + extra_size + arg_data_size; + DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size); + dp->initialize(DataLayout::parameters_type_data_tag, 0, args_cell); + } else { + _parameters_type_data_di = -1; + } +#else + _parameters_type_data_di = -1; #endif // Set an initial hint. Don't use set_hint_di() because @@ -892,6 +1243,9 @@ void MethodData::print_data_on(outputStream* st) const { ResourceMark rm; ProfileData* data = first_data(); + if (_parameters_type_data_di != -1) { + parameters_type_data()->print_data_on(st); + } for ( ; is_valid(data); data = next_data(data)) { st->print("%d", dp_to_di(data->dp())); st->fill_to(6); @@ -940,3 +1294,99 @@ NEEDS_CLEANUP; // not yet implemented. } + +bool MethodData::profile_jsr292(methodHandle m, int bci) { + if (m->is_compiled_lambda_form()) { + return true; + } + + Bytecode_invoke inv(m , bci); + return inv.is_invokedynamic() || inv.is_invokehandle(); +} + +int MethodData::profile_arguments_flag() { + return TypeProfileLevel % 10; +} + +bool MethodData::profile_arguments() { + return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all; +} + +bool MethodData::profile_arguments_jsr292_only() { + return profile_arguments_flag() == type_profile_jsr292; +} + +bool MethodData::profile_all_arguments() { + return profile_arguments_flag() == type_profile_all; +} + +bool MethodData::profile_arguments_for_invoke(methodHandle m, int bci) { + if (!profile_arguments()) { + return false; + } + + if (profile_all_arguments()) { + return true; + } + + assert(profile_arguments_jsr292_only(), "inconsistent"); + return profile_jsr292(m, bci); +} + +int MethodData::profile_return_flag() { + return (TypeProfileLevel % 100) / 10; +} + +bool MethodData::profile_return() { + return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all; +} + +bool MethodData::profile_return_jsr292_only() { + return profile_return_flag() == type_profile_jsr292; +} + +bool MethodData::profile_all_return() { + return profile_return_flag() == type_profile_all; +} + +bool MethodData::profile_return_for_invoke(methodHandle m, int bci) { + if (!profile_return()) { + return false; + } + + if (profile_all_return()) { + return true; + } + + assert(profile_return_jsr292_only(), "inconsistent"); + return profile_jsr292(m, bci); +} + +int MethodData::profile_parameters_flag() { + return TypeProfileLevel / 100; +} + +bool MethodData::profile_parameters() { + return 
profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all; +} + +bool MethodData::profile_parameters_jsr292_only() { + return profile_parameters_flag() == type_profile_jsr292; +} + +bool MethodData::profile_all_parameters() { + return profile_parameters_flag() == type_profile_all; +} + +bool MethodData::profile_parameters_for_method(methodHandle m) { + if (!profile_parameters()) { + return false; + } + + if (profile_all_parameters()) { + return true; + } + + assert(profile_parameters_jsr292_only(), "inconsistent"); + return m->is_compiled_lambda_form(); +} diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/oops/methodData.hpp --- a/src/share/vm/oops/methodData.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/oops/methodData.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -117,7 +117,10 @@ ret_data_tag, branch_data_tag, multi_branch_data_tag, - arg_info_data_tag + arg_info_data_tag, + call_type_data_tag, + virtual_call_type_data_tag, + parameters_type_data_tag }; enum { @@ -165,7 +168,7 @@ // occurred, and the MDO shows N occurrences of X, we make the // simplifying assumption that all N occurrences can be blamed // on that BCI. - int trap_state() { + int trap_state() const { return ((_header._struct._flags >> trap_shift) & trap_mask); } @@ -175,11 +178,11 @@ _header._struct._flags = (new_state << trap_shift) | old_flags; } - u1 flags() { + u1 flags() const { return _header._struct._flags; } - u2 bci() { + u2 bci() const { return _header._struct._bci; } @@ -198,7 +201,7 @@ void release_set_cell_at(int index, intptr_t value) { OrderAccess::release_store_ptr(&_cells[index], value); } - intptr_t cell_at(int index) { + intptr_t cell_at(int index) const { return _cells[index]; } @@ -206,7 +209,7 @@ assert(flag_number < flag_limit, "oob"); _header._struct._flags |= (0x1 << flag_number); } - bool flag_at(int flag_number) { + bool flag_at(int flag_number) const { assert(flag_number < flag_limit, "oob"); return (_header._struct._flags & (0x1 << flag_number)) != 0; } @@ -254,19 +257,24 @@ class CounterData; class ReceiverTypeData; class VirtualCallData; +class VirtualCallTypeData; class RetData; +class CallTypeData; class JumpData; class BranchData; class ArrayData; class MultiBranchData; class ArgInfoData; - +class ParametersTypeData; // ProfileData // // A ProfileData object is created to refer to a section of profiling // data in a structured way. class ProfileData : public ResourceObj { + friend class TypeEntries; + friend class ReturnTypeEntry; + friend class TypeStackSlotEntries; private: #ifndef PRODUCT enum { @@ -280,6 +288,7 @@ protected: DataLayout* data() { return _data; } + const DataLayout* data() const { return _data; } enum { cell_size = DataLayout::cell_size @@ -287,7 +296,7 @@ public: // How many cells are in this? 
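// Decoding cheat-sheet for TypeProfileLevel as consumed by the
// profile_*_flag() helpers in methodData.cpp above (sketch; assumes the
// conventional digit values 0 = no profiling, 1 = jsr292 call sites
// only, 2 = all call sites). Digits are read from the right:
//
//   TypeProfileLevel = 120
//   arguments  : 120 % 10         == 0 -> off
//   return     : (120 % 100) / 10 == 2 -> profiled at all invokes
//   parameters : 120 / 100        == 1 -> compiled lambda forms only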
- virtual int cell_count() { + virtual int cell_count() const { ShouldNotReachHere(); return -1; } @@ -307,7 +316,7 @@ assert(0 <= index && index < cell_count(), "oob"); data()->release_set_cell_at(index, value); } - intptr_t intptr_at(int index) { + intptr_t intptr_at(int index) const { assert(0 <= index && index < cell_count(), "oob"); return data()->cell_at(index); } @@ -317,7 +326,7 @@ void release_set_uint_at(int index, uint value) { release_set_intptr_at(index, (intptr_t) value); } - uint uint_at(int index) { + uint uint_at(int index) const { return (uint)intptr_at(index); } void set_int_at(int index, int value) { @@ -326,23 +335,23 @@ void release_set_int_at(int index, int value) { release_set_intptr_at(index, (intptr_t) value); } - int int_at(int index) { + int int_at(int index) const { return (int)intptr_at(index); } - int int_at_unchecked(int index) { + int int_at_unchecked(int index) const { return (int)data()->cell_at(index); } void set_oop_at(int index, oop value) { set_intptr_at(index, cast_from_oop(value)); } - oop oop_at(int index) { + oop oop_at(int index) const { return cast_to_oop(intptr_at(index)); } void set_flag_at(int flag_number) { data()->set_flag_at(flag_number); } - bool flag_at(int flag_number) { + bool flag_at(int flag_number) const { return data()->flag_at(flag_number); } @@ -362,7 +371,7 @@ // Constructor for invalid ProfileData. ProfileData(); - u2 bci() { + u2 bci() const { return data()->bci(); } @@ -370,7 +379,7 @@ return (address)_data; } - int trap_state() { + int trap_state() const { return data()->trap_state(); } void set_trap_state(int new_state) { @@ -378,58 +387,73 @@ } // Type checking - virtual bool is_BitData() { return false; } - virtual bool is_CounterData() { return false; } - virtual bool is_JumpData() { return false; } - virtual bool is_ReceiverTypeData(){ return false; } - virtual bool is_VirtualCallData() { return false; } - virtual bool is_RetData() { return false; } - virtual bool is_BranchData() { return false; } - virtual bool is_ArrayData() { return false; } - virtual bool is_MultiBranchData() { return false; } - virtual bool is_ArgInfoData() { return false; } + virtual bool is_BitData() const { return false; } + virtual bool is_CounterData() const { return false; } + virtual bool is_JumpData() const { return false; } + virtual bool is_ReceiverTypeData()const { return false; } + virtual bool is_VirtualCallData() const { return false; } + virtual bool is_RetData() const { return false; } + virtual bool is_BranchData() const { return false; } + virtual bool is_ArrayData() const { return false; } + virtual bool is_MultiBranchData() const { return false; } + virtual bool is_ArgInfoData() const { return false; } + virtual bool is_CallTypeData() const { return false; } + virtual bool is_VirtualCallTypeData()const { return false; } + virtual bool is_ParametersTypeData() const { return false; } - BitData* as_BitData() { + BitData* as_BitData() const { assert(is_BitData(), "wrong type"); return is_BitData() ? (BitData*) this : NULL; } - CounterData* as_CounterData() { + CounterData* as_CounterData() const { assert(is_CounterData(), "wrong type"); return is_CounterData() ? (CounterData*) this : NULL; } - JumpData* as_JumpData() { + JumpData* as_JumpData() const { assert(is_JumpData(), "wrong type"); return is_JumpData() ? (JumpData*) this : NULL; } - ReceiverTypeData* as_ReceiverTypeData() { + ReceiverTypeData* as_ReceiverTypeData() const { assert(is_ReceiverTypeData(), "wrong type"); return is_ReceiverTypeData() ? 
(ReceiverTypeData*)this : NULL; } - VirtualCallData* as_VirtualCallData() { + VirtualCallData* as_VirtualCallData() const { assert(is_VirtualCallData(), "wrong type"); return is_VirtualCallData() ? (VirtualCallData*)this : NULL; } - RetData* as_RetData() { + RetData* as_RetData() const { assert(is_RetData(), "wrong type"); return is_RetData() ? (RetData*) this : NULL; } - BranchData* as_BranchData() { + BranchData* as_BranchData() const { assert(is_BranchData(), "wrong type"); return is_BranchData() ? (BranchData*) this : NULL; } - ArrayData* as_ArrayData() { + ArrayData* as_ArrayData() const { assert(is_ArrayData(), "wrong type"); return is_ArrayData() ? (ArrayData*) this : NULL; } - MultiBranchData* as_MultiBranchData() { + MultiBranchData* as_MultiBranchData() const { assert(is_MultiBranchData(), "wrong type"); return is_MultiBranchData() ? (MultiBranchData*)this : NULL; } - ArgInfoData* as_ArgInfoData() { + ArgInfoData* as_ArgInfoData() const { assert(is_ArgInfoData(), "wrong type"); return is_ArgInfoData() ? (ArgInfoData*)this : NULL; } + CallTypeData* as_CallTypeData() const { + assert(is_CallTypeData(), "wrong type"); + return is_CallTypeData() ? (CallTypeData*)this : NULL; + } + VirtualCallTypeData* as_VirtualCallTypeData() const { + assert(is_VirtualCallTypeData(), "wrong type"); + return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL; + } + ParametersTypeData* as_ParametersTypeData() const { + assert(is_ParametersTypeData(), "wrong type"); + return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL; + } // Subclass specific initialization @@ -443,15 +467,15 @@ // an oop in a ProfileData to the ci equivalent. Generally speaking, // most ProfileData don't require any translation, so we provide the null // translation here, and the required translators are in the ci subclasses. 
- virtual void translate_from(ProfileData* data) {} + virtual void translate_from(const ProfileData* data) {} - virtual void print_data_on(outputStream* st) { + virtual void print_data_on(outputStream* st) const { ShouldNotReachHere(); } #ifndef PRODUCT - void print_shared(outputStream* st, const char* name); - void tab(outputStream* st); + void print_shared(outputStream* st, const char* name) const; + void tab(outputStream* st, bool first = false) const; #endif }; @@ -474,13 +498,13 @@ BitData(DataLayout* layout) : ProfileData(layout) { } - virtual bool is_BitData() { return true; } + virtual bool is_BitData() const { return true; } static int static_cell_count() { return bit_cell_count; } - virtual int cell_count() { + virtual int cell_count() const { return static_cell_count(); } @@ -506,7 +530,7 @@ } #ifndef PRODUCT - void print_data_on(outputStream* st); + void print_data_on(outputStream* st) const; #endif }; @@ -522,18 +546,18 @@ public: CounterData(DataLayout* layout) : BitData(layout) {} - virtual bool is_CounterData() { return true; } + virtual bool is_CounterData() const { return true; } static int static_cell_count() { return counter_cell_count; } - virtual int cell_count() { + virtual int cell_count() const { return static_cell_count(); } // Direct accessor - uint count() { + uint count() const { return uint_at(count_off); } @@ -550,7 +574,7 @@ } #ifndef PRODUCT - void print_data_on(outputStream* st); + void print_data_on(outputStream* st) const; #endif }; @@ -578,18 +602,18 @@ layout->tag() == DataLayout::branch_data_tag, "wrong type"); } - virtual bool is_JumpData() { return true; } + virtual bool is_JumpData() const { return true; } static int static_cell_count() { return jump_cell_count; } - virtual int cell_count() { + virtual int cell_count() const { return static_cell_count(); } // Direct accessor - uint taken() { + uint taken() const { return uint_at(taken_off_set); } @@ -606,7 +630,7 @@ return cnt; } - int displacement() { + int displacement() const { return int_at(displacement_off_set); } @@ -623,7 +647,418 @@ void post_initialize(BytecodeStream* stream, MethodData* mdo); #ifndef PRODUCT - void print_data_on(outputStream* st); + void print_data_on(outputStream* st) const; +#endif +}; + +// Entries in a ProfileData object to record types: it can either be +// none (no profile), unknown (conflicting profile data) or a klass if +// a single one is seen. Whether a null reference was seen is also +// recorded. No counter is associated with the type and a single type +// is tracked (unlike VirtualCallData). +class TypeEntries { + +public: + + // A single cell is used to record information for a type: + // - the cell is initialized to 0 + // - when a type is discovered it is stored in the cell + // - bit zero of the cell is used to record whether a null reference + // was encountered or not + // - bit 1 is set to record a conflict in the type information + + enum { + null_seen = 1, + type_mask = ~null_seen, + type_unknown = 2, + status_bits = null_seen | type_unknown, + type_klass_mask = ~status_bits + }; + + // what to initialize a cell to + static intptr_t type_none() { + return 0; + } + + // null seen = bit 0 set? + static bool was_null_seen(intptr_t v) { + return (v & null_seen) != 0; + } + + // conflicting type information = bit 1 set? + static bool is_type_unknown(intptr_t v) { + return (v & type_unknown) != 0; + } + + // not type information yet = all bits cleared, ignoring bit 0? 
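// Worked examples for this cell encoding (pointer values are invented;
// Klass* values are at least 4-byte aligned, so bits 0-1 are free to
// hold the status):
//
//   cell 0x0        -> type_none, null not seen
//   cell 0x1        -> type_none, but a null reference was seen
//   cell 0x2        -> type_unknown (conflicting types seen)
//   cell 0x7f2a40a8 -> Klass* 0x7f2a40a8, null not seen
//   cell 0x7f2a40a9 -> klass_part() == 0x7f2a40a8, null also seen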
+ static bool is_type_none(intptr_t v) { + return (v & type_mask) == 0; + } + + // recorded type: cell without bit 0 and 1 + static intptr_t klass_part(intptr_t v) { + intptr_t r = v & type_klass_mask; + return r; + } + + // type recorded + static Klass* valid_klass(intptr_t k) { + if (!is_type_none(k) && + !is_type_unknown(k)) { + Klass* res = (Klass*)klass_part(k); + assert(res != NULL, "invalid"); + return res; + } else { + return NULL; + } + } + + static intptr_t with_status(intptr_t k, intptr_t in) { + return k | (in & status_bits); + } + + static intptr_t with_status(Klass* k, intptr_t in) { + return with_status((intptr_t)k, in); + } + +#ifndef PRODUCT + static void print_klass(outputStream* st, intptr_t k); +#endif + + // GC support + static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p); + +protected: + // ProfileData object these entries are part of + ProfileData* _pd; + // offset within the ProfileData object where the entries start + const int _base_off; + + TypeEntries(int base_off) + : _base_off(base_off), _pd(NULL) {} + + void set_intptr_at(int index, intptr_t value) { + _pd->set_intptr_at(index, value); + } + + intptr_t intptr_at(int index) const { + return _pd->intptr_at(index); + } + +public: + void set_profile_data(ProfileData* pd) { + _pd = pd; + } +}; + +// Type entries used for arguments passed at a call and parameters on +// method entry. 2 cells per entry: one for the type encoded as in +// TypeEntries and one initialized with the stack slot where the +// profiled object is to be found so that the interpreter can locate +// it quickly. +class TypeStackSlotEntries : public TypeEntries { + +private: + enum { + stack_slot_entry, + type_entry, + per_arg_cell_count + }; + + // offset of cell for stack slot for entry i within ProfileData object + int stack_slot_offset(int i) const { + return _base_off + stack_slot_local_offset(i); + } + +protected: + const int _number_of_entries; + + // offset of cell for type for entry i within ProfileData object + int type_offset(int i) const { + return _base_off + type_local_offset(i); + } + +public: + + TypeStackSlotEntries(int base_off, int nb_entries) + : TypeEntries(base_off), _number_of_entries(nb_entries) {} + + static int compute_cell_count(Symbol* signature, bool include_receiver, int max); + + void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver); + + // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries + static int stack_slot_local_offset(int i) { + return i * per_arg_cell_count + stack_slot_entry; + } + + // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries + static int type_local_offset(int i) { + return i * per_arg_cell_count + type_entry; + } + + // stack slot for entry i + uint stack_slot(int i) const { + assert(i >= 0 && i < _number_of_entries, "oob"); + return _pd->uint_at(stack_slot_offset(i)); + } + + // set stack slot for entry i + void set_stack_slot(int i, uint num) { + assert(i >= 0 && i < _number_of_entries, "oob"); + _pd->set_uint_at(stack_slot_offset(i), num); + } + + // type for entry i + intptr_t type(int i) const { + assert(i >= 0 && i < _number_of_entries, "oob"); + return _pd->intptr_at(type_offset(i)); + } + + // set type for entry i + void set_type(int i, intptr_t k) { + assert(i >= 0 && i < _number_of_entries, "oob"); + _pd->set_intptr_at(type_offset(i), k); + } + + static ByteSize per_arg_size() { + return in_ByteSize(per_arg_cell_count * DataLayout::cell_size); + } + + 
static int per_arg_count() { + return per_arg_cell_count ; + } + + // GC support + void clean_weak_klass_links(BoolObjectClosure* is_alive_closure); + +#ifndef PRODUCT + void print_data_on(outputStream* st) const; +#endif +}; + +// Type entry used for return from a call. A single cell to record the +// type. +class ReturnTypeEntry : public TypeEntries { + +private: + enum { + cell_count = 1 + }; + +public: + ReturnTypeEntry(int base_off) + : TypeEntries(base_off) {} + + void post_initialize() { + set_type(type_none()); + } + + intptr_t type() const { + return _pd->intptr_at(_base_off); + } + + void set_type(intptr_t k) { + _pd->set_intptr_at(_base_off, k); + } + + static int static_cell_count() { + return cell_count; + } + + static ByteSize size() { + return in_ByteSize(cell_count * DataLayout::cell_size); + } + + ByteSize type_offset() { + return DataLayout::cell_offset(_base_off); + } + + // GC support + void clean_weak_klass_links(BoolObjectClosure* is_alive_closure); + +#ifndef PRODUCT + void print_data_on(outputStream* st) const; +#endif +}; + +// Entries to collect type information at a call: contains arguments +// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a +// number of cells. Because the number of cells for the return type is +// smaller than the number of cells for the type of an arguments, the +// number of cells is used to tell how many arguments are profiled and +// whether a return value is profiled. See has_arguments() and +// has_return(). +class TypeEntriesAtCall { +private: + static int stack_slot_local_offset(int i) { + return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i); + } + + static int argument_type_local_offset(int i) { + return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);; + } + +public: + + static int header_cell_count() { + return 1; + } + + static int cell_count_local_offset() { + return 0; + } + + static int compute_cell_count(BytecodeStream* stream); + + static void initialize(DataLayout* dl, int base, int cell_count) { + int off = base + cell_count_local_offset(); + dl->set_cell_at(off, cell_count - base - header_cell_count()); + } + + static bool arguments_profiling_enabled(); + static bool return_profiling_enabled(); + + // Code generation support + static ByteSize cell_count_offset() { + return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size); + } + + static ByteSize args_data_offset() { + return in_ByteSize(header_cell_count() * DataLayout::cell_size); + } + + static ByteSize stack_slot_offset(int i) { + return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size); + } + + static ByteSize argument_type_offset(int i) { + return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size); + } +}; + +// CallTypeData +// +// A CallTypeData is used to access profiling information about a non +// virtual call for which we collect type information about arguments +// and return value. 
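// Cell-layout sketch for such a call site (invented call; receiver types
// are tracked separately by ReceiverTypeData, so the arguments profiled
// here are only the signature's reference parameters):
//
//   a call to m(String, int, Object) returning Object
//   args: 2 references * 2 cells = 4, return: 1 cell, header: 1 cell
//   [CounterData cells][header = 5][slot0][type0][slot1][type1][ret]
//
//   stored count (without header)  = 5
//   has_arguments():  5 >= 2           -> true
//   number_of_arguments(): 5 / 2       -> 2
//   has_return():     5 % 2 != 0       -> true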
+class CallTypeData : public CounterData { +private: + // entries for arguments if any + TypeStackSlotEntries _args; + // entry for return type if any + ReturnTypeEntry _ret; + + int cell_count_global_offset() const { + return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset(); + } + + // number of cells not counting the header + int cell_count_no_header() const { + return uint_at(cell_count_global_offset()); + } + + void check_number_of_arguments(int total) { + assert(number_of_arguments() == total, "should be set in DataLayout::initialize"); + } + +public: + CallTypeData(DataLayout* layout) : + CounterData(layout), + _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()), + _ret(cell_count() - ReturnTypeEntry::static_cell_count()) + { + assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type"); + // Some compilers (VC++) don't want this passed in member initialization list + _args.set_profile_data(this); + _ret.set_profile_data(this); + } + + const TypeStackSlotEntries* args() const { + assert(has_arguments(), "no profiling of arguments"); + return &_args; + } + + const ReturnTypeEntry* ret() const { + assert(has_return(), "no profiling of return value"); + return &_ret; + } + + virtual bool is_CallTypeData() const { return true; } + + static int static_cell_count() { + return -1; + } + + static int compute_cell_count(BytecodeStream* stream) { + return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream); + } + + static void initialize(DataLayout* dl, int cell_count) { + TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count); + } + + virtual void post_initialize(BytecodeStream* stream, MethodData* mdo); + + virtual int cell_count() const { + return CounterData::static_cell_count() + + TypeEntriesAtCall::header_cell_count() + + int_at_unchecked(cell_count_global_offset()); + } + + int number_of_arguments() const { + return cell_count_no_header() / TypeStackSlotEntries::per_arg_count(); + } + + void set_argument_type(int i, Klass* k) { + assert(has_arguments(), "no arguments!"); + intptr_t current = _args.type(i); + _args.set_type(i, TypeEntries::with_status(k, current)); + } + + void set_return_type(Klass* k) { + assert(has_return(), "no return!"); + intptr_t current = _ret.type(); + _ret.set_type(TypeEntries::with_status(k, current)); + } + + // An entry for a return value takes less space than an entry for an + // argument so if the number of cells exceeds the number of cells + // needed for an argument, this object contains type information for + // at least one argument. + bool has_arguments() const { + bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count(); + assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments"); + return res; + } + + // An entry for a return value takes less space than an entry for an + // argument, so if the remainder of the number of cells divided by + // the number of cells for an argument is not null, a return value + // is profiled in this object. 
+ bool has_return() const { + bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0; + assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values"); + return res; + } + + // Code generation support + static ByteSize args_data_offset() { + return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset(); + } + + // GC support + virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) { + if (has_arguments()) { + _args.clean_weak_klass_links(is_alive_closure); + } + if (has_return()) { + _ret.clean_weak_klass_links(is_alive_closure); + } + } + +#ifndef PRODUCT + virtual void print_data_on(outputStream* st) const; #endif }; @@ -659,16 +1094,17 @@ public: ReceiverTypeData(DataLayout* layout) : CounterData(layout) { assert(layout->tag() == DataLayout::receiver_type_data_tag || - layout->tag() == DataLayout::virtual_call_data_tag, "wrong type"); + layout->tag() == DataLayout::virtual_call_data_tag || + layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type"); } - virtual bool is_ReceiverTypeData() { return true; } + virtual bool is_ReceiverTypeData() const { return true; } static int static_cell_count() { return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count GRAAL_ONLY(+ 1); } - virtual int cell_count() { + virtual int cell_count() const { return static_cell_count(); } @@ -683,7 +1119,7 @@ return count0_offset + row * receiver_type_row_cell_count; } - Klass* receiver(uint row) { + Klass* receiver(uint row) const { assert(row < row_limit(), "oob"); Klass* recv = (Klass*)intptr_at(receiver_cell_index(row)); @@ -696,7 +1132,7 @@ set_intptr_at(receiver_cell_index(row), (uintptr_t)k); } - uint receiver_count(uint row) { + uint receiver_count(uint row) const { assert(row < row_limit(), "oob"); return uint_at(receiver_count_cell_index(row)); } @@ -759,8 +1195,8 @@ virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure); #ifndef PRODUCT - void print_receiver_data_on(outputStream* st); - void print_data_on(outputStream* st); + void print_receiver_data_on(outputStream* st) const; + void print_data_on(outputStream* st) const; #endif }; @@ -771,10 +1207,11 @@ class VirtualCallData : public ReceiverTypeData { public: VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) { - assert(layout->tag() == DataLayout::virtual_call_data_tag, "wrong type"); + assert(layout->tag() == DataLayout::virtual_call_data_tag || + layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type"); } - virtual bool is_VirtualCallData() { return true; } + virtual bool is_VirtualCallData() const { return true; } static int static_cell_count() { // At this point we could add more profile state, e.g., for arguments. @@ -782,7 +1219,7 @@ return ReceiverTypeData::static_cell_count() GRAAL_ONLY(+ (uint) MethodProfileWidth * receiver_type_row_cell_count); } - virtual int cell_count() { + virtual int cell_count() const { return static_cell_count(); } @@ -839,7 +1276,133 @@ #endif #ifndef PRODUCT - void print_data_on(outputStream* st); + void print_data_on(outputStream* st) const; +#endif +}; + +// VirtualCallTypeData +// +// A VirtualCallTypeData is used to access profiling information about +// a virtual call for which we collect type information about +// arguments and return value. 
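// Layout relation (sketch): a VirtualCallTypeData is VirtualCallData's
// receiver-row table followed by the same header/argument/return cells
// as CallTypeData, so the type cells begin after the receiver rows:
//
//   [count][recv0][count0]...[recvN][countN][header][slot/type ...][ret]
//
// which is why args_data_offset() below starts at
// cell_offset(VirtualCallData::static_cell_count()).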
+class VirtualCallTypeData : public VirtualCallData { +private: + // entries for arguments if any + TypeStackSlotEntries _args; + // entry for return type if any + ReturnTypeEntry _ret; + + int cell_count_global_offset() const { + return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset(); + } + + // number of cells not counting the header + int cell_count_no_header() const { + return uint_at(cell_count_global_offset()); + } + + void check_number_of_arguments(int total) { + assert(number_of_arguments() == total, "should be set in DataLayout::initialize"); + } + +public: + VirtualCallTypeData(DataLayout* layout) : + VirtualCallData(layout), + _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()), + _ret(cell_count() - ReturnTypeEntry::static_cell_count()) + { + assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type"); + // Some compilers (VC++) don't want this passed in member initialization list + _args.set_profile_data(this); + _ret.set_profile_data(this); + } + + const TypeStackSlotEntries* args() const { + assert(has_arguments(), "no profiling of arguments"); + return &_args; + } + + const ReturnTypeEntry* ret() const { + assert(has_return(), "no profiling of return value"); + return &_ret; + } + + virtual bool is_VirtualCallTypeData() const { return true; } + + static int static_cell_count() { + return -1; + } + + static int compute_cell_count(BytecodeStream* stream) { + return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream); + } + + static void initialize(DataLayout* dl, int cell_count) { + TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count); + } + + virtual void post_initialize(BytecodeStream* stream, MethodData* mdo); + + virtual int cell_count() const { + return VirtualCallData::static_cell_count() + + TypeEntriesAtCall::header_cell_count() + + int_at_unchecked(cell_count_global_offset()); + } + + int number_of_arguments() const { + return cell_count_no_header() / TypeStackSlotEntries::per_arg_count(); + } + + void set_argument_type(int i, Klass* k) { + assert(has_arguments(), "no arguments!"); + intptr_t current = _args.type(i); + _args.set_type(i, TypeEntries::with_status(k, current)); + } + + void set_return_type(Klass* k) { + assert(has_return(), "no return!"); + intptr_t current = _ret.type(); + _ret.set_type(TypeEntries::with_status(k, current)); + } + + // An entry for a return value takes less space than an entry for an + // argument, so if the remainder of the number of cells divided by + // the number of cells for an argument is not null, a return value + // is profiled in this object. + bool has_return() const { + bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0; + assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values"); + return res; + } + + // An entry for a return value takes less space than an entry for an + // argument so if the number of cells exceeds the number of cells + // needed for an argument, this object contains type information for + // at least one argument. 
+ bool has_arguments() const { + bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count(); + assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments"); + return res; + } + + // Code generation support + static ByteSize args_data_offset() { + return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset(); + } + + // GC support + virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) { + ReceiverTypeData::clean_weak_klass_links(is_alive_closure); + if (has_arguments()) { + _args.clean_weak_klass_links(is_alive_closure); + } + if (has_return()) { + _ret.clean_weak_klass_links(is_alive_closure); + } + } + +#ifndef PRODUCT + virtual void print_data_on(outputStream* st) const; #endif }; @@ -882,7 +1445,7 @@ assert(layout->tag() == DataLayout::ret_data_tag, "wrong type"); } - virtual bool is_RetData() { return true; } + virtual bool is_RetData() const { return true; } enum { no_bci = -1 // value of bci when bci1/2 are not in use. @@ -892,7 +1455,7 @@ return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count; } - virtual int cell_count() { + virtual int cell_count() const { return static_cell_count(); } @@ -910,13 +1473,13 @@ } // Direct accessors - int bci(uint row) { + int bci(uint row) const { return int_at(bci_cell_index(row)); } - uint bci_count(uint row) { + uint bci_count(uint row) const { return uint_at(bci_count_cell_index(row)); } - int bci_displacement(uint row) { + int bci_displacement(uint row) const { return int_at(bci_displacement_cell_index(row)); } @@ -938,7 +1501,7 @@ void post_initialize(BytecodeStream* stream, MethodData* mdo); #ifndef PRODUCT - void print_data_on(outputStream* st); + void print_data_on(outputStream* st) const; #endif }; @@ -963,18 +1526,18 @@ assert(layout->tag() == DataLayout::branch_data_tag, "wrong type"); } - virtual bool is_BranchData() { return true; } + virtual bool is_BranchData() const { return true; } static int static_cell_count() { return branch_cell_count; } - virtual int cell_count() { + virtual int cell_count() const { return static_cell_count(); } // Direct accessor - uint not_taken() { + uint not_taken() const { return uint_at(not_taken_off_set); } @@ -1002,7 +1565,7 @@ void post_initialize(BytecodeStream* stream, MethodData* mdo); #ifndef PRODUCT - void print_data_on(outputStream* st); + void print_data_on(outputStream* st) const; #endif }; @@ -1020,15 +1583,15 @@ array_start_off_set }; - uint array_uint_at(int index) { + uint array_uint_at(int index) const { int aindex = index + array_start_off_set; return uint_at(aindex); } - int array_int_at(int index) { + int array_int_at(int index) const { int aindex = index + array_start_off_set; return int_at(aindex); } - oop array_oop_at(int index) { + oop array_oop_at(int index) const { int aindex = index + array_start_off_set; return oop_at(aindex); } @@ -1045,17 +1608,17 @@ public: ArrayData(DataLayout* layout) : ProfileData(layout) {} - virtual bool is_ArrayData() { return true; } + virtual bool is_ArrayData() const { return true; } static int static_cell_count() { return -1; } - int array_len() { + int array_len() const { return int_at_unchecked(array_len_off_set); } - virtual int cell_count() { + virtual int cell_count() const { return array_len() + 1; } @@ -1102,29 +1665,29 @@ assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type"); } - virtual bool is_MultiBranchData() { return true; } + virtual bool is_MultiBranchData() const { return true; } static int 
compute_cell_count(BytecodeStream* stream); - int number_of_cases() { + int number_of_cases() const { int alen = array_len() - 2; // get rid of default case here. assert(alen % per_case_cell_count == 0, "must be even"); return (alen / per_case_cell_count); } - uint default_count() { + uint default_count() const { return array_uint_at(default_count_off_set); } - int default_displacement() { + int default_displacement() const { return array_int_at(default_disaplacement_off_set); } - uint count_at(int index) { + uint count_at(int index) const { return array_uint_at(case_array_start + index * per_case_cell_count + relative_count_off_set); } - int displacement_at(int index) { + int displacement_at(int index) const { return array_int_at(case_array_start + index * per_case_cell_count + relative_displacement_off_set); @@ -1159,7 +1722,7 @@ void post_initialize(BytecodeStream* stream, MethodData* mdo); #ifndef PRODUCT - void print_data_on(outputStream* st); + void print_data_on(outputStream* st) const; #endif }; @@ -1170,14 +1733,14 @@ assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type"); } - virtual bool is_ArgInfoData() { return true; } + virtual bool is_ArgInfoData() const { return true; } - int number_of_args() { + int number_of_args() const { return array_len(); } - uint arg_modified(int arg) { + uint arg_modified(int arg) const { return array_uint_at(arg); } @@ -1186,10 +1749,79 @@ } #ifndef PRODUCT - void print_data_on(outputStream* st); + void print_data_on(outputStream* st) const; #endif }; +// ParametersTypeData +// +// A ParametersTypeData is used to access profiling information about +// types of parameters to a method +class ParametersTypeData : public ArrayData { + +private: + TypeStackSlotEntries _parameters; + + static int stack_slot_local_offset(int i) { + assert_profiling_enabled(); + return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i); + } + + static int type_local_offset(int i) { + assert_profiling_enabled(); + return array_start_off_set + TypeStackSlotEntries::type_local_offset(i); + } + + static bool profiling_enabled(); + static void assert_profiling_enabled() { + assert(profiling_enabled(), "method parameters profiling should be on"); + } + +public: + ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) { + assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type"); + // Some compilers (VC++) don't want this passed in member initialization list + _parameters.set_profile_data(this); + } + + static int compute_cell_count(Method* m); + + virtual bool is_ParametersTypeData() const { return true; } + + virtual void post_initialize(BytecodeStream* stream, MethodData* mdo); + + int number_of_parameters() const { + return array_len() / TypeStackSlotEntries::per_arg_count(); + } + + const TypeStackSlotEntries* parameters() const { return &_parameters; } + + uint stack_slot(int i) const { + return _parameters.stack_slot(i); + } + + void set_type(int i, Klass* k) { + intptr_t current = _parameters.type(i); + _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current)); + } + + virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) { + _parameters.clean_weak_klass_links(is_alive_closure); + } + +#ifndef PRODUCT + virtual void print_data_on(outputStream* st) const; +#endif + + static ByteSize stack_slot_offset(int i) { + return cell_offset(stack_slot_local_offset(i)); + } + + static ByteSize type_offset(int i) { + return cell_offset(type_local_offset(i)); 
+ } +}; + // MethodData* // // A MethodData* holds information which has been collected about @@ -1302,6 +1934,10 @@ // Size of _data array in bytes. (Excludes header and extra_data fields.) int _data_size; + // data index for the area dedicated to parameters. -1 if no + // parameter profiling. + int _parameters_type_data_di; + // Beginning of the data entries intptr_t _data[1]; @@ -1357,6 +1993,24 @@ // return the argument info cell ArgInfoData *arg_info(); + enum { + no_type_profile = 0, + type_profile_jsr292 = 1, + type_profile_all = 2 + }; + + static bool profile_jsr292(methodHandle m, int bci); + static int profile_arguments_flag(); + static bool profile_arguments_jsr292_only(); + static bool profile_all_arguments(); + static bool profile_arguments_for_invoke(methodHandle m, int bci); + static int profile_return_flag(); + static bool profile_all_return(); + static bool profile_return_for_invoke(methodHandle m, int bci); + static int profile_parameters_flag(); + static bool profile_parameters_jsr292_only(); + static bool profile_all_parameters(); + public: static int header_size() { return sizeof(MethodData)/wordSize; @@ -1559,6 +2213,16 @@ } } + // Return pointer to area dedicated to parameters in MDO + ParametersTypeData* parameters_type_data() const { + return _parameters_type_data_di != -1 ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL; + } + + int parameters_type_data_di() const { + assert(_parameters_type_data_di != -1, "no args type data"); + return _parameters_type_data_di; + } + // Support for code generation static ByteSize data_offset() { return byte_offset_of(MethodData, _data[0]); @@ -1575,6 +2239,10 @@ return byte_offset_of(MethodData, _backedge_counter); } + static ByteSize parameters_type_data_di_offset() { + return byte_offset_of(MethodData, _parameters_type_data_di); + } + // Deallocation support - no pointer fields to deallocate void deallocate_contents(ClassLoaderData* loader_data) {} @@ -1597,6 +2265,12 @@ // verification void verify_on(outputStream* st); void verify_data_on(outputStream* st); + + static bool profile_parameters_for_method(methodHandle m); + static bool profile_arguments(); + static bool profile_return(); + static bool profile_parameters(); + static bool profile_return_jsr292_only(); }; #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/bytecodeInfo.cpp --- a/src/share/vm/opto/bytecodeInfo.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/bytecodeInfo.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -197,6 +197,7 @@ // negative filter: should callee NOT be inlined? bool InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, + JVMState* jvms, WarmCallInfo* wci_result) { const char* fail_msg = NULL; @@ -226,7 +227,7 @@ // don't inline exception code unless the top method belongs to an // exception class if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) { - ciMethod* top_method = caller_jvms() ? caller_jvms()->of_depth(1)->method() : method(); + ciMethod* top_method = jvms->caller() != NULL ? 
jvms->caller()->of_depth(1)->method() : method();
if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
wci_result->set_profit(wci_result->profit() * 0.1);
}
@@ -328,7 +329,7 @@
// return true if ok
// Relocated from "InliningClosure::try_to_inline"
bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
- int caller_bci, ciCallProfile& profile,
+ int caller_bci, JVMState* jvms, ciCallProfile& profile,
WarmCallInfo* wci_result, bool& should_delay) {
// Old algorithm had funny accumulating BC-size counters
@@ -346,7 +347,7 @@
wci_result)) {
return false;
}
- if (should_not_inline(callee_method, caller_method, wci_result)) {
+ if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
return false;
}
@@ -388,6 +389,10 @@
return false;
}
if (inline_level() > _max_inline_level) {
+ if (callee_method->force_inline() && inline_level() > MaxForceInlineLevel) {
+ set_msg("MaxForceInlineLevel");
+ return false;
+ }
if (!callee_method->force_inline() || !IncrementalInline) {
set_msg("inlining too deep");
return false;
@@ -397,24 +402,35 @@
}
// detect direct and indirect recursive inlining
- if (!callee_method->is_compiled_lambda_form()) {
+ {
// count the current method and the callee
- int inline_level = (method() == callee_method) ? 1 : 0;
- if (inline_level > MaxRecursiveInlineLevel) {
- set_msg("recursively inlining too deep");
- return false;
+ const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
+ int inline_level = 0;
+ if (!is_compiled_lambda_form) {
+ if (method() == callee_method) {
+ inline_level++;
+ }
}
// count callers of current method and callee
- JVMState* jvms = caller_jvms();
- while (jvms != NULL && jvms->has_method()) {
- if (jvms->method() == callee_method) {
- inline_level++;
- if (inline_level > MaxRecursiveInlineLevel) {
- set_msg("recursively inlining too deep");
- return false;
+ Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
+ for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
+ if (j->method() == callee_method) {
+ if (is_compiled_lambda_form) {
+ // Since compiled lambda forms are heavily reused we allow recursive inlining. If it is truly
+ // a recursion (using the same "receiver") we limit inlining; otherwise we can easily blow the
+ // compiler stack.
+ Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
+ if (caller_argument0 == callee_argument0) {
+ inline_level++;
+ }
+ } else {
+ inline_level++;
+ }
}
- jvms = jvms->caller();
+ }
+ if (inline_level > MaxRecursiveInlineLevel) {
+ set_msg("recursive inlining is too deep");
+ return false;
}
}
@@ -536,7 +552,7 @@
// Check if inlining policy says no.
WarmCallInfo wci = *(initial_wci); bool success = try_to_inline(callee_method, caller_method, caller_bci, - profile, &wci, should_delay); + jvms, profile, &wci, should_delay); #ifndef PRODUCT if (UseOldInlining && InlineWarmCalls diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/c2_globals.hpp --- a/src/share/vm/opto/c2_globals.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/c2_globals.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -638,7 +638,13 @@ "Find best control for expensive operations") \ \ product(bool, UseMathExactIntrinsics, true, \ - "Enables intrinsification of various java.lang.Math funcitons") + "Enables intrinsification of various java.lang.Math functions") \ + \ + experimental(bool, ReplaceInParentMaps, false, \ + "Propagate type improvements in callers of inlinee if possible") \ + \ + experimental(bool, UseTypeSpeculation, false, \ + "Speculatively propagate types from profiles") C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG) diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/c2compiler.cpp --- a/src/share/vm/opto/c2compiler.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/c2compiler.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -44,9 +44,6 @@ # include "adfiles/ad_ppc.hpp" #endif - -volatile int C2Compiler::_runtimes = uninitialized; - // register information defined by ADLC extern const char register_save_policy[]; extern const int register_save_type[]; @@ -57,7 +54,7 @@ const char* C2Compiler::retry_no_escape_analysis() { return "retry without escape analysis"; } -void C2Compiler::initialize_runtime() { +bool C2Compiler::init_c2_runtime() { // Check assumptions used while running ADLC Compile::adlc_verification(); @@ -90,41 +87,31 @@ CompilerThread* thread = CompilerThread::current(); - HandleMark handle_mark(thread); - - OptoRuntime::generate(thread->env()); - + HandleMark handle_mark(thread); + return OptoRuntime::generate(thread->env()); } void C2Compiler::initialize() { - - // This method can only be called once per C2Compiler object // The first compiler thread that gets here will initialize the - // small amount of global state (and runtime stubs) that c2 needs. + // small amount of global state (and runtime stubs) that C2 needs. // There is a race possible once at startup and then we're fine // Note that this is being called from a compiler thread not the // main startup thread. - - if (_runtimes != initialized) { - initialize_runtimes( initialize_runtime, &_runtimes); + if (should_perform_init()) { + bool successful = C2Compiler::init_c2_runtime(); + int new_state = (successful) ? initialized : failed; + set_state(new_state); } - - // Mark this compiler object as ready to roll - mark_initialized(); } -void C2Compiler::compile_method(ciEnv* env, - ciMethod* target, - int entry_bci) { - if (!is_initialized()) { - initialize(); - } +void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci) { + assert(is_initialized(), "Compiler thread must be initialized"); + bool subsume_loads = SubsumeLoads; - bool do_escape_analysis = DoEscapeAnalysis && - !env->jvmti_can_access_local_variables(); + bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables(); bool eliminate_boxing = EliminateAutoBox; while (!env->failing()) { // Attempt to compile while subsuming loads into machine instructions. 
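
The initialize() rework above replaces the shared volatile _runtimes flag with the should_perform_init()/set_state() protocol, which also lets a failed stub generation be recorded as a terminal state instead of retried. A minimal stand-alone model of that protocol, with a plain atomic standing in for whatever synchronization the VM really uses (the names below are illustrative stand-ins, not the actual AbstractCompiler API):

#include <atomic>

// States mirroring the patch: one thread performs init, everyone else
// observes the outcome.
enum CompilerState { uninitialized, initializing, initialized, failed };

struct CompilerInit {
  std::atomic<int> _state{uninitialized};

  // Exactly one caller wins the race and must later publish a final state.
  bool should_perform_init() {
    int expected = uninitialized;
    return _state.compare_exchange_strong(expected, initializing);
  }
  void set_state(int new_state) { _state.store(new_state); }
  bool is_initialized() const { return _state.load() == initialized; }
};

// Shape of C2Compiler::initialize(): record success or failure exactly once.
void initialize(CompilerInit& c, bool (*init_c2_runtime)()) {
  if (c.should_perform_init()) {
    c.set_state(init_c2_runtime() ? initialized : failed);
  }
}

Because the losing threads no longer initialize lazily, compile_method() can simply assert is_initialized(), as the hunk above does.
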
diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/c2compiler.hpp --- a/src/share/vm/opto/c2compiler.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/c2compiler.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -28,9 +28,8 @@ #include "compiler/abstractCompiler.hpp" class C2Compiler : public AbstractCompiler { -private: - - static void initialize_runtime(); + private: + static bool init_c2_runtime(); public: C2Compiler() : AbstractCompiler(c2) {} @@ -38,16 +37,10 @@ // Name const char *name() { return "C2"; } - static volatile int _runtimes; - #ifdef TIERED virtual bool is_c2() { return true; }; #endif // TIERED - // Customization - bool needs_adapters () { return true; } - bool needs_stubs () { return true; } - void initialize(); // Compilation entry point for methods diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/callGenerator.cpp --- a/src/share/vm/opto/callGenerator.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/callGenerator.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -63,12 +63,12 @@ } virtual bool is_parse() const { return true; } - virtual JVMState* generate(JVMState* jvms); + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); int is_osr() { return _is_osr; } }; -JVMState* ParseGenerator::generate(JVMState* jvms) { +JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) { Compile* C = Compile::current(); if (is_osr()) { @@ -80,7 +80,7 @@ return NULL; // bailing out of the compile; do not try to parse } - Parse parser(jvms, method(), _expected_uses); + Parse parser(jvms, method(), _expected_uses, parent_parser); // Grab signature for matching/allocation #ifdef ASSERT if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) { @@ -119,12 +119,12 @@ _separate_io_proj(separate_io_proj) { } - virtual JVMState* generate(JVMState* jvms); + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); CallStaticJavaNode* call_node() const { return _call_node; } }; -JVMState* DirectCallGenerator::generate(JVMState* jvms) { +JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); bool is_static = method()->is_static(); address target = is_static ? SharedRuntime::get_resolve_static_call_stub() @@ -171,10 +171,10 @@ vtable_index >= 0, "either invalid or usable"); } virtual bool is_virtual() const { return true; } - virtual JVMState* generate(JVMState* jvms); + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; -JVMState* VirtualCallGenerator::generate(JVMState* jvms) { +JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); Node* receiver = kit.argument(0); @@ -276,7 +276,7 @@ // Convert the CallStaticJava into an inline virtual void do_late_inline(); - virtual JVMState* generate(JVMState* jvms) { + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { Compile *C = Compile::current(); C->print_inlining_skip(this); @@ -290,7 +290,7 @@ // that the late inlining logic can distinguish between fall // through and exceptional uses of the memory and io projections // as is done for allocations and macro expansion. 
- return DirectCallGenerator::generate(jvms); + return DirectCallGenerator::generate(jvms, parent_parser); } virtual void print_inlining_late(const char* msg) { @@ -389,7 +389,7 @@ } // Now perform the inling using the synthesized JVMState - JVMState* new_jvms = _inline_cg->generate(jvms); + JVMState* new_jvms = _inline_cg->generate(jvms, NULL); if (new_jvms == NULL) return; // no change if (C->failing()) return; @@ -429,8 +429,8 @@ virtual bool is_mh_late_inline() const { return true; } - virtual JVMState* generate(JVMState* jvms) { - JVMState* new_jvms = LateInlineCallGenerator::generate(jvms); + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { + JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser); if (_input_not_const) { // inlining won't be possible so no need to enqueue right now. call_node()->set_generator(this); @@ -477,15 +477,17 @@ LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} - virtual JVMState* generate(JVMState* jvms) { + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { Compile *C = Compile::current(); C->print_inlining_skip(this); C->add_string_late_inline(this); - JVMState* new_jvms = DirectCallGenerator::generate(jvms); + JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); return new_jvms; } + + virtual bool is_string_late_inline() const { return true; } }; CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) { @@ -498,13 +500,13 @@ LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} - virtual JVMState* generate(JVMState* jvms) { + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { Compile *C = Compile::current(); C->print_inlining_skip(this); C->add_boxing_late_inline(this); - JVMState* new_jvms = DirectCallGenerator::generate(jvms); + JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); return new_jvms; } }; @@ -540,7 +542,7 @@ virtual bool is_virtual() const { return _is_virtual; } virtual bool is_deferred() const { return true; } - virtual JVMState* generate(JVMState* jvms); + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; @@ -550,12 +552,12 @@ return new WarmCallGenerator(ci, if_cold, if_hot); } -JVMState* WarmCallGenerator::generate(JVMState* jvms) { +JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { Compile* C = Compile::current(); if (C->log() != NULL) { C->log()->elem("warm_call bci='%d'", jvms->bci()); } - jvms = _if_cold->generate(jvms); + jvms = _if_cold->generate(jvms, parent_parser); if (jvms != NULL) { Node* m = jvms->map()->control(); if (m->is_CatchProj()) m = m->in(0); else m = C->top(); @@ -616,7 +618,7 @@ virtual bool is_inline() const { return _if_hit->is_inline(); } virtual bool is_deferred() const { return _if_hit->is_deferred(); } - virtual JVMState* generate(JVMState* jvms); + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; @@ -628,7 +630,7 @@ } -JVMState* PredictedCallGenerator::generate(JVMState* jvms) { +JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); // We need an explicit receiver null_check before checking its type. 
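
PredictedCallGenerator, whose generate() begins above, guards an inlined hot path with a receiver klass check and sends unexpected receivers to _if_missed; the receiver is null-checked first because a null must trap before its klass can be read. Stripped of the Node/GraphKit plumbing, the control flow it emits is roughly the following sketch (Object and the callbacks are illustrative stand-ins, not VM types):

#include <stdexcept>

// Illustrative stand-in for an object header carrying a klass pointer.
struct Object { const void* klass; };

// Shape of the code PredictedCallGenerator emits: null check, then a
// klass guard. On the guarded branch the receiver type is exact, so
// if_hit can be an inlined body; the miss branch dispatches normally
// (or traps and recompiles).
template <typename Hot, typename Miss>
int predicted_call(Object* receiver, const void* expected_klass,
                   Hot if_hit, Miss if_missed) {
  if (receiver == nullptr) {
    // The explicit null check must come first: a null receiver has no
    // klass to compare against.
    throw std::runtime_error("NullPointerException");
  }
  if (receiver->klass == expected_klass) {
    return if_hit(receiver);    // hot path, receiver type known exactly
  }
  return if_missed(receiver);   // cold path: virtual call or uncommon trap
}

The doCall.cpp hunk later in this patch feeds a speculative receiver type into the same mechanism and pins the hit probability to 1.0, since a mispredicted speculation deoptimizes rather than falling through.
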
@@ -656,7 +658,7 @@ { PreserveJVMState pjvms(&kit); kit.set_control(slow_ctl); if (!kit.stopped()) { - slow_jvms = _if_missed->generate(kit.sync_jvms()); + slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser); if (kit.failing()) return NULL; // might happen because of NodeCountInliningCutoff assert(slow_jvms != NULL, "must be"); @@ -677,12 +679,12 @@ kit.replace_in_map(receiver, exact_receiver); // Make the hot call: - JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); + JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser); if (new_jvms == NULL) { // Inline failed, so make a direct call. assert(_if_hit->is_inline(), "must have been a failed inline"); CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); - new_jvms = cg->generate(kit.sync_jvms()); + new_jvms = cg->generate(kit.sync_jvms(), parent_parser); } kit.add_exception_states_from(new_jvms); kit.set_jvms(new_jvms); @@ -773,8 +775,8 @@ ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget(); guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove const int vtable_index = Method::invalid_vtable_index; - CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, true, true); - assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); + CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true); + assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); if (cg != NULL && cg->is_inline()) return cg; } @@ -829,6 +831,7 @@ int vtable_index = Method::invalid_vtable_index; bool call_does_dispatch = false; + ciKlass* speculative_receiver_type = NULL; if (is_virtual_or_interface) { ciInstanceKlass* klass = target->holder(); Node* receiver_node = kit.argument(0); @@ -837,10 +840,13 @@ target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type, is_virtual, call_does_dispatch, vtable_index); // out-parameters + // We lack profiling at this call but type speculation may + // provide us with a type + speculative_receiver_type = receiver_type->speculative_type(); } - CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, true, true); - assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); + CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true); + assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); if (cg != NULL && cg->is_inline()) return cg; } @@ -874,7 +880,7 @@ virtual bool is_inlined() const { return true; } virtual bool is_intrinsic() const { return true; } - virtual JVMState* generate(JVMState* jvms); + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; @@ -884,7 +890,7 @@ } -JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) { +JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); @@ -904,7 +910,7 @@ PreserveJVMState pjvms(&kit); kit.set_control(slow_ctl); if (!kit.stopped()) { - slow_jvms = _cg->generate(kit.sync_jvms()); + slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser); if (kit.failing()) return NULL; // might happen because of NodeCountInliningCutoff assert(slow_jvms != NULL, "must be"); @@ -922,12 +928,12 @@ } // Generate intrinsic code: - 
JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms()); + JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser); if (new_jvms == NULL) { // Intrinsic failed, so use slow code or make a direct call. if (slow_map == NULL) { CallGenerator* cg = CallGenerator::for_direct_call(method()); - new_jvms = cg->generate(kit.sync_jvms()); + new_jvms = cg->generate(kit.sync_jvms(), parent_parser); } else { kit.set_jvms(slow_jvms); return kit.transfer_exceptions_into_jvms(); @@ -997,7 +1003,7 @@ virtual bool is_virtual() const { ShouldNotReachHere(); return false; } virtual bool is_trap() const { return true; } - virtual JVMState* generate(JVMState* jvms); + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; @@ -1009,7 +1015,7 @@ } -JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) { +JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver). int nargs = method()->arg_size(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/callGenerator.hpp --- a/src/share/vm/opto/callGenerator.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/callGenerator.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -31,6 +31,8 @@ #include "opto/type.hpp" #include "runtime/deoptimization.hpp" +class Parse; + //---------------------------CallGenerator------------------------------------- // The subclasses of this class handle generation of ideal nodes for // call sites and method entry points. @@ -72,6 +74,7 @@ virtual bool is_late_inline() const { return false; } // same but for method handle calls virtual bool is_mh_late_inline() const { return false; } + virtual bool is_string_late_inline() const{ return false; } // for method handle calls: have we tried inlinining the call already? virtual bool already_attempted() const { ShouldNotReachHere(); return false; } @@ -108,7 +111,7 @@ // // If the result is NULL, it means that this CallGenerator was unable // to handle the given call, and another CallGenerator should be consulted. - virtual JVMState* generate(JVMState* jvms) = 0; + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0; // How to generate a call site that is inlined: static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/chaitin.hpp --- a/src/share/vm/opto/chaitin.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/chaitin.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -52,6 +52,7 @@ class LRG : public ResourceObj { friend class VMStructs; public: + static const uint AllStack_size = 0xFFFFF; // This mask size is used to tell that the mask of this LRG supports stack positions enum { SPILL_REG=29999 }; // Register number of a spilled LRG double _cost; // 2 for loads/1 for stores times block freq @@ -80,14 +81,21 @@ private: uint _eff_degree; // Effective degree: Sum of neighbors _num_regs public: - int degree() const { assert( _degree_valid, "" ); return _eff_degree; } + int degree() const { assert( _degree_valid , "" ); return _eff_degree; } // Degree starts not valid and any change to the IFG neighbor // set makes it not valid. 
- void set_degree( uint degree ) { _eff_degree = degree; debug_only(_degree_valid = 1;) } + void set_degree( uint degree ) { + _eff_degree = degree; + debug_only(_degree_valid = 1;) + assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers"); + } // Made a change that hammered degree void invalid_degree() { debug_only(_degree_valid=0;) } // Incrementally modify degree. If it was correct, it should remain correct - void inc_degree( uint mod ) { _eff_degree += mod; } + void inc_degree( uint mod ) { + _eff_degree += mod; + assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers"); + } // Compute the degree between 2 live ranges int compute_degree( LRG &l ) const; @@ -95,9 +103,9 @@ RegMask _mask; // Allowed registers for this LRG uint _mask_size; // cache of _mask.Size(); public: - int compute_mask_size() const { return _mask.is_AllStack() ? 65535 : _mask.Size(); } + int compute_mask_size() const { return _mask.is_AllStack() ? AllStack_size : _mask.Size(); } void set_mask_size( int size ) { - assert((size == 65535) || (size == (int)_mask.Size()), ""); + assert((size == (int)AllStack_size) || (size == (int)_mask.Size()), ""); _mask_size = size; #ifdef ASSERT _msize_valid=1; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/classes.hpp --- a/src/share/vm/opto/classes.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/classes.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -30,6 +30,7 @@ macro(AbsI) macro(AddD) macro(AddExactI) +macro(AddExactL) macro(AddF) macro(AddI) macro(AddL) @@ -170,6 +171,8 @@ macro(Mach) macro(MachProj) macro(MathExact) +macro(MathExactI) +macro(MathExactL) macro(MaxI) macro(MemBarAcquire) macro(MemBarAcquireLock) @@ -189,12 +192,16 @@ macro(MoveL2D) macro(MoveD2L) macro(MulD) +macro(MulExactI) +macro(MulExactL) macro(MulF) macro(MulHiL) macro(MulI) macro(MulL) macro(Multi) macro(NegD) +macro(NegExactI) +macro(NegExactL) macro(NegF) macro(NeverBranch) macro(Opaque1) @@ -244,6 +251,8 @@ macro(StrEquals) macro(StrIndexOf) macro(SubD) +macro(SubExactI) +macro(SubExactL) macro(SubF) macro(SubI) macro(SubL) diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/compile.cpp --- a/src/share/vm/opto/compile.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/compile.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -47,6 +47,7 @@ #include "opto/machnode.hpp" #include "opto/macro.hpp" #include "opto/matcher.hpp" +#include "opto/mathexactnode.hpp" #include "opto/memnode.hpp" #include "opto/mulnode.hpp" #include "opto/node.hpp" @@ -654,7 +655,8 @@ _inlining_progress(false), _inlining_incrementally(false), _print_inlining_list(NULL), - _print_inlining_idx(0) { + _print_inlining_idx(0), + _preserve_jvm_state(0) { C = this; CompileWrapper cw(this); @@ -762,7 +764,7 @@ return; } JVMState* jvms = build_start_state(start(), tf()); - if ((jvms = cg->generate(jvms)) == NULL) { + if ((jvms = cg->generate(jvms, NULL)) == NULL) { record_method_not_compilable("method parse failed"); return; } @@ -846,6 +848,7 @@ } #endif + NOT_PRODUCT( verify_barriers(); ) // Now that we know the size of all the monitors we can add a fixed slot // for the original deopt pc. 
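
The classes.hpp additions above register ideal opcodes for the JDK 8 overflow-checked arithmetic intrinsics (Math.addExact and friends), and the Op_FlagsProj case added to compile.cpp below keeps the overflow check they produce pinned to its consuming block. For addition, the check the nodes encode is Math.addExact's sign rule; a minimal sketch of that semantics in ordinary C++ (not VM code):

#include <cstdint>
#include <stdexcept>

// Math.addExact(int, int): overflow occurred iff the result's sign
// differs from the sign of both operands, i.e. ((x ^ r) & (y ^ r)) < 0.
int32_t add_exact(int32_t x, int32_t y) {
  // Wrap-around add via unsigned arithmetic keeps this well-defined.
  int32_t r = static_cast<int32_t>(static_cast<uint32_t>(x) +
                                   static_cast<uint32_t>(y));
  if (((x ^ r) & (y ^ r)) < 0) {
    // Compiled code branches to an uncommon trap here; an exception
    // stands in for that deoptimization path.
    throw std::overflow_error("integer overflow");
  }
  return r;
}

subtractExact and negateExact use analogous sign and MIN_VALUE tests, and multiplyExact(int, int) checks that the 64-bit product round-trips; each maps onto one of the opcodes listed above.
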
@@ -939,7 +942,8 @@
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
- _print_inlining_idx(0) {
+ _print_inlining_idx(0),
+ _preserve_jvm_state(0) {
C = this;
#ifndef PRODUCT
@@ -1357,7 +1361,7 @@
// During the 2nd round of IterGVN, NotNull castings are removed.
// Make sure the Bottom and NotNull variants alias the same.
// Also, make sure exact and non-exact variants alias the same.
- if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) {
+ if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
}
}
@@ -1382,6 +1386,9 @@
// Also, make sure exact and non-exact variants alias the same.
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
}
+ if (to->speculative() != NULL) {
+ tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
+ }
// Canonicalize the holder of this field
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
// First handle header references such as a LoadKlassNode, even if the
@@ -2010,6 +2017,12 @@
if (failing()) return;
}
+ // Remove the speculative part of types and clean up the graph from
+ // the extra CastPP nodes whose only purpose is to carry them. Do
+ // that early so that optimizations are not disrupted by the extra
+ // CastPP nodes.
+ remove_speculative_types(igvn);
+
// No more new expensive nodes will be added to the list from here
// so keep only the actual candidates for optimizations.
cleanup_expensive_nodes(igvn);
@@ -2986,6 +2999,42 @@
n->set_req(MemBarNode::Precedent, top());
}
break;
+ // Must set a control edge on all nodes that produce a FlagsProj
+ // so they can't escape the block that consumes the flags.
+ // Must also set the non-throwing branch as the control
+ // for all nodes that depend on the result, unless the node
+ // already has a control that isn't the control of the
+ // flags producer.
+ case Op_FlagsProj:
+ {
+ MathExactNode* math = (MathExactNode*) n->in(0);
+ Node* ctrl = math->control_node();
+ Node* non_throwing = math->non_throwing_branch();
+ math->set_req(0, ctrl);
+
+ Node* result = math->result_node();
+ if (result != NULL) {
+ for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
+ Node* out = result->fast_out(j);
+ // Phi nodes shouldn't be moved. They would only match below if they
+ // had the same control as the MathExactNode. The only time that
+ // would happen is if the Phi is also an input to the MathExact node.
+ //
+ // Cmp nodes shouldn't have control set at all.
+ if (out->is_Phi() || + out->is_Cmp()) { + continue; + } + + if (out->in(0) == NULL) { + out->set_req(0, non_throwing); + } else if (out->in(0) == ctrl) { + out->set_req(0, non_throwing); + } + } + } + } + break; default: assert( !n->is_Call(), "" ); assert( !n->is_Mem(), "" ); @@ -3325,6 +3374,72 @@ } } } + +// Verify GC barriers consistency +// Currently supported: +// - G1 pre-barriers (see GraphKit::g1_write_barrier_pre()) +void Compile::verify_barriers() { + if (UseG1GC) { + // Verify G1 pre-barriers + const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()); + + ResourceArea *area = Thread::current()->resource_area(); + Unique_Node_List visited(area); + Node_List worklist(area); + // We're going to walk control flow backwards starting from the Root + worklist.push(_root); + while (worklist.size() > 0) { + Node* x = worklist.pop(); + if (x == NULL || x == top()) continue; + if (visited.member(x)) { + continue; + } else { + visited.push(x); + } + + if (x->is_Region()) { + for (uint i = 1; i < x->req(); i++) { + worklist.push(x->in(i)); + } + } else { + worklist.push(x->in(0)); + // We are looking for the pattern: + // /->ThreadLocal + // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset) + // \->ConI(0) + // We want to verify that the If and the LoadB have the same control + // See GraphKit::g1_write_barrier_pre() + if (x->is_If()) { + IfNode *iff = x->as_If(); + if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) { + CmpNode *cmp = iff->in(1)->in(1)->as_Cmp(); + if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0 + && cmp->in(1)->is_Load()) { + LoadNode* load = cmp->in(1)->as_Load(); + if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal + && load->in(2)->in(3)->is_Con() + && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) { + + Node* if_ctrl = iff->in(0); + Node* load_ctrl = load->in(0); + + if (if_ctrl != load_ctrl) { + // Skip possible CProj->NeverBranch in infinite loops + if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj) + && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) { + if_ctrl = if_ctrl->in(0)->in(0); + } + } + assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match"); + } + } + } + } + } + } + } +} + #endif // The Compile object keeps track of failure reasons separately from the ciEnv. @@ -3765,6 +3880,45 @@ } } +/** + * Remove the speculative part of types and clean up the graph + */ +void Compile::remove_speculative_types(PhaseIterGVN &igvn) { + if (UseTypeSpeculation) { + Unique_Node_List worklist; + worklist.push(root()); + int modified = 0; + // Go over all type nodes that carry a speculative type, drop the + // speculative part of the type and enqueue the node for an igvn + // which may optimize it out. 
+ for (uint next = 0; next < worklist.size(); ++next) { + Node *n = worklist.at(next); + if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL && + n->as_Type()->type()->is_oopptr()->speculative() != NULL) { + TypeNode* tn = n->as_Type(); + const TypeOopPtr* t = tn->type()->is_oopptr(); + bool in_hash = igvn.hash_delete(n); + assert(in_hash, "node should be in igvn hash table"); + tn->set_type(t->remove_speculative()); + igvn.hash_insert(n); + igvn._worklist.push(n); // give it a chance to go away + modified++; + } + uint max = n->len(); + for( uint i = 0; i < max; ++i ) { + Node *m = n->in(i); + if (not_a_node(m)) continue; + worklist.push(m); + } + } + // Drop the speculative part of all types in the igvn's type table + igvn.remove_speculative_types(); + if (modified > 0) { + igvn.optimize(); + } + } +} + // Auxiliary method to support randomized stressing/fuzzing. // // This method can be called the arbitrary number of times, with current count diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/compile.hpp --- a/src/share/vm/opto/compile.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/compile.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -424,6 +424,11 @@ static int cmp_expensive_nodes(Node** n1, Node** n2); // Expensive nodes list already sorted? bool expensive_nodes_sorted() const; + // Remove the speculative part of types and clean up the graph + void remove_speculative_types(PhaseIterGVN &igvn); + + // Are we within a PreserveJVMState block? + int _preserve_jvm_state; public: @@ -820,7 +825,9 @@ // Decide how to build a call. // The profile factor is a discount to apply to this site's interp. profile. - CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false); + CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, + JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL, + bool allow_intrinsics = true, bool delayed_forbidden = false); bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) { return should_delay_string_inlining(call_method, jvms) || should_delay_boxing_inlining(call_method, jvms); @@ -1141,6 +1148,9 @@ // graph is strongly connected from root in both directions. void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN; + // Verify GC barrier patterns + void verify_barriers() PRODUCT_RETURN; + // End-of-run dumps. 
static void print_statistics() PRODUCT_RETURN; @@ -1156,6 +1166,21 @@ // Auxiliary method for randomized fuzzing/stressing static bool randomized_select(int count); + + // enter a PreserveJVMState block + void inc_preserve_jvm_state() { + _preserve_jvm_state++; + } + + // exit a PreserveJVMState block + void dec_preserve_jvm_state() { + _preserve_jvm_state--; + assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative"); + } + + bool has_preserve_jvm_state() const { + return _preserve_jvm_state > 0; + } }; #endif // SHARE_VM_OPTO_COMPILE_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/doCall.cpp --- a/src/share/vm/opto/doCall.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/doCall.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -63,7 +63,8 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, - float prof_factor, bool allow_intrinsics, bool delayed_forbidden) { + float prof_factor, ciKlass* speculative_receiver_type, + bool allow_intrinsics, bool delayed_forbidden) { ciMethod* caller = jvms->method(); int bci = jvms->bci(); Bytecodes::Code bytecode = caller->java_code_at_bci(bci); @@ -117,7 +118,7 @@ if (cg->is_predicted()) { // Code without intrinsic but, hopefully, inlined. CallGenerator* inline_cg = this->call_generator(callee, - vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, false); + vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false); if (inline_cg != NULL) { cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg); } @@ -212,8 +213,24 @@ // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count. bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent); ciMethod* receiver_method = NULL; - if (have_major_receiver || profile.morphism() == 1 || - (profile.morphism() == 2 && UseBimorphicInlining)) { + + int morphism = profile.morphism(); + if (speculative_receiver_type != NULL) { + // We have a speculative type, we should be able to resolve + // the call. We do that before looking at the profiling at + // this invoke because it may lead to bimorphic inlining which + // a speculative type should help us avoid. + receiver_method = callee->resolve_invoke(jvms->method()->holder(), + speculative_receiver_type); + if (receiver_method == NULL) { + speculative_receiver_type = NULL; + } else { + morphism = 1; + } + } + if (receiver_method == NULL && + (have_major_receiver || morphism == 1 || + (morphism == 2 && UseBimorphicInlining))) { // receiver_method = profile.method(); // Profiles do not suggest methods now. Look it up in the major receiver. receiver_method = callee->resolve_invoke(jvms->method()->holder(), @@ -227,7 +244,7 @@ // Look up second receiver. CallGenerator* next_hit_cg = NULL; ciMethod* next_receiver_method = NULL; - if (profile.morphism() == 2 && UseBimorphicInlining) { + if (morphism == 2 && UseBimorphicInlining) { next_receiver_method = callee->resolve_invoke(jvms->method()->holder(), profile.receiver(1)); if (next_receiver_method != NULL) { @@ -242,11 +259,10 @@ } } CallGenerator* miss_cg; - Deoptimization::DeoptReason reason = (profile.morphism() == 2) ? + Deoptimization::DeoptReason reason = morphism == 2 ? 
Deoptimization::Reason_bimorphic : Deoptimization::Reason_class_check; - if (( profile.morphism() == 1 || - (profile.morphism() == 2 && next_hit_cg != NULL) ) && + if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) && !too_many_traps(jvms->method(), jvms->bci(), reason) ) { // Generate uncommon trap for class check failure path @@ -260,6 +276,7 @@ } if (miss_cg != NULL) { if (next_hit_cg != NULL) { + assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation"); trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)); // We don't need to record dependency on a receiver here and below. // Whenever we inline, the dependency is added by Parse::Parse(). @@ -267,7 +284,9 @@ } if (miss_cg != NULL) { trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count); - CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0)); + ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0); + float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0); + CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob); if (cg != NULL) return cg; } } @@ -446,13 +465,16 @@ int vtable_index = Method::invalid_vtable_index; bool call_does_dispatch = false; + // Speculative type of the receiver if any + ciKlass* speculative_receiver_type = NULL; if (is_virtual_or_interface) { - Node* receiver_node = stack(sp() - nargs); + Node* receiver_node = stack(sp() - nargs); const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); // call_does_dispatch and vtable_index are out-parameters. They might be changed. callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type, is_virtual, call_does_dispatch, vtable_index); // out-parameters + speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL; } // Note: It's OK to try to inline a virtual call. @@ -468,7 +490,7 @@ // Decide call tactic. // This call checks with CHA, the interpreter profile, intrinsics table, etc. // It decides whether inlining is desirable or not. - CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor()); + CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type); // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead. orig_callee = callee = NULL; @@ -477,6 +499,10 @@ // Round double arguments before call round_double_arguments(cg->method()); + // Feed profiling data for arguments to the type system so it can + // propagate it as speculative types + record_profiled_arguments_for_speculation(cg->method(), bc()); + #ifndef PRODUCT // bump global counters for calls count_compiled_calls(/*at_method_entry*/ false, cg->is_inline()); @@ -491,11 +517,18 @@ // save across call, for a subsequent cast_not_null. Node* receiver = has_receiver ? 
argument(0) : NULL; + // The extra CheckCastPP for speculative types mess with PhaseStringOpts + if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) { + // Feed profiling data for a single receiver to the type system so + // it can propagate it as a speculative type + receiver = record_profiled_receiver_for_speculation(receiver); + } + // Bump method data counters (We profile *before* the call is made // because exceptions don't return to the call site.) profile_call(receiver); - JVMState* new_jvms = cg->generate(jvms); + JVMState* new_jvms = cg->generate(jvms, this); if (new_jvms == NULL) { // When inlining attempt fails (e.g., too many arguments), // it may contaminate the current compile state, making it @@ -508,8 +541,8 @@ // the call site, perhaps because it did not match a pattern the // intrinsic was expecting to optimize. Should always be possible to // get a normal java call that may inline in that case - cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false); - if ((new_jvms = cg->generate(jvms)) == NULL) { + cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false); + if ((new_jvms = cg->generate(jvms, this)) == NULL) { guarantee(failing(), "call failed to generate: calls should work"); return; } @@ -607,6 +640,16 @@ null_assert(peek()); set_bci(iter().cur_bci()); // put it back } + BasicType ct = ctype->basic_type(); + if (ct == T_OBJECT || ct == T_ARRAY) { + ciKlass* better_type = method()->return_profiled_type(bci()); + if (UseTypeSpeculation && better_type != NULL) { + // If profiling reports a single type for the return value, + // feed it to the type system so it can propagate it as a + // speculative type + record_profile_for_speculation(stack(sp()-1), better_type); + } + } } // Restart record of parsing work after possible inlining of call diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/escape.cpp --- a/src/share/vm/opto/escape.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/escape.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -780,6 +780,7 @@ } } else { // Allocate instance if (cik->is_subclass_of(_compile->env()->Thread_klass()) || + cik->is_subclass_of(_compile->env()->Reference_klass()) || !cik->is_instance_klass() || // StressReflectiveCode cik->as_instance_klass()->has_finalizer()) { es = PointsToNode::GlobalEscape; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/graphKit.cpp --- a/src/share/vm/opto/graphKit.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/graphKit.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -639,6 +639,7 @@ _map = kit->map(); // preserve the map _sp = kit->sp(); kit->set_map(clone_map ? kit->clone_map() : NULL); + Compile::current()->inc_preserve_jvm_state(); #ifdef ASSERT _bci = kit->bci(); Parse* parser = kit->is_Parse(); @@ -656,6 +657,7 @@ #endif kit->set_map(_map); kit->set_sp(_sp); + Compile::current()->dec_preserve_jvm_state(); } @@ -1373,17 +1375,70 @@ //--------------------------replace_in_map------------------------------------- void GraphKit::replace_in_map(Node* old, Node* neww) { - this->map()->replace_edge(old, neww); + if (old == neww) { + return; + } + + map()->replace_edge(old, neww); // Note: This operation potentially replaces any edge // on the map. This includes locals, stack, and monitors // of the current (innermost) JVM state. - // We can consider replacing in caller maps. 
- // The idea would be that an inlined function's null checks - // can be shared with the entire inlining tree. - // The expense of doing this is that the PreserveJVMState class - // would have to preserve caller states too, with a deep copy. + if (!ReplaceInParentMaps) { + return; + } + + // PreserveJVMState doesn't do a deep copy so we can't modify + // parents + if (Compile::current()->has_preserve_jvm_state()) { + return; + } + + Parse* parser = is_Parse(); + bool progress = true; + Node* ctrl = map()->in(0); + // Follow the chain of parsers and see whether the update can be + // done in the map of callers. We can do the replace for a caller if + // the current control post dominates the control of a caller. + while (parser != NULL && parser->caller() != NULL && progress) { + progress = false; + Node* parent_map = parser->caller()->map(); + assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch"); + + Node* parent_ctrl = parent_map->in(0); + + while (parent_ctrl->is_Region()) { + Node* n = parent_ctrl->as_Region()->is_copy(); + if (n == NULL) { + break; + } + parent_ctrl = n; + } + + for (;;) { + if (ctrl == parent_ctrl) { + // update the map of the exits which is the one that will be + // used when compilation resume after inlining + parser->exits().map()->replace_edge(old, neww); + progress = true; + break; + } + if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { + ctrl = ctrl->in(0)->in(0); + } else if (ctrl->is_Region()) { + Node* n = ctrl->as_Region()->is_copy(); + if (n == NULL) { + break; + } + ctrl = n; + } else { + break; + } + } + + parser = parser->parent_parser(); + } } @@ -2043,6 +2098,104 @@ } } +/** + * Record profiling data exact_kls for Node n with the type system so + * that it can propagate it (speculation) + * + * @param n node that the type applies to + * @param exact_kls type from profiling + * + * @return node with improved type + */ +Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) { + const TypeOopPtr* current_type = _gvn.type(n)->isa_oopptr(); + assert(UseTypeSpeculation, "type speculation must be on"); + if (exact_kls != NULL && + // nothing to improve if type is already exact + (current_type == NULL || + (!current_type->klass_is_exact() && + (current_type->speculative() == NULL || + !current_type->speculative()->klass_is_exact())))) { + const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls); + const TypeOopPtr* xtype = tklass->as_instance_type(); + assert(xtype->klass_is_exact(), "Should be exact"); + + // Build a type with a speculative type (what we think we know + // about the type but will need a guard when we use it) + const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, xtype); + // We're changing the type, we need a new cast node to carry the + // new type. The new type depends on the control: what profiling + // tells us is only valid from here as far as we can tell. 
+ Node* cast = new(C) CastPPNode(n, spec_type); + cast->init_req(0, control()); + cast = _gvn.transform(cast); + replace_in_map(n, cast); + n = cast; + } + return n; +} + +/** + * Record profiling data from receiver profiling at an invoke with the + * type system so that it can propagate it (speculation) + * + * @param n receiver node + * + * @return node with improved type + */ +Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) { + if (!UseTypeSpeculation) { + return n; + } + ciKlass* exact_kls = profile_has_unique_klass(); + return record_profile_for_speculation(n, exact_kls); +} + +/** + * Record profiling data from argument profiling at an invoke with the + * type system so that it can propagate it (speculation) + * + * @param dest_method target method for the call + * @param bc what invoke bytecode is this? + */ +void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) { + if (!UseTypeSpeculation) { + return; + } + const TypeFunc* tf = TypeFunc::make(dest_method); + int nargs = tf->_domain->_cnt - TypeFunc::Parms; + int skip = Bytecodes::has_receiver(bc) ? 1 : 0; + for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) { + const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms); + if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) { + ciKlass* better_type = method()->argument_profiled_type(bci(), i); + if (better_type != NULL) { + record_profile_for_speculation(argument(j), better_type); + } + i++; + } + } +} + +/** + * Record profiling data from parameter profiling at an invoke with + * the type system so that it can propagate it (speculation) + */ +void GraphKit::record_profiled_parameters_for_speculation() { + if (!UseTypeSpeculation) { + return; + } + for (int i = 0, j = 0; i < method()->arg_size() ; i++) { + if (_gvn.type(local(i))->isa_oopptr()) { + ciKlass* better_type = method()->parameter_profiled_type(j); + if (better_type != NULL) { + record_profile_for_speculation(local(i), better_type); + } + j++; + } + } +} + void GraphKit::round_double_result(ciMethod* dest_method) { // A non-strict method may return a double value which has an extended // exponent, but this must not be visible in a caller which is 'strict' @@ -2122,7 +2275,7 @@ // Null check oop. Set null-path control into Region in slot 3. // Make a cast-not-nullness use the other not-null control. Return cast. Node* GraphKit::null_check_oop(Node* value, Node* *null_control, - bool never_see_null) { + bool never_see_null, bool safe_for_replace) { // Initial NULL check taken path (*null_control) = top(); Node* cast = null_check_common(value, T_OBJECT, false, null_control); @@ -2140,6 +2293,9 @@ Deoptimization::Action_make_not_entrant); (*null_control) = top(); // NULL path is dead } + if ((*null_control) == top() && safe_for_replace) { + replace_in_map(value, cast); + } // Cast away null-ness on the result return cast; @@ -2577,10 +2733,10 @@ // If the profile has seen exactly one type, narrow to exactly that type. // Subsequent type checks will always fold up. Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj, - ciProfileData* data, - ciKlass* require_klass) { + ciKlass* require_klass, + ciKlass* spec_klass, + bool safe_for_replace) { if (!UseTypeProfile || !TypeProfileCasts) return NULL; - if (data == NULL) return NULL; // Make sure we haven't already deoptimized from this tactic. 
   if (too_many_traps(Deoptimization::Reason_class_check))
@@ -2588,15 +2744,15 @@
   // (No, this isn't a call, but it's enough like a virtual call
   // to use the same ciMethod accessor to get the profile info...)
-  ciCallProfile profile = method()->call_profile_at_bci(bci());
-  if (profile.count() >= 0 &&         // no cast failures here
-      profile.has_receiver(0) &&
-      profile.morphism() == 1) {
-    ciKlass* exact_kls = profile.receiver(0);
+  // If we have a speculative type, use it instead of profiling (which
+  // may not help us)
+  ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
+  if (exact_kls != NULL) { // no cast failures here
     if (require_klass == NULL ||
         static_subtype_check(require_klass, exact_kls) == SSC_always_true) {
-      // If we narrow the type to match what the type profile sees,
-      // we can then remove the rest of the cast.
+      // If we narrow the type to match what the type profile sees or
+      // the speculative type, we can then remove the rest of the
+      // cast.
       // This is a win, even if the exact_kls is very specific,
      // because downstream operations, such as method calls,
      // will often benefit from the sharper type.
@@ -2608,7 +2764,9 @@
         uncommon_trap(Deoptimization::Reason_class_check,
                       Deoptimization::Action_maybe_recompile);
       }
-      replace_in_map(not_null_obj, exact_obj);
+      if (safe_for_replace) {
+        replace_in_map(not_null_obj, exact_obj);
+      }
       return exact_obj;
     }
     // assert(ssc == SSC_always_true)... except maybe the profile lied to us.
@@ -2617,11 +2775,59 @@
   return NULL;
 }

+/**
+ * Cast obj to type and emit a guard unless we have already had too
+ * many traps here
+ *
+ * @param obj       node being cast
+ * @param type      type to cast the node to
+ * @param not_null  true if we know node cannot be null
+ */
+Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
+                                        ciKlass* type,
+                                        bool not_null) {
+  // type == NULL if profiling tells us this object is always null
+  if (type != NULL) {
+    if (!too_many_traps(Deoptimization::Reason_null_check) &&
+        !too_many_traps(Deoptimization::Reason_class_check)) {
+      Node* not_null_obj = NULL;
+      // not_null is true if we know the object is not null and
+      // there's no need for a null check
+      if (!not_null) {
+        Node* null_ctl = top();
+        not_null_obj = null_check_oop(obj, &null_ctl, true, true);
+        assert(null_ctl->is_top(), "no null control here");
+      } else {
+        not_null_obj = obj;
+      }
+
+      Node* exact_obj = not_null_obj;
+      ciKlass* exact_kls = type;
+      Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
+                                           &exact_obj);
+      {
+        PreserveJVMState pjvms(this);
+        set_control(slow_ctl);
+        uncommon_trap(Deoptimization::Reason_class_check,
+                      Deoptimization::Action_maybe_recompile);
+      }
+      replace_in_map(not_null_obj, exact_obj);
+      obj = exact_obj;
+    }
+  } else {
+    if (!too_many_traps(Deoptimization::Reason_null_assert)) {
+      Node* exact_obj = null_assert(obj);
+      replace_in_map(obj, exact_obj);
+      obj = exact_obj;
+    }
+  }
+  return obj;
+}
 //-------------------------------gen_instanceof--------------------------------
 // Generate an instance-of idiom.  Used by both the instance-of bytecode
 // and the reflective instance-of call.
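+// safe_for_replace says whether the value may be replaced in the parsing
+// map: the null check and the profile-driven cast below only call
+// replace_in_map() when it is true.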
-Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
+Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
   kill_dead_locals();           // Benefit all the uncommon traps
   assert( !stopped(), "dead parse path should be checked in callers" );
   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
@@ -2642,7 +2848,7 @@

   // Null check; get casted pointer; set region slot 3
   Node* null_ctl = top();
-  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
+  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);

   // If not_null_obj is dead, only null-path is taken
   if (stopped()) {              // Doing instance-of on a NULL?
@@ -2659,14 +2865,37 @@
     phi   ->del_req(_null_path);
   }

-  if (ProfileDynamicTypes && data != NULL) {
-    Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, NULL);
-    if (stopped()) {            // Profile disagrees with this path.
-      set_control(null_ctl);    // Null is the only remaining possibility.
-      return intcon(0);
+  // Do we know the outcome of the type check statically?
+  bool known_statically = false;
+  if (_gvn.type(superklass)->singleton()) {
+    ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
+    ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
+    if (subk != NULL && subk->is_loaded()) {
+      int static_res = static_subtype_check(superk, subk);
+      known_statically = (static_res == SSC_always_true || static_res == SSC_always_false);
     }
-    if (cast_obj != NULL)
-      not_null_obj = cast_obj;
+  }
+
+  if (known_statically && UseTypeSpeculation) {
+    // If we know the outcome of the type check statically, we don't
+    // use the profiling data at this bytecode. Don't lose it; feed it
+    // to the type system as a speculative type.
+    not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
+  } else {
+    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+    // We may not have profiling here or it may not help us. If we
+    // have a speculative type, use it to perform an exact cast.
+    ciKlass* spec_obj_type = obj_type->speculative_type();
+    if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
+      Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
+      if (stopped()) {            // Profile disagrees with this path.
+        set_control(null_ctl);    // Null is the only remaining possibility.
+        return intcon(0);
+      }
+      if (cast_obj != NULL) {
+        not_null_obj = cast_obj;
+      }
+    }
   }

   // Load the object's klass
@@ -2713,7 +2942,10 @@
     if (objtp != NULL && objtp->klass() != NULL) {
       switch (static_subtype_check(tk->klass(), objtp->klass())) {
       case SSC_always_true:
-        return obj;
+        // If we know the type check always succeeds, we don't use
+        // the profiling data at this bytecode. Don't lose it; feed it
+        // to the type system as a speculative type.
+        return record_profiled_receiver_for_speculation(obj);
       case SSC_always_false:
         // It needs a null check because a null will *pass* the cast check.
         // A non-null value will always produce an exception.
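The instance-of logic above boils down to a three-way policy: fold the check when its outcome is statically known (and feed the profile to the type system as a speculative type), otherwise guard on the speculative type if one was recorded, otherwise fall back to the unique profiled receiver. A minimal standalone model of that ordering (illustrative C++ only, not HotSpot API; flags such as UseTypeSpeculation and ProfileDynamicTypes are ignored):

    #include <cstdio>

    enum Strategy {
      FOLD_STATICALLY,           // outcome of the check is known at compile time
      CAST_TO_SPECULATIVE_TYPE,  // guard on the type recorded by speculation
      CAST_TO_PROFILED_TYPE,     // guard on the unique profiled receiver
      FULL_RUNTIME_CHECK         // keep the generic subtype check
    };

    // Mirrors the order of the tests above: static knowledge wins, then a
    // speculative type, then the interpreter profile.
    Strategy choose(bool known_statically, bool has_speculative_type, bool has_profile) {
      if (known_statically)     return FOLD_STATICALLY;
      if (has_speculative_type) return CAST_TO_SPECULATIVE_TYPE;
      if (has_profile)          return CAST_TO_PROFILED_TYPE;
      return FULL_RUNTIME_CHECK;
    }

    int main() {
      printf("%d\n", choose(false, true, true));  // prints 1: speculation beats the profile
      return 0;
    }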
@@ -2723,11 +2955,13 @@ } ciProfileData* data = NULL; + bool safe_for_replace = false; if (failure_control == NULL) { // use MDO in regular case only assert(java_bc() == Bytecodes::_aastore || java_bc() == Bytecodes::_checkcast, "interpreter profiles type checks only for these BCs"); data = method()->method_data()->bci_to_data(bci()); + safe_for_replace = true; } // Make the merge point @@ -2742,7 +2976,7 @@ // Null check; get casted pointer; set region slot 3 Node* null_ctl = top(); - Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null); + Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace); // If not_null_obj is dead, only null-path is taken if (stopped()) { // Doing instance-of on a NULL? @@ -2760,12 +2994,17 @@ } Node* cast_obj = NULL; - if (data != NULL && - // Counter has never been decremented (due to cast failure). - // ...This is a reasonable thing to expect. It is true of - // all casts inserted by javac to implement generic types. - data->as_CounterData()->count() >= 0) { - cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass()); + const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); + // We may not have profiling here or it may not help us. If we have + // a speculative type use it to perform an exact cast. + ciKlass* spec_obj_type = obj_type->speculative_type(); + if (spec_obj_type != NULL || + (data != NULL && + // Counter has never been decremented (due to cast failure). + // ...This is a reasonable thing to expect. It is true of + // all casts inserted by javac to implement generic types. + data->as_CounterData()->count() >= 0)) { + cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace); if (cast_obj != NULL) { if (failure_control != NULL) // failure is now impossible (*failure_control) = top(); @@ -3608,7 +3847,7 @@ Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw); // if (!marking) - __ if_then(marking, BoolTest::ne, zero); { + __ if_then(marking, BoolTest::ne, zero, unlikely); { BasicType index_bt = TypeX_X->basic_type(); assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size."); Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw); @@ -3713,7 +3952,8 @@ Node* no_base = __ top(); float likely = PROB_LIKELY(0.999); float unlikely = PROB_UNLIKELY(0.999); - Node* zero = __ ConI(0); + Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val()); + Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val()); Node* zeroX = __ ConX(0); // Get the alias_index for raw card-mark memory @@ -3769,8 +4009,16 @@ // load the original value of the card Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw); - __ if_then(card_val, BoolTest::ne, zero); { - g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf); + __ if_then(card_val, BoolTest::ne, young_card); { + sync_kit(ideal); + // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier. 
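+      // (The barrier orders the preceding oop store before the card
+      // re-load below; only a card that is still not dirty after the
+      // fence needs to be dirtied and enqueued.)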
+ insert_mem_bar(Op_MemBarVolatile, oop_store); + __ sync_kit(this); + + Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw); + __ if_then(card_val_reload, BoolTest::ne, dirty_card); { + g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf); + } __ end_if(); } __ end_if(); } __ end_if(); } __ end_if(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/graphKit.hpp --- a/src/share/vm/opto/graphKit.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/graphKit.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -378,16 +378,41 @@ // Return a cast-not-null node which depends on the not-null control. // If never_see_null, use an uncommon trap (*null_control sees a top). // The cast is not valid along the null path; keep a copy of the original. + // If safe_for_replace, then we can replace the value with the cast + // in the parsing map (the cast is guaranteed to dominate the map) Node* null_check_oop(Node* value, Node* *null_control, - bool never_see_null = false); + bool never_see_null = false, bool safe_for_replace = false); // Check the null_seen bit. bool seems_never_null(Node* obj, ciProfileData* data); + // Check for unique class for receiver at call + ciKlass* profile_has_unique_klass() { + ciCallProfile profile = method()->call_profile_at_bci(bci()); + if (profile.count() >= 0 && // no cast failures here + profile.has_receiver(0) && + profile.morphism() == 1) { + return profile.receiver(0); + } + return NULL; + } + + // record type from profiling with the type system + Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls); + Node* record_profiled_receiver_for_speculation(Node* n); + void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc); + void record_profiled_parameters_for_speculation(); + // Use the type profile to narrow an object type. Node* maybe_cast_profiled_receiver(Node* not_null_obj, - ciProfileData* data, - ciKlass* require_klass); + ciKlass* require_klass, + ciKlass* spec, + bool safe_for_replace); + + // Cast obj to type and emit guard unless we had too many traps here already + Node* maybe_cast_profiled_obj(Node* obj, + ciKlass* type, + bool not_null = false); // Cast obj to not-null on this path Node* cast_not_null(Node* obj, bool do_replace_in_map = true); @@ -773,7 +798,7 @@ // Generate an instance-of idiom. Used by both the instance-of bytecode // and the reflective instance-of call. - Node* gen_instanceof( Node *subobj, Node* superkls ); + Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false); // Generate a check-cast idiom. 
Used by both the check-cast bytecode // and the array-store bytecode diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/idealGraphPrinter.cpp --- a/src/share/vm/opto/idealGraphPrinter.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/idealGraphPrinter.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -616,7 +616,11 @@ buffer[0] = 0; _chaitin->dump_register(node, buffer); print_prop("reg", buffer); - print_prop("lrg", _chaitin->_lrg_map.live_range_id(node)); + uint lrg_id = 0; + if (node->_idx < _chaitin->_lrg_map.size()) { + lrg_id = _chaitin->_lrg_map.live_range_id(node); + } + print_prop("lrg", lrg_id); } node->_in_dump_cnt--; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/ifg.cpp --- a/src/share/vm/opto/ifg.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/ifg.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -677,7 +677,7 @@ } else { // Common case: size 1 bound removal if( lrg.mask().Member(r_reg) ) { lrg.Remove(r_reg); - lrg.set_mask_size(lrg.mask().is_AllStack() ? 65535:old_size-1); + lrg.set_mask_size(lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1); } } // If 'l' goes completely dry, it must spill. diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/ifnode.cpp --- a/src/share/vm/opto/ifnode.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/ifnode.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -689,6 +689,7 @@ ctrl->in(0)->in(1)->is_Bool() && ctrl->in(0)->in(1)->in(1)->Opcode() == Op_CmpI && ctrl->in(0)->in(1)->in(1)->in(2)->is_Con() && + ctrl->in(0)->in(1)->in(1)->in(2) != phase->C->top() && ctrl->in(0)->in(1)->in(1)->in(1) == n) { IfNode* dom_iff = ctrl->in(0)->as_If(); Node* otherproj = dom_iff->proj_out(!ctrl->as_Proj()->_con); @@ -1018,7 +1019,7 @@ // be skipped. For example, range check predicate has two checks // for lower and upper bounds. ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj(); - if (PhaseIdealLoop::is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate)) + if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) prev_dom = idom; // Now walk the current IfNode's projections. 
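The idealGraphPrinter hunk above guards the live-range query so that a node whose index lies beyond _lrg_map prints live range 0 instead of reading out of bounds. The same pattern in standalone form (container and names simplified, not the HotSpot types):

    #include <cstdio>
    #include <vector>

    // Return the mapped id, or 0 for an index the map never grew to cover,
    // mirroring the lrg_id fallback above.
    unsigned live_range_id(const std::vector<unsigned>& map, std::size_t idx) {
      return idx < map.size() ? map[idx] : 0;
    }

    int main() {
      std::vector<unsigned> map = {3, 5, 7};
      printf("%u %u\n", live_range_id(map, 1), live_range_id(map, 9));  // prints "5 0"
      return 0;
    }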
diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/library_call.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -63,7 +63,7 @@ virtual bool is_virtual() const { return _is_virtual; } virtual bool is_predicted() const { return _is_predicted; } virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; } - virtual JVMState* generate(JVMState* jvms); + virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); virtual Node* generate_predicate(JVMState* jvms); vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; } }; @@ -203,8 +203,15 @@ bool inline_math_native(vmIntrinsics::ID id); bool inline_trig(vmIntrinsics::ID id); bool inline_math(vmIntrinsics::ID id); - bool inline_math_mathExact(Node* math); - bool inline_math_addExact(); + void inline_math_mathExact(Node* math); + bool inline_math_addExactI(bool is_increment); + bool inline_math_addExactL(bool is_increment); + bool inline_math_multiplyExactI(); + bool inline_math_multiplyExactL(); + bool inline_math_negateExactI(); + bool inline_math_negateExactL(); + bool inline_math_subtractExactI(bool is_decrement); + bool inline_math_subtractExactL(bool is_decrement); bool inline_exp(); bool inline_pow(); void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName); @@ -507,13 +514,33 @@ if (!UseCRC32Intrinsics) return NULL; break; - case vmIntrinsics::_addExact: - if (!Matcher::match_rule_supported(Op_AddExactI)) { - return NULL; - } - if (!UseMathExactIntrinsics) { - return NULL; - } + case vmIntrinsics::_incrementExactI: + case vmIntrinsics::_addExactI: + if (!Matcher::match_rule_supported(Op_AddExactI) || !UseMathExactIntrinsics) return NULL; + break; + case vmIntrinsics::_incrementExactL: + case vmIntrinsics::_addExactL: + if (!Matcher::match_rule_supported(Op_AddExactL) || !UseMathExactIntrinsics) return NULL; + break; + case vmIntrinsics::_decrementExactI: + case vmIntrinsics::_subtractExactI: + if (!Matcher::match_rule_supported(Op_SubExactI) || !UseMathExactIntrinsics) return NULL; + break; + case vmIntrinsics::_decrementExactL: + case vmIntrinsics::_subtractExactL: + if (!Matcher::match_rule_supported(Op_SubExactL) || !UseMathExactIntrinsics) return NULL; + break; + case vmIntrinsics::_negateExactI: + if (!Matcher::match_rule_supported(Op_NegExactI) || !UseMathExactIntrinsics) return NULL; + break; + case vmIntrinsics::_negateExactL: + if (!Matcher::match_rule_supported(Op_NegExactL) || !UseMathExactIntrinsics) return NULL; + break; + case vmIntrinsics::_multiplyExactI: + if (!Matcher::match_rule_supported(Op_MulExactI) || !UseMathExactIntrinsics) return NULL; + break; + case vmIntrinsics::_multiplyExactL: + if (!Matcher::match_rule_supported(Op_MulExactL) || !UseMathExactIntrinsics) return NULL; break; default: @@ -556,7 +583,7 @@ // Nothing to do here. 
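Each *Exact intrinsic accepted above becomes a MathExact node whose Ideal() transform (in mathexactnode.cpp later in this change) constant-folds using the sign-based overflow tests credited to Hacker's Delight. A standalone sketch of the two tests (plain C++, independent of HotSpot; the unsigned casts merely make the wrap-around well defined):

    #include <cstdint>
    #include <cstdio>

    // Overflow iff both operands have the opposite sign of the result
    // (the test AddExact*Node::Ideal applies to constant inputs).
    bool add_overflows(int64_t a, int64_t b) {
      int64_t r = (int64_t)((uint64_t)a + (uint64_t)b);  // two's-complement wrap
      return ((a ^ r) & (b ^ r)) < 0;
    }

    // Overflow iff the operands have different signs and the result's sign
    // differs from the first operand's (the SubExact*Node::Ideal test).
    bool sub_overflows(int64_t a, int64_t b) {
      int64_t r = (int64_t)((uint64_t)a - (uint64_t)b);
      return ((a ^ b) & (a ^ r)) < 0;
    }

    int main() {
      printf("%d %d\n", add_overflows(INT64_MAX, 1), add_overflows(1, 2));  // 1 0
      printf("%d %d\n", sub_overflows(INT64_MIN, 1), sub_overflows(5, 3));  // 1 0
      return 0;
    }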
} -JVMState* LibraryIntrinsic::generate(JVMState* jvms) { +JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) { LibraryCallKit kit(jvms, this); Compile* C = kit.C; int nodes = C->unique(); @@ -686,7 +713,18 @@ case vmIntrinsics::_min: case vmIntrinsics::_max: return inline_min_max(intrinsic_id()); - case vmIntrinsics::_addExact: return inline_math_addExact(); + case vmIntrinsics::_addExactI: return inline_math_addExactI(false /* add */); + case vmIntrinsics::_addExactL: return inline_math_addExactL(false /* add */); + case vmIntrinsics::_decrementExactI: return inline_math_subtractExactI(true /* decrement */); + case vmIntrinsics::_decrementExactL: return inline_math_subtractExactL(true /* decrement */); + case vmIntrinsics::_incrementExactI: return inline_math_addExactI(true /* increment */); + case vmIntrinsics::_incrementExactL: return inline_math_addExactL(true /* increment */); + case vmIntrinsics::_multiplyExactI: return inline_math_multiplyExactI(); + case vmIntrinsics::_multiplyExactL: return inline_math_multiplyExactL(); + case vmIntrinsics::_negateExactI: return inline_math_negateExactI(); + case vmIntrinsics::_negateExactL: return inline_math_negateExactL(); + case vmIntrinsics::_subtractExactI: return inline_math_subtractExactI(false /* subtract */); + case vmIntrinsics::_subtractExactL: return inline_math_subtractExactL(false /* subtract */); case vmIntrinsics::_arraycopy: return inline_arraycopy(); @@ -1931,7 +1969,14 @@ return true; } -bool LibraryCallKit::inline_math_mathExact(Node* math) { +void LibraryCallKit::inline_math_mathExact(Node* math) { + // If we didn't get the expected opcode it means we have optimized + // the node to something else and don't need the exception edge. + if (!math->is_MathExact()) { + set_result(math); + return; + } + Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node)); Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node)); @@ -1954,19 +1999,106 @@ set_control(fast_path); set_result(result); +} + +bool LibraryCallKit::inline_math_addExactI(bool is_increment) { + Node* arg1 = argument(0); + Node* arg2 = NULL; + + if (is_increment) { + arg2 = intcon(1); + } else { + arg2 = argument(1); + } + + Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) ); + inline_math_mathExact(add); + return true; +} + +bool LibraryCallKit::inline_math_addExactL(bool is_increment) { + Node* arg1 = argument(0); // type long + // argument(1) == TOP + Node* arg2 = NULL; + + if (is_increment) { + arg2 = longcon(1); + } else { + arg2 = argument(2); // type long + // argument(3) == TOP + } + + Node* add = _gvn.transform(new(C) AddExactLNode(NULL, arg1, arg2)); + inline_math_mathExact(add); return true; } -bool LibraryCallKit::inline_math_addExact() { +bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) { + Node* arg1 = argument(0); + Node* arg2 = NULL; + + if (is_decrement) { + arg2 = intcon(1); + } else { + arg2 = argument(1); + } + + Node* sub = _gvn.transform(new(C) SubExactINode(NULL, arg1, arg2)); + inline_math_mathExact(sub); + return true; +} + +bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) { + Node* arg1 = argument(0); // type long + // argument(1) == TOP + Node* arg2 = NULL; + + if (is_decrement) { + arg2 = longcon(1); + } else { + arg2 = argument(2); // type long + // argument(3) == TOP + } + + Node* sub = _gvn.transform(new(C) SubExactLNode(NULL, arg1, arg2)); + inline_math_mathExact(sub); + return true; +} + +bool 
LibraryCallKit::inline_math_negateExactI() { + Node* arg1 = argument(0); + + Node* neg = _gvn.transform(new(C) NegExactINode(NULL, arg1)); + inline_math_mathExact(neg); + return true; +} + +bool LibraryCallKit::inline_math_negateExactL() { + Node* arg1 = argument(0); + // argument(1) == TOP + + Node* neg = _gvn.transform(new(C) NegExactLNode(NULL, arg1)); + inline_math_mathExact(neg); + return true; +} + +bool LibraryCallKit::inline_math_multiplyExactI() { Node* arg1 = argument(0); Node* arg2 = argument(1); - Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) ); - if (add->Opcode() == Op_AddExactI) { - return inline_math_mathExact(add); - } else { - set_result(add); - } + Node* mul = _gvn.transform(new(C) MulExactINode(NULL, arg1, arg2)); + inline_math_mathExact(mul); + return true; +} + +bool LibraryCallKit::inline_math_multiplyExactL() { + Node* arg1 = argument(0); + // argument(1) == TOP + Node* arg2 = argument(2); + // argument(3) == TOP + + Node* mul = _gvn.transform(new(C) MulExactLNode(NULL, arg1, arg2)); + inline_math_mathExact(mul); return true; } @@ -3353,6 +3485,7 @@ // If kls is null, we have a primitive mirror. phi->init_req(_prim_path, prim_return_value); if (stopped()) { set_result(region, phi); return true; } + bool safe_for_replace = (region->in(_prim_path) == top()); Node* p; // handy temp Node* null_ctl; @@ -3363,7 +3496,7 @@ switch (id) { case vmIntrinsics::_isInstance: // nothing is an instance of a primitive type - query_value = gen_instanceof(obj, kls); + query_value = gen_instanceof(obj, kls, safe_for_replace); break; case vmIntrinsics::_getModifiers: @@ -4553,8 +4686,62 @@ const Type* dest_type = dest->Value(&_gvn); const TypeAryPtr* top_src = src_type->isa_aryptr(); const TypeAryPtr* top_dest = dest_type->isa_aryptr(); - if (top_src == NULL || top_src->klass() == NULL || - top_dest == NULL || top_dest->klass() == NULL) { + + // Do we have the type of src? + bool has_src = (top_src != NULL && top_src->klass() != NULL); + // Do we have the type of dest? + bool has_dest = (top_dest != NULL && top_dest->klass() != NULL); + // Is the type for src from speculation? + bool src_spec = false; + // Is the type for dest from speculation? + bool dest_spec = false; + + if (!has_src || !has_dest) { + // We don't have sufficient type information, let's see if + // speculative types can help. We need to have types for both src + // and dest so that it pays off. 
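+  // (Knowing just one side is not enough to pick a faster copy routine,
+  // so guards are only emitted when both src and dest can be typed.)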
+ + // Do we already have or could we have type information for src + bool could_have_src = has_src; + // Do we already have or could we have type information for dest + bool could_have_dest = has_dest; + + ciKlass* src_k = NULL; + if (!has_src) { + src_k = src_type->speculative_type(); + if (src_k != NULL && src_k->is_array_klass()) { + could_have_src = true; + } + } + + ciKlass* dest_k = NULL; + if (!has_dest) { + dest_k = dest_type->speculative_type(); + if (dest_k != NULL && dest_k->is_array_klass()) { + could_have_dest = true; + } + } + + if (could_have_src && could_have_dest) { + // This is going to pay off so emit the required guards + if (!has_src) { + src = maybe_cast_profiled_obj(src, src_k); + src_type = _gvn.type(src); + top_src = src_type->isa_aryptr(); + has_src = (top_src != NULL && top_src->klass() != NULL); + src_spec = true; + } + if (!has_dest) { + dest = maybe_cast_profiled_obj(dest, dest_k); + dest_type = _gvn.type(dest); + top_dest = dest_type->isa_aryptr(); + has_dest = (top_dest != NULL && top_dest->klass() != NULL); + dest_spec = true; + } + } + } + + if (!has_src || !has_dest) { // Conservatively insert a memory barrier on all memory slices. // Do not let writes into the source float below the arraycopy. insert_mem_bar(Op_MemBarCPUOrder); @@ -4589,6 +4776,40 @@ return true; } + if (src_elem == T_OBJECT) { + // If both arrays are object arrays then having the exact types + // for both will remove the need for a subtype check at runtime + // before the call and may make it possible to pick a faster copy + // routine (without a subtype check on every element) + // Do we have the exact type of src? + bool could_have_src = src_spec; + // Do we have the exact type of dest? + bool could_have_dest = dest_spec; + ciKlass* src_k = top_src->klass(); + ciKlass* dest_k = top_dest->klass(); + if (!src_spec) { + src_k = src_type->speculative_type(); + if (src_k != NULL && src_k->is_array_klass()) { + could_have_src = true; + } + } + if (!dest_spec) { + dest_k = dest_type->speculative_type(); + if (dest_k != NULL && dest_k->is_array_klass()) { + could_have_dest = true; + } + } + if (could_have_src && could_have_dest) { + // If we can have both exact types, emit the missing guards + if (could_have_src && !src_spec) { + src = maybe_cast_profiled_obj(src, src_k); + } + if (could_have_dest && !dest_spec) { + dest = maybe_cast_profiled_obj(dest, dest_k); + } + } + } + //--------------------------------------------------------------------------- // We will make a fast path for this call to arraycopy. diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/loopPredicate.cpp --- a/src/share/vm/opto/loopPredicate.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/loopPredicate.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -41,63 +41,6 @@ * checks (such as null checks). 
*/ -//-------------------------------is_uncommon_trap_proj---------------------------- -// Return true if proj is the form of "proj->[region->..]call_uct" -bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) { - int path_limit = 10; - assert(proj, "invalid argument"); - Node* out = proj; - for (int ct = 0; ct < path_limit; ct++) { - out = out->unique_ctrl_out(); - if (out == NULL) - return false; - if (out->is_CallStaticJava()) { - int req = out->as_CallStaticJava()->uncommon_trap_request(); - if (req != 0) { - Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); - if (trap_reason == reason || reason == Deoptimization::Reason_none) { - return true; - } - } - return false; // don't do further after call - } - if (out->Opcode() != Op_Region) - return false; - } - return false; -} - -//-------------------------------is_uncommon_trap_if_pattern------------------------- -// Return true for "if(test)-> proj -> ... -// | -// V -// other_proj->[region->..]call_uct" -// -// "must_reason_predicate" means the uct reason must be Reason_predicate -bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) { - Node *in0 = proj->in(0); - if (!in0->is_If()) return false; - // Variation of a dead If node. - if (in0->outcnt() < 2) return false; - IfNode* iff = in0->as_If(); - - // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate - if (reason != Deoptimization::Reason_none) { - if (iff->in(1)->Opcode() != Op_Conv2B || - iff->in(1)->in(1)->Opcode() != Op_Opaque1) { - return false; - } - } - - ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj(); - if (is_uncommon_trap_proj(other_proj, reason)) { - assert(reason == Deoptimization::Reason_none || - Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list"); - return true; - } - return false; -} - //-------------------------------register_control------------------------- void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) { assert(n->is_CFG(), "must be control node"); @@ -147,7 +90,7 @@ // This code is also used to clone predicates to clonned loops. 
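+// is_uncommon_trap_proj() and is_uncommon_trap_if_pattern() now live on
+// ProjNode; see src/share/vm/opto/multnode.cpp in this change.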
ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, Deoptimization::DeoptReason reason) { - assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!"); + assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!"); IfNode* iff = cont_proj->in(0)->as_If(); ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); @@ -235,7 +178,7 @@ ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, Deoptimization::DeoptReason reason) { assert(new_entry != 0, "only used for clone predicate"); - assert(PhaseIdealLoop::is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!"); + assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!"); IfNode* iff = cont_proj->in(0)->as_If(); ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); @@ -422,7 +365,7 @@ ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) { if (start_c == NULL || !start_c->is_Proj()) return NULL; - if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) { + if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) { return start_c->as_Proj(); } return NULL; @@ -773,7 +716,7 @@ ProjNode* proj = if_proj_list.pop()->as_Proj(); IfNode* iff = proj->in(0)->as_If(); - if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) { + if (!proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { if (loop->is_loop_exit(iff)) { // stop processing the remaining projs in the list because the execution of them // depends on the condition of "iff" (iff->in(1)). diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/loopTransform.cpp --- a/src/share/vm/opto/loopTransform.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/loopTransform.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -713,6 +713,10 @@ case Op_ModL: body_size += 30; break; case Op_DivL: body_size += 30; break; case Op_MulL: body_size += 10; break; + case Op_FlagsProj: + // Can't handle unrolling of loops containing + // nodes that generate a FlagsProj at the moment + return false; case Op_StrComp: case Op_StrEquals: case Op_StrIndexOf: @@ -1960,7 +1964,7 @@ // Find loads off the surviving projection; remove their control edge for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { Node* cd = dp->fast_out(i); // Control-dependent node - if( cd->is_Load() ) { // Loads can now float around in the loop + if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop // Allow the load to float around in the loop, or before it // but NOT before the pre-loop. _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/loopnode.cpp --- a/src/share/vm/opto/loopnode.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/loopnode.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -167,7 +167,7 @@ // expensive nodes will notice the loop and skip over it to try to // move the node further up. 
if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) { - if (!is_uncommon_trap_if_pattern(ctl->in(1)->as_Proj(), Deoptimization::Reason_none)) { + if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { break; } next = idom(ctl->in(1)->in(0)); @@ -181,7 +181,7 @@ } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) { next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control(); } else if (parent_ctl->is_If()) { - if (!is_uncommon_trap_if_pattern(ctl->as_Proj(), Deoptimization::Reason_none)) { + if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { break; } assert(idom(ctl) == parent_ctl, "strange"); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/loopnode.hpp --- a/src/share/vm/opto/loopnode.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/loopnode.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -876,13 +876,6 @@ // Return true if exp is a scaled induction var plus (or minus) constant bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0); - // Return true if proj is for "proj->[region->..]call_uct" - static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason); - // Return true for "if(test)-> proj -> ... - // | - // V - // other_proj->[region->..]call_uct" - static bool is_uncommon_trap_if_pattern(ProjNode* proj, Deoptimization::DeoptReason reason); // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, Deoptimization::DeoptReason reason); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/loopopts.cpp --- a/src/share/vm/opto/loopopts.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/loopopts.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -42,6 +42,13 @@ // so disable this for now return NULL; } + + if (n->is_MathExact()) { + // MathExact has projections that are not correctly handled in the code + // below. + return NULL; + } + int wins = 0; assert(!n->is_CFG(), ""); assert(region->is_Region(), ""); @@ -238,7 +245,7 @@ ProjNode* dp_proj = dp->as_Proj(); ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj(); if (exclude_loop_predicate && - is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate)) + unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) return; // Let IGVN transformation change control dependence. 
   IdealLoopTree *old_loop = get_loop(dp);
diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/matcher.cpp
--- a/src/share/vm/opto/matcher.cpp	Thu Nov 21 15:04:26 2013 +0100
+++ b/src/share/vm/opto/matcher.cpp	Thu Nov 21 15:04:54 2013 +0100
@@ -464,17 +464,17 @@
   C->FIRST_STACK_mask().Clear();

   // Add in the incoming argument area
-  OptoReg::Name init = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
-  for (i = init; i < _in_arg_limit; i = OptoReg::add(i,1))
+  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
+  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
     C->FIRST_STACK_mask().Insert(i);
-
+  }
   // Add in all bits past the outgoing argument area
   guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
             "must be able to represent all call arguments in reg mask");
-  init = _out_arg_limit;
-  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
+  OptoReg::Name init = _out_arg_limit;
+  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
     C->FIRST_STACK_mask().Insert(i);
-
+  }
   // Finally, set the "infinite stack" bit.
   C->FIRST_STACK_mask().set_AllStack();

@@ -506,16 +506,36 @@
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
   }
   if (Matcher::vector_size_supported(T_FLOAT,2)) {
+    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
+    // RA guarantees such alignment since it is needed for Double and Long values.
     *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
   }
   if (Matcher::vector_size_supported(T_FLOAT,4)) {
+    // For VecX we need quad alignment and 16 bytes (4 slots) for spills.
+    //
+    // RA can use input argument stack slots for spills, but until RA
+    // runs we don't know the frame size or the offsets of the input
+    // arg stack slots.
+    //
+    // Exclude the last input arg stack slots to avoid spilling vectors
+    // there; otherwise vector spills could stomp over stack slots in
+    // the caller's frame.
+    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
+    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
+      aligned_stack_mask.Remove(in);
+      in = OptoReg::add(in, -1);
+    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
     *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
   }
   if (Matcher::vector_size_supported(T_FLOAT,8)) {
+    // For VecY we need eight-slot alignment and 32 bytes (8 slots) for spills.
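+    // (Same caller-frame exclusion as for VecX above, widened to the
+    // eight-slot footprint of a VecY spill.)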
+ OptoReg::Name in = OptoReg::add(_in_arg_limit, -1); + for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) { + aligned_stack_mask.Remove(in); + in = OptoReg::add(in, -1); + } aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY); assert(aligned_stack_mask.is_AllStack(), "should be infinite stack"); *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY]; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/matcher.hpp --- a/src/share/vm/opto/matcher.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/matcher.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -338,6 +338,7 @@ static RegMask modL_proj_mask(); static const RegMask mathExactI_result_proj_mask(); + static const RegMask mathExactL_result_proj_mask(); static const RegMask mathExactI_flags_proj_mask(); // Use hardware DIV instruction when it is faster than diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/mathexactnode.cpp --- a/src/share/vm/opto/mathexactnode.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/mathexactnode.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -25,18 +25,86 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" #include "opto/addnode.hpp" +#include "opto/cfgnode.hpp" #include "opto/machnode.hpp" +#include "opto/matcher.hpp" #include "opto/mathexactnode.hpp" -#include "opto/matcher.hpp" #include "opto/subnode.hpp" -MathExactNode::MathExactNode(Node* ctrl, Node* n1, Node* n2) : MultiNode(3) { +MathExactNode::MathExactNode(Node* ctrl, Node* in1) : MultiNode(2) { + init_class_id(Class_MathExact); + init_req(0, ctrl); + init_req(1, in1); +} + +MathExactNode::MathExactNode(Node* ctrl, Node* in1, Node* in2) : MultiNode(3) { + init_class_id(Class_MathExact); init_req(0, ctrl); - init_req(1, n1); - init_req(2, n2); + init_req(1, in1); + init_req(2, in2); +} + +BoolNode* MathExactNode::bool_node() const { + Node* flags = flags_node(); + BoolNode* boolnode = flags->unique_out()->as_Bool(); + assert(boolnode != NULL, "must have BoolNode"); + return boolnode; +} + +IfNode* MathExactNode::if_node() const { + BoolNode* boolnode = bool_node(); + IfNode* ifnode = boolnode->unique_out()->as_If(); + assert(ifnode != NULL, "must have IfNode"); + return ifnode; +} + +Node* MathExactNode::control_node() const { + IfNode* ifnode = if_node(); + return ifnode->in(0); } -Node* AddExactINode::match(const ProjNode* proj, const Matcher* m) { +Node* MathExactNode::non_throwing_branch() const { + IfNode* ifnode = if_node(); + if (bool_node()->_test._test == BoolTest::overflow) { + return ifnode->proj_out(0); + } + return ifnode->proj_out(1); +} + +// If the MathExactNode won't overflow we have to replace the +// FlagsProjNode and ProjNode that is generated by the MathExactNode +Node* MathExactNode::no_overflow(PhaseGVN* phase, Node* new_result) { + PhaseIterGVN* igvn = phase->is_IterGVN(); + if (igvn) { + ProjNode* result = result_node(); + ProjNode* flags = flags_node(); + + if (result != NULL) { + igvn->replace_node(result, new_result); + } + + if (flags != NULL) { + BoolNode* boolnode = bool_node(); + switch (boolnode->_test._test) { + case BoolTest::overflow: + // if the check is for overflow - never taken + igvn->replace_node(boolnode, phase->intcon(0)); + break; + case BoolTest::no_overflow: + // if the check is for no overflow - always taken + igvn->replace_node(boolnode, phase->intcon(1)); + break; + default: + fatal("Unexpected value of BoolTest"); + break; + } + flags->del_req(0); + } + } + return new_result; +} + +Node* MathExactINode::match(const ProjNode* proj, const Matcher* m) { uint 
ideal_reg = proj->ideal_reg(); RegMask rm; if (proj->_con == result_proj_node) { @@ -49,42 +117,22 @@ return new (m->C) MachProjNode(this, proj->_con, rm, ideal_reg); } -// If the MathExactNode won't overflow we have to replace the -// FlagsProjNode and ProjNode that is generated by the MathExactNode -Node* MathExactNode::no_overflow(PhaseGVN *phase, Node* new_result) { - PhaseIterGVN *igvn = phase->is_IterGVN(); - if (igvn) { - ProjNode* result = result_node(); - ProjNode* flags = flags_node(); - - if (result != NULL) { - igvn->replace_node(result, new_result); - } - - if (flags != NULL) { - BoolNode* bolnode = (BoolNode *) flags->unique_out(); - switch (bolnode->_test._test) { - case BoolTest::overflow: - // if the check is for overflow - never taken - igvn->replace_node(bolnode, phase->intcon(0)); - break; - case BoolTest::no_overflow: - // if the check is for no overflow - always taken - igvn->replace_node(bolnode, phase->intcon(1)); - break; - default: - fatal("Unexpected value of BoolTest"); - break; - } - flags->del_req(0); - } +Node* MathExactLNode::match(const ProjNode* proj, const Matcher* m) { + uint ideal_reg = proj->ideal_reg(); + RegMask rm; + if (proj->_con == result_proj_node) { + rm = m->mathExactL_result_proj_mask(); + } else { + assert(proj->_con == flags_proj_node, "must be result or flags"); + assert(ideal_reg == Op_RegFlags, "sanity"); + rm = m->mathExactI_flags_proj_mask(); } - return new_result; + return new (m->C) MachProjNode(this, proj->_con, rm, ideal_reg); } -Node *AddExactINode::Ideal(PhaseGVN *phase, bool can_reshape) { - Node *arg1 = in(1); - Node *arg2 = in(2); +Node* AddExactINode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* arg1 = in(1); + Node* arg2 = in(2); const Type* type1 = phase->type(arg1); const Type* type2 = phase->type(arg2); @@ -102,12 +150,7 @@ return NULL; } - if (type1 == TypeInt::ZERO) { // (Add 0 x) == x - Node* add_result = new (phase->C) AddINode(arg1, arg2); - return no_overflow(phase, add_result); - } - - if (type2 == TypeInt::ZERO) { // (Add x 0) == x + if (type1 == TypeInt::ZERO || type2 == TypeInt::ZERO) { // (Add 0 x) == x Node* add_result = new (phase->C) AddINode(arg1, arg2); return no_overflow(phase, add_result); } @@ -141,3 +184,247 @@ return NULL; } +Node* AddExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* arg1 = in(1); + Node* arg2 = in(2); + + const Type* type1 = phase->type(arg1); + const Type* type2 = phase->type(arg2); + + if (type1 != Type::TOP && type1->singleton() && + type2 != Type::TOP && type2->singleton()) { + jlong val1 = arg1->get_long(); + jlong val2 = arg2->get_long(); + jlong result = val1 + val2; + // Hacker's Delight 2-12 Overflow if both arguments have the opposite sign of the result + if ( (((val1 ^ result) & (val2 ^ result)) >= 0)) { + Node* con_result = ConLNode::make(phase->C, result); + return no_overflow(phase, con_result); + } + return NULL; + } + + if (type1 == TypeLong::ZERO || type2 == TypeLong::ZERO) { // (Add 0 x) == x + Node* add_result = new (phase->C) AddLNode(arg1, arg2); + return no_overflow(phase, add_result); + } + + if (type2->singleton()) { + return NULL; // no change - keep constant on the right + } + + if (type1->singleton()) { + // Make it x + Constant - move constant to the right + swap_edges(1, 2); + return this; + } + + if (arg2->is_Load()) { + return NULL; // no change - keep load on the right + } + + if (arg1->is_Load()) { + // Make it x + Load - move load to the right + swap_edges(1, 2); + return this; + } + + if (arg1->_idx > arg2->_idx) { + // Sort the 
edges + swap_edges(1, 2); + return this; + } + + return NULL; +} + +Node* SubExactINode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* arg1 = in(1); + Node* arg2 = in(2); + + const Type* type1 = phase->type(arg1); + const Type* type2 = phase->type(arg2); + + if (type1 != Type::TOP && type1->singleton() && + type2 != Type::TOP && type2->singleton()) { + jint val1 = arg1->get_int(); + jint val2 = arg2->get_int(); + jint result = val1 - val2; + + // Hacker's Delight 2-12 Overflow iff the arguments have different signs and + // the sign of the result is different than the sign of arg1 + if (((val1 ^ val2) & (val1 ^ result)) >= 0) { + Node* con_result = ConINode::make(phase->C, result); + return no_overflow(phase, con_result); + } + return NULL; + } + + if (type1 == TypeInt::ZERO || type2 == TypeInt::ZERO) { + // Sub with zero is the same as add with zero + Node* add_result = new (phase->C) AddINode(arg1, arg2); + return no_overflow(phase, add_result); + } + + return NULL; +} + +Node* SubExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* arg1 = in(1); + Node* arg2 = in(2); + + const Type* type1 = phase->type(arg1); + const Type* type2 = phase->type(arg2); + + if (type1 != Type::TOP && type1->singleton() && + type2 != Type::TOP && type2->singleton()) { + jlong val1 = arg1->get_long(); + jlong val2 = arg2->get_long(); + jlong result = val1 - val2; + + // Hacker's Delight 2-12 Overflow iff the arguments have different signs and + // the sign of the result is different than the sign of arg1 + if (((val1 ^ val2) & (val1 ^ result)) >= 0) { + Node* con_result = ConLNode::make(phase->C, result); + return no_overflow(phase, con_result); + } + return NULL; + } + + if (type1 == TypeLong::ZERO || type2 == TypeLong::ZERO) { + // Sub with zero is the same as add with zero + Node* add_result = new (phase->C) AddLNode(arg1, arg2); + return no_overflow(phase, add_result); + } + + return NULL; +} + +Node* NegExactINode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node *arg = in(1); + + const Type* type = phase->type(arg); + if (type != Type::TOP && type->singleton()) { + jint value = arg->get_int(); + if (value != min_jint) { + Node* neg_result = ConINode::make(phase->C, -value); + return no_overflow(phase, neg_result); + } + } + return NULL; +} + +Node* NegExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node *arg = in(1); + + const Type* type = phase->type(arg); + if (type != Type::TOP && type->singleton()) { + jlong value = arg->get_long(); + if (value != min_jlong) { + Node* neg_result = ConLNode::make(phase->C, -value); + return no_overflow(phase, neg_result); + } + } + return NULL; +} + +Node* MulExactINode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* arg1 = in(1); + Node* arg2 = in(2); + + const Type* type1 = phase->type(arg1); + const Type* type2 = phase->type(arg2); + + if (type1 != Type::TOP && type1->singleton() && + type2 != Type::TOP && type2->singleton()) { + jint val1 = arg1->get_int(); + jint val2 = arg2->get_int(); + jlong result = (jlong) val1 * (jlong) val2; + if ((jint) result == result) { + // no overflow + Node* mul_result = ConINode::make(phase->C, result); + return no_overflow(phase, mul_result); + } + } + + if (type1 == TypeInt::ZERO || type2 == TypeInt::ZERO) { + return no_overflow(phase, ConINode::make(phase->C, 0)); + } + + if (type1 == TypeInt::ONE) { + Node* mul_result = new (phase->C) AddINode(arg2, phase->intcon(0)); + return no_overflow(phase, mul_result); + } + if (type2 == TypeInt::ONE) { + Node* mul_result = new (phase->C) AddINode(arg1, 
phase->intcon(0)); + return no_overflow(phase, mul_result); + } + + if (type1 == TypeInt::MINUS_1) { + return new (phase->C) NegExactINode(NULL, arg2); + } + + if (type2 == TypeInt::MINUS_1) { + return new (phase->C) NegExactINode(NULL, arg1); + } + + return NULL; +} + +Node* MulExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* arg1 = in(1); + Node* arg2 = in(2); + + const Type* type1 = phase->type(arg1); + const Type* type2 = phase->type(arg2); + + if (type1 != Type::TOP && type1->singleton() && + type2 != Type::TOP && type2->singleton()) { + jlong val1 = arg1->get_long(); + jlong val2 = arg2->get_long(); + + jlong result = val1 * val2; + jlong ax = (val1 < 0 ? -val1 : val1); + jlong ay = (val2 < 0 ? -val2 : val2); + + bool overflow = false; + if ((ax | ay) & CONST64(0xFFFFFFFF00000000)) { + // potential overflow if any bit in upper 32 bits are set + if ((val1 == min_jlong && val2 == -1) || (val2 == min_jlong && val1 == -1)) { + // -1 * Long.MIN_VALUE will overflow + overflow = true; + } else if (val2 != 0 && (result / val2 != val1)) { + overflow = true; + } + } + + if (!overflow) { + Node* mul_result = ConLNode::make(phase->C, result); + return no_overflow(phase, mul_result); + } + } + + if (type1 == TypeLong::ZERO || type2 == TypeLong::ZERO) { + return no_overflow(phase, ConLNode::make(phase->C, 0)); + } + + if (type1 == TypeLong::ONE) { + Node* mul_result = new (phase->C) AddLNode(arg2, phase->longcon(0)); + return no_overflow(phase, mul_result); + } + if (type2 == TypeLong::ONE) { + Node* mul_result = new (phase->C) AddLNode(arg1, phase->longcon(0)); + return no_overflow(phase, mul_result); + } + + if (type1 == TypeLong::MINUS_1) { + return new (phase->C) NegExactLNode(NULL, arg2); + } + + if (type2 == TypeLong::MINUS_1) { + return new (phase->C) NegExactLNode(NULL, arg1); + } + + return NULL; +} + diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/mathexactnode.hpp --- a/src/share/vm/opto/mathexactnode.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/mathexactnode.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -27,8 +27,11 @@ #include "opto/multnode.hpp" #include "opto/node.hpp" +#include "opto/subnode.hpp" #include "opto/type.hpp" +class BoolNode; +class IfNode; class Node; class PhaseGVN; @@ -36,6 +39,7 @@ class MathExactNode : public MultiNode { public: + MathExactNode(Node* ctrl, Node* in1); MathExactNode(Node* ctrl, Node* in1, Node* in2); enum { result_proj_node = 0, @@ -45,23 +49,92 @@ virtual Node* Identity(PhaseTransform* phase) { return this; } virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) { return NULL; } virtual const Type* Value(PhaseTransform* phase) const { return bottom_type(); } - virtual uint hash() const { return Node::hash(); } + virtual uint hash() const { return NO_HASH; } virtual bool is_CFG() const { return false; } virtual uint ideal_reg() const { return NotAMachineReg; } - ProjNode* result_node() { return proj_out(result_proj_node); } - ProjNode* flags_node() { return proj_out(flags_proj_node); } + ProjNode* result_node() const { return proj_out(result_proj_node); } + ProjNode* flags_node() const { return proj_out(flags_proj_node); } + Node* control_node() const; + Node* non_throwing_branch() const; protected: + IfNode* if_node() const; + BoolNode* bool_node() const; Node* no_overflow(PhaseGVN *phase, Node* new_result); }; -class AddExactINode : public MathExactNode { +class MathExactINode : public MathExactNode { + public: + MathExactINode(Node* ctrl, Node* in1) : MathExactNode(ctrl, in1) {} + MathExactINode(Node* ctrl, Node* in1, 
Node* in2) : MathExactNode(ctrl, in1, in2) {} + virtual int Opcode() const; + virtual Node* match(const ProjNode* proj, const Matcher* m); + virtual const Type* bottom_type() const { return TypeTuple::INT_CC_PAIR; } +}; + +class MathExactLNode : public MathExactNode { public: - AddExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactNode(ctrl, in1, in2) {} + MathExactLNode(Node* ctrl, Node* in1) : MathExactNode(ctrl, in1) {} + MathExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactNode(ctrl, in1, in2) {} + virtual int Opcode() const; + virtual Node* match(const ProjNode* proj, const Matcher* m); + virtual const Type* bottom_type() const { return TypeTuple::LONG_CC_PAIR; } +}; + +class AddExactINode : public MathExactINode { +public: + AddExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactINode(ctrl, in1, in2) {} + virtual int Opcode() const; + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); +}; + +class AddExactLNode : public MathExactLNode { +public: + AddExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactLNode(ctrl, in1, in2) {} + virtual int Opcode() const; + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); +}; + +class SubExactINode : public MathExactINode { +public: + SubExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactINode(ctrl, in1, in2) {} virtual int Opcode() const; - virtual const Type* bottom_type() const { return TypeTuple::INT_CC_PAIR; } - virtual Node* match(const ProjNode* proj, const Matcher* m); - virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); +}; + +class SubExactLNode : public MathExactLNode { +public: + SubExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactLNode(ctrl, in1, in2) {} + virtual int Opcode() const; + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); +}; + +class NegExactINode : public MathExactINode { +public: + NegExactINode(Node* ctrl, Node* in1) : MathExactINode(ctrl, in1) {} + virtual int Opcode() const; + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); +}; + +class NegExactLNode : public MathExactLNode { +public: + NegExactLNode(Node* ctrl, Node* in1) : MathExactLNode(ctrl, in1) {} + virtual int Opcode() const; + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); +}; + +class MulExactINode : public MathExactINode { +public: + MulExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactINode(ctrl, in1, in2) {} + virtual int Opcode() const; + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); +}; + +class MulExactLNode : public MathExactLNode { +public: + MulExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactLNode(ctrl, in1, in2) {} + virtual int Opcode() const; + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); }; class FlagsProjNode : public ProjNode { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/memnode.hpp --- a/src/share/vm/opto/memnode.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/memnode.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -204,6 +204,17 @@ protected: const Type* load_array_final_field(const TypeKlassPtr *tkls, ciKlass* klass) const; + // depends_only_on_test is almost always true, and needs to be almost always + // true to enable key hoisting & commoning optimizations. However, for the + // special case of RawPtr loads from TLS top & end, and other loads performed by + // GC barriers, the control edge carries the dependence preventing hoisting past + // a Safepoint instead of the memory edge. 
(An unfortunate consequence of having + // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes + // which produce results (new raw memory state) inside of loops preventing all + // manner of other optimizations). Basically, it's ugly but so is the alternative. + // See comment in macro.cpp, around line 125 expand_allocate_common(). + virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } + }; //------------------------------LoadBNode-------------------------------------- @@ -370,16 +381,6 @@ virtual uint ideal_reg() const { return Op_RegP; } virtual int store_Opcode() const { return Op_StoreP; } virtual BasicType memory_type() const { return T_ADDRESS; } - // depends_only_on_test is almost always true, and needs to be almost always - // true to enable key hoisting & commoning optimizations. However, for the - // special case of RawPtr loads from TLS top & end, the control edge carries - // the dependence preventing hoisting past a Safepoint instead of the memory - // edge. (An unfortunate consequence of having Safepoints not set Raw - // Memory; itself an unfortunate consequence of having Nodes which produce - // results (new raw memory state) inside of loops preventing all manner of - // other optimizations). Basically, it's ugly but so is the alternative. - // See comment in macro.cpp, around line 125 expand_allocate_common(). - virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } }; @@ -393,16 +394,6 @@ virtual uint ideal_reg() const { return Op_RegN; } virtual int store_Opcode() const { return Op_StoreN; } virtual BasicType memory_type() const { return T_NARROWOOP; } - // depends_only_on_test is almost always true, and needs to be almost always - // true to enable key hoisting & commoning optimizations. However, for the - // special case of RawPtr loads from TLS top & end, the control edge carries - // the dependence preventing hoisting past a Safepoint instead of the memory - // edge. (An unfortunate consequence of having Safepoints not set Raw - // Memory; itself an unfortunate consequence of having Nodes which produce - // results (new raw memory state) inside of loops preventing all manner of - // other optimizations). Basically, it's ugly but so is the alternative. - // See comment in macro.cpp, around line 125 expand_allocate_common(). 
- virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } }; //------------------------------LoadKlassNode---------------------------------- diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/multnode.cpp --- a/src/share/vm/opto/multnode.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/multnode.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "opto/callnode.hpp" +#include "opto/cfgnode.hpp" #include "opto/matcher.hpp" #include "opto/mathexactnode.hpp" #include "opto/multnode.hpp" @@ -150,3 +151,59 @@ uint ProjNode::ideal_reg() const { return bottom_type()->ideal_reg(); } + +//-------------------------------is_uncommon_trap_proj---------------------------- +// Return true if proj is the form of "proj->[region->..]call_uct" +bool ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) { + int path_limit = 10; + Node* out = this; + for (int ct = 0; ct < path_limit; ct++) { + out = out->unique_ctrl_out(); + if (out == NULL) + return false; + if (out->is_CallStaticJava()) { + int req = out->as_CallStaticJava()->uncommon_trap_request(); + if (req != 0) { + Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); + if (trap_reason == reason || reason == Deoptimization::Reason_none) { + return true; + } + } + return false; // don't do further after call + } + if (out->Opcode() != Op_Region) + return false; + } + return false; +} + +//-------------------------------is_uncommon_trap_if_pattern------------------------- +// Return true for "if(test)-> proj -> ... +// | +// V +// other_proj->[region->..]call_uct" +// +// "must_reason_predicate" means the uct reason must be Reason_predicate +bool ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) { + Node *in0 = in(0); + if (!in0->is_If()) return false; + // Variation of a dead If node. + if (in0->outcnt() < 2) return false; + IfNode* iff = in0->as_If(); + + // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate + if (reason != Deoptimization::Reason_none) { + if (iff->in(1)->Opcode() != Op_Conv2B || + iff->in(1)->in(1)->Opcode() != Op_Opaque1) { + return false; + } + } + + ProjNode* other_proj = iff->proj_out(1-_con)->as_Proj(); + if (other_proj->is_uncommon_trap_proj(reason)) { + assert(reason == Deoptimization::Reason_none || + Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list"); + return true; + } + return false; +} diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/multnode.hpp --- a/src/share/vm/opto/multnode.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/multnode.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -88,6 +88,14 @@ #ifndef PRODUCT virtual void dump_spec(outputStream *st) const; #endif + + // Return true if proj is for "proj->[region->..]call_uct" + bool is_uncommon_trap_proj(Deoptimization::DeoptReason reason); + // Return true for "if(test)-> proj -> ... 
+ // | + // V + // other_proj->[region->..]call_uct" + bool is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason); }; #endif // SHARE_VM_OPTO_MULTNODE_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/node.hpp --- a/src/share/vm/opto/node.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/node.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -100,6 +100,7 @@ class MachSpillCopyNode; class MachTempNode; class Matcher; +class MathExactNode; class MemBarNode; class MemBarStoreStoreNode; class MemNode; @@ -568,6 +569,7 @@ DEFINE_CLASS_ID(MemBar, Multi, 3) DEFINE_CLASS_ID(Initialize, MemBar, 0) DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1) + DEFINE_CLASS_ID(MathExact, Multi, 4) DEFINE_CLASS_ID(Mach, Node, 1) DEFINE_CLASS_ID(MachReturn, Mach, 0) @@ -757,6 +759,7 @@ DEFINE_CLASS_QUERY(MachSafePoint) DEFINE_CLASS_QUERY(MachSpillCopy) DEFINE_CLASS_QUERY(MachTemp) + DEFINE_CLASS_QUERY(MathExact) DEFINE_CLASS_QUERY(Mem) DEFINE_CLASS_QUERY(MemBar) DEFINE_CLASS_QUERY(MemBarStoreStore) diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/parse.hpp --- a/src/share/vm/opto/parse.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/parse.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -73,6 +73,7 @@ bool try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, + JVMState* jvms, ciCallProfile& profile, WarmCallInfo* wci_result, bool& should_delay); @@ -83,6 +84,7 @@ WarmCallInfo* wci_result); bool should_not_inline(ciMethod* callee_method, ciMethod* caller_method, + JVMState* jvms, WarmCallInfo* wci_result); void print_inlining(ciMethod* callee_method, int caller_bci, bool success) const; @@ -347,13 +349,15 @@ int _est_switch_depth; // Debugging SwitchRanges. #endif + // parser for the caller of the method of this object + Parse* const _parent; + public: // Constructor - Parse(JVMState* caller, ciMethod* parse_method, float expected_uses); + Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent); virtual Parse* is_Parse() const { return (Parse*)this; } - public: // Accessors. JVMState* caller() const { return _caller; } float expected_uses() const { return _expected_uses; } @@ -405,6 +409,8 @@ return block()->successor_for_bci(bci); } + Parse* parent_parser() const { return _parent; } + private: // Create a JVMS & map for the initial state of this method. SafePointNode* create_entry_map(); @@ -601,6 +607,9 @@ // Assumes that there is no applicable local handler. void throw_to_exit(SafePointNode* ex_map); + // Use speculative type to optimize CmpP node + Node* optimize_cmp_with_klass(Node* c); + public: #ifndef PRODUCT // Handle PrintOpto, etc. diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/parse1.cpp --- a/src/share/vm/opto/parse1.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/parse1.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -381,8 +381,8 @@ //------------------------------Parse------------------------------------------ // Main parser constructor. 
-Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) - : _exits(caller) +Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent) + : _exits(caller), _parent(parent) { // Init some variables _caller = caller; @@ -1102,6 +1102,10 @@ _synch_lock = shared_lock(lock_obj); } + // Feed profiling data for parameters to the type system so it can + // propagate it as speculative types + record_profiled_parameters_for_speculation(); + if (depth() == 1) { increment_and_test_invocation_counter(Tier2CompileThreshold); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/parse2.cpp --- a/src/share/vm/opto/parse2.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/parse2.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -268,7 +268,7 @@ return adjoinRange(value, value, dest, table_index); } - void print(ciEnv* env) { + void print() { if (is_singleton()) tty->print(" {%d}=>%d", lo(), dest()); else if (lo() == min_jint) @@ -471,8 +471,8 @@ // These are the switch destinations hanging off the jumpnode int i = 0; for (SwitchRange* r = lo; r <= hi; r++) { - for (int j = r->lo(); j <= r->hi(); j++, i++) { - Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), j - lowval)); + for (int64 j = r->lo(); j <= r->hi(); j++, i++) { + Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval))); { PreserveJVMState pjvms(this); set_control(input); @@ -632,7 +632,7 @@ } tty->print(" "); for( r = lo; r <= hi; r++ ) { - r->print(env()); + r->print(); } tty->print_cr(""); } @@ -1366,6 +1366,56 @@ } } +/** + * Use speculative type to optimize CmpP node: if comparison is + * against the low level class, cast the object to the speculative + * type if any. CmpP should then go away. + * + * @param c expected CmpP node + * @return result of CmpP on object casted to speculative type + * + */ +Node* Parse::optimize_cmp_with_klass(Node* c) { + // If this is transformed by the _gvn to a comparison with the low + // level klass then we may be able to use speculation + if (c->Opcode() == Op_CmpP && + (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) && + c->in(2)->is_Con()) { + Node* load_klass = NULL; + Node* decode = NULL; + if (c->in(1)->Opcode() == Op_DecodeNKlass) { + decode = c->in(1); + load_klass = c->in(1)->in(1); + } else { + load_klass = c->in(1); + } + if (load_klass->in(2)->is_AddP()) { + Node* addp = load_klass->in(2); + Node* obj = addp->in(AddPNode::Address); + const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); + if (obj_type->speculative_type() != NULL) { + ciKlass* k = obj_type->speculative_type(); + inc_sp(2); + obj = maybe_cast_profiled_obj(obj, k); + dec_sp(2); + // Make the CmpP use the casted obj + addp = basic_plus_adr(obj, addp->in(AddPNode::Offset)); + load_klass = load_klass->clone(); + load_klass->set_req(2, addp); + load_klass = _gvn.transform(load_klass); + if (decode != NULL) { + decode = decode->clone(); + decode->set_req(1, load_klass); + load_klass = _gvn.transform(decode); + } + c = c->clone(); + c->set_req(1, load_klass); + c = _gvn.transform(c); + } + } + } + return c; +} //------------------------------do_one_bytecode-------------------------------- // Parse this bytecode, and alter the Parsers JVM->Node mapping @@ -2239,6 +2289,7 @@ a = pop(); b = pop(); c = _gvn.transform( new (C) CmpPNode(b, a) ); + c = optimize_cmp_with_klass(c); do_if(btest, c); break; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/parseHelper.cpp --- 
a/src/share/vm/opto/parseHelper.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/parseHelper.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -128,7 +128,7 @@ } // Push the bool result back on stack - Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass))); + Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true); // Pop from stack AFTER gen_instanceof because it can uncommon trap. pop(); @@ -343,10 +343,14 @@ // Get the Method* node. ciMethod* m = method(); - address counters_adr = m->ensure_method_counters(); + MethodCounters* counters_adr = m->ensure_method_counters(); + if (counters_adr == NULL) { + C->record_failure("method counters allocation failed"); + return; + } Node* ctrl = control(); - const TypePtr* adr_type = TypeRawPtr::make(counters_adr); + const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr); Node *counters_node = makecon(adr_type); Node* adr_iic_node = basic_plus_adr(counters_node, counters_node, MethodCounters::interpreter_invocation_counter_offset_in_bytes()); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/phaseX.cpp --- a/src/share/vm/opto/phaseX.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/phaseX.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1385,6 +1385,20 @@ } } +/** + * Remove the speculative part of all types that we know of + */ +void PhaseIterGVN::remove_speculative_types() { + assert(UseTypeSpeculation, "speculation is off"); + for (uint i = 0; i < _types.Size(); i++) { + const Type* t = _types.fast_lookup(i); + if (t != NULL && t->isa_oopptr()) { + const TypeOopPtr* to = t->is_oopptr(); + _types.map(i, to->remove_speculative()); + } + } +} + //============================================================================= #ifndef PRODUCT uint PhaseCCP::_total_invokes = 0; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/phaseX.hpp --- a/src/share/vm/opto/phaseX.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/phaseX.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -500,6 +500,8 @@ ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, Deoptimization::DeoptReason reason); + void remove_speculative_types(); + #ifndef PRODUCT protected: // Sub-quadratic implementation of VerifyIterativeGVN. diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/postaloc.cpp --- a/src/share/vm/opto/postaloc.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/postaloc.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -97,7 +97,8 @@ static bool expected_yanked_node(Node *old, Node *orig_old) { // This code is expected only next original nodes: // - load from constant table node which may have next data input nodes: - // MachConstantBase, Phi, MachTemp, MachSpillCopy + // MachConstantBase, MachTemp, MachSpillCopy + // - Phi nodes that are considered Junk // - load constant node which may have next data input nodes: // MachTemp, MachSpillCopy // - MachSpillCopy @@ -112,7 +113,9 @@ return (old == orig_old); } else if (old->is_MachTemp()) { return orig_old->is_Con(); - } else if (old->is_Phi() || old->is_MachConstantBase()) { + } else if (old->is_Phi()) { // Junk phi's + return true; + } else if (old->is_MachConstantBase()) { return (orig_old->is_Con() && orig_old->is_MachConstant()); } return false; @@ -522,11 +525,9 @@ u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input } if (u != NodeSentinel) { // Junk Phi. 
Remove - block->remove_node(j--); + phi->replace_by(u); + j -= yank_if_dead(phi, block, &value, &regnd); phi_dex--; - _cfg.unmap_node_from_block(phi); - phi->replace_by(u); - phi->disconnect_inputs(NULL, C); continue; } // Note that if value[pidx] exists, then we merged no new values here diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/reg_split.cpp --- a/src/share/vm/opto/reg_split.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/reg_split.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -51,15 +51,6 @@ static const char out_of_nodes[] = "out of nodes during split"; -static bool contains_no_live_range_input(const Node* def) { - for (uint i = 1; i < def->req(); ++i) { - if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) { - return false; - } - } - return true; -} - //------------------------------get_spillcopy_wide----------------------------- // Get a SpillCopy node with wide-enough masks. Use the 'wide-mask', the // wide ideal-register spill-mask if possible. If the 'wide-mask' does @@ -326,12 +317,11 @@ if( def->req() > 1 ) { for( uint i = 1; i < def->req(); i++ ) { Node *in = def->in(i); - // Check for single-def (LRG cannot redefined) uint lidx = _lrg_map.live_range_id(in); - if (lidx >= _lrg_map.max_lrg_id()) { - continue; // Value is a recent spill-copy - } - if (lrgs(lidx).is_singledef()) { + // We do not need this for live ranges that are only defined once. + // However, this is not true for spill copies that are added in this + // Split() pass, since they might get coalesced later on in this pass. + if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_singledef()) { continue; } @@ -485,7 +475,6 @@ uint bidx, pidx, slidx, insidx, inpidx, twoidx; uint non_phi = 1, spill_cnt = 0; - Node **Reachblock; Node *n1, *n2, *n3; Node_List *defs,*phis; bool *UPblock; @@ -568,7 +557,7 @@ b = _cfg.get_block(bidx); // Reaches & UP arrays for this block - Reachblock = Reaches[b->_pre_order]; + Node** Reachblock = Reaches[b->_pre_order]; UPblock = UP[b->_pre_order]; // Reset counter of start of non-Phi nodes in block non_phi = 1; @@ -1324,9 +1313,10 @@ pidx = pred->_pre_order; // Grab reaching def Node *def = Reaches[pidx][slidx]; + Node** Reachblock = Reaches[pidx]; assert( def, "must have reaching def" ); // If input up/down sense and reg-pressure DISagree - if (def->rematerialize() && contains_no_live_range_input(def)) { + if (def->rematerialize()) { // Place the rematerialized node above any MSCs created during // phi node splitting. end_idx points at the insertion point // so look at the node before it.
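// ---------------------------------------------------------------------------
// Editorial aside -- an illustrative sketch, not part of this changeset. The
// postaloc.cpp hunk above simplifies junk-Phi removal: once a Phi is known to
// have a single unique data input u, phi->replace_by(u) plus yank_if_dead()
// replaces the hand-rolled unmap/disconnect sequence. The standalone program
// below mimics the unique-input capture idiom quoted in that code
// ("u = u ? NodeSentinel : x"). MiniNode, kSentinel and unique_phi_input are
// hypothetical simplifications, not HotSpot types.

#include <cassert>
#include <cstdint>
#include <vector>

struct MiniNode {
  std::vector<MiniNode*> in; // in[0] is control; Phi data inputs start at 1
};

// Stand-in for HotSpot's NodeSentinel: "saw two distinct inputs".
static MiniNode* const kSentinel =
    reinterpret_cast<MiniNode*>(std::uintptr_t(-1));

// Returns the unique data input of 'phi' (ignoring self-references and
// repeats), nullptr when there is none, or kSentinel once a second distinct
// input shows up -- in which case the Phi is not junk and must be kept.
static MiniNode* unique_phi_input(const MiniNode* phi) {
  MiniNode* u = nullptr;
  for (size_t i = 1; i < phi->in.size(); i++) {
    MiniNode* x = phi->in[i];
    if (x != nullptr && x != phi && x != u) {
      u = u ? kSentinel : x; // capture unique input, or kSentinel for 2nd
    }
  }
  return u;
}

int main() {
  MiniNode a, b, phi;
  phi.in = { nullptr, &a, &a, &phi };          // a self-loop plus one value
  assert(unique_phi_input(&phi) == &a);        // junk Phi: collapses to 'a'
  phi.in[2] = &b;                              // now two distinct values
  assert(unique_phi_input(&phi) == kSentinel); // keep the Phi
  return 0;
}
// ---------------------------------------------------------------------------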
diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/runtime.cpp --- a/src/share/vm/opto/runtime.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/runtime.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -138,9 +138,10 @@ #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \ - var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc) + var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \ + if (var == NULL) { return false; } -void OptoRuntime::generate(ciEnv* env) { +bool OptoRuntime::generate(ciEnv* env) { generate_exception_blob(); @@ -158,7 +159,7 @@ gen(env, _multianewarrayN_Java , multianewarrayN_Type , multianewarrayN_C , 0 , true , false, false); gen(env, _g1_wb_pre_Java , g1_wb_pre_Type , SharedRuntime::g1_wb_pre , 0 , false, false, false); gen(env, _g1_wb_post_Java , g1_wb_post_Type , SharedRuntime::g1_wb_post , 0 , false, false, false); - gen(env, _complete_monitor_locking_Java , complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C , 0 , false, false, false); + gen(env, _complete_monitor_locking_Java , complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C, 0, false, false, false); gen(env, _rethrow_Java , rethrow_Type , rethrow_C , 2 , true , false, true ); gen(env, _slow_arraycopy_Java , slow_arraycopy_Type , SharedRuntime::slow_arraycopy_C , 0 , false, false, false); @@ -168,7 +169,7 @@ gen(env, _zap_dead_Java_locals_Java , zap_dead_locals_Type , zap_dead_Java_locals_C , 0 , false, true , false ); gen(env, _zap_dead_native_locals_Java , zap_dead_locals_Type , zap_dead_native_locals_C , 0 , false, true , false ); # endif - + return true; } #undef gen @@ -976,30 +977,36 @@ address handler_address = NULL; Handle exception(thread, thread->exception_oop()); + address pc = thread->exception_pc(); + + // Clear out the exception oop and pc since looking up an + // exception handler can cause class loading, which might throw an + // exception and those fields are expected to be clear during + // normal bytecode execution. 
+ thread->clear_exception_oop_and_pc(); if (TraceExceptions) { - trace_exception(exception(), thread->exception_pc(), ""); + trace_exception(exception(), pc, ""); } + // for AbortVMOnException flag NOT_PRODUCT(Exceptions::debug_check_abort(exception)); - #ifdef ASSERT - if (!(exception->is_a(SystemDictionary::Throwable_klass()))) { - // should throw an exception here - ShouldNotReachHere(); - } - #endif - +#ifdef ASSERT + if (!(exception->is_a(SystemDictionary::Throwable_klass()))) { + // should throw an exception here + ShouldNotReachHere(); + } +#endif // new exception handling: this method is entered only from adapters // exceptions from compiled java methods are handled in compiled code // using rethrow node - address pc = thread->exception_pc(); nm = CodeCache::find_nmethod(pc); assert(nm != NULL, "No NMethod found"); if (nm->is_native_method()) { - fatal("Native mathod should not have path to exception handling"); + fatal("Native method should not have path to exception handling"); } else { // we are switching to old paradigm: search for exception handler in caller_frame // instead in exception handler of caller_frame.sender() @@ -1346,7 +1353,8 @@ tty->print(" in "); CodeBlob* blob = CodeCache::find_blob(exception_pc); if (blob->is_nmethod()) { - ((nmethod*)blob)->method()->print_value(); + nmethod* nm = blob->as_nmethod_or_null(); + nm->method()->print_value(); } else if (blob->is_runtime_stub()) { tty->print("<runtime-stub>"); } else { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/runtime.hpp --- a/src/share/vm/opto/runtime.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/runtime.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -203,8 +203,10 @@ static bool is_callee_saved_register(MachRegisterNumbers reg); - // One time only generate runtime code stubs - static void generate(ciEnv* env); + // One time only generate runtime code stubs. Returns true + // when runtime stubs have been generated successfully and + // false otherwise. + static bool generate(ciEnv* env); // Returns the name of a stub static const char* stub_name(address entry); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/stringopts.cpp --- a/src/share/vm/opto/stringopts.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/stringopts.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,10 +50,11 @@ Node* _arguments; // The list of arguments to be concatenated GrowableArray<int> _mode; // into a String along with a mode flag // indicating how to treat the value. - + Node_List _constructors; // List of constructors (many in case of stacked concat) Node_List _control; // List of control nodes that will be deleted Node_List _uncommon_traps; // Uncommon traps that needs to be rewritten // to restart at the initial JVMState.
+ public: // Mode for converting arguments to Strings enum { @@ -73,6 +74,7 @@ _arguments->del_req(0); } + bool validate_mem_flow(); bool validate_control_flow(); void merge_add() { @@ -189,6 +191,10 @@ assert(!_control.contains(ctrl), "only push once"); _control.push(ctrl); } + void add_constructor(Node* init) { + assert(!_constructors.contains(init), "only push once"); + _constructors.push(init); + } CallStaticJavaNode* end() { return _end; } AllocateNode* begin() { return _begin; } Node* string_alloc() { return _string_alloc; } @@ -301,6 +307,12 @@ } } result->set_allocation(other->_begin); + for (uint i = 0; i < _constructors.size(); i++) { + result->add_constructor(_constructors.at(i)); + } + for (uint i = 0; i < other->_constructors.size(); i++) { + result->add_constructor(other->_constructors.at(i)); + } result->_multiple = true; return result; } @@ -510,7 +522,8 @@ sc->add_control(constructor); sc->add_control(alloc); sc->set_allocation(alloc); - if (sc->validate_control_flow()) { + sc->add_constructor(constructor); + if (sc->validate_control_flow() && sc->validate_mem_flow()) { return sc; } else { return NULL; @@ -620,7 +633,7 @@ #endif StringConcat* merged = sc->merge(other, arg); - if (merged->validate_control_flow()) { + if (merged->validate_control_flow() && merged->validate_mem_flow()) { #ifndef PRODUCT if (PrintOptimizeStringConcat) { tty->print_cr("stacking would succeed"); @@ -708,6 +721,139 @@ } +bool StringConcat::validate_mem_flow() { + Compile* C = _stringopts->C; + + for (uint i = 0; i < _control.size(); i++) { +#ifndef PRODUCT + Node_List path; +#endif + Node* curr = _control.at(i); + if (curr->is_Call() && curr != _begin) { // For all calls except the first allocation + // Now here's the main invariant in our case: + // For memory between the constructor, and appends, and toString we should only see bottom memory, + // produced by the previous call we know about. + if (!_constructors.contains(curr)) { + NOT_PRODUCT(path.push(curr);) + Node* mem = curr->in(TypeFunc::Memory); + assert(mem != NULL, "calls should have memory edge"); + assert(!mem->is_Phi(), "should be handled by control flow validation"); + NOT_PRODUCT(path.push(mem);) + while (mem->is_MergeMem()) { + for (uint i = 1; i < mem->req(); i++) { + if (i != Compile::AliasIdxBot && mem->in(i) != C->top()) { +#ifndef PRODUCT + if (PrintOptimizeStringConcat) { + tty->print("fusion has incorrect memory flow (side effects) for "); + _begin->jvms()->dump_spec(tty); tty->cr(); + path.dump(); + } +#endif + return false; + } + } + // skip through a potential MergeMem chain, linked through Bot + mem = mem->in(Compile::AliasIdxBot); + NOT_PRODUCT(path.push(mem);) + } + // now let it fall through, and see if we have a projection + if (mem->is_Proj()) { + // Should point to a previous known call + Node *prev = mem->in(0); + NOT_PRODUCT(path.push(prev);) + if (!prev->is_Call() || !_control.contains(prev)) { +#ifndef PRODUCT + if (PrintOptimizeStringConcat) { + tty->print("fusion has incorrect memory flow (unknown call) for "); + _begin->jvms()->dump_spec(tty); tty->cr(); + path.dump(); + } +#endif + return false; + } + } else { + assert(mem->is_Store() || mem->is_LoadStore(), err_msg_res("unexpected node type: %s", mem->Name())); +#ifndef PRODUCT + if (PrintOptimizeStringConcat) { + tty->print("fusion has incorrect memory flow (unexpected source) for "); + _begin->jvms()->dump_spec(tty); tty->cr(); + path.dump(); + } +#endif + return false; + } + } else { + // For memory that feeds into constructors it's more complicated. 
+ // However the advantage is that any side effect that happens between the Allocate/Initialize and + // the constructor will have to be control-dependent on Initialize. + // So we actually don't have to do anything, since it's going to be caught by the control flow + // analysis. +#ifdef ASSERT + // Do a quick verification of the control pattern between the constructor and the initialize node + assert(curr->is_Call(), "constructor should be a call"); + // Go up the control starting from the constructor call + Node* ctrl = curr->in(0); + IfNode* iff = NULL; + RegionNode* copy = NULL; + + while (true) { + // skip known check patterns + if (ctrl->is_Region()) { + if (ctrl->as_Region()->is_copy()) { + copy = ctrl->as_Region(); + ctrl = copy->is_copy(); + } else { // a cast + assert(ctrl->req() == 3 && + ctrl->in(1) != NULL && ctrl->in(1)->is_Proj() && + ctrl->in(2) != NULL && ctrl->in(2)->is_Proj() && + ctrl->in(1)->in(0) == ctrl->in(2)->in(0) && + ctrl->in(1)->in(0) != NULL && ctrl->in(1)->in(0)->is_If(), + "must be a simple diamond"); + Node* true_proj = ctrl->in(1)->is_IfTrue() ? ctrl->in(1) : ctrl->in(2); + for (SimpleDUIterator i(true_proj); i.has_next(); i.next()) { + Node* use = i.get(); + assert(use == ctrl || use->is_ConstraintCast(), + err_msg_res("unexpected user: %s", use->Name())); + } + + iff = ctrl->in(1)->in(0)->as_If(); + ctrl = iff->in(0); + } + } else if (ctrl->is_IfTrue()) { // null checks, class checks + iff = ctrl->in(0)->as_If(); + assert(iff->is_If(), "must be if"); + // Verify that the other arm is an uncommon trap + Node* otherproj = iff->proj_out(1 - ctrl->as_Proj()->_con); + CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava(); + assert(strcmp(call->_name, "uncommon_trap") == 0, "must be uncommon trap"); + ctrl = iff->in(0); + } else { + break; + } + } + + assert(ctrl->is_Proj(), "must be a projection"); + assert(ctrl->in(0)->is_Initialize(), "should be initialize"); + for (SimpleDUIterator i(ctrl); i.has_next(); i.next()) { + Node* use = i.get(); + assert(use == copy || use == iff || use == curr || use->is_CheckCastPP() || use->is_Load(), + err_msg_res("unexpected user: %s", use->Name())); + } +#endif // ASSERT + } + } + } + +#ifndef PRODUCT + if (PrintOptimizeStringConcat) { + tty->print("fusion has correct memory flow for "); + _begin->jvms()->dump_spec(tty); tty->cr(); + tty->cr(); + } +#endif + return true; +} + bool StringConcat::validate_control_flow() { // We found all the calls and arguments now lets see if it's // safe to transform the graph as we would expect.
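// ---------------------------------------------------------------------------
// Editorial aside -- an illustrative sketch, not part of this changeset.
// validate_mem_flow() above accepts a candidate concat fusion only when the
// memory reaching each call comes straight from the previous call it already
// knows about: it walks down a possible MergeMem chain linked through the
// bottom alias index, rejects any non-top narrow memory slice (a side
// effect), and finally requires a memory Proj hanging off a known call. The
// toy program below restates that walk; ToyNode, kAliasIdxBot and nullptr
// standing in for C->top() are hypothetical simplifications of the HotSpot
// graph, not its real API.

#include <cassert>
#include <vector>

enum Kind { kCall, kProj, kMergeMem, kStore };

static const size_t kAliasIdxBot = 1; // stand-in for Compile::AliasIdxBot

struct ToyNode {
  Kind kind;
  std::vector<ToyNode*> in; // in[0]: producer; MergeMem slices follow
};

// True when 'mem' reaches a projection of one of 'known_calls' while
// touching nothing but bottom memory on the way.
static bool mem_from_known_call(ToyNode* mem,
                                const std::vector<ToyNode*>& known_calls) {
  while (mem->kind == kMergeMem) {
    for (size_t i = 1; i < mem->in.size(); i++) {
      if (i != kAliasIdxBot && mem->in[i] != nullptr) {
        return false; // a narrow memory slice was touched: side effects
      }
    }
    mem = mem->in[kAliasIdxBot]; // follow the MergeMem chain through Bot
  }
  if (mem->kind != kProj) {
    return false; // e.g. a raw Store feeding the call: unexpected source
  }
  ToyNode* producer = mem->in[0];
  for (ToyNode* call : known_calls) {
    if (call == producer) return true; // a previous call we know about
  }
  return false; // memory produced by an unknown call
}

int main() {
  ToyNode call  = { kCall, {} };
  ToyNode proj  = { kProj, { &call } };
  ToyNode merge = { kMergeMem, { nullptr, &proj, nullptr } }; // only Bot set
  assert(mem_from_known_call(&merge, { &call }));  // clean flow: accepted
  ToyNode store = { kStore, { nullptr } };
  assert(!mem_from_known_call(&store, { &call })); // side effect: rejected
  return 0;
}
// ---------------------------------------------------------------------------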
@@ -753,7 +899,7 @@ } } - // Skip backwards through the control checking for unexpected contro flow + // Skip backwards through the control checking for unexpected control flow Node* ptr = _end; bool fail = false; while (ptr != _begin) { @@ -936,7 +1082,7 @@ if (PrintOptimizeStringConcat && !fail) { ttyLocker ttyl; tty->cr(); - tty->print("fusion would succeed (%d %d) for ", null_check_count, _uncommon_traps.size()); + tty->print("fusion has correct control flow (%d %d) for ", null_check_count, _uncommon_traps.size()); _begin->jvms()->dump_spec(tty); tty->cr(); for (int i = 0; i < num_arguments(); i++) { argument(i)->dump(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/type.cpp --- a/src/share/vm/opto/type.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/type.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -358,7 +358,7 @@ false, 0, oopDesc::mark_offset_in_bytes()); TypeInstPtr::KLASS = TypeInstPtr::make(TypePtr::BotPTR, current->env()->Object_klass(), false, 0, oopDesc::klass_offset_in_bytes()); - TypeOopPtr::BOTTOM = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot, TypeOopPtr::InstanceBot); + TypeOopPtr::BOTTOM = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot, TypeOopPtr::InstanceBot, NULL); TypeMetadataPtr::BOTTOM = TypeMetadataPtr::make(TypePtr::BotPTR, NULL, OffsetBot); @@ -435,6 +435,11 @@ intccpair[1] = TypeInt::CC; TypeTuple::INT_CC_PAIR = TypeTuple::make(2, intccpair); + const Type **longccpair = TypeTuple::fields(2); + longccpair[0] = TypeLong::LONG; + longccpair[1] = TypeInt::CC; + TypeTuple::LONG_CC_PAIR = TypeTuple::make(2, longccpair); + _const_basic_type[T_NARROWOOP] = TypeNarrowOop::BOTTOM; _const_basic_type[T_NARROWKLASS] = Type::BOTTOM; _const_basic_type[T_BOOLEAN] = TypeInt::BOOL; @@ -577,7 +582,7 @@ //----------------------interface_vs_oop--------------------------------------- #ifdef ASSERT -bool Type::interface_vs_oop(const Type *t) const { +bool Type::interface_vs_oop_helper(const Type *t) const { bool result = false; const TypePtr* this_ptr = this->make_ptr(); // In case it is narrow_oop @@ -595,6 +600,29 @@ return result; } + +bool Type::interface_vs_oop(const Type *t) const { + if (interface_vs_oop_helper(t)) { + return true; + } + // Now check the speculative parts as well + const TypeOopPtr* this_spec = isa_oopptr() != NULL ? isa_oopptr()->speculative() : NULL; + const TypeOopPtr* t_spec = t->isa_oopptr() != NULL ? 
t->isa_oopptr()->speculative() : NULL; + if (this_spec != NULL && t_spec != NULL) { + if (this_spec->interface_vs_oop_helper(t_spec)) { + return true; + } + return false; + } + if (this_spec != NULL && this_spec->interface_vs_oop_helper(t)) { + return true; + } + if (t_spec != NULL && interface_vs_oop_helper(t_spec)) { + return true; + } + return false; +} + #endif //------------------------------meet------------------------------------------- @@ -1652,6 +1680,7 @@ const TypeTuple *TypeTuple::INT_PAIR; const TypeTuple *TypeTuple::LONG_PAIR; const TypeTuple *TypeTuple::INT_CC_PAIR; +const TypeTuple *TypeTuple::LONG_CC_PAIR; //------------------------------make------------------------------------------- @@ -2407,14 +2436,15 @@ const TypeOopPtr *TypeOopPtr::BOTTOM; //------------------------------TypeOopPtr------------------------------------- -TypeOopPtr::TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ) +TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative) : TypePtr(t, ptr, offset), _const_oop(o), _klass(k), _klass_is_exact(xk), _is_ptr_to_narrowoop(false), _is_ptr_to_narrowklass(false), _is_ptr_to_boxed_value(false), - _instance_id(instance_id) { + _instance_id(instance_id), + _speculative(speculative) { if (Compile::current()->eliminate_boxing() && (t == InstPtr) && (offset > 0) && xk && (k != 0) && k->is_instance_klass()) { _is_ptr_to_boxed_value = k->as_instance_klass()->is_boxed_value_offset(offset); @@ -2481,12 +2511,12 @@ //------------------------------make------------------------------------------- const TypeOopPtr *TypeOopPtr::make(PTR ptr, - int offset, int instance_id) { + int offset, int instance_id, const TypeOopPtr* speculative) { assert(ptr != Constant, "no constant generic pointers"); ciKlass* k = Compile::current()->env()->Object_klass(); bool xk = false; ciObject* o = NULL; - return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, instance_id))->hashcons(); + return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, instance_id, speculative))->hashcons(); } @@ -2494,7 +2524,7 @@ const Type *TypeOopPtr::cast_to_ptr_type(PTR ptr) const { assert(_base == OopPtr, "subclass must override cast_to_ptr_type"); if( ptr == _ptr ) return this; - return make(ptr, _offset, _instance_id); + return make(ptr, _offset, _instance_id, _speculative); } //-----------------------------cast_to_instance_id---------------------------- @@ -2524,10 +2554,31 @@ return TypeKlassPtr::make(xk? Constant: NotNull, k, 0); } +const Type *TypeOopPtr::xmeet(const Type *t) const { + const Type* res = xmeet_helper(t); + if (res->isa_oopptr() == NULL) { + return res; + } + + if (res->isa_oopptr() != NULL) { + // type->speculative() == NULL means that speculation is no better + // than type, i.e. type->speculative() == type. So there are 2 + // ways to represent the fact that we have no useful speculative + // data and we should use a single one to be able to test for + // equality between types. Check whether type->speculative() == + // type and set speculative to NULL if it is the case. + const TypeOopPtr* res_oopptr = res->is_oopptr(); + if (res_oopptr->remove_speculative() == res_oopptr->speculative()) { + return res_oopptr->remove_speculative(); + } + } + + return res; +} //------------------------------meet------------------------------------------- // Compute the MEET of two types. It returns a new Type object. 
-const Type *TypeOopPtr::xmeet( const Type *t ) const { +const Type *TypeOopPtr::xmeet_helper(const Type *t) const { // Perform a fast test for common case; meeting the same types together. if( this == t ) return this; // Meeting same type-rep? @@ -2569,7 +2620,8 @@ case TopPTR: case AnyNull: { int instance_id = meet_instance_id(InstanceTop); - return make(ptr, offset, instance_id); + const TypeOopPtr* speculative = _speculative; + return make(ptr, offset, instance_id, speculative); } case BotPTR: case NotNull: @@ -2581,7 +2633,8 @@ case OopPtr: { // Meeting to other OopPtrs const TypeOopPtr *tp = t->is_oopptr(); int instance_id = meet_instance_id(tp->instance_id()); - return make( meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id ); + const TypeOopPtr* speculative = meet_speculative(tp); + return make(meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id, speculative); } case InstPtr: // For these, flip the call around to cut down @@ -2598,7 +2651,7 @@ const Type *TypeOopPtr::xdual() const { assert(klass() == Compile::current()->env()->Object_klass(), "no klasses here"); assert(const_oop() == NULL, "no constants here"); - return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id() ); + return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative()); } //--------------------------make_from_klass_common----------------------------- @@ -2689,7 +2742,7 @@ } else if (!o->should_be_constant()) { return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0); } - const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0, InstanceBot, is_autobox_cache); + const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0, InstanceBot, NULL, is_autobox_cache); return arr; } else if (klass->is_type_array_klass()) { // Element is an typeArray @@ -2734,13 +2787,11 @@ //-----------------------------filter------------------------------------------ // Do not allow interface-vs.-noninterface joins to collapse to top. -const Type *TypeOopPtr::filter( const Type *kills ) const { +const Type *TypeOopPtr::filter(const Type *kills) const { const Type* ft = join(kills); const TypeInstPtr* ftip = ft->isa_instptr(); const TypeInstPtr* ktip = kills->isa_instptr(); - const TypeKlassPtr* ftkp = ft->isa_klassptr(); - const TypeKlassPtr* ktkp = kills->isa_klassptr(); if (ft->empty()) { // Check for evil case of 'this' being a class and 'kills' expecting an @@ -2754,8 +2805,6 @@ // uplift the type. if (!empty() && ktip != NULL && ktip->is_loaded() && ktip->klass()->is_interface()) return kills; // Uplift to interface - if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface()) - return kills; // Uplift to interface return Type::TOP; // Canonical empty value } @@ -2772,14 +2821,6 @@ assert(!ftip->klass_is_exact(), "interface could not be exact"); return ktip->cast_to_ptr_type(ftip->ptr()); } - // Interface klass type could be exact in opposite to interface type, - // return it here instead of incorrect Constant ptr J/L/Object (6894807). 
- if (ftkp != NULL && ktkp != NULL && - ftkp->is_loaded() && ftkp->klass()->is_interface() && - !ftkp->klass_is_exact() && // Keep exact interface klass - ktkp->is_loaded() && !ktkp->klass()->is_interface()) { - return ktkp->cast_to_ptr_type(ftkp->ptr()); - } return ft; } @@ -2789,7 +2830,8 @@ bool TypeOopPtr::eq( const Type *t ) const { const TypeOopPtr *a = (const TypeOopPtr*)t; if (_klass_is_exact != a->_klass_is_exact || - _instance_id != a->_instance_id) return false; + _instance_id != a->_instance_id || + !eq_speculative(a)) return false; ciObject* one = const_oop(); ciObject* two = a->const_oop(); if (one == NULL || two == NULL) { @@ -2806,6 +2848,7 @@ (const_oop() ? const_oop()->hash() : 0) + _klass_is_exact + _instance_id + + hash_speculative() + TypePtr::hash(); } @@ -2825,6 +2868,19 @@ st->print(",iid=top"); else if (_instance_id != InstanceBot) st->print(",iid=%d",_instance_id); + + dump_speculative(st); +} + +/** + *dump the speculative part of the type + */ +void TypeOopPtr::dump_speculative(outputStream *st) const { + if (_speculative != NULL) { + st->print(" (speculative="); + _speculative->dump_on(st); + st->print(")"); + } } #endif @@ -2838,8 +2894,15 @@ } //------------------------------add_offset------------------------------------- -const TypePtr *TypeOopPtr::add_offset( intptr_t offset ) const { - return make( _ptr, xadd_offset(offset), _instance_id); +const TypePtr *TypeOopPtr::add_offset(intptr_t offset) const { + return make(_ptr, xadd_offset(offset), _instance_id, add_offset_speculative(offset)); +} + +/** + * Return same type without a speculative part + */ +const TypeOopPtr* TypeOopPtr::remove_speculative() const { + return make(_ptr, _offset, _instance_id, NULL); } //------------------------------meet_instance_id-------------------------------- @@ -2859,6 +2922,89 @@ return _instance_id; // Map everything else into self } +/** + * meet of the speculative parts of 2 types + * + * @param other type to meet with + */ +const TypeOopPtr* TypeOopPtr::meet_speculative(const TypeOopPtr* other) const { + bool this_has_spec = (_speculative != NULL); + bool other_has_spec = (other->speculative() != NULL); + + if (!this_has_spec && !other_has_spec) { + return NULL; + } + + // If we are at a point where control flow meets and one branch has + // a speculative type and the other has not, we meet the speculative + // type of one branch with the actual type of the other. If the + // actual type is exact and the speculative is as well, then the + // result is a speculative type which is exact and we can continue + // speculation further. + const TypeOopPtr* this_spec = _speculative; + const TypeOopPtr* other_spec = other->speculative(); + + if (!this_has_spec) { + this_spec = this; + } + + if (!other_has_spec) { + other_spec = other; + } + + return this_spec->meet(other_spec)->is_oopptr(); +} + +/** + * dual of the speculative part of the type + */ +const TypeOopPtr* TypeOopPtr::dual_speculative() const { + if (_speculative == NULL) { + return NULL; + } + return _speculative->dual()->is_oopptr(); +} + +/** + * add offset to the speculative part of the type + * + * @param offset offset to add + */ +const TypeOopPtr* TypeOopPtr::add_offset_speculative(intptr_t offset) const { + if (_speculative == NULL) { + return NULL; + } + return _speculative->add_offset(offset)->is_oopptr(); +} + +/** + * Are the speculative parts of 2 types equal? 
+ * + * @param other type to compare this one to + */ +bool TypeOopPtr::eq_speculative(const TypeOopPtr* other) const { + if (_speculative == NULL || other->speculative() == NULL) { + return _speculative == other->speculative(); + } + + if (_speculative->base() != other->speculative()->base()) { + return false; + } + + return _speculative->eq(other->speculative()); +} + +/** + * Hash of the speculative part of the type + */ +int TypeOopPtr::hash_speculative() const { + if (_speculative == NULL) { + return 0; + } + + return _speculative->hash(); +} + //============================================================================= // Convenience common pre-built types. @@ -2869,8 +3015,8 @@ const TypeInstPtr *TypeInstPtr::KLASS; //------------------------------TypeInstPtr------------------------------------- -TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, int instance_id) - : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id), _name(k->name()) { +TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, int instance_id, const TypeOopPtr* speculative) + : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id, speculative), _name(k->name()) { assert(k != NULL && (k->is_loaded() || o == NULL), "cannot have constants with non-loaded klass"); @@ -2882,7 +3028,8 @@ bool xk, ciObject* o, int offset, - int instance_id) { + int instance_id, + const TypeOopPtr* speculative) { assert( !k->is_loaded() || k->is_instance_klass(), "Must be for instance"); // Either const_oop() is NULL or else ptr is Constant assert( (!o && ptr != Constant) || (o && ptr == Constant), @@ -2903,7 +3050,7 @@ // Now hash this baby TypeInstPtr *result = - (TypeInstPtr*)(new TypeInstPtr(ptr, k, xk, o ,offset, instance_id))->hashcons(); + (TypeInstPtr*)(new TypeInstPtr(ptr, k, xk, o ,offset, instance_id, speculative))->hashcons(); return result; } @@ -2936,7 +3083,7 @@ if( ptr == _ptr ) return this; // Reconstruct _sig info here since not a problem with later lazy // construction, _sig will show up on demand. - return make(ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id); + return make(ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, _speculative); } @@ -2948,13 +3095,13 @@ ciInstanceKlass* ik = _klass->as_instance_klass(); if( (ik->is_final() || _const_oop) ) return this; // cannot clear xk if( ik->is_interface() ) return this; // cannot set xk - return make(ptr(), klass(), klass_is_exact, const_oop(), _offset, _instance_id); + return make(ptr(), klass(), klass_is_exact, const_oop(), _offset, _instance_id, _speculative); } //-----------------------------cast_to_instance_id---------------------------- const TypeOopPtr *TypeInstPtr::cast_to_instance_id(int instance_id) const { if( instance_id == _instance_id ) return this; - return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id); + return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id, _speculative); } //------------------------------xmeet_unloaded--------------------------------- @@ -2964,6 +3111,7 @@ int off = meet_offset(tinst->offset()); PTR ptr = meet_ptr(tinst->ptr()); int instance_id = meet_instance_id(tinst->instance_id()); + const TypeOopPtr* speculative = meet_speculative(tinst); const TypeInstPtr *loaded = is_loaded() ? this : tinst; const TypeInstPtr *unloaded = is_loaded() ? 
tinst : this; @@ -2984,7 +3132,7 @@ assert(loaded->ptr() != TypePtr::Null, "insanity check"); // if( loaded->ptr() == TypePtr::TopPTR ) { return unloaded; } - else if (loaded->ptr() == TypePtr::AnyNull) { return TypeInstPtr::make( ptr, unloaded->klass(), false, NULL, off, instance_id ); } + else if (loaded->ptr() == TypePtr::AnyNull) { return TypeInstPtr::make(ptr, unloaded->klass(), false, NULL, off, instance_id, speculative); } else if (loaded->ptr() == TypePtr::BotPTR ) { return TypeInstPtr::BOTTOM; } else if (loaded->ptr() == TypePtr::Constant || loaded->ptr() == TypePtr::NotNull) { if (unloaded->ptr() == TypePtr::BotPTR ) { return TypeInstPtr::BOTTOM; } @@ -3006,7 +3154,7 @@ //------------------------------meet------------------------------------------- // Compute the MEET of two types. It returns a new Type object. -const Type *TypeInstPtr::xmeet( const Type *t ) const { +const Type *TypeInstPtr::xmeet_helper(const Type *t) const { // Perform a fast test for common case; meeting the same types together. if( this == t ) return this; // Meeting same type-rep? @@ -3040,16 +3188,20 @@ int offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); int instance_id = meet_instance_id(tp->instance_id()); + const TypeOopPtr* speculative = meet_speculative(tp); switch (ptr) { case TopPTR: case AnyNull: // Fall 'down' to dual of object klass - if (klass()->equals(ciEnv::current()->Object_klass())) { - return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id); + // For instances when a subclass meets a superclass we fall + // below the centerline when the superclass is exact. We need to + // do the same here. + if (klass()->equals(ciEnv::current()->Object_klass()) && !klass_is_exact()) { + return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative); } else { // cannot subclass, so the meet has to fall badly below the centerline ptr = NotNull; instance_id = InstanceBot; - return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id); + return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative); } case Constant: case NotNull: @@ -3058,10 +3210,13 @@ if( above_centerline(_ptr) ) { // if( _ptr == TopPTR || _ptr == AnyNull ) // If 'this' (InstPtr) is above the centerline and it is Object class // then we can subclass in the Java class hierarchy. - if (klass()->equals(ciEnv::current()->Object_klass())) { + // For instances when a subclass meets a superclass we fall + // below the centerline when the superclass is exact. We need + // to do the same here. + if (klass()->equals(ciEnv::current()->Object_klass()) && !klass_is_exact()) { // that is, tp's array type is a subtype of my klass return TypeAryPtr::make(ptr, (ptr == Constant ? tp->const_oop() : NULL), - tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id); + tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative); } } // The other case cannot happen, since I cannot be a subtype of an array. 
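// ---------------------------------------------------------------------------
// Editorial aside -- an illustrative sketch, not part of this changeset. Two
// rules recur in the speculative-type machinery added above: (1) when control
// flow merges and only one branch carries a speculative type,
// meet_speculative() meets the speculative part of one side with the *actual*
// type of the other, so profile info survives the merge; (2)
// TypeOopPtr::xmeet() canonicalizes a speculative part that says nothing more
// than the type itself to NULL, so equal types stay equal under eq()/hash().
// The toy program below restates both rules over a two-level lattice;
// ToyType, toy_meet and meet_spec are hypothetical simplifications, not the
// HotSpot type lattice.

#include <cassert>
#include <cstring>

struct ToyType {
  const char* klass;   // e.g. "Object" or "String"
  const ToyType* spec; // speculative part; nullptr = nothing useful known
};

static const ToyType OBJ = { "Object", nullptr };
static const ToyType STR = { "String", nullptr };

// Toy meet: equal classes meet to themselves, anything else widens to Object.
static const ToyType* toy_meet(const ToyType* a, const ToyType* b) {
  return (strcmp(a->klass, b->klass) == 0) ? a : &OBJ;
}

// Meet of the speculative parts, with the NULL canonicalization applied.
static const ToyType* meet_spec(const ToyType& a, const ToyType& b) {
  if (a.spec == nullptr && b.spec == nullptr) return nullptr;
  const ToyType* as = a.spec ? a.spec : &a; // rule (1): fall back to the
  const ToyType* bs = b.spec ? b.spec : &b; // actual type of that branch
  const ToyType* s = toy_meet(as, bs);
  if (strcmp(s->klass, toy_meet(&a, &b)->klass) == 0) {
    return nullptr; // rule (2): speculation adds nothing over the actual type
  }
  return s;
}

int main() {
  ToyType profiled  = { "Object", &STR };    // profiling saw only Strings
  ToyType exact_str = { "String", nullptr }; // other branch: statically String
  ToyType plain_obj = { "Object", nullptr }; // other branch: knows nothing
  assert(meet_spec(profiled, exact_str) == &STR);    // speculation survives
  assert(meet_spec(profiled, plain_obj) == nullptr); // widened away: dropped
  return 0;
}
// ---------------------------------------------------------------------------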
@@ -3069,7 +3224,7 @@ if( ptr == Constant ) ptr = NotNull; instance_id = InstanceBot; - return make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id ); + return make(ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative); default: typerr(t); } } @@ -3083,13 +3238,15 @@ case TopPTR: case AnyNull: { int instance_id = meet_instance_id(InstanceTop); + const TypeOopPtr* speculative = meet_speculative(tp); return make(ptr, klass(), klass_is_exact(), - (ptr == Constant ? const_oop() : NULL), offset, instance_id); + (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative); } case NotNull: case BotPTR: { int instance_id = meet_instance_id(tp->instance_id()); - return TypeOopPtr::make(ptr, offset, instance_id); + const TypeOopPtr* speculative = meet_speculative(tp); + return TypeOopPtr::make(ptr, offset, instance_id, speculative); } default: typerr(t); } @@ -3102,17 +3259,18 @@ PTR ptr = meet_ptr(tp->ptr()); switch (tp->ptr()) { case Null: - if( ptr == Null ) return TypePtr::make( AnyPtr, ptr, offset ); + if( ptr == Null ) return TypePtr::make(AnyPtr, ptr, offset); // else fall through to AnyNull case TopPTR: case AnyNull: { int instance_id = meet_instance_id(InstanceTop); - return make( ptr, klass(), klass_is_exact(), - (ptr == Constant ? const_oop() : NULL), offset, instance_id); + const TypeOopPtr* speculative = _speculative; + return make(ptr, klass(), klass_is_exact(), + (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative); } case NotNull: case BotPTR: - return TypePtr::make( AnyPtr, ptr, offset ); + return TypePtr::make(AnyPtr, ptr, offset); default: typerr(t); } } @@ -3139,13 +3297,14 @@ int off = meet_offset( tinst->offset() ); PTR ptr = meet_ptr( tinst->ptr() ); int instance_id = meet_instance_id(tinst->instance_id()); + const TypeOopPtr* speculative = meet_speculative(tinst); // Check for easy case; klasses are equal (and perhaps not loaded!) // If we have constants, then we created oops so classes are loaded // and we can handle the constants further down. This case handles // both-not-loaded or both-loaded classes if (ptr != Constant && klass()->equals(tinst->klass()) && klass_is_exact() == tinst->klass_is_exact()) { - return make( ptr, klass(), klass_is_exact(), NULL, off, instance_id ); + return make(ptr, klass(), klass_is_exact(), NULL, off, instance_id, speculative); } // Classes require inspection in the Java klass hierarchy. Must be loaded. @@ -3167,7 +3326,8 @@ } // Handle mixing oops and interfaces first. - if( this_klass->is_interface() && !tinst_klass->is_interface() ) { + if( this_klass->is_interface() && !(tinst_klass->is_interface() || + tinst_klass == ciEnv::current()->Object_klass())) { ciKlass *tmp = tinst_klass; // Swap interface around tinst_klass = this_klass; this_klass = tmp; @@ -3208,7 +3368,7 @@ // Find out which constant. o = (this_klass == klass()) ? 
const_oop() : tinst->const_oop(); } - return make( ptr, k, xk, o, off, instance_id ); + return make(ptr, k, xk, o, off, instance_id, speculative); } // Either oop vs oop or interface vs interface or interface vs Object @@ -3285,7 +3445,7 @@ else ptr = NotNull; } - return make( ptr, this_klass, this_xk, o, off, instance_id ); + return make(ptr, this_klass, this_xk, o, off, instance_id, speculative); } // Else classes are not equal // Since klasses are different, we require a LCA in the Java @@ -3296,7 +3456,7 @@ // Now we find the LCA of Java classes ciKlass* k = this_klass->least_common_ancestor(tinst_klass); - return make( ptr, k, false, NULL, off, instance_id ); + return make(ptr, k, false, NULL, off, instance_id, speculative); } // End of case InstPtr } // End of switch @@ -3320,7 +3480,7 @@ // Dual: do NOT dual on klasses. This means I do NOT understand the Java // inheritance mechanism. const Type *TypeInstPtr::xdual() const { - return new TypeInstPtr( dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id() ); + return new TypeInstPtr(dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative()); } //------------------------------eq--------------------------------------------- @@ -3376,12 +3536,18 @@ st->print(",iid=top"); else if (_instance_id != InstanceBot) st->print(",iid=%d",_instance_id); + + dump_speculative(st); } #endif //------------------------------add_offset------------------------------------- -const TypePtr *TypeInstPtr::add_offset( intptr_t offset ) const { - return make( _ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id ); +const TypePtr *TypeInstPtr::add_offset(intptr_t offset) const { + return make(_ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id, add_offset_speculative(offset)); +} + +const TypeOopPtr *TypeInstPtr::remove_speculative() const { + return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, NULL); } //============================================================================= @@ -3398,30 +3564,30 @@ const TypeAryPtr *TypeAryPtr::DOUBLES; //------------------------------make------------------------------------------- -const TypeAryPtr *TypeAryPtr::make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) { +const TypeAryPtr *TypeAryPtr::make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative) { assert(!(k == NULL && ary->_elem->isa_int()), "integral arrays must be pre-equipped with a class"); if (!xk) xk = ary->ary_must_be_exact(); assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); if (!UseExactTypes) xk = (ptr == Constant); - return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false))->hashcons(); + return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false, speculative))->hashcons(); } //------------------------------make------------------------------------------- -const TypeAryPtr *TypeAryPtr::make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, bool is_autobox_cache) { +const TypeAryPtr *TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative, bool is_autobox_cache) { assert(!(k == NULL && ary->_elem->isa_int()), "integral arrays must be pre-equipped with a class"); assert( (ptr==Constant && 
o) || (ptr!=Constant && !o), "" ); if (!xk) xk = (o != NULL) || ary->ary_must_be_exact(); assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); if (!UseExactTypes) xk = (ptr == Constant); - return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache))->hashcons(); + return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache, speculative))->hashcons(); } //------------------------------cast_to_ptr_type------------------------------- const Type *TypeAryPtr::cast_to_ptr_type(PTR ptr) const { if( ptr == _ptr ) return this; - return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset, _instance_id); + return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative); } @@ -3430,13 +3596,13 @@ if( klass_is_exact == _klass_is_exact ) return this; if (!UseExactTypes) return this; if (_ary->ary_must_be_exact()) return this; // cannot clear xk - return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _instance_id); + return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _instance_id, _speculative); } //-----------------------------cast_to_instance_id---------------------------- const TypeOopPtr *TypeAryPtr::cast_to_instance_id(int instance_id) const { if( instance_id == _instance_id ) return this; - return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id); + return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id, _speculative); } //-----------------------------narrow_size_type------------------------------- @@ -3499,7 +3665,7 @@ new_size = narrow_size_type(new_size); if (new_size == size()) return this; const TypeAry* new_ary = TypeAry::make(elem(), new_size, is_stable()); - return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id); + return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative); } @@ -3548,7 +3714,7 @@ //------------------------------meet------------------------------------------- // Compute the MEET of two types. It returns a new Type object. -const Type *TypeAryPtr::xmeet( const Type *t ) const { +const Type *TypeAryPtr::xmeet_helper(const Type *t) const { // Perform a fast test for common case; meeting the same types together. if( this == t ) return this; // Meeting same type-rep? // Current "this->_base" is Pointer @@ -3582,13 +3748,15 @@ case TopPTR: case AnyNull: { int instance_id = meet_instance_id(InstanceTop); + const TypeOopPtr* speculative = meet_speculative(tp); return make(ptr, (ptr == Constant ? const_oop() : NULL), - _ary, _klass, _klass_is_exact, offset, instance_id); + _ary, _klass, _klass_is_exact, offset, instance_id, speculative); } case BotPTR: case NotNull: { int instance_id = meet_instance_id(tp->instance_id()); - return TypeOopPtr::make(ptr, offset, instance_id); + const TypeOopPtr* speculative = meet_speculative(tp); + return TypeOopPtr::make(ptr, offset, instance_id, speculative); } default: ShouldNotReachHere(); } @@ -3610,8 +3778,9 @@ // else fall through to AnyNull case AnyNull: { int instance_id = meet_instance_id(InstanceTop); - return make( ptr, (ptr == Constant ? const_oop() : NULL), - _ary, _klass, _klass_is_exact, offset, instance_id); + const TypeOopPtr* speculative = _speculative; + return make(ptr, (ptr == Constant ? 
const_oop() : NULL), + _ary, _klass, _klass_is_exact, offset, instance_id, speculative); } default: ShouldNotReachHere(); } @@ -3627,6 +3796,7 @@ const TypeAry *tary = _ary->meet(tap->_ary)->is_ary(); PTR ptr = meet_ptr(tap->ptr()); int instance_id = meet_instance_id(tap->instance_id()); + const TypeOopPtr* speculative = meet_speculative(tap); ciKlass* lazy_klass = NULL; if (tary->_elem->isa_int()) { // Integral array element types have irrelevant lattice relations. @@ -3654,7 +3824,7 @@ // 'this' is exact and super or unrelated: (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) { tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable); - return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot ); + return make(NotNull, NULL, tary, lazy_klass, false, off, InstanceBot); } bool xk = false; @@ -3662,8 +3832,12 @@ case AnyNull: case TopPTR: // Compute new klass on demand, do not use tap->_klass - xk = (tap->_klass_is_exact | this->_klass_is_exact); - return make( ptr, const_oop(), tary, lazy_klass, xk, off, instance_id ); + if (below_centerline(this->_ptr)) { + xk = this->_klass_is_exact; + } else { + xk = (tap->_klass_is_exact | this->_klass_is_exact); + } + return make(ptr, const_oop(), tary, lazy_klass, xk, off, instance_id, speculative); case Constant: { ciObject* o = const_oop(); if( _ptr == Constant ) { @@ -3675,25 +3849,23 @@ } else { xk = true; } - } else if( above_centerline(_ptr) ) { + } else if(above_centerline(_ptr)) { o = tap->const_oop(); xk = true; } else { // Only precise for identical arrays xk = this->_klass_is_exact && (klass() == tap->klass()); } - return TypeAryPtr::make( ptr, o, tary, lazy_klass, xk, off, instance_id ); + return TypeAryPtr::make(ptr, o, tary, lazy_klass, xk, off, instance_id, speculative); } case NotNull: case BotPTR: // Compute new klass on demand, do not use tap->_klass if (above_centerline(this->_ptr)) xk = tap->_klass_is_exact; - else if (above_centerline(tap->_ptr)) - xk = this->_klass_is_exact; else xk = (tap->_klass_is_exact & this->_klass_is_exact) && (klass() == tap->klass()); // Only precise for identical arrays - return TypeAryPtr::make( ptr, NULL, tary, lazy_klass, xk, off, instance_id ); + return TypeAryPtr::make(ptr, NULL, tary, lazy_klass, xk, off, instance_id, speculative); default: ShouldNotReachHere(); } } @@ -3704,16 +3876,20 @@ int offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); int instance_id = meet_instance_id(tp->instance_id()); + const TypeOopPtr* speculative = meet_speculative(tp); switch (ptr) { case TopPTR: case AnyNull: // Fall 'down' to dual of object klass - if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) { - return TypeAryPtr::make( ptr, _ary, _klass, _klass_is_exact, offset, instance_id ); + // For instances when a subclass meets a superclass we fall + // below the centerline when the superclass is exact. We need to + // do the same here. 
+ if (tp->klass()->equals(ciEnv::current()->Object_klass()) && !tp->klass_is_exact()) { + return TypeAryPtr::make(ptr, _ary, _klass, _klass_is_exact, offset, instance_id, speculative); } else { // cannot subclass, so the meet has to fall badly below the centerline ptr = NotNull; instance_id = InstanceBot; - return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id); + return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id, speculative); } case Constant: case NotNull: @@ -3722,10 +3898,13 @@ if (above_centerline(tp->ptr())) { // If 'tp' is above the centerline and it is Object class // then we can subclass in the Java class hierarchy. - if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) { + // For instances when a subclass meets a superclass we fall + // below the centerline when the superclass is exact. We need + // to do the same here. + if (tp->klass()->equals(ciEnv::current()->Object_klass()) && !tp->klass_is_exact()) { // that is, my array type is a subtype of 'tp' klass - return make( ptr, (ptr == Constant ? const_oop() : NULL), - _ary, _klass, _klass_is_exact, offset, instance_id ); + return make(ptr, (ptr == Constant ? const_oop() : NULL), + _ary, _klass, _klass_is_exact, offset, instance_id, speculative); } } // The other case cannot happen, since t cannot be a subtype of an array. @@ -3733,7 +3912,7 @@ if( ptr == Constant ) ptr = NotNull; instance_id = InstanceBot; - return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id); + return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id, speculative); default: typerr(t); } } @@ -3744,7 +3923,7 @@ //------------------------------xdual------------------------------------------ // Dual: compute field-by-field dual const Type *TypeAryPtr::xdual() const { - return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache() ); + return new TypeAryPtr(dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache(), dual_speculative()); } //----------------------interface_vs_oop--------------------------------------- @@ -3796,6 +3975,8 @@ st->print(",iid=top"); else if (_instance_id != InstanceBot) st->print(",iid=%d",_instance_id); + + dump_speculative(st); } #endif @@ -3805,10 +3986,13 @@ } //------------------------------add_offset------------------------------------- -const TypePtr *TypeAryPtr::add_offset( intptr_t offset ) const { - return make( _ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id ); -} - +const TypePtr *TypeAryPtr::add_offset(intptr_t offset) const { + return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id, add_offset_speculative(offset)); +} + +const TypeOopPtr *TypeAryPtr::remove_speculative() const { + return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, _offset, _instance_id, NULL); +} //============================================================================= @@ -4189,6 +4373,33 @@ return (_offset == 0) && !below_centerline(_ptr); } +// Do not allow interface-vs.-noninterface joins to collapse to top. +const Type *TypeKlassPtr::filter(const Type *kills) const { + // logic here mirrors the one from TypeOopPtr::filter. See comments + // there. 
+ const Type* ft = join(kills); + const TypeKlassPtr* ftkp = ft->isa_klassptr(); + const TypeKlassPtr* ktkp = kills->isa_klassptr(); + + if (ft->empty()) { + if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface()) + return kills; // Uplift to interface + + return Type::TOP; // Canonical empty value + } + + // Interface klass type could be exact in contrast to interface type, + // return it here instead of incorrect Constant ptr J/L/Object (6894807). + if (ftkp != NULL && ktkp != NULL && + ftkp->is_loaded() && ftkp->klass()->is_interface() && + !ftkp->klass_is_exact() && // Keep exact interface klass + ktkp->is_loaded() && !ktkp->klass()->is_interface()) { + return ktkp->cast_to_ptr_type(ftkp->ptr()); + } + + return ft; +} + //----------------------compute_klass------------------------------------------ // Compute the defining klass for this class ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/opto/type.hpp --- a/src/share/vm/opto/type.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/opto/type.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -63,7 +63,7 @@ class TypeOopPtr; class TypeInstPtr; class TypeAryPtr; -class TypeKlassPtr; +class TypeKlassPtr; class TypeMetadataPtr; //------------------------------Type------------------------------------------- @@ -159,6 +159,11 @@ // Table for efficient dualing of base types static const TYPES dual_type[lastype]; +#ifdef ASSERT + // One type is interface, the other is oop + virtual bool interface_vs_oop_helper(const Type *t) const; +#endif + protected: // Each class of type is also identified by its base. const TYPES _base; // Enum of Types type @@ -376,6 +381,9 @@ bool require_constant = false, bool is_autobox_cache = false); + // Speculative type. See TypeInstPtr + virtual ciKlass* speculative_type() const { return NULL; } + private: // support arrays static const BasicType _basic_type[]; @@ -585,6 +593,7 @@ static const TypeTuple *INT_PAIR; static const TypeTuple *LONG_PAIR; static const TypeTuple *INT_CC_PAIR; + static const TypeTuple *LONG_CC_PAIR; #ifndef PRODUCT virtual void dump2( Dict &d, uint, outputStream *st ) const; // Specialized per-Type dumping #endif @@ -784,7 +793,7 @@ // Some kind of oop (Java pointer), either klass or instance or array. class TypeOopPtr : public TypePtr { protected: - TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ); + TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative); public: virtual bool eq( const Type *t ) const; virtual int hash() const; // Type specific hashing @@ -810,11 +819,27 @@ // This is the node index of the allocation node creating this instance. int _instance_id; + // Extra type information profiling gave us. We propagate it the + // same way the rest of the type info is propagated. If we want to + // use it, then we have to emit a guard: this part of the type is + // not something we know but something we speculate about the type.
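// Illustrative sketch, standalone C++ (hypothetical names): as the comment
// above says, the speculative part is only a guess, so compiled code may rely
// on it solely behind a guard that falls back to a slower path (in HotSpot,
// an uncommon trap) when the guess turns out to be wrong.
struct Klass {};
struct Obj { const Klass* klass; };

template <typename FastPath, typename SlowPath>
void use_speculation(const Obj* o, const Klass* guess,
                     FastPath fast, SlowPath slow) {
  if (o->klass == guess) {
    fast(o);  // guard held: run code specialized for the guessed type
  } else {
    slow(o);  // guard failed: deoptimize and take the generic path
  }
}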
+ const TypeOopPtr* _speculative; + static const TypeOopPtr* make_from_klass_common(ciKlass* klass, bool klass_change, bool try_for_exact); int dual_instance_id() const; int meet_instance_id(int uid) const; + // utility methods to work on the speculative part of the type + const TypeOopPtr* dual_speculative() const; + const TypeOopPtr* meet_speculative(const TypeOopPtr* other) const; + bool eq_speculative(const TypeOopPtr* other) const; + int hash_speculative() const; + const TypeOopPtr* add_offset_speculative(intptr_t offset) const; +#ifndef PRODUCT + void dump_speculative(outputStream *st) const; +#endif + public: // Creates a type given a klass. Correctly handles multi-dimensional arrays // Respects UseUniqueSubclasses. @@ -841,7 +866,7 @@ bool not_null_elements = false); // Make a generic (unclassed) pointer to an oop. - static const TypeOopPtr* make(PTR ptr, int offset, int instance_id); + static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, const TypeOopPtr* speculative); ciObject* const_oop() const { return _const_oop; } virtual ciKlass* klass() const { return _klass; } @@ -855,6 +880,7 @@ bool is_known_instance() const { return _instance_id > 0; } int instance_id() const { return _instance_id; } bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; } + const TypeOopPtr* speculative() const { return _speculative; } virtual intptr_t get_con() const; @@ -868,9 +894,13 @@ const TypeKlassPtr* as_klass_type() const; virtual const TypePtr *add_offset( intptr_t offset ) const; + // Return same type without a speculative part + virtual const TypeOopPtr* remove_speculative() const; - virtual const Type *xmeet( const Type *t ) const; + virtual const Type *xmeet(const Type *t) const; virtual const Type *xdual() const; // Compute dual right now. + // the core of the computation of the meet for TypeOopPtr and for its subclasses + virtual const Type *xmeet_helper(const Type *t) const; // Do not allow interface-vs.-noninterface joins to collapse to top. virtual const Type *filter( const Type *kills ) const; @@ -880,13 +910,24 @@ #ifndef PRODUCT virtual void dump2( Dict &d, uint depth, outputStream *st ) const; #endif + + // Return the speculative type if any + ciKlass* speculative_type() const { + if (_speculative != NULL) { + const TypeOopPtr* speculative = _speculative->join(this)->is_oopptr(); + if (speculative->klass_is_exact()) { + return speculative->klass(); + } + } + return NULL; + } }; //------------------------------TypeInstPtr------------------------------------ // Class of Java object pointers, pointing either to non-array Java instances // or to a Klass* (including array klasses). class TypeInstPtr : public TypeOopPtr { - TypeInstPtr( PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ); + TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative); virtual bool eq( const Type *t ) const; virtual int hash() const; // Type specific hashing @@ -899,30 +940,30 @@ // Make a pointer to a constant oop. static const TypeInstPtr *make(ciObject* o) { - return make(TypePtr::Constant, o->klass(), true, o, 0); + return make(TypePtr::Constant, o->klass(), true, o, 0, InstanceBot); } // Make a pointer to a constant oop with offset. static const TypeInstPtr *make(ciObject* o, int offset) { - return make(TypePtr::Constant, o->klass(), true, o, offset); + return make(TypePtr::Constant, o->klass(), true, o, offset, InstanceBot); } // Make a pointer to some value of type klass. 
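// Illustrative sketch, standalone C++ (assumed names): the make() overloads
// above and below all funnel into one canonical factory, and the new
// speculative parameter is defaulted to NULL there, so the many existing
// call sites that know nothing about speculation keep compiling unchanged.
struct Ty;
const Ty* make(int ptr_kind, int offset, int instance_id,
               const Ty* speculative = nullptr);  // canonical factory

inline const Ty* make(int ptr_kind) {
  return make(ptr_kind, 0, /*instance_id=*/-1);   // old call shape still works
}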
static const TypeInstPtr *make(PTR ptr, ciKlass* klass) { - return make(ptr, klass, false, NULL, 0); + return make(ptr, klass, false, NULL, 0, InstanceBot); } // Make a pointer to some non-polymorphic value of exactly type klass. static const TypeInstPtr *make_exact(PTR ptr, ciKlass* klass) { - return make(ptr, klass, true, NULL, 0); + return make(ptr, klass, true, NULL, 0, InstanceBot); } // Make a pointer to some value of type klass with offset. static const TypeInstPtr *make(PTR ptr, ciKlass* klass, int offset) { - return make(ptr, klass, false, NULL, offset); + return make(ptr, klass, false, NULL, offset, InstanceBot); } // Make a pointer to an oop. - static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot ); + static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL); /** Create constant type for a constant boxed value */ const Type* get_const_boxed_value() const; @@ -939,8 +980,11 @@ virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const; virtual const TypePtr *add_offset( intptr_t offset ) const; + // Return same type without a speculative part + virtual const TypeOopPtr* remove_speculative() const; - virtual const Type *xmeet( const Type *t ) const; + // the core of the computation of the meet of 2 types + virtual const Type *xmeet_helper(const Type *t) const; virtual const TypeInstPtr *xmeet_unloaded( const TypeInstPtr *t ) const; virtual const Type *xdual() const; // Compute dual right now. @@ -959,8 +1003,8 @@ // Class of Java array pointers class TypeAryPtr : public TypeOopPtr { TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, - int offset, int instance_id, bool is_autobox_cache ) - : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id), + int offset, int instance_id, bool is_autobox_cache, const TypeOopPtr* speculative) + : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id, speculative), _ary(ary), _is_autobox_cache(is_autobox_cache) { @@ -998,9 +1042,9 @@ bool is_autobox_cache() const { return _is_autobox_cache; } - static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot); + static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL); // Constant pointer to array - static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, bool is_autobox_cache = false); + static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, bool is_autobox_cache = false); // Return a 'ptr' version of this type virtual const Type *cast_to_ptr_type(PTR ptr) const; @@ -1014,8 +1058,11 @@ virtual bool empty(void) const; // TRUE if type is vacuous virtual const TypePtr *add_offset( intptr_t offset ) const; + // Return same type without a speculative part + virtual const TypeOopPtr* remove_speculative() const; - virtual const Type *xmeet( const Type *t ) const; + // the core of the computation of the meet of 2 types + virtual const Type *xmeet_helper(const Type *t) const; virtual const Type *xdual() const; // Compute dual right now. 
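// Illustrative sketch, standalone C++ (assumed names): the split of xmeet()
// into xmeet()/xmeet_helper() above is a template-method refactoring. The
// base class keeps the part of the meet shared by every oop type (the
// handling of the speculative component); each subclass now implements only
// its class-specific core.
struct Type {
  virtual ~Type() {}
  virtual const Type* xmeet_helper(const Type* t) const = 0;  // subclass core
  const Type* xmeet(const Type* t) const {
    const Type* result = xmeet_helper(t);
    // ... shared post-processing, e.g. attaching the speculative part ...
    return result;
  }
};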
const TypeAryPtr* cast_to_stable(bool stable, int stable_dimension = 1) const; @@ -1155,6 +1202,9 @@ virtual intptr_t get_con() const; + // Do not allow interface-vs.-noninterface joins to collapse to top. + virtual const Type *filter( const Type *kills ) const; + // Convenience common pre-built types. static const TypeKlassPtr* OBJECT; // Not-null object klass or below static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jni.cpp --- a/src/share/vm/prims/jni.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jni.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1594,10 +1594,8 @@ } } else { m = klass->lookup_method(name, signature); - // Look up interfaces - if (m == NULL && klass->oop_is_instance()) { - m = InstanceKlass::cast(klass())->lookup_method_in_all_interfaces(name, - signature); + if (m == NULL && klass->oop_is_instance()) { + m = InstanceKlass::cast(klass())->lookup_method_in_ordered_interfaces(name, signature); } } if (m == NULL || (m->is_static() != is_static)) { @@ -3213,7 +3211,11 @@ HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY( env, string); #endif /* USDT2 */ - jsize ret = java_lang_String::length(JNIHandles::resolve_non_null(string)); + jsize ret = 0; + oop s = JNIHandles::resolve_non_null(string); + if (java_lang_String::value(s) != NULL) { + ret = java_lang_String::length(s); + } #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStringLength__return, ret); #else /* USDT2 */ @@ -3233,20 +3235,23 @@ HOTSPOT_JNI_GETSTRINGCHARS_ENTRY( env, string, (uintptr_t *) isCopy); #endif /* USDT2 */ + jchar* buf = NULL; oop s = JNIHandles::resolve_non_null(string); - int s_len = java_lang_String::length(s); typeArrayOop s_value = java_lang_String::value(s); - int s_offset = java_lang_String::offset(s); - jchar* buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal); // add one for zero termination - /* JNI Specification states return NULL on OOM */ - if (buf != NULL) { - if (s_len > 0) { - memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len); - } - buf[s_len] = 0; - //%note jni_5 - if (isCopy != NULL) { - *isCopy = JNI_TRUE; + if (s_value != NULL) { + int s_len = java_lang_String::length(s); + int s_offset = java_lang_String::offset(s); + buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal); // add one for zero termination + /* JNI Specification states return NULL on OOM */ + if (buf != NULL) { + if (s_len > 0) { + memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len); + } + buf[s_len] = 0; + //%note jni_5 + if (isCopy != NULL) { + *isCopy = JNI_TRUE; + } } } #ifndef USDT2 @@ -3316,7 +3321,11 @@ HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY( env, string); #endif /* USDT2 */ - jsize ret = java_lang_String::utf8_length(JNIHandles::resolve_non_null(string)); + jsize ret = 0; + oop java_string = JNIHandles::resolve_non_null(string); + if (java_lang_String::value(java_string) != NULL) { + ret = java_lang_String::utf8_length(java_string); + } #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStringUTFLength__return, ret); #else /* USDT2 */ @@ -3335,14 +3344,17 @@ HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY( env, string, (uintptr_t *) isCopy); #endif /* USDT2 */ + char* result = NULL; oop java_string = JNIHandles::resolve_non_null(string); - size_t length = java_lang_String::utf8_length(java_string); - /* JNI Specification states return NULL on OOM */ - char* result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL); - if (result != NULL) { - java_lang_String::as_utf8_string(java_string, 
result, (int) length + 1); - if (isCopy != NULL) { - *isCopy = JNI_TRUE; + if (java_lang_String::value(java_string) != NULL) { + size_t length = java_lang_String::utf8_length(java_string); + /* JNI Specification states return NULL on OOM */ + result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL); + if (result != NULL) { + java_lang_String::as_utf8_string(java_string, result, (int) length + 1); + if (isCopy != NULL) { + *isCopy = JNI_TRUE; + } } } #ifndef USDT2 @@ -5050,6 +5062,8 @@ void TestReserveMemorySpecial_test(); void TestVirtualSpace_test(); void TestMetaspaceAux_test(); +void TestMetachunk_test(); +void TestVirtualSpaceNode_test(); #if INCLUDE_ALL_GCS void TestG1BiasedArray_test(); #endif @@ -5061,6 +5075,8 @@ run_unit_test(TestReserveMemorySpecial_test()); run_unit_test(TestVirtualSpace_test()); run_unit_test(TestMetaspaceAux_test()); + run_unit_test(TestMetachunk_test()); + run_unit_test(TestVirtualSpaceNode_test()); run_unit_test(GlobalDefinitions::test_globals()); run_unit_test(GCTimerAllTest::all()); run_unit_test(arrayOopDesc::test_max_array_length()); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jniCheck.cpp --- a/src/share/vm/prims/jniCheck.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jniCheck.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1324,18 +1324,19 @@ IN_VM( checkString(thr, str); ) + jchar* newResult = NULL; const jchar *result = UNCHECKED()->GetStringChars(env,str,isCopy); assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringChars didn't return a copy as expected"); - - size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination - jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal); - *tagLocation = STRING_TAG; - jchar* newResult = (jchar*) (tagLocation + 1); - memcpy(newResult, result, len * sizeof(jchar)); - // Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes - // Note that the dtrace arguments for the allocated memory will not match up with this solution. - FreeHeap((char*)result); - + if (result != NULL) { + size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination + jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal); + *tagLocation = STRING_TAG; + newResult = (jchar*) (tagLocation + 1); + memcpy(newResult, result, len * sizeof(jchar)); + // Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes + // Note that the dtrace arguments for the allocated memory will not match up with this solution. + FreeHeap((char*)result); + } functionExit(env); return newResult; JNI_END @@ -1394,18 +1395,19 @@ IN_VM( checkString(thr, str); ) + char* newResult = NULL; const char *result = UNCHECKED()->GetStringUTFChars(env,str,isCopy); assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringUTFChars didn't return a copy as expected"); - - size_t len = strlen(result) + 1; // + 1 for NULL termination - jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal); - *tagLocation = STRING_UTF_TAG; - char* newResult = (char*) (tagLocation + 1); - strcpy(newResult, result); - // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes - // Note that the dtrace arguments for the allocated memory will not match up with this solution. 
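// Illustrative sketch, standalone C++ (hypothetical types): the common shape
// of the JNI string fixes above. A java.lang.String can be observed with a
// null value array (allocated but not yet initialized), so each accessor now
// checks the backing array first and degrades to 0/NULL instead of crashing.
struct JavaString { const unsigned short* value; int length; };

inline int safe_length(const JavaString* s) {
  return (s->value != nullptr) ? s->length : 0;  // guard before any use
}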
- FreeHeap((char*)result, mtInternal); - + if (result != NULL) { + size_t len = strlen(result) + 1; // + 1 for NULL termination + jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal); + *tagLocation = STRING_UTF_TAG; + newResult = (char*) (tagLocation + 1); + strcpy(newResult, result); + // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes + // Note that the dtrace arguments for the allocated memory will not match up with this solution. + FreeHeap((char*)result, mtInternal); + } functionExit(env); return newResult; JNI_END diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jvm.cpp --- a/src/share/vm/prims/jvm.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jvm.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -670,13 +670,12 @@ JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth)) JVMWrapper("JVM_GetCallerClass"); - // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation. - if (!JDK_Version::is_gte_jdk18x_version() || SystemDictionary::reflect_CallerSensitive_klass() == NULL) { + // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation; or + // sun.reflect.Reflection.getCallerClass with a depth parameter is provided + // temporarily for existing code to use until a replacement API is defined. + if (SystemDictionary::reflect_CallerSensitive_klass() == NULL || depth != JVM_CALLER_DEPTH) { Klass* k = thread->security_get_caller_class(depth); return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror()); - } else { - // Basic handshaking with Java_sun_reflect_Reflection_getCallerClass - assert(depth == -1, "wrong handshake depth"); } // Getting the class of the caller frame. @@ -3967,248 +3966,6 @@ } -// Serialization -JVM_ENTRY(void, JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj, - jlongArray fieldIDs, jcharArray typecodes, jbyteArray data)) - assert(!JDK_Version::is_gte_jdk14x_version(), "should only be used in 1.3.1 and earlier"); - - typeArrayOop tcodes = typeArrayOop(JNIHandles::resolve(typecodes)); - typeArrayOop dbuf = typeArrayOop(JNIHandles::resolve(data)); - typeArrayOop fids = typeArrayOop(JNIHandles::resolve(fieldIDs)); - oop o = JNIHandles::resolve(obj); - - if (o == NULL || fids == NULL || dbuf == NULL || tcodes == NULL) { - THROW(vmSymbols::java_lang_NullPointerException()); - } - - jsize nfids = fids->length(); - if (nfids == 0) return; - - if (tcodes->length() < nfids) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); - } - - jsize off = 0; - /* loop through fields, setting values */ - for (jsize i = 0; i < nfids; i++) { - jfieldID fid = (jfieldID)(intptr_t) fids->long_at(i); - int field_offset; - if (fid != NULL) { - // NULL is a legal value for fid, but retrieving the field offset - // trigger assertion in that case - field_offset = jfieldIDWorkaround::from_instance_jfieldID(o->klass(), fid); - } - - switch (tcodes->char_at(i)) { - case 'Z': - if (fid != NULL) { - jboolean val = (dbuf->byte_at(off) != 0) ? 
JNI_TRUE : JNI_FALSE; - o->bool_field_put(field_offset, val); - } - off++; - break; - - case 'B': - if (fid != NULL) { - o->byte_field_put(field_offset, dbuf->byte_at(off)); - } - off++; - break; - - case 'C': - if (fid != NULL) { - jchar val = ((dbuf->byte_at(off + 0) & 0xFF) << 8) - + ((dbuf->byte_at(off + 1) & 0xFF) << 0); - o->char_field_put(field_offset, val); - } - off += 2; - break; - - case 'S': - if (fid != NULL) { - jshort val = ((dbuf->byte_at(off + 0) & 0xFF) << 8) - + ((dbuf->byte_at(off + 1) & 0xFF) << 0); - o->short_field_put(field_offset, val); - } - off += 2; - break; - - case 'I': - if (fid != NULL) { - jint ival = ((dbuf->byte_at(off + 0) & 0xFF) << 24) - + ((dbuf->byte_at(off + 1) & 0xFF) << 16) - + ((dbuf->byte_at(off + 2) & 0xFF) << 8) - + ((dbuf->byte_at(off + 3) & 0xFF) << 0); - o->int_field_put(field_offset, ival); - } - off += 4; - break; - - case 'F': - if (fid != NULL) { - jint ival = ((dbuf->byte_at(off + 0) & 0xFF) << 24) - + ((dbuf->byte_at(off + 1) & 0xFF) << 16) - + ((dbuf->byte_at(off + 2) & 0xFF) << 8) - + ((dbuf->byte_at(off + 3) & 0xFF) << 0); - jfloat fval = (*int_bits_to_float_fn)(env, NULL, ival); - o->float_field_put(field_offset, fval); - } - off += 4; - break; - - case 'J': - if (fid != NULL) { - jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56) - + (((jlong) dbuf->byte_at(off + 1) & 0xFF) << 48) - + (((jlong) dbuf->byte_at(off + 2) & 0xFF) << 40) - + (((jlong) dbuf->byte_at(off + 3) & 0xFF) << 32) - + (((jlong) dbuf->byte_at(off + 4) & 0xFF) << 24) - + (((jlong) dbuf->byte_at(off + 5) & 0xFF) << 16) - + (((jlong) dbuf->byte_at(off + 6) & 0xFF) << 8) - + (((jlong) dbuf->byte_at(off + 7) & 0xFF) << 0); - o->long_field_put(field_offset, lval); - } - off += 8; - break; - - case 'D': - if (fid != NULL) { - jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56) - + (((jlong) dbuf->byte_at(off + 1) & 0xFF) << 48) - + (((jlong) dbuf->byte_at(off + 2) & 0xFF) << 40) - + (((jlong) dbuf->byte_at(off + 3) & 0xFF) << 32) - + (((jlong) dbuf->byte_at(off + 4) & 0xFF) << 24) - + (((jlong) dbuf->byte_at(off + 5) & 0xFF) << 16) - + (((jlong) dbuf->byte_at(off + 6) & 0xFF) << 8) - + (((jlong) dbuf->byte_at(off + 7) & 0xFF) << 0); - jdouble dval = (*long_bits_to_double_fn)(env, NULL, lval); - o->double_field_put(field_offset, dval); - } - off += 8; - break; - - default: - // Illegal typecode - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode"); - } - } -JVM_END - - -JVM_ENTRY(void, JVM_GetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj, - jlongArray fieldIDs, jcharArray typecodes, jbyteArray data)) - assert(!JDK_Version::is_gte_jdk14x_version(), "should only be used in 1.3.1 and earlier"); - - typeArrayOop tcodes = typeArrayOop(JNIHandles::resolve(typecodes)); - typeArrayOop dbuf = typeArrayOop(JNIHandles::resolve(data)); - typeArrayOop fids = typeArrayOop(JNIHandles::resolve(fieldIDs)); - oop o = JNIHandles::resolve(obj); - - if (o == NULL || fids == NULL || dbuf == NULL || tcodes == NULL) { - THROW(vmSymbols::java_lang_NullPointerException()); - } - - jsize nfids = fids->length(); - if (nfids == 0) return; - - if (tcodes->length() < nfids) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); - } - - /* loop through fields, fetching values */ - jsize off = 0; - for (jsize i = 0; i < nfids; i++) { - jfieldID fid = (jfieldID)(intptr_t) fids->long_at(i); - if (fid == NULL) { - THROW(vmSymbols::java_lang_NullPointerException()); - } - int field_offset = 
jfieldIDWorkaround::from_instance_jfieldID(o->klass(), fid); - - switch (tcodes->char_at(i)) { - case 'Z': - { - jboolean val = o->bool_field(field_offset); - dbuf->byte_at_put(off++, (val != 0) ? 1 : 0); - } - break; - - case 'B': - dbuf->byte_at_put(off++, o->byte_field(field_offset)); - break; - - case 'C': - { - jchar val = o->char_field(field_offset); - dbuf->byte_at_put(off++, (val >> 8) & 0xFF); - dbuf->byte_at_put(off++, (val >> 0) & 0xFF); - } - break; - - case 'S': - { - jshort val = o->short_field(field_offset); - dbuf->byte_at_put(off++, (val >> 8) & 0xFF); - dbuf->byte_at_put(off++, (val >> 0) & 0xFF); - } - break; - - case 'I': - { - jint val = o->int_field(field_offset); - dbuf->byte_at_put(off++, (val >> 24) & 0xFF); - dbuf->byte_at_put(off++, (val >> 16) & 0xFF); - dbuf->byte_at_put(off++, (val >> 8) & 0xFF); - dbuf->byte_at_put(off++, (val >> 0) & 0xFF); - } - break; - - case 'F': - { - jfloat fval = o->float_field(field_offset); - jint ival = (*float_to_int_bits_fn)(env, NULL, fval); - dbuf->byte_at_put(off++, (ival >> 24) & 0xFF); - dbuf->byte_at_put(off++, (ival >> 16) & 0xFF); - dbuf->byte_at_put(off++, (ival >> 8) & 0xFF); - dbuf->byte_at_put(off++, (ival >> 0) & 0xFF); - } - break; - - case 'J': - { - jlong val = o->long_field(field_offset); - dbuf->byte_at_put(off++, (val >> 56) & 0xFF); - dbuf->byte_at_put(off++, (val >> 48) & 0xFF); - dbuf->byte_at_put(off++, (val >> 40) & 0xFF); - dbuf->byte_at_put(off++, (val >> 32) & 0xFF); - dbuf->byte_at_put(off++, (val >> 24) & 0xFF); - dbuf->byte_at_put(off++, (val >> 16) & 0xFF); - dbuf->byte_at_put(off++, (val >> 8) & 0xFF); - dbuf->byte_at_put(off++, (val >> 0) & 0xFF); - } - break; - - case 'D': - { - jdouble dval = o->double_field(field_offset); - jlong lval = (*double_to_long_bits_fn)(env, NULL, dval); - dbuf->byte_at_put(off++, (lval >> 56) & 0xFF); - dbuf->byte_at_put(off++, (lval >> 48) & 0xFF); - dbuf->byte_at_put(off++, (lval >> 40) & 0xFF); - dbuf->byte_at_put(off++, (lval >> 32) & 0xFF); - dbuf->byte_at_put(off++, (lval >> 24) & 0xFF); - dbuf->byte_at_put(off++, (lval >> 16) & 0xFF); - dbuf->byte_at_put(off++, (lval >> 8) & 0xFF); - dbuf->byte_at_put(off++, (lval >> 0) & 0xFF); - } - break; - - default: - // Illegal typecode - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode"); - } - } -JVM_END - // Shared JNI/JVM entry points ////////////////////////////////////////////////////////////// diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jvm.h --- a/src/share/vm/prims/jvm.h Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jvm.h Thu Nov 21 15:04:54 2013 +0100 @@ -374,6 +374,9 @@ /* * java.lang.Class and java.lang.ClassLoader */ + +#define JVM_CALLER_DEPTH -1 + /* * Returns the class in which the code invoking the native method * belongs. diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jvm_misc.hpp --- a/src/share/vm/prims/jvm_misc.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jvm_misc.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -36,22 +36,6 @@ void trace_class_resolution(Klass* to_class); /* - * Support for Serialization and RMI. Currently used by HotSpot only. 
- */ - -extern "C" { - -void JNICALL -JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj, - jlongArray fieldIDs, jcharArray typecodes, jbyteArray data); - -void JNICALL -JVM_GetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj, - jlongArray fieldIDs, jcharArray typecodes, jbyteArray data); - -} - -/* * Support for -Xcheck:jni */ diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jvmtiEnv.cpp --- a/src/share/vm/prims/jvmtiEnv.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jvmtiEnv.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -259,8 +259,7 @@ // bytes to the InstanceKlass here because they have not been // validated and we're not at a safepoint. constantPoolHandle constants(current_thread, ikh->constants()); - oop cplock = constants->lock(); - ObjectLocker ol(cplock, current_thread, cplock != NULL); // lock constant pool while we query it + MonitorLockerEx ml(constants->lock()); // lock constant pool while we query it JvmtiClassFileReconstituter reconstituter(ikh); if (reconstituter.get_error() != JVMTI_ERROR_NONE) { @@ -2418,8 +2417,7 @@ instanceKlassHandle ikh(thread, k_oop); constantPoolHandle constants(thread, ikh->constants()); - oop cplock = constants->lock(); - ObjectLocker ol(cplock, thread, cplock != NULL); // lock constant pool while we query it + MonitorLockerEx ml(constants->lock()); // lock constant pool while we query it JvmtiConstantPoolReconstituter reconstituter(ikh); if (reconstituter.get_error() != JVMTI_ERROR_NONE) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jvmtiGetLoadedClasses.cpp --- a/src/share/vm/prims/jvmtiGetLoadedClasses.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jvmtiGetLoadedClasses.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -29,8 +29,43 @@ #include "runtime/thread.hpp" +// The closure for GetLoadedClasses +class LoadedClassesClosure : public KlassClosure { +private: + Stack<jclass, mtInternal> _classStack; + JvmtiEnv* _env; -// The closure for GetLoadedClasses and GetClassLoaderClasses +public: + LoadedClassesClosure(JvmtiEnv* env) { + _env = env; + } + + void do_klass(Klass* k) { + // Collect all jclasses + _classStack.push((jclass) _env->jni_reference(k->java_mirror())); + } + + int extract(jclass* result_list) { + // The size of the Stack will be 0 after extract, so get it here + int count = (int)_classStack.size(); + int i = count; + + // Pop all jclasses, fill backwards + while (!_classStack.is_empty()) { + result_list[--i] = _classStack.pop(); + } + + // Return the number of elements written + return count; + } + + // Return current size of the Stack + int get_count() { + return (int)_classStack.size(); + } +}; + +// The closure for GetClassLoaderClasses class JvmtiGetLoadedClassesClosure : public StackObj { // Since the SystemDictionary::classes_do callback // doesn't pass a closureData pointer, @@ -165,19 +200,6 @@ } } - // Finally, the static methods that are the callbacks - static void increment(Klass* k) { - JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); - if (that->get_initiatingLoader() == NULL) { - for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) { - that->set_count(that->get_count() + 1); - } - } else if (k != NULL) { - // if initiating loader not null, just include the instance with 1 dimension - that->set_count(that->get_count() + 1); - } - } - static void increment_with_loader(Klass* k, ClassLoaderData* loader_data) { JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); oop class_loader = loader_data->class_loader(); @@ -196,24 +218,6 @@
} } - static void add(Klass* k) { - JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); - if (that->available()) { - if (that->get_initiatingLoader() == NULL) { - for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) { - oop mirror = l->java_mirror(); - that->set_element(that->get_index(), mirror); - that->set_index(that->get_index() + 1); - } - } else if (k != NULL) { - // if initiating loader not null, just include the instance with 1 dimension - oop mirror = k->java_mirror(); - that->set_element(that->get_index(), mirror); - that->set_index(that->get_index() + 1); - } - } - } - static void add_with_loader(Klass* k, ClassLoaderData* loader_data) { JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); if (that->available()) { @@ -255,39 +259,30 @@ jvmtiError JvmtiGetLoadedClasses::getLoadedClasses(JvmtiEnv *env, jint* classCountPtr, jclass** classesPtr) { - // Since SystemDictionary::classes_do only takes a function pointer - // and doesn't call back with a closure data pointer, - // we can only pass static methods. - JvmtiGetLoadedClassesClosure closure; + LoadedClassesClosure closure(env); { // To get a consistent list of classes we need MultiArray_lock to ensure - // array classes aren't created, and SystemDictionary_lock to ensure that - // classes aren't added to the system dictionary, + // array classes aren't created. MutexLocker ma(MultiArray_lock); - MutexLocker sd(SystemDictionary_lock); + + // Iterate through all classes in ClassLoaderDataGraph + // and collect them using the LoadedClassesClosure + ClassLoaderDataGraph::loaded_classes_do(&closure); + } - // First, count the classes - SystemDictionary::classes_do(&JvmtiGetLoadedClassesClosure::increment); - Universe::basic_type_classes_do(&JvmtiGetLoadedClassesClosure::increment); - // Next, fill in the classes - closure.allocate(); - SystemDictionary::classes_do(&JvmtiGetLoadedClassesClosure::add); - Universe::basic_type_classes_do(&JvmtiGetLoadedClassesClosure::add); - // Drop the SystemDictionary_lock, so the results could be wrong from here, - // but we still have a snapshot. 
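// Illustrative sketch, standalone C++: the collect-then-extract shape used by
// the new LoadedClassesClosure above. Elements are pushed onto a stack during
// the walk and popped into the output array back to front, so the array ends
// up in discovery order.
#include <cstddef>
#include <stack>
#include <vector>

template <typename T>
std::vector<T> extract_in_order(std::stack<T>& s) {
  std::vector<T> out(s.size());
  std::size_t i = out.size();
  while (!s.empty()) {
    out[--i] = s.top();  // fill backwards
    s.pop();
  }
  return out;
}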
+ // Return results by extracting the collected contents into a list + // allocated via JvmtiEnv + jclass* result_list; + jvmtiError error = env->Allocate(closure.get_count() * sizeof(jclass), + (unsigned char**)&result_list); + + if (error == JVMTI_ERROR_NONE) { + int count = closure.extract(result_list); + *classCountPtr = count; + *classesPtr = result_list; } - // Post results - jclass* result_list; - jvmtiError err = env->Allocate(closure.get_count() * sizeof(jclass), - (unsigned char**)&result_list); - if (err != JVMTI_ERROR_NONE) { - return err; - } - closure.extract(env, result_list); - *classCountPtr = closure.get_count(); - *classesPtr = result_list; - return JVMTI_ERROR_NONE; + return error; } jvmtiError diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jvmtiImpl.cpp --- a/src/share/vm/prims/jvmtiImpl.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jvmtiImpl.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -210,6 +210,14 @@ } } +void GrowableCache::metadata_do(void f(Metadata*)) { + int len = _elements->length(); + for (int i=0; i<len; i++) { + GrowableElement *e = _elements->at(i); + e->metadata_do(f); + } +} + void GrowableCache::gc_epilogue() { int len = _elements->length(); for (int i=0; i<len; i++) { _cache[i] = _elements->at(i)->getCacheValue(); } } JvmtiBreakpoint::JvmtiBreakpoint() { _method = NULL; _bci = 0; - _class_loader = NULL; -#ifdef CHECK_UNHANDLED_OOPS - // This one is always allocated with new, but check it just in case. - Thread *thread = Thread::current(); - if (thread->is_in_stack((address)&_method)) { - thread->allow_unhandled_oop((oop*)&_method); - } -#endif // CHECK_UNHANDLED_OOPS + _class_holder = NULL; } JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location) { _method = m_method; - _class_loader = _method->method_holder()->class_loader_data()->class_loader(); + _class_holder = _method->method_holder()->klass_holder(); +#ifdef CHECK_UNHANDLED_OOPS + // _class_holder can't be wrapped in a Handle, because JvmtiBreakpoints are + // sometimes allocated on the heap. + // + // The code handling JvmtiBreakpoints allocated on the stack can't be + // interrupted by a GC until _class_holder is reachable by the GC via the + // oops_do method.
+ Thread::current()->allow_unhandled_oop(&_class_holder); +#endif // CHECK_UNHANDLED_OOPS assert(_method != NULL, "_method != NULL"); _bci = (int) location; assert(_bci >= 0, "_bci >= 0"); @@ -245,7 +255,7 @@ void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) { _method = bp._method; _bci = bp._bci; - _class_loader = bp._class_loader; + _class_holder = bp._class_holder; } bool JvmtiBreakpoint::lessThan(JvmtiBreakpoint& bp) { @@ -363,6 +373,13 @@ } } +void VM_ChangeBreakpoints::metadata_do(void f(Metadata*)) { + // Walk metadata in breakpoints to keep from being deallocated with RedefineClasses + if (_bp != NULL) { + _bp->metadata_do(f); + } +} + // // class JvmtiBreakpoints // @@ -379,6 +396,10 @@ _bps.oops_do(f); } +void JvmtiBreakpoints::metadata_do(void f(Metadata*)) { + _bps.metadata_do(f); +} + void JvmtiBreakpoints::gc_epilogue() { _bps.gc_epilogue(); } @@ -497,6 +518,12 @@ } } +void JvmtiCurrentBreakpoints::metadata_do(void f(Metadata*)) { + if (_jvmti_breakpoints != NULL) { + _jvmti_breakpoints->metadata_do(f); + } +} + void JvmtiCurrentBreakpoints::gc_epilogue() { if (_jvmti_breakpoints != NULL) { _jvmti_breakpoints->gc_epilogue(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jvmtiImpl.hpp --- a/src/share/vm/prims/jvmtiImpl.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jvmtiImpl.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -69,6 +69,7 @@ virtual bool lessThan(GrowableElement *e)=0; virtual GrowableElement *clone() =0; virtual void oops_do(OopClosure* f) =0; + virtual void metadata_do(void f(Metadata*)) =0; }; class GrowableCache VALUE_OBJ_CLASS_SPEC { @@ -115,6 +116,8 @@ void clear(); // apply f to every element and update the cache void oops_do(OopClosure* f); + // walk metadata to preserve for RedefineClasses + void metadata_do(void f(Metadata*)); // update the cache after a full gc void gc_epilogue(); }; @@ -148,6 +151,7 @@ void remove (int index) { _cache.remove(index); } void clear() { _cache.clear(); } void oops_do(OopClosure* f) { _cache.oops_do(f); } + void metadata_do(void f(Metadata*)) { _cache.metadata_do(f); } void gc_epilogue() { _cache.gc_epilogue(); } }; @@ -169,7 +173,7 @@ Method* _method; int _bci; Bytecodes::Code _orig_bytecode; - oop _class_loader; + oop _class_holder; // keeps _method memory from being deallocated public: JvmtiBreakpoint(); @@ -191,9 +195,15 @@ bool lessThan(GrowableElement* e) { Unimplemented(); return false; } bool equals(GrowableElement* e) { return equals((JvmtiBreakpoint&) *e); } void oops_do(OopClosure* f) { - // Mark the method loader as live - f->do_oop(&_class_loader); + // Mark the method loader as live so the Method* class loader doesn't get + // unloaded and Method* memory reclaimed. 
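// Illustrative sketch, standalone C++ (assumed names): a breakpoint now pins
// two different kinds of memory, an oop (the class holder, walked by the GC
// through oops_do) and a Metadata* (the Method*, walked by RedefineClasses
// through metadata_do), which is exactly the pair of walkers added above.
struct oopDesc {};
typedef oopDesc* oop;
struct Metadata {};
struct OopClosure { virtual void do_oop(oop* p) = 0; virtual ~OopClosure() {} };

struct Breakpoint {
  oop _class_holder;  // keeps the holder class alive across GC
  Metadata* _method;  // not an oop, needs its own walker
  void oops_do(OopClosure* f)         { f->do_oop(&_class_holder); }
  void metadata_do(void f(Metadata*)) { f(_method); }
};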
+ f->do_oop(&_class_holder); } + void metadata_do(void f(Metadata*)) { + // walk metadata to preserve for RedefineClasses + f(_method); + } + GrowableElement *clone() { JvmtiBreakpoint *bp = new JvmtiBreakpoint(); bp->copy(*this); @@ -239,6 +249,7 @@ int length(); void oops_do(OopClosure* f); + void metadata_do(void f(Metadata*)); void print(); int set(JvmtiBreakpoint& bp); @@ -288,6 +299,7 @@ static inline bool is_breakpoint(address bcp); static void oops_do(OopClosure* f); + static void metadata_do(void f(Metadata*)); static void gc_epilogue(); }; @@ -332,6 +344,7 @@ VMOp_Type type() const { return VMOp_ChangeBreakpoints; } void doit(); void oops_do(OopClosure* f); + void metadata_do(void f(Metadata*)); }; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/jvmtiRedefineClasses.cpp --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -2755,13 +2755,26 @@ // InstanceKlass around to hold obsolete methods so we don't have // any other InstanceKlass embedded vtables to update. The vtable // holds the Method*s for virtual (but not final) methods. - if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) { + // Default methods, or concrete methods in interfaces are stored + // in the vtable, so if an interface changes we need to check + // adjust_method_entries() for every InstanceKlass, which will also + // adjust the default method vtable indices. + // We also need to adjust any default method entries that are + // not yet in the vtable, because the vtable setup is in progress. + // This must be done after we adjust the default_methods and + // default_vtable_indices for methods already in the vtable. + if (ik->vtable_length() > 0 && (_the_class_oop->is_interface() + || ik->is_subtype_of(_the_class_oop))) { // ik->vtable() creates a wrapper object; rm cleans it up ResourceMark rm(_thread); ik->vtable()->adjust_method_entries(_matching_old_methods, _matching_new_methods, _matching_methods_length, &trace_name_printed); + ik->adjust_default_methods(_matching_old_methods, + _matching_new_methods, + _matching_methods_length, + &trace_name_printed); } // If the current class has an itable and we are either redefining an @@ -2931,7 +2944,8 @@ old_method->set_is_obsolete(); obsolete_count++; - // obsolete methods need a unique idnum + // obsolete methods need a unique idnum so they become new entries in + // the jmethodID cache in InstanceKlass u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum(); if (num != ConstMethod::UNSET_IDNUM) { old_method->set_method_idnum(num); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/methodHandles.cpp --- a/src/share/vm/prims/methodHandles.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/methodHandles.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -187,12 +187,34 @@ receiver_limit = m->method_holder(); assert(receiver_limit->verify_itable_index(vmindex), ""); flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT); + if (TraceInvokeDynamic) { + ResourceMark rm; + tty->print_cr("memberName: invokeinterface method_holder::method: %s, receiver: %s, itableindex: %d, access_flags:", + Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()), + receiver_limit()->internal_name(), vmindex); + m->access_flags().print_on(tty); + if (!m->is_abstract()) { + tty->print("default"); + } + tty->cr(); + } break; case CallInfo::vtable_call: vmindex = info.vtable_index(); flags |= IS_METHOD | 
(JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT); assert(receiver_limit->is_subtype_of(m->method_holder()), "virtual call must be type-safe"); + if (TraceInvokeDynamic) { + ResourceMark rm; + tty->print_cr("memberName: invokevirtual method_holder::method: %s, receiver: %s, vtableindex: %d, access_flags:", + Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()), + receiver_limit()->internal_name(), vmindex); + m->access_flags().print_on(tty); + if (m->is_default_method()) { + tty->print("default"); + } + tty->cr(); + } break; case CallInfo::direct_call: @@ -1172,9 +1194,7 @@ } else if (vmtarget->is_klass()) { x = ((Klass*) vmtarget)->java_mirror(); } else if (vmtarget->is_method()) { - Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL); - CallInfo info((Method*)vmtarget); - x = MethodHandles::init_method_MemberName(mname2, info); + x = mname(); } result->obj_at_put(1, x); return JNIHandles::make_local(env, result()); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/nativeLookup.cpp --- a/src/share/vm/prims/nativeLookup.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/nativeLookup.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -133,10 +133,6 @@ #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) static JNINativeMethod lookup_special_native_methods[] = { - // Next two functions only exist for compatibility with 1.3.1 and earlier. - { CC"Java_java_io_ObjectOutputStream_getPrimitiveFieldValues", NULL, FN_PTR(JVM_GetPrimitiveFieldValues) }, // intercept ObjectOutputStream getPrimitiveFieldValues for faster serialization - { CC"Java_java_io_ObjectInputStream_setPrimitiveFieldValues", NULL, FN_PTR(JVM_SetPrimitiveFieldValues) }, // intercept ObjectInputStream setPrimitiveFieldValues for faster serialization - { CC"Java_sun_misc_Unsafe_registerNatives", NULL, FN_PTR(JVM_RegisterUnsafeMethods) }, { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) }, { CC"Java_sun_misc_Perf_registerNatives", NULL, FN_PTR(JVM_RegisterPerfMethods) }, @@ -148,9 +144,8 @@ }; static address lookup_special_native(char* jni_name) { - int i = !JDK_Version::is_gte_jdk14x_version() ? 0 : 2; // see comment in lookup_special_native_methods int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod); - for (; i < count; i++) { + for (int i = 0; i < count; i++) { // NB: To ignore the jni prefix and jni postfix strstr is used matching. if (strstr(jni_name, lookup_special_native_methods[i].name) != NULL) { return CAST_FROM_FN_PTR(address, lookup_special_native_methods[i].fnPtr); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/wbtestmethods/parserTests.cpp --- a/src/share/vm/prims/wbtestmethods/parserTests.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/wbtestmethods/parserTests.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -117,11 +117,12 @@ const char* c_cmdline = java_lang_String::as_utf8_string(JNIHandles::resolve(j_cmdline)); objArrayOop argumentArray = objArrayOop(JNIHandles::resolve_non_null(arguments)); + objArrayHandle argumentArray_ah(THREAD, argumentArray); - int length = argumentArray->length(); + int length = argumentArray_ah->length(); for (int i = 0; i < length; i++) { - oop argument_oop = argumentArray->obj_at(i); + oop argument_oop = argumentArray_ah->obj_at(i); fill_in_parser(&parser, argument_oop); } @@ -130,19 +131,20 @@ Klass* k = SystemDictionary::Object_klass(); objArrayOop returnvalue_array = oopFactory::new_objArray(k, parser.num_arguments() * 2, CHECK_NULL); + objArrayHandle returnvalue_array_ah(THREAD, returnvalue_array); GrowableArray<const char *>*parsedArgNames = parser.argument_name_array(); for (int i = 0; i < parser.num_arguments(); i++) { oop parsedName = java_lang_String::create_oop_from_str(parsedArgNames->at(i), CHECK_NULL); - returnvalue_array->obj_at_put(i*2, parsedName); + returnvalue_array_ah->obj_at_put(i*2, parsedName); GenDCmdArgument* arg = parser.lookup_dcmd_option(parsedArgNames->at(i), strlen(parsedArgNames->at(i))); char buf[VALUE_MAXLEN]; arg->value_as_str(buf, sizeof(buf)); oop parsedValue = java_lang_String::create_oop_from_str(buf, CHECK_NULL); - returnvalue_array->obj_at_put(i*2+1, parsedValue); + returnvalue_array_ah->obj_at_put(i*2+1, parsedValue); } - return (jobjectArray) JNIHandles::make_local(returnvalue_array); + return (jobjectArray) JNIHandles::make_local(returnvalue_array_ah()); WB_END diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/prims/whitebox.cpp --- a/src/share/vm/prims/whitebox.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/prims/whitebox.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -53,6 +53,8 @@ #include "compiler/compileBroker.hpp" #include "runtime/compilationPolicy.hpp" +#define SIZE_T_MAX_VALUE ((size_t) -1) + bool WhiteBox::_used = false; WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj)) @@ -105,10 +107,116 @@ gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap " SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT, p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(), - p->min_alignment(), p->max_alignment()); + p->space_alignment(), p->heap_alignment()); } WB_END +#ifndef PRODUCT +// Forward declaration +void TestReservedSpace_test(); +void TestReserveMemorySpecial_test(); +void TestVirtualSpace_test(); +void TestMetaspaceAux_test(); +#endif + +WB_ENTRY(void, WB_RunMemoryUnitTests(JNIEnv* env, jobject o)) +#ifndef PRODUCT + TestReservedSpace_test(); + TestReserveMemorySpecial_test(); + TestVirtualSpace_test(); + TestMetaspaceAux_test(); +#endif +WB_END + +WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o)) + size_t granularity = os::vm_allocation_granularity(); + ReservedHeapSpace rhs(100 * granularity, granularity, false, NULL); + VirtualSpace vs; + vs.initialize(rhs, 50 * granularity); + + // Check that the constraints are satisfied + if (!( UseCompressedOops && rhs.base() != NULL && + Universe::narrow_oop_base() != NULL && + Universe::narrow_oop_use_implicit_null_checks() )) { + tty->print_cr("WB_ReadFromNoaccessArea method is useless:\n " + "\tUseCompressedOops is %d\n" + "\trhs.base() is "PTR_FORMAT"\n" + "\tUniverse::narrow_oop_base() is "PTR_FORMAT"\n" + "\tUniverse::narrow_oop_use_implicit_null_checks() is %d", + UseCompressedOops, + rhs.base(), +
Universe::narrow_oop_base(), + Universe::narrow_oop_use_implicit_null_checks()); + return; + } + tty->print_cr("Reading from no access area... "); + tty->print_cr("*(vs.low_boundary() - rhs.noaccess_prefix() / 2 ) = %c", + *(vs.low_boundary() - rhs.noaccess_prefix() / 2 )); +WB_END + +static jint wb_stress_virtual_space_resize(size_t reserved_space_size, + size_t magnitude, size_t iterations) { + size_t granularity = os::vm_allocation_granularity(); + ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, false, NULL); + VirtualSpace vs; + if (!vs.initialize(rhs, 0)) { + tty->print_cr("Failed to initialize VirtualSpace. Can't proceed."); + return 3; + } + + long seed = os::random(); + tty->print_cr("Random seed is %ld", seed); + os::init_random(seed); + + for (size_t i = 0; i < iterations; i++) { + + // Whether we will shrink or grow + bool shrink = os::random() % 2L == 0; + + // Get random delta to resize virtual space + size_t delta = (size_t)os::random() % magnitude; + + // If we are about to shrink virtual space below zero, then expand instead + if (shrink && vs.committed_size() < delta) { + shrink = false; + } + + // Resize by delta + if (shrink) { + vs.shrink_by(delta); + } else { + // If expanding fails, expand_by will silently return false + vs.expand_by(delta, true); + } + } + return 0; +} + +WB_ENTRY(jint, WB_StressVirtualSpaceResize(JNIEnv* env, jobject o, + jlong reserved_space_size, jlong magnitude, jlong iterations)) + tty->print_cr("reservedSpaceSize="JLONG_FORMAT", magnitude="JLONG_FORMAT", " + "iterations="JLONG_FORMAT"\n", reserved_space_size, magnitude, + iterations); + if (reserved_space_size < 0 || magnitude < 0 || iterations < 0) { + tty->print_cr("One of the variables printed above is negative. Can't proceed.\n"); + return 1; + } + + // sizeof(size_t) depends on whether the OS is 32-bit or 64-bit. sizeof(jlong) is + // always 8 bytes. That's why we should avoid overflow on 32-bit platforms. + if (sizeof(size_t) < sizeof(jlong)) { + jlong size_t_max_value = (jlong) SIZE_T_MAX_VALUE; + if (reserved_space_size > size_t_max_value || magnitude > size_t_max_value + || iterations > size_t_max_value) { + tty->print_cr("One of the variables printed above overflows size_t. 
Can't proceed.\n"); + return 2; + } + } + + return wb_stress_virtual_space_resize((size_t) reserved_space_size, + (size_t) magnitude, (size_t) iterations); +WB_END + #if INCLUDE_ALL_GCS WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj)) G1CollectedHeap* g1 = G1CollectedHeap::heap(); @@ -445,6 +553,9 @@ {CC"getCompressedOopsMaxHeapSize", CC"()J", (void*)&WB_GetCompressedOopsMaxHeapSize}, {CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes }, + {CC"runMemoryUnitTests", CC"()V", (void*)&WB_RunMemoryUnitTests}, + {CC"readFromNoaccessArea",CC"()V", (void*)&WB_ReadFromNoaccessArea}, + {CC"stressVirtualSpaceResize",CC"(JJJ)I", (void*)&WB_StressVirtualSpaceResize}, #if INCLUDE_ALL_GCS {CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark}, {CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous }, diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/arguments.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1151,9 +1151,6 @@ Tier3InvokeNotifyFreqLog = 0; Tier4InvocationThreshold = 0; } - if (FLAG_IS_DEFAULT(NmethodSweepFraction)) { - FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M)); - } } #if INCLUDE_ALL_GCS @@ -1427,7 +1424,7 @@ // NULL page is located before the heap, we pad the NULL page to the conservative // maximum alignment that the GC may ever impose upon the heap. size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(), - Arguments::conservative_max_heap_alignment()); + _conservative_max_heap_alignment); LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page); NOT_LP64(ShouldNotReachHere(); return 0); @@ -1524,7 +1521,7 @@ } #endif // INCLUDE_ALL_GCS _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(), - CollectorPolicy::compute_max_alignment()); + CollectorPolicy::compute_heap_alignment()); } void Arguments::set_ergonomics_flags() { @@ -1976,12 +1973,6 @@ "please refer to the release notes for the combinations " "allowed\n"); status = false; - } else if (ReservedCodeCacheSize > 2*G) { - // Code cache size larger than MAXINT is not supported. - jio_fprintf(defaultStream::error_stream(), - "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M, - (2*G)/M); - status = false; } return status; } @@ -2013,6 +2004,15 @@ warning("DefaultMaxRAMFraction is deprecated and will likely be removed in a future release. 
" "Use MaxRAMFraction instead."); } + if (FLAG_IS_CMDLINE(UseCMSCompactAtFullCollection)) { + warning("UseCMSCompactAtFullCollection is deprecated and will likely be removed in a future release."); + } + if (FLAG_IS_CMDLINE(CMSFullGCsBeforeCompaction)) { + warning("CMSFullGCsBeforeCompaction is deprecated and will likely be removed in a future release."); + } + if (FLAG_IS_CMDLINE(UseCMSCollectionPassing)) { + warning("UseCMSCollectionPassing is deprecated and will likely be removed in a future release."); + } } // Check stack pages settings @@ -2064,6 +2064,9 @@ status = status && verify_interval(StringTableSize, minimumStringTableSize, (max_uintx / StringTable::bucket_size()), "StringTable size"); + status = status && verify_interval(SymbolTableSize, minimumSymbolTableSize, + (max_uintx / SymbolTable::bucket_size()), "SymbolTable size"); + if (MinHeapFreeRatio > MaxHeapFreeRatio) { jio_fprintf(defaultStream::error_stream(), "MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or " @@ -2178,6 +2181,10 @@ #if INCLUDE_ALL_GCS if (UseG1GC) { + status = status && verify_percentage(G1NewSizePercent, "G1NewSizePercent"); + status = status && verify_percentage(G1MaxNewSizePercent, "G1MaxNewSizePercent"); + status = status && verify_interval(G1NewSizePercent, 0, G1MaxNewSizePercent, "G1NewSizePercent"); + status = status && verify_percentage(InitiatingHeapOccupancyPercent, "InitiatingHeapOccupancyPercent"); status = status && verify_min_value(G1RefProcDrainInterval, 1, @@ -2701,16 +2708,16 @@ FLAG_SET_CMDLINE(bool, BackgroundCompilation, false); // -Xmn for compatibility with other JVM vendors } else if (match_option(option, "-Xmn", &tail)) { - julong long_initial_eden_size = 0; - ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1); + julong long_initial_young_size = 0; + ArgsRange errcode = parse_memory_size(tail, &long_initial_young_size, 1); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), - "Invalid initial eden size: %s\n", option->optionString); + "Invalid initial young generation size: %s\n", option->optionString); describe_range_error(errcode); return JNI_EINVAL; } - FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size); - FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size); + FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_young_size); + FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_young_size); // -Xms } else if (match_option(option, "-Xms", &tail)) { julong long_initial_heap_size = 0; @@ -2722,9 +2729,10 @@ describe_range_error(errcode); return JNI_EINVAL; } - FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size); + set_min_heap_size((uintx)long_initial_heap_size); // Currently the minimum size and the initial heap sizes are the same. - set_min_heap_size(InitialHeapSize); + // Can be overridden with -XX:InitialHeapSize. 
+ FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size); // -Xmx } else if (match_option(option, "-Xmx", &tail) || match_option(option, "-XX:MaxHeapSize=", &tail)) { julong long_max_heap_size = 0; @@ -2738,8 +2746,9 @@ FLAG_SET_CMDLINE(uintx, MaxHeapSize, (uintx)long_max_heap_size); // Xmaxf } else if (match_option(option, "-Xmaxf", &tail)) { - int maxf = (int)(atof(tail) * 100); - if (maxf < 0 || maxf > 100) { + char* err; + int maxf = (int)(strtod(tail, &err) * 100); + if (*err != '\0' || maxf < 0 || maxf > 100) { jio_fprintf(defaultStream::error_stream(), "Bad max heap free percentage size: %s\n", option->optionString); @@ -2749,8 +2758,9 @@ } // Xminf } else if (match_option(option, "-Xminf", &tail)) { - int minf = (int)(atof(tail) * 100); - if (minf < 0 || minf > 100) { + char* err; + int minf = (int)(strtod(tail, &err) * 100); + if (*err != '\0' || minf < 0 || minf > 100) { jio_fprintf(defaultStream::error_stream(), "Bad min heap free percentage size: %s\n", option->optionString); @@ -3693,6 +3703,11 @@ "Incompatible compilation policy selected", NULL); } } + // Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered) + if (FLAG_IS_DEFAULT(NmethodSweepFraction)) { + FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M)); + } + // Set heap size based on available physical memory set_heap_size(); @@ -3721,6 +3736,9 @@ assert(verify_serial_gc_flags(), "SerialGC unset"); #endif // INCLUDE_ALL_GCS + // Initialize Metaspace flags and alignments. + Metaspace::ergo_initialize(); + // Set bytecode rewriting flags set_bytecode_flags(); @@ -3774,6 +3792,18 @@ // incremental inlining: bump MaxNodeLimit FLAG_SET_DEFAULT(MaxNodeLimit, (intx)75000); } + if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) { + // nothing uses the profiling, turn it off + FLAG_SET_DEFAULT(TypeProfileLevel, 0); + } + if (UseTypeSpeculation && FLAG_IS_DEFAULT(ReplaceInParentMaps)) { + // Doing the replace in parent maps helps speculation + FLAG_SET_DEFAULT(ReplaceInParentMaps, true); + } +#ifndef X86 + // Only on x86 for now + FLAG_SET_DEFAULT(TypeProfileLevel, 0); +#endif #endif if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/deoptimization.hpp --- a/src/share/vm/runtime/deoptimization.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/deoptimization.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -152,6 +152,7 @@ // UnrollBlock is returned by fetch_unroll_info() to the deoptimization handler (blob). // This is only a CheapObj to ease debugging after a deopt failure class UnrollBlock : public CHeapObj<mtCompiler> { + friend class VMStructs; private: int _size_of_deoptimized_frame; // Size, in bytes, of current deoptimized frame int _caller_adjustment; // Adjustment, in bytes, to caller's SP by initial interpreted frame diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/globals.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -496,21 +496,21 @@ #define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \ \ lp64_product(bool, UseCompressedOops, false, \ - "Use 32-bit object references in 64-bit VM " \ - "lp64_product means flag is always constant in 32 bit VM") \ + "Use 32-bit object references in 64-bit VM. 
" \ + "lp64_product means flag is always constant in 32 bit VM") \ \ lp64_product(bool, UseCompressedClassPointers, false, \ - "Use 32-bit class pointers in 64-bit VM " \ - "lp64_product means flag is always constant in 32 bit VM") \ + "Use 32-bit class pointers in 64-bit VM. " \ + "lp64_product means flag is always constant in 32 bit VM") \ \ notproduct(bool, CheckCompressedOops, true, \ - "generate checks in encoding/decoding code in debug VM") \ + "Generate checks in encoding/decoding code in debug VM") \ \ product_pd(uintx, HeapBaseMinAddress, \ - "OS specific low limit for heap base address") \ + "OS specific low limit for heap base address") \ \ diagnostic(bool, PrintCompressedOopsMode, false, \ - "Print compressed oops base address and encoding mode") \ + "Print compressed oops base address and encoding mode") \ \ lp64_product(intx, ObjectAlignmentInBytes, 8, \ "Default object alignment in bytes, 8 is minimum") \ @@ -532,7 +532,7 @@ "Use lwsync instruction if true, else use slower sync") \ \ develop(bool, CleanChunkPoolAsync, falseInEmbedded, \ - "Whether to clean the chunk pool asynchronously") \ + "Clean the chunk pool asynchronously") \ \ /* Temporary: See 6948537 */ \ experimental(bool, UseMemSetInBOT, true, \ @@ -542,10 +542,12 @@ "Enable normal processing of flags relating to field diagnostics")\ \ experimental(bool, UnlockExperimentalVMOptions, false, \ - "Enable normal processing of flags relating to experimental features")\ + "Enable normal processing of flags relating to experimental " \ + "features") \ \ product(bool, JavaMonitorsInStackTrace, true, \ - "Print info. about Java monitor locks when the stacks are dumped")\ + "Print information about Java monitor locks when the stacks are " \ + "dumped") \ \ product_pd(bool, UseLargePages, \ "Use large page memory") \ @@ -556,8 +558,12 @@ develop(bool, LargePagesIndividualAllocationInjectError, false, \ "Fail large pages individual allocation") \ \ + product(bool, UseLargePagesInMetaspace, false, \ + "Use large page memory in metaspace. 
" \ + "Only used if UseLargePages is enabled.") \ + \ develop(bool, TracePageSizes, false, \ - "Trace page size selection and usage.") \ + "Trace page size selection and usage") \ \ product(bool, UseNUMA, false, \ "Use NUMA if available") \ @@ -572,12 +578,12 @@ "Force NUMA optimizations on single-node/UMA systems") \ \ product(uintx, NUMAChunkResizeWeight, 20, \ - "Percentage (0-100) used to weigh the current sample when " \ + "Percentage (0-100) used to weigh the current sample when " \ "computing exponentially decaying average for " \ "AdaptiveNUMAChunkSizing") \ \ product(uintx, NUMASpaceResizeRate, 1*G, \ - "Do not reallocate more that this amount per collection") \ + "Do not reallocate more than this amount per collection") \ \ product(bool, UseAdaptiveNUMAChunkSizing, true, \ "Enable adaptive chunk sizing for NUMA") \ @@ -594,17 +600,17 @@ product(intx, UseSSE, 99, \ "Highest supported SSE instructions set on x86/x64") \ \ - product(bool, UseAES, false, \ + product(bool, UseAES, false, \ "Control whether AES instructions can be used on x86/x64") \ \ product(uintx, LargePageSizeInBytes, 0, \ - "Large page size (0 to let VM choose the page size") \ + "Large page size (0 to let VM choose the page size)") \ \ product(uintx, LargePageHeapSizeThreshold, 128*M, \ - "Use large pages if max heap is at least this big") \ + "Use large pages if maximum heap is at least this big") \ \ product(bool, ForceTimeHighResolution, false, \ - "Using high time resolution(For Win32 only)") \ + "Using high time resolution (for Win32 only)") \ \ develop(bool, TraceItables, false, \ "Trace initialization and use of itables") \ @@ -620,10 +626,10 @@ \ develop(bool, TraceLongCompiles, false, \ "Print out every time compilation is longer than " \ - "a given threashold") \ + "a given threshold") \ \ develop(bool, SafepointALot, false, \ - "Generates a lot of safepoints. Works with " \ + "Generate a lot of safepoints. 
This works with " \ "GuaranteedSafepointInterval") \ \ product_pd(bool, BackgroundCompilation, \ @@ -631,13 +637,13 @@ "compilation") \ \ product(bool, PrintVMQWaitTime, false, \ - "Prints out the waiting time in VM operation queue") \ + "Print out the waiting time in VM operation queue") \ \ develop(bool, NoYieldsInMicrolock, false, \ "Disable yields in microlock") \ \ develop(bool, TraceOopMapGeneration, false, \ - "Shows oopmap generation") \ + "Show OopMapGeneration") \ \ product(bool, MethodFlushing, true, \ "Reclamation of zombie and not-entrant methods") \ @@ -646,10 +652,11 @@ "Verify stack of each thread when it is entering a runtime call") \ \ diagnostic(bool, ForceUnreachable, false, \ - "Make all non code cache addresses to be unreachable with forcing use of 64bit literal fixups") \ + "Make all non code cache addresses to be unreachable by " \ + "forcing use of 64bit literal fixups") \ \ notproduct(bool, StressDerivedPointers, false, \ - "Force scavenge when a derived pointers is detected on stack " \ + "Force scavenge when a derived pointer is detected on stack " \ "after rtm call") \ \ develop(bool, TraceDerivedPointers, false, \ @@ -668,86 +675,86 @@ "Use Inline Caches for virtual calls ") \ \ develop(bool, InlineArrayCopy, true, \ - "inline arraycopy native that is known to be part of " \ + "Inline arraycopy native that is known to be part of " \ "base library DLL") \ \ develop(bool, InlineObjectHash, true, \ - "inline Object::hashCode() native that is known to be part " \ + "Inline Object::hashCode() native that is known to be part " \ "of base library DLL") \ \ develop(bool, InlineNatives, true, \ - "inline natives that are known to be part of base library DLL") \ + "Inline natives that are known to be part of base library DLL") \ \ develop(bool, InlineMathNatives, true, \ - "inline SinD, CosD, etc.") \ + "Inline SinD, CosD, etc.") \ \ develop(bool, InlineClassNatives, true, \ - "inline Class.isInstance, etc") \ + "Inline Class.isInstance, etc") \ \ develop(bool, InlineThreadNatives, true, \ - "inline Thread.currentThread, etc") \ + "Inline Thread.currentThread, etc") \ \ develop(bool, InlineUnsafeOps, true, \ - "inline memory ops (native methods) from sun.misc.Unsafe") \ + "Inline memory ops (native methods) from sun.misc.Unsafe") \ \ product(bool, CriticalJNINatives, true, \ - "check for critical JNI entry points") \ + "Check for critical JNI entry points") \ \ notproduct(bool, StressCriticalJNINatives, false, \ - "Exercise register saving code in critical natives") \ + "Exercise register saving code in critical natives") \ \ product(bool, UseSSE42Intrinsics, false, \ "SSE4.2 versions of intrinsics") \ \ product(bool, UseAESIntrinsics, false, \ - "use intrinsics for AES versions of crypto") \ + "Use intrinsics for AES versions of crypto") \ \ product(bool, UseCRC32Intrinsics, false, \ "use intrinsics for java.util.zip.CRC32") \ \ develop(bool, TraceCallFixup, false, \ - "traces all call fixups") \ + "Trace all call fixups") \ \ develop(bool, DeoptimizeALot, false, \ - "deoptimize at every exit from the runtime system") \ + "Deoptimize at every exit from the runtime system") \ \ notproduct(ccstrlist, DeoptimizeOnlyAt, "", \ - "a comma separated list of bcis to deoptimize at") \ + "A comma separated list of bcis to deoptimize at") \ \ product(bool, DeoptimizeRandom, false, \ - "deoptimize random frames on random exit from the runtime system")\ + "Deoptimize random frames on random exit from the runtime system")\ \ notproduct(bool, ZombieALot, false, \ - "creates zombies 
(non-entrant) at exit from the runt. system") \ + "Create zombies (non-entrant) at exit from the runtime system") \ \ product(bool, UnlinkSymbolsALot, false, \ - "unlink unreferenced symbols from the symbol table at safepoints")\ + "Unlink unreferenced symbols from the symbol table at safepoints")\ \ notproduct(bool, WalkStackALot, false, \ - "trace stack (no print) at every exit from the runtime system") \ + "Trace stack (no print) at every exit from the runtime system") \ \ product(bool, Debugging, false, \ - "set when executing debug methods in debug.ccp " \ + "Set when executing debug methods in debug.cpp " \ "(to prevent triggering assertions)") \ \ notproduct(bool, StrictSafepointChecks, trueInDebug, \ "Enable strict checks that safepoints cannot happen for threads " \ - "that used No_Safepoint_Verifier") \ + "that use No_Safepoint_Verifier") \ \ notproduct(bool, VerifyLastFrame, false, \ "Verify oops on last frame on entry to VM") \ \ develop(bool, TraceHandleAllocation, false, \ - "Prints out warnings when suspicious many handles are allocated") \ + "Print out warnings when suspiciously many handles are allocated")\ \ product(bool, UseCompilerSafepoints, true, \ "Stop at safepoints in compiled code") \ \ product(bool, FailOverToOldVerifier, true, \ - "fail over to old verifier when split verifier fails") \ + "Fail over to old verifier when split verifier fails") \ \ develop(bool, ShowSafepointMsgs, false, \ - "Show msg. about safepoint synch.") \ + "Show message about safepoint synchronization") \ \ product(bool, SafepointTimeout, false, \ "Time out and warn or fail after SafepointTimeoutDelay " \ @@ -771,19 +778,19 @@ "Trace external suspend wait failures") \ \ product(bool, MaxFDLimit, true, \ - "Bump the number of file descriptors to max in solaris.") \ + "Bump the number of file descriptors to maximum in Solaris") \ \ diagnostic(bool, LogEvents, true, \ - "Enable the various ring buffer event logs") \ + "Enable the various ring buffer event logs") \ \ diagnostic(uintx, LogEventsBufferEntries, 10, \ - "Enable the various ring buffer event logs") \ + "Number of ring buffer event logs") \ \ product(bool, BytecodeVerificationRemote, true, \ - "Enables the Java bytecode verifier for remote classes") \ + "Enable the Java bytecode verifier for remote classes") \ \ product(bool, BytecodeVerificationLocal, false, \ - "Enables the Java bytecode verifier for local classes") \ + "Enable the Java bytecode verifier for local classes") \ \ develop(bool, ForceFloatExceptions, trueInDebug, \ "Force exceptions on FP stack under/overflow") \ @@ -795,7 +802,7 @@ "Trace java language assertions") \ \ notproduct(bool, CheckAssertionStatusDirectives, false, \ - "temporary - see javaClasses.cpp") \ + "Temporary - see javaClasses.cpp") \ \ notproduct(bool, PrintMallocFree, false, \ "Trace calls to C heap malloc/free allocation") \ @@ -814,16 +821,16 @@ "entering the VM") \ \ notproduct(bool, CheckOopishValues, false, \ - "Warn if value contains oop ( requires ZapDeadLocals)") \ + "Warn if value contains oop (requires ZapDeadLocals)") \ \ develop(bool, UseMallocOnly, false, \ - "use only malloc/free for allocation (no resource area/arena)") \ + "Use only malloc/free for allocation (no resource area/arena)") \ \ develop(bool, PrintMalloc, false, \ - "print all malloc/free calls") \ + "Print all malloc/free calls") \ \ develop(bool, PrintMallocStatistics, false, \ - "print malloc/free statistics") \ + "Print malloc/free statistics") \ \ develop(bool, ZapResourceArea, trueInDebug, \ "Zap freed 
resource/arena space with 0xABABABAB") \ @@ -835,7 +842,7 @@ "Zap freed JNI handle space with 0xFEFEFEFE") \ \ notproduct(bool, ZapStackSegments, trueInDebug, \ - "Zap allocated/freed Stack segments with 0xFADFADED") \ + "Zap allocated/freed stack segments with 0xFADFADED") \ \ develop(bool, ZapUnusedHeapArea, trueInDebug, \ "Zap unused heap space with 0xBAADBABE") \ @@ -850,7 +857,7 @@ "Zap filler objects with 0xDEAFBABE") \ \ develop(bool, PrintVMMessages, true, \ - "Print vm messages on console") \ + "Print VM messages on console") \ \ product(bool, PrintGCApplicationConcurrentTime, false, \ "Print the time the application has been running") \ @@ -859,21 +866,21 @@ "Print the time the application has been stopped") \ \ diagnostic(bool, VerboseVerification, false, \ - "Display detailed verification details") \ + "Display detailed verification details") \ \ notproduct(uintx, ErrorHandlerTest, 0, \ - "If > 0, provokes an error after VM initialization; the value" \ - "determines which error to provoke. See test_error_handler()" \ + "If > 0, provokes an error after VM initialization; the value " \ + "determines which error to provoke. See test_error_handler() " \ "in debug.cpp.") \ \ develop(bool, Verbose, false, \ - "Prints additional debugging information from other modes") \ + "Print additional debugging information from other modes") \ \ develop(bool, PrintMiscellaneous, false, \ - "Prints uncategorized debugging information (requires +Verbose)") \ + "Print uncategorized debugging information (requires +Verbose)") \ \ develop(bool, WizardMode, false, \ - "Prints much more debugging information") \ + "Print much more debugging information") \ \ product(bool, ShowMessageBoxOnError, false, \ "Keep process alive on VM fatal error") \ @@ -885,7 +892,7 @@ "Let VM fatal error propagate to the OS (ie. 
WER on Windows)") \ \ product(bool, SuppressFatalErrorMessage, false, \ - "Do NO Fatal Error report [Avoid deadlock]") \ + "Report NO fatal error message (avoid deadlock)") \ \ product(ccstrlist, OnError, "", \ "Run user-defined commands on fatal error; see VMError.cpp " \ @@ -895,17 +902,17 @@ "Run user-defined commands on first java.lang.OutOfMemoryError") \ \ manageable(bool, HeapDumpBeforeFullGC, false, \ - "Dump heap to file before any major stop-world GC") \ + "Dump heap to file before any major stop-the-world GC") \ \ manageable(bool, HeapDumpAfterFullGC, false, \ - "Dump heap to file after any major stop-world GC") \ + "Dump heap to file after any major stop-the-world GC") \ \ manageable(bool, HeapDumpOnOutOfMemoryError, false, \ "Dump heap to file when java.lang.OutOfMemoryError is thrown") \ \ manageable(ccstr, HeapDumpPath, NULL, \ - "When HeapDumpOnOutOfMemoryError is on, the path (filename or" \ - "directory) of the dump file (defaults to java_pid.hprof" \ + "When HeapDumpOnOutOfMemoryError is on, the path (filename or " \ + "directory) of the dump file (defaults to java_pid.hprof " \ "in the working directory)") \ \ develop(uintx, SegmentedHeapDumpThreshold, 2*G, \ @@ -919,10 +926,10 @@ "Execute breakpoint upon encountering VM warning") \ \ develop(bool, TraceVMOperation, false, \ - "Trace vm operations") \ + "Trace VM operations") \ \ develop(bool, UseFakeTimers, false, \ - "Tells whether the VM should use system time or a fake timer") \ + "Tell whether the VM should use system time or a fake timer") \ \ product(ccstr, NativeMemoryTracking, "off", \ "Native memory tracking options") \ @@ -932,7 +939,7 @@ \ diagnostic(bool, AutoShutdownNMT, true, \ "Automatically shutdown native memory tracking under stress " \ - "situation. When set to false, native memory tracking tries to " \ + "situations. When set to false, native memory tracking tries to " \ "stay alive at the expense of JVM performance") \ \ diagnostic(bool, LogCompilation, false, \ @@ -942,12 +949,12 @@ "Print compilations") \ \ diagnostic(bool, TraceNMethodInstalls, false, \ - "Trace nmethod intallation") \ + "Trace nmethod installation") \ \ diagnostic(intx, ScavengeRootsInCode, 2, \ - "0: do not allow scavengable oops in the code cache; " \ - "1: allow scavenging from the code cache; " \ - "2: emit as many constants as the compiler can see") \ + "0: do not allow scavengable oops in the code cache; " \ + "1: allow scavenging from the code cache; " \ + "2: emit as many constants as the compiler can see") \ \ product(bool, AlwaysRestoreFPU, false, \ "Restore the FPU control word after every JNI call (expensive)") \ @@ -968,7 +975,7 @@ "Print assembly code (using external disassembler.so)") \ \ diagnostic(ccstr, PrintAssemblyOptions, NULL, \ - "Options string passed to disassembler.so") \ + "Print options string passed to disassembler.so") \ \ product(bool, PrintNMethodStatistics, false, \ "Print a summary statistic for the generated nmethods") \ @@ -992,20 +999,21 @@ "Print exception handler tables for all nmethods when generated") \ \ develop(bool, StressCompiledExceptionHandlers, false, \ - "Exercise compiled exception handlers") \ + "Exercise compiled exception handlers") \ \ develop(bool, InterceptOSException, false, \ - "Starts debugger when an implicit OS (e.g., NULL) " \ + "Start debugger when an implicit OS (e.g. 
NULL) " \ "exception happens") \ \ product(bool, PrintCodeCache, false, \ "Print the code cache memory usage when exiting") \ \ develop(bool, PrintCodeCache2, false, \ - "Print detailed usage info on the code cache when exiting") \ + "Print detailed usage information on the code cache when exiting")\ \ product(bool, PrintCodeCacheOnCompilation, false, \ - "Print the code cache memory usage each time a method is compiled") \ + "Print the code cache memory usage each time a method is " \ + "compiled") \ \ diagnostic(bool, PrintStubCode, false, \ "Print generated stub code") \ @@ -1017,40 +1025,40 @@ "Omit backtraces for some 'hot' exceptions in optimized code") \ \ product(bool, ProfilerPrintByteCodeStatistics, false, \ - "Prints byte code statictics when dumping profiler output") \ + "Print bytecode statistics when dumping profiler output") \ \ product(bool, ProfilerRecordPC, false, \ - "Collects tick for each 16 byte interval of compiled code") \ + "Collect ticks for each 16 byte interval of compiled code") \ \ product(bool, ProfileVM, false, \ - "Profiles ticks that fall within VM (either in the VM Thread " \ + "Profile ticks that fall within VM (either in the VM Thread " \ "or VM code called through stubs)") \ \ product(bool, ProfileIntervals, false, \ - "Prints profiles for each interval (see ProfileIntervalsTicks)") \ + "Print profiles for each interval (see ProfileIntervalsTicks)") \ \ notproduct(bool, ProfilerCheckIntervals, false, \ - "Collect and print info on spacing of profiler ticks") \ + "Collect and print information on spacing of profiler ticks") \ \ develop(bool, PrintJVMWarnings, false, \ - "Prints warnings for unimplemented JVM functions") \ + "Print warnings for unimplemented JVM functions") \ \ product(bool, PrintWarnings, true, \ - "Prints JVM warnings to output stream") \ + "Print JVM warnings to output stream") \ \ notproduct(uintx, WarnOnStalledSpinLock, 0, \ - "Prints warnings for stalled SpinLocks") \ + "Print warnings for stalled SpinLocks") \ \ product(bool, RegisterFinalizersAtInit, true, \ "Register finalizable objects at end of Object. or " \ "after allocation") \ \ develop(bool, RegisterReferences, true, \ - "Tells whether the VM should register soft/weak/final/phantom " \ + "Tell whether the VM should register soft/weak/final/phantom " \ "references") \ \ develop(bool, IgnoreRewrites, false, \ - "Supress rewrites of bytecodes in the oopmap generator. " \ + "Suppress rewrites of bytecodes in the oopmap generator. " \ "This is unsafe!") \ \ develop(bool, PrintCodeCacheExtension, false, \ @@ -1060,8 +1068,7 @@ "Enable the security JVM functions") \ \ develop(bool, ProtectionDomainVerification, true, \ - "Verifies protection domain before resolution in system " \ - "dictionary") \ + "Verify protection domain before resolution in system dictionary")\ \ product(bool, ClassUnloading, true, \ "Do unloading of classes") \ @@ -1074,14 +1081,14 @@ "Write memory usage profiling to log file") \ \ notproduct(bool, PrintSystemDictionaryAtExit, false, \ - "Prints the system dictionary at exit") \ + "Print the system dictionary at exit") \ \ experimental(intx, PredictedLoadedClassCount, 0, \ - "Experimental: Tune loaded class cache starting size.") \ + "Experimental: Tune loaded class cache starting size") \ \ diagnostic(bool, UnsyncloadClass, false, \ "Unstable: VM calls loadClass unsynchronized. 
Custom " \ - "class loader must call VM synchronized for findClass " \ + "class loader must call VM synchronized for findClass " \ "and defineClass.") \ \ product(bool, AlwaysLockClassLoader, false, \ @@ -1097,22 +1104,22 @@ "Call loadClassInternal() rather than loadClass()") \ \ product_pd(bool, DontYieldALot, \ - "Throw away obvious excess yield calls (for SOLARIS only)") \ + "Throw away obvious excess yield calls (for Solaris only)") \ \ product_pd(bool, ConvertSleepToYield, \ - "Converts sleep(0) to thread yield " \ - "(may be off for SOLARIS to improve GUI)") \ + "Convert sleep(0) to thread yield " \ + "(may be off for Solaris to improve GUI)") \ \ product(bool, ConvertYieldToSleep, false, \ - "Converts yield to a sleep of MinSleepInterval to simulate Win32 "\ - "behavior (SOLARIS only)") \ + "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \ + "behavior (Solaris only)") \ \ product(bool, UseBoundThreads, true, \ - "Bind user level threads to kernel threads (for SOLARIS only)") \ + "Bind user level threads to kernel threads (for Solaris only)") \ \ develop(bool, UseDetachedThreads, true, \ "Use detached threads that are recycled upon termination " \ - "(for SOLARIS only)") \ + "(for Solaris only)") \ \ product(bool, UseLWPSynchronization, true, \ "Use LWP-based instead of libthread-based synchronization " \ @@ -1122,41 +1129,43 @@ "(Unstable) Various monitor synchronization tunables") \ \ product(intx, EmitSync, 0, \ - "(Unsafe,Unstable) " \ - " Controls emission of inline sync fast-path code") \ + "(Unsafe, Unstable) " \ + "Control emission of inline sync fast-path code") \ \ product(intx, MonitorBound, 0, "Bound Monitor population") \ \ product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \ \ - product(intx, SyncFlags, 0, "(Unsafe,Unstable) Experimental Sync flags" ) \ - \ - product(intx, SyncVerbose, 0, "(Unstable)" ) \ - \ - product(intx, ClearFPUAtPark, 0, "(Unsafe,Unstable)" ) \ + product(intx, SyncFlags, 0, "(Unsafe, Unstable) Experimental Sync flags") \ + \ + product(intx, SyncVerbose, 0, "(Unstable)") \ + \ + product(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \ \ product(intx, hashCode, 5, \ - "(Unstable) select hashCode generation algorithm" ) \ + "(Unstable) select hashCode generation algorithm") \ \ product(intx, WorkAroundNPTLTimedWaitHang, 1, \ - "(Unstable, Linux-specific)" \ - " avoid NPTL-FUTEX hang pthread_cond_timedwait" ) \ + "(Unstable, Linux-specific) " \ + "avoid NPTL-FUTEX hang pthread_cond_timedwait") \ \ product(bool, FilterSpuriousWakeups, true, \ "Prevent spurious or premature wakeups from object.wait " \ "(Solaris only)") \ \ - product(intx, NativeMonitorTimeout, -1, "(Unstable)" ) \ - product(intx, NativeMonitorFlags, 0, "(Unstable)" ) \ - product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" ) \ + product(intx, NativeMonitorTimeout, -1, "(Unstable)") \ + \ + product(intx, NativeMonitorFlags, 0, "(Unstable)") \ + \ + product(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \ \ develop(bool, UsePthreads, false, \ "Use pthread-based instead of libthread-based synchronization " \ "(SPARC only)") \ \ product(bool, AdjustConcurrency, false, \ - "call thr_setconcurrency at thread create time to avoid " \ - "LWP starvation on MP systems (For Solaris Only)") \ + "Call thr_setconcurrency at thread creation time to avoid " \ + "LWP starvation on MP systems (for Solaris Only)") \ \ product(bool, ReduceSignalUsage, false, \ "Reduce the use of OS signals in Java and/or the VM") \ @@ -1165,13 +1174,14 @@ "Share vtable stubs 
(smaller code but worse branch prediction") \
\
develop(bool, LoadLineNumberTables, true, \
- "Tells whether the class file parser loads line number tables") \
+ "Tell whether the class file parser loads line number tables") \
\
develop(bool, LoadLocalVariableTables, true, \
- "Tells whether the class file parser loads local variable tables")\
+ "Tell whether the class file parser loads local variable tables") \
\
develop(bool, LoadLocalVariableTypeTables, true, \
- "Tells whether the class file parser loads local variable type tables")\
+ "Tell whether the class file parser loads local variable type " \
+ "tables") \
\
product(bool, AllowUserSignalHandlers, false, \
"Do not complain if the application installs signal handlers " \
@@ -1202,10 +1212,12 @@ \
product(bool, EagerXrunInit, false, \
"Eagerly initialize -Xrun libraries; allows startup profiling, " \
- " but not all -Xrun libraries may support the state of the VM at this time") \
+ "but not all -Xrun libraries may support the state of the VM " \
+ "at this time") \
\
product(bool, PreserveAllAnnotations, false, \
- "Preserve RuntimeInvisibleAnnotations as well as RuntimeVisibleAnnotations") \
+ "Preserve RuntimeInvisibleAnnotations as well " \
+ "as RuntimeVisibleAnnotations") \
\
develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \
"Number of OutOfMemoryErrors preallocated with backtrace") \
@@ -1280,7 +1292,7 @@ "Trace level for JVMTI RedefineClasses") \
\
develop(bool, StressMethodComparator, false, \
- "run the MethodComparator on all loaded methods") \
+ "Run the MethodComparator on all loaded methods") \
\
/* change to false by default sometime after Mustang */ \
product(bool, VerifyMergedCPBytecodes, true, \
@@ -1314,7 +1326,7 @@ "Trace dependencies") \
\
develop(bool, VerifyDependencies, trueInDebug, \
- "Exercise and verify the compilation dependency mechanism") \
+ "Exercise and verify the compilation dependency mechanism") \
\
develop(bool, TraceNewOopMapGeneration, false, \
"Trace OopMapGeneration") \
@@ -1332,7 +1344,7 @@ "Trace monitor matching failures during OopMapGeneration") \
\
develop(bool, TraceOopMapRewrites, false, \
- "Trace rewritting of method oops during oop map generation") \
+ "Trace rewriting of method oops during oop map generation") \
\
develop(bool, TraceSafepoint, false, \
"Trace safepoint operations") \
@@ -1350,10 +1362,10 @@ "Trace setup time") \
\
develop(bool, TraceProtectionDomainVerification, false, \
- "Trace protection domain verifcation") \
+ "Trace protection domain verification") \
\
develop(bool, TraceClearedExceptions, false, \
- "Prints when an exception is forcibly cleared") \
+ "Print when an exception is forcibly cleared") \
\
product(bool, TraceClassResolution, false, \
"Trace all constant pool resolutions (for debugging)") \
@@ -1367,7 +1379,7 @@ /* gc */ \
\
product(bool, UseSerialGC, false, \
- "Use the serial garbage collector") \
+ "Use the Serial garbage collector") \
\
product(bool, UseG1GC, false, \
"Use the Garbage-First garbage collector") \
@@ -1386,16 +1398,16 @@ "The collection count for the first maximum compaction") \
\
product(bool, UseMaximumCompactionOnSystemGC, true, \
- "In the Parallel Old garbage collector maximum compaction for " \
- "a system GC") \
+ "Use maximum compaction in the Parallel Old garbage collector " \
+ "for a system GC") \
\
product(uintx, ParallelOldDeadWoodLimiterMean, 50, \
- "The mean used by the par compact dead wood" \
- "limiter (a number between 0-100).") \
+ "The mean used by the parallel compact dead wood " \
+ "limiter (a 
number between 0-100)") \ \ product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \ - "The standard deviation used by the par compact dead wood" \ - "limiter (a number between 0-100).") \ + "The standard deviation used by the parallel compact dead wood " \ + "limiter (a number between 0-100)") \ \ product(uintx, ParallelGCThreads, 0, \ "Number of parallel threads parallel gc will use") \ @@ -1405,7 +1417,7 @@ "parallel gc will use") \ \ diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \ - "Force dynamic selection of the number of" \ + "Force dynamic selection of the number of " \ "parallel threads parallel gc will use to aid debugging") \ \ product(uintx, HeapSizePerGCThread, ScaleForWordSize(64*M), \ @@ -1416,7 +1428,7 @@ "Trace the dynamic GC thread usage") \ \ develop(bool, ParallelOldGCSplitALot, false, \ - "Provoke splitting (copying data from a young gen space to" \ + "Provoke splitting (copying data from a young gen space to " \ "multiple destination spaces)") \ \ develop(uintx, ParallelOldGCSplitInterval, 3, \ @@ -1426,19 +1438,19 @@ "Number of threads concurrent gc will use") \ \ product(uintx, YoungPLABSize, 4096, \ - "Size of young gen promotion labs (in HeapWords)") \ + "Size of young gen promotion LAB's (in HeapWords)") \ \ product(uintx, OldPLABSize, 1024, \ - "Size of old gen promotion labs (in HeapWords)") \ + "Size of old gen promotion LAB's (in HeapWords)") \ \ product(uintx, GCTaskTimeStampEntries, 200, \ "Number of time stamp entries per gc worker thread") \ \ product(bool, AlwaysTenure, false, \ - "Always tenure objects in eden. (ParallelGC only)") \ + "Always tenure objects in eden (ParallelGC only)") \ \ product(bool, NeverTenure, false, \ - "Never tenure objects in eden, May tenure on overflow " \ + "Never tenure objects in eden, may tenure on overflow " \ "(ParallelGC only)") \ \ product(bool, ScavengeBeforeFullGC, true, \ @@ -1446,14 +1458,14 @@ "used with UseParallelGC") \ \ develop(bool, ScavengeWithObjectsInToSpace, false, \ - "Allow scavenges to occur when to_space contains objects.") \ + "Allow scavenges to occur when to-space contains objects") \ \ product(bool, UseConcMarkSweepGC, false, \ "Use Concurrent Mark-Sweep GC in the old generation") \ \ product(bool, ExplicitGCInvokesConcurrent, false, \ - "A System.gc() request invokes a concurrent collection;" \ - " (effective only when UseConcMarkSweepGC)") \ + "A System.gc() request invokes a concurrent collection; " \ + "(effective only when UseConcMarkSweepGC)") \ \ product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \ "A System.gc() request invokes a concurrent collection and " \ @@ -1461,19 +1473,19 @@ "(effective only when UseConcMarkSweepGC)") \ \ product(bool, GCLockerInvokesConcurrent, false, \ - "The exit of a JNI CS necessitating a scavenge also" \ - " kicks off a bkgrd concurrent collection") \ + "The exit of a JNI critical section necessitating a scavenge, " \ + "also kicks off a background concurrent collection") \ \ product(uintx, GCLockerEdenExpansionPercent, 5, \ - "How much the GC can expand the eden by while the GC locker " \ + "How much the GC can expand the eden by while the GC locker " \ "is active (as a percentage)") \ \ diagnostic(intx, GCLockerRetryAllocationCount, 2, \ - "Number of times to retry allocations when" \ - " blocked by the GC locker") \ + "Number of times to retry allocations when " \ + "blocked by the GC locker") \ \ develop(bool, UseCMSAdaptiveFreeLists, true, \ - "Use Adaptive Free Lists in the CMS generation") \ + "Use adaptive free lists in the CMS 
generation") \ \ develop(bool, UseAsyncConcMarkSweepGC, true, \ "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\ @@ -1488,44 +1500,46 @@ "Use passing of collection from background to foreground") \ \ product(bool, UseParNewGC, false, \ - "Use parallel threads in the new generation.") \ + "Use parallel threads in the new generation") \ \ product(bool, ParallelGCVerbose, false, \ - "Verbose output for parallel GC.") \ + "Verbose output for parallel gc") \ \ product(uintx, ParallelGCBufferWastePct, 10, \ - "Wasted fraction of parallel allocation buffer.") \ + "Wasted fraction of parallel allocation buffer") \ \ diagnostic(bool, ParallelGCRetainPLAB, false, \ - "Retain parallel allocation buffers across scavenges; " \ - " -- disabled because this currently conflicts with " \ - " parallel card scanning under certain conditions ") \ + "Retain parallel allocation buffers across scavenges; " \ + "it is disabled because this currently conflicts with " \ + "parallel card scanning under certain conditions.") \ \ product(uintx, TargetPLABWastePct, 10, \ "Target wasted space in last buffer as percent of overall " \ "allocation") \ \ product(uintx, PLABWeight, 75, \ - "Percentage (0-100) used to weight the current sample when" \ - "computing exponentially decaying average for ResizePLAB.") \ + "Percentage (0-100) used to weigh the current sample when " \ + "computing exponentially decaying average for ResizePLAB") \ \ product(bool, ResizePLAB, true, \ - "Dynamically resize (survivor space) promotion labs") \ + "Dynamically resize (survivor space) promotion LAB's") \ \ product(bool, PrintPLAB, false, \ - "Print (survivor space) promotion labs sizing decisions") \ + "Print (survivor space) promotion LAB's sizing decisions") \ \ product(intx, ParGCArrayScanChunk, 50, \ - "Scan a subset and push remainder, if array is bigger than this") \ + "Scan a subset of object array and push remainder, if array is " \ + "bigger than this") \ \ product(bool, ParGCUseLocalOverflow, false, \ "Instead of a global overflow list, use local overflow stacks") \ \ product(bool, ParGCTrimOverflow, true, \ - "Eagerly trim the local overflow lists (when ParGCUseLocalOverflow") \ + "Eagerly trim the local overflow lists " \ + "(when ParGCUseLocalOverflow)") \ \ notproduct(bool, ParGCWorkQueueOverflowALot, false, \ - "Whether we should simulate work queue overflow in ParNew") \ + "Simulate work queue overflow in ParNew") \ \ notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \ "An `interval' counter that determines how frequently " \ @@ -1543,43 +1557,46 @@ "during card table scanning") \ \ product(uintx, CMSParPromoteBlocksToClaim, 16, \ - "Number of blocks to attempt to claim when refilling CMS LAB for "\ - "parallel GC.") \ + "Number of blocks to attempt to claim when refilling CMS LAB's " \ + "for parallel GC") \ \ product(uintx, OldPLABWeight, 50, \ - "Percentage (0-100) used to weight the current sample when" \ - "computing exponentially decaying average for resizing CMSParPromoteBlocksToClaim.") \ + "Percentage (0-100) used to weight the current sample when " \ + "computing exponentially decaying average for resizing " \ + "CMSParPromoteBlocksToClaim") \ \ product(bool, ResizeOldPLAB, true, \ - "Dynamically resize (old gen) promotion labs") \ + "Dynamically resize (old gen) promotion LAB's") \ \ product(bool, PrintOldPLAB, false, \ - "Print (old gen) promotion labs sizing decisions") \ + "Print (old gen) promotion LAB's sizing decisions") \ \ product(uintx, CMSOldPLABMin, 16, \ - "Min size of CMS gen 
promotion lab caches per worker per blksize")\ + "Minimum size of CMS gen promotion LAB caches per worker " \ + "per block size") \ \ product(uintx, CMSOldPLABMax, 1024, \ - "Max size of CMS gen promotion lab caches per worker per blksize")\ + "Maximum size of CMS gen promotion LAB caches per worker " \ + "per block size") \ \ product(uintx, CMSOldPLABNumRefills, 4, \ - "Nominal number of refills of CMS gen promotion lab cache" \ - " per worker per block size") \ + "Nominal number of refills of CMS gen promotion LAB cache " \ + "per worker per block size") \ \ product(bool, CMSOldPLABResizeQuicker, false, \ - "Whether to react on-the-fly during a scavenge to a sudden" \ - " change in block demand rate") \ + "React on-the-fly during a scavenge to a sudden " \ + "change in block demand rate") \ \ product(uintx, CMSOldPLABToleranceFactor, 4, \ - "The tolerance of the phase-change detector for on-the-fly" \ - " PLAB resizing during a scavenge") \ + "The tolerance of the phase-change detector for on-the-fly " \ + "PLAB resizing during a scavenge") \ \ product(uintx, CMSOldPLABReactivityFactor, 2, \ - "The gain in the feedback loop for on-the-fly PLAB resizing" \ - " during a scavenge") \ + "The gain in the feedback loop for on-the-fly PLAB resizing " \ + "during a scavenge") \ \ product(bool, AlwaysPreTouch, false, \ - "It forces all freshly committed pages to be pre-touched.") \ + "Force all freshly committed pages to be pre-touched") \ \ product_pd(uintx, CMSYoungGenPerWorker, \ "The maximum size of young gen chosen by default per GC worker " \ @@ -1589,64 +1606,67 @@ "Whether CMS GC should operate in \"incremental\" mode") \ \ product(uintx, CMSIncrementalDutyCycle, 10, \ - "CMS incremental mode duty cycle (a percentage, 0-100). If" \ - "CMSIncrementalPacing is enabled, then this is just the initial" \ - "value") \ + "Percentage (0-100) of CMS incremental mode duty cycle. 
If " \ + "CMSIncrementalPacing is enabled, then this is just the initial " \ + "value.") \ \ product(bool, CMSIncrementalPacing, true, \ "Whether the CMS incremental mode duty cycle should be " \ "automatically adjusted") \ \ product(uintx, CMSIncrementalDutyCycleMin, 0, \ - "Lower bound on the duty cycle when CMSIncrementalPacing is " \ - "enabled (a percentage, 0-100)") \ + "Minimum percentage (0-100) of the CMS incremental duty cycle " \ + "used when CMSIncrementalPacing is enabled") \ \ product(uintx, CMSIncrementalSafetyFactor, 10, \ "Percentage (0-100) used to add conservatism when computing the " \ "duty cycle") \ \ product(uintx, CMSIncrementalOffset, 0, \ - "Percentage (0-100) by which the CMS incremental mode duty cycle" \ - " is shifted to the right within the period between young GCs") \ + "Percentage (0-100) by which the CMS incremental mode duty cycle "\ + "is shifted to the right within the period between young GCs") \ \ product(uintx, CMSExpAvgFactor, 50, \ - "Percentage (0-100) used to weight the current sample when" \ - "computing exponential averages for CMS statistics.") \ + "Percentage (0-100) used to weigh the current sample when " \ + "computing exponential averages for CMS statistics") \ \ product(uintx, CMS_FLSWeight, 75, \ - "Percentage (0-100) used to weight the current sample when" \ - "computing exponentially decating averages for CMS FLS statistics.") \ + "Percentage (0-100) used to weigh the current sample when " \ + "computing exponentially decaying averages for CMS FLS " \ + "statistics") \ \ product(uintx, CMS_FLSPadding, 1, \ - "The multiple of deviation from mean to use for buffering" \ - "against volatility in free list demand.") \ + "The multiple of deviation from mean to use for buffering " \ + "against volatility in free list demand") \ \ product(uintx, FLSCoalescePolicy, 2, \ - "CMS: Aggression level for coalescing, increasing from 0 to 4") \ + "CMS: aggressiveness level for coalescing, increasing " \ + "from 0 to 4") \ \ product(bool, FLSAlwaysCoalesceLarge, false, \ - "CMS: Larger free blocks are always available for coalescing") \ + "CMS: larger free blocks are always available for coalescing") \ \ product(double, FLSLargestBlockCoalesceProximity, 0.99, \ - "CMS: the smaller the percentage the greater the coalition force")\ + "CMS: the smaller the percentage the greater the coalescing " \ + "force") \ \ product(double, CMSSmallCoalSurplusPercent, 1.05, \ - "CMS: the factor by which to inflate estimated demand of small" \ - " block sizes to prevent coalescing with an adjoining block") \ + "CMS: the factor by which to inflate estimated demand of small " \ + "block sizes to prevent coalescing with an adjoining block") \ \ product(double, CMSLargeCoalSurplusPercent, 0.95, \ - "CMS: the factor by which to inflate estimated demand of large" \ - " block sizes to prevent coalescing with an adjoining block") \ + "CMS: the factor by which to inflate estimated demand of large " \ + "block sizes to prevent coalescing with an adjoining block") \ \ product(double, CMSSmallSplitSurplusPercent, 1.10, \ - "CMS: the factor by which to inflate estimated demand of small" \ - " block sizes to prevent splitting to supply demand for smaller" \ - " blocks") \ + "CMS: the factor by which to inflate estimated demand of small " \ + "block sizes to prevent splitting to supply demand for smaller " \ + "blocks") \ \ product(double, CMSLargeSplitSurplusPercent, 1.00, \ - "CMS: the factor by which to inflate estimated demand of large" \ - " block sizes to prevent splitting to 
supply demand for smaller" \ - " blocks") \ + "CMS: the factor by which to inflate estimated demand of large " \ + "block sizes to prevent splitting to supply demand for smaller " \ + "blocks") \ \ product(bool, CMSExtrapolateSweep, false, \ "CMS: cushion for block demand during sweep") \ @@ -1658,11 +1678,11 @@ \ product(uintx, CMS_SweepPadding, 1, \ "The multiple of deviation from mean to use for buffering " \ - "against volatility in inter-sweep duration.") \ + "against volatility in inter-sweep duration") \ \ product(uintx, CMS_SweepTimerThresholdMillis, 10, \ "Skip block flux-rate sampling for an epoch unless inter-sweep " \ - "duration exceeds this threhold in milliseconds") \ + "duration exceeds this threshold in milliseconds") \ \ develop(bool, CMSTraceIncrementalMode, false, \ "Trace CMS incremental mode") \ @@ -1677,14 +1697,15 @@ "Whether class unloading enabled when using CMS GC") \ \ product(uintx, CMSClassUnloadingMaxInterval, 0, \ - "When CMS class unloading is enabled, the maximum CMS cycle count"\ - " for which classes may not be unloaded") \ + "When CMS class unloading is enabled, the maximum CMS cycle " \ + "count for which classes may not be unloaded") \ \ product(bool, CMSCompactWhenClearAllSoftRefs, true, \ - "Compact when asked to collect CMS gen with clear_all_soft_refs") \ + "Compact when asked to collect CMS gen with " \ + "clear_all_soft_refs()") \ \ product(bool, UseCMSCompactAtFullCollection, true, \ - "Use mark sweep compact at full collections") \ + "Use Mark-Sweep-Compact algorithm at full collections") \ \ product(uintx, CMSFullGCsBeforeCompaction, 0, \ "Number of CMS full collection done before compaction if > 0") \ @@ -1706,38 +1727,37 @@ "Warn in case of excessive CMS looping") \ \ develop(bool, CMSOverflowEarlyRestoration, false, \ - "Whether preserved marks should be restored early") \ + "Restore preserved marks early") \ \ product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \ "Size of marking stack") \ \ product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \ - "Max size of marking stack") \ + "Maximum size of marking stack") \ \ notproduct(bool, CMSMarkStackOverflowALot, false, \ - "Whether we should simulate frequent marking stack / work queue" \ - " overflow") \ + "Simulate frequent marking stack / work queue overflow") \ \ notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \ - "An `interval' counter that determines how frequently" \ - " we simulate overflow; a smaller number increases frequency") \ + "An \"interval\" counter that determines how frequently " \ + "to simulate overflow; a smaller number increases frequency") \ \ product(uintx, CMSMaxAbortablePrecleanLoops, 0, \ - "(Temporary, subject to experimentation)" \ + "(Temporary, subject to experimentation) " \ "Maximum number of abortable preclean iterations, if > 0") \ \ product(intx, CMSMaxAbortablePrecleanTime, 5000, \ - "(Temporary, subject to experimentation)" \ - "Maximum time in abortable preclean in ms") \ + "(Temporary, subject to experimentation) " \ + "Maximum time in abortable preclean (in milliseconds)") \ \ product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \ - "(Temporary, subject to experimentation)" \ + "(Temporary, subject to experimentation) " \ "Nominal minimum work per abortable preclean iteration") \ \ manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \ - "(Temporary, subject to experimentation)" \ - " Time that we sleep between iterations when not given" \ - " enough work per iteration") \ + "(Temporary, subject to experimentation) 
" \ + "Time that we sleep between iterations when not given " \ + "enough work per iteration") \ \ product(uintx, CMSRescanMultiple, 32, \ "Size (in cards) of CMS parallel rescan task") \ @@ -1755,23 +1775,24 @@ "Whether parallel remark enabled (only if ParNewGC)") \ \ product(bool, CMSParallelSurvivorRemarkEnabled, true, \ - "Whether parallel remark of survivor space" \ - " enabled (effective only if CMSParallelRemarkEnabled)") \ + "Whether parallel remark of survivor space " \ + "enabled (effective only if CMSParallelRemarkEnabled)") \ \ product(bool, CMSPLABRecordAlways, true, \ - "Whether to always record survivor space PLAB bdries" \ - " (effective only if CMSParallelSurvivorRemarkEnabled)") \ + "Always record survivor space PLAB boundaries (effective only " \ + "if CMSParallelSurvivorRemarkEnabled)") \ \ product(bool, CMSEdenChunksRecordAlways, true, \ - "Whether to always record eden chunks used for " \ - "the parallel initial mark or remark of eden" ) \ + "Always record eden chunks used for the parallel initial mark " \ + "or remark of eden") \ \ product(bool, CMSPrintEdenSurvivorChunks, false, \ "Print the eden and the survivor chunks used for the parallel " \ "initial mark or remark of the eden/survivor spaces") \ \ product(bool, CMSConcurrentMTEnabled, true, \ - "Whether multi-threaded concurrent work enabled (if ParNewGC)") \ + "Whether multi-threaded concurrent work enabled " \ + "(effective only if ParNewGC)") \ \ product(bool, CMSPrecleaningEnabled, true, \ "Whether concurrent precleaning enabled") \ @@ -1780,12 +1801,12 @@ "Maximum number of precleaning iteration passes") \ \ product(uintx, CMSPrecleanNumerator, 2, \ - "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ - " ratio") \ + "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ + "ratio") \ \ product(uintx, CMSPrecleanDenominator, 3, \ - "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ - " ratio") \ + "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ + "ratio") \ \ product(bool, CMSPrecleanRefLists1, true, \ "Preclean ref lists during (initial) preclean phase") \ @@ -1800,7 +1821,7 @@ "Preclean survivors during abortable preclean phase") \ \ product(uintx, CMSPrecleanThreshold, 1000, \ - "Don't re-iterate if #dirty cards less than this") \ + "Do not iterate again if number of dirty cards is less than this")\ \ product(bool, CMSCleanOnEnter, true, \ "Clean-on-enter optimization for reducing number of dirty cards") \ @@ -1809,14 +1830,16 @@ "Choose variant (1,2) of verification following remark") \ \ product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M, \ - "If Eden used is below this value, don't try to schedule remark") \ + "If Eden size is below this, do not try to schedule remark") \ \ product(uintx, CMSScheduleRemarkEdenPenetration, 50, \ - "The Eden occupancy % at which to try and schedule remark pause") \ + "The Eden occupancy percentage (0-100) at which " \ + "to try and schedule remark pause") \ \ product(uintx, CMSScheduleRemarkSamplingRatio, 5, \ - "Start sampling Eden top at least before yg occupancy reaches" \ - " 1/ of the size at which we plan to schedule remark") \ + "Start sampling eden top at least before young gen " \ + "occupancy reaches 1/ of the size at which " \ + "we plan to schedule remark") \ \ product(uintx, CMSSamplingGrain, 16*K, \ "The minimum distance between eden samples for CMS (see above)") \ @@ -1838,27 +1861,27 @@ "should start a collection cycle") \ \ product(bool, CMSYield, true, \ - "Yield between steps of 
concurrent mark & sweep") \ + "Yield between steps of CMS") \ \ product(uintx, CMSBitMapYieldQuantum, 10*M, \ - "Bitmap operations should process at most this many bits" \ + "Bitmap operations should process at most this many bits " \ "between yields") \ \ product(bool, CMSDumpAtPromotionFailure, false, \ "Dump useful information about the state of the CMS old " \ - " generation upon a promotion failure.") \ + "generation upon a promotion failure") \ \ product(bool, CMSPrintChunksInDump, false, \ "In a dump enabled by CMSDumpAtPromotionFailure, include " \ - " more detailed information about the free chunks.") \ + "more detailed information about the free chunks") \ \ product(bool, CMSPrintObjectsInDump, false, \ "In a dump enabled by CMSDumpAtPromotionFailure, include " \ - " more detailed information about the allocated objects.") \ + "more detailed information about the allocated objects") \ \ diagnostic(bool, FLSVerifyAllHeapReferences, false, \ - "Verify that all refs across the FLS boundary " \ - " are to valid objects") \ + "Verify that all references across the FLS boundary " \ + "are to valid objects") \ \ diagnostic(bool, FLSVerifyLists, false, \ "Do lots of (expensive) FreeListSpace verification") \ @@ -1870,17 +1893,18 @@ "Do lots of (expensive) FLS dictionary verification") \ \ develop(bool, VerifyBlockOffsetArray, false, \ - "Do (expensive!) block offset array verification") \ + "Do (expensive) block offset array verification") \ \ diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ - "Maintain _unallocated_block in BlockOffsetArray" \ - " (currently applicable only to CMS collector)") \ + "Maintain _unallocated_block in BlockOffsetArray " \ + "(currently applicable only to CMS collector)") \ \ develop(bool, TraceCMSState, false, \ "Trace the state of the CMS collection") \ \ product(intx, RefDiscoveryPolicy, 0, \ - "Whether reference-based(0) or referent-based(1)") \ + "Select type of reference discovery policy: " \ + "reference-based(0) or referent-based(1)") \ \ product(bool, ParallelRefProcEnabled, false, \ "Enable parallel reference processing whenever possible") \ @@ -1908,7 +1932,7 @@ "denotes 'do constant GC cycles'.") \ \ product(bool, UseCMSInitiatingOccupancyOnly, false, \ - "Only use occupancy as a crierion for starting a CMS collection") \ + "Only use occupancy as a criterion for starting a CMS collection")\ \ product(uintx, CMSIsTooFullPercentage, 98, \ "An absolute ceiling above which CMS will always consider the " \ @@ -1920,7 +1944,7 @@ \ notproduct(bool, CMSVerifyReturnedBytes, false, \ "Check that all the garbage collected was returned to the " \ - "free lists.") \ + "free lists") \ \ notproduct(bool, ScavengeALot, false, \ "Force scavenge at every Nth exit from the runtime system " \ @@ -1935,16 +1959,16 @@ \ product(bool, PrintPromotionFailure, false, \ "Print additional diagnostic information following " \ - " promotion failure") \ + "promotion failure") \ \ notproduct(bool, PromotionFailureALot, false, \ "Use promotion failure handling on every youngest generation " \ "collection") \ \ develop(uintx, PromotionFailureALotCount, 1000, \ - "Number of promotion failures occurring at ParGCAllocBuffer" \ + "Number of promotion failures occurring at ParGCAllocBuffer " \ "refill attempts (ParNew) or promotion attempts " \ - "(other young collectors) ") \ + "(other young collectors)") \ \ develop(uintx, PromotionFailureALotInterval, 5, \ "Total collections between promotion failures alot") \ @@ -1963,7 +1987,7 @@ "Ratio of hard spins to calls to 
yield") \ \ develop(uintx, ObjArrayMarkingStride, 512, \ - "Number of ObjArray elements to push onto the marking stack" \ + "Number of object array elements to push onto the marking stack " \ "before pushing a continuation entry") \ \ develop(bool, MetadataAllocationFailALot, false, \ @@ -1971,14 +1995,7 @@ "MetadataAllocationFailALotInterval") \ \ develop(uintx, MetadataAllocationFailALotInterval, 1000, \ - "metadata allocation failure alot interval") \ - \ - develop(bool, MetaDataDeallocateALot, false, \ - "Deallocation bunches of metadata at intervals controlled by " \ - "MetaDataAllocateALotInterval") \ - \ - develop(uintx, MetaDataDeallocateALotInterval, 100, \ - "Metadata deallocation alot interval") \ + "Metadata allocation failure a lot interval") \ \ develop(bool, TraceMetadataChunkAllocation, false, \ "Trace chunk metadata allocations") \ @@ -1990,7 +2007,7 @@ "Trace virtual space metadata allocations") \ \ notproduct(bool, ExecuteInternalVMTests, false, \ - "Enable execution of internal VM tests.") \ + "Enable execution of internal VM tests") \ \ notproduct(bool, VerboseInternalVMTests, false, \ "Turn on logging for internal VM tests.") \ @@ -1998,7 +2015,7 @@ product_pd(bool, UseTLAB, "Use thread-local object allocation") \ \ product_pd(bool, ResizeTLAB, \ - "Dynamically resize tlab size for threads") \ + "Dynamically resize TLAB size for threads") \ \ product(bool, ZeroTLAB, false, \ "Zero out the newly created TLAB") \ @@ -2010,7 +2027,8 @@ "Print various TLAB related information") \ \ product(bool, TLABStats, true, \ - "Print various TLAB related information") \ + "Provide more detailed and expensive TLAB statistics " \ + "(with PrintTLAB)") \ \ EMBEDDED_ONLY(product(bool, LowMemoryProtection, true, \ "Enable LowMemoryProtection")) \ @@ -2044,14 +2062,14 @@ "Fraction (1/n) of real memory used for initial heap size") \ \ develop(uintx, MaxVirtMemFraction, 2, \ - "Maximum fraction (1/n) of virtual memory used for ergonomically" \ + "Maximum fraction (1/n) of virtual memory used for ergonomically "\ "determining maximum heap size") \ \ product(bool, UseAutoGCSelectPolicy, false, \ "Use automatic collection selection policy") \ \ product(uintx, AutoGCSelectPauseMillis, 5000, \ - "Automatic GC selection pause threshhold in ms") \ + "Automatic GC selection pause threshold in milliseconds") \ \ product(bool, UseAdaptiveSizePolicy, true, \ "Use adaptive generation sizing policies") \ @@ -2066,7 +2084,7 @@ "Use adaptive young-old sizing policies at major collections") \ \ product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \ - "Use statistics from System.GC for adaptive size policy") \ + "Include statistics from System.gc() for adaptive size policy") \ \ product(bool, UseAdaptiveGCBoundary, false, \ "Allow young-old boundary to move") \ @@ -2078,16 +2096,16 @@ "Resize the virtual spaces of the young or old generations") \ \ product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ - "Policy for changeing generation size for throughput goals") \ + "Policy for changing generation size for throughput goals") \ \ product(uintx, AdaptiveSizePausePolicy, 0, \ "Policy for changing generation size for pause goals") \ \ develop(bool, PSAdjustTenuredGenForMinorPause, false, \ - "Adjust tenured generation to achive a minor pause goal") \ + "Adjust tenured generation to achieve a minor pause goal") \ \ develop(bool, PSAdjustYoungGenForMajorPause, false, \ - "Adjust young generation to achive a major pause goal") \ + "Adjust young generation to achieve a major pause goal") \ \ product(uintx, 
AdaptiveSizePolicyInitializingSteps, 20, \ "Number of steps where heuristics is used before data is used") \ @@ -2142,14 +2160,15 @@ "Decay factor to TenuredGenerationSizeIncrement") \ \ product(uintx, MaxGCPauseMillis, max_uintx, \ - "Adaptive size policy maximum GC pause time goal in msec, " \ - "or (G1 Only) the max. GC time per MMU time slice") \ + "Adaptive size policy maximum GC pause time goal in millisecond, "\ + "or (G1 Only) the maximum GC time per MMU time slice") \ \ product(uintx, GCPauseIntervalMillis, 0, \ "Time slice for MMU specification") \ \ product(uintx, MaxGCMinorPauseMillis, max_uintx, \ - "Adaptive size policy maximum GC minor pause time goal in msec") \ + "Adaptive size policy maximum GC minor pause time goal " \ + "in millisecond") \ \ product(uintx, GCTimeRatio, 99, \ "Adaptive size policy application time to GC time ratio") \ @@ -2167,7 +2186,7 @@ "Minimum ratio of young generation/survivor space size") \ \ product(uintx, InitialSurvivorRatio, 8, \ - "Initial ratio of eden/survivor space size") \ + "Initial ratio of young generation/survivor space size") \ \ product(uintx, BaseFootPrintEstimate, 256*M, \ "Estimate of footprint other than Java Heap") \ @@ -2177,8 +2196,8 @@ "before an OutOfMemory error is thrown") \ \ product(uintx, GCTimeLimit, 98, \ - "Limit of proportion of time spent in GC before an OutOfMemory" \ - "error is thrown (used with GCHeapFreeLimit)") \ + "Limit of the proportion of time spent in GC before " \ + "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \ \ product(uintx, GCHeapFreeLimit, 2, \ "Minimum percentage of free space after a full GC before an " \ @@ -2200,7 +2219,7 @@ "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ \ diagnostic(bool, VerifySilently, false, \ - "Don't print print the verification progress") \ + "Do not print the verification progress") \ \ diagnostic(bool, VerifyDuringStartup, false, \ "Verify memory system before executing any Java code " \ @@ -2223,7 +2242,7 @@ \ diagnostic(bool, DeferInitialCardMark, false, \ "When +ReduceInitialCardMarks, explicitly defer any that " \ - "may arise from new_pre_store_barrier") \ + "may arise from new_pre_store_barrier") \ \ diagnostic(bool, VerifyRememberedSets, false, \ "Verify GC remembered sets") \ @@ -2232,10 +2251,10 @@ "Verify GC object start array if verify before/after") \ \ product(bool, DisableExplicitGC, false, \ - "Tells whether calling System.gc() does a full GC") \ + "Ignore calls to System.gc()") \ \ notproduct(bool, CheckMemoryInitialization, false, \ - "Checks memory initialization") \ + "Check memory initialization") \ \ product(bool, CollectGen0First, false, \ "Collect youngest generation before each full GC") \ @@ -2256,44 +2275,45 @@ "Stride through processors when distributing processes") \ \ product(uintx, CMSCoordinatorYieldSleepCount, 10, \ - "number of times the coordinator GC thread will sleep while " \ + "Number of times the coordinator GC thread will sleep while " \ "yielding before giving up and resuming GC") \ \ product(uintx, CMSYieldSleepCount, 0, \ - "number of times a GC thread (minus the coordinator) " \ + "Number of times a GC thread (minus the coordinator) " \ "will sleep while yielding before giving up and resuming GC") \ \ /* gc tracing */ \ manageable(bool, PrintGC, false, \ - "Print message at garbage collect") \ + "Print message at garbage collection") \ \ manageable(bool, PrintGCDetails, false, \ - "Print more details at garbage collect") \ + "Print more details at garbage collection") \ \ 
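The PrintGC entries here, like every entry in this file, are rows of the RUNTIME_FLAGS X-macro table declared earlier in globals.hpp: each backslash-continued argument list is one flag, and the VM re-expands the same table with different macro bodies to produce the globals, their defaults, and the self-describing output behind flags such as -XX:+PrintFlagsFinal. A minimal sketch of that technique, assuming a simplified table with two hypothetical flags (the real macros also distinguish develop/product/diagnostic builds):

#include <stdio.h>
#include <stdbool.h>

#define MY_FLAGS(product, manageable)                                   \
  product(bool, UseExampleFeature, true,  "Hypothetical product flag")  \
  manageable(bool, PrintExample,  false,  "Hypothetical manageable flag")

/* Expansion 1: define the global variables with their defaults. */
#define DEFINE_FLAG(type, name, value, doc) type name = value;
MY_FLAGS(DEFINE_FLAG, DEFINE_FLAG)

/* Expansion 2: build a name/doc table for a PrintFlagsFinal-style dump. */
typedef struct { const char* name; const char* doc; } FlagDesc;
#define DESCRIBE_FLAG(type, name, value, doc) { #name, doc },
static const FlagDesc flag_table[] = { MY_FLAGS(DESCRIBE_FLAG, DESCRIBE_FLAG) };

int main(void) {
  for (size_t i = 0; i < sizeof(flag_table) / sizeof(flag_table[0]); i++) {
    printf("%s: %s\n", flag_table[i].name, flag_table[i].doc);
  }
  return 0;
}

Keeping the declaration, default, and documentation string in one row is what lets a cleanup patch like this one rewrite only the doc strings without touching any other code.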
manageable(bool, PrintGCDateStamps, false, \ - "Print date stamps at garbage collect") \ + "Print date stamps at garbage collection") \ \ manageable(bool, PrintGCTimeStamps, false, \ - "Print timestamps at garbage collect") \ + "Print timestamps at garbage collection") \ \ product(bool, PrintGCTaskTimeStamps, false, \ "Print timestamps for individual gc worker thread tasks") \ \ develop(intx, ConcGCYieldTimeout, 0, \ - "If non-zero, assert that GC threads yield within this # of ms.") \ + "If non-zero, assert that GC threads yield within this " \ + "number of milliseconds") \ \ notproduct(bool, TraceMarkSweep, false, \ "Trace mark sweep") \ \ product(bool, PrintReferenceGC, false, \ "Print times spent handling reference objects during GC " \ - " (enabled only when PrintGCDetails)") \ + "(enabled only when PrintGCDetails)") \ \ develop(bool, TraceReferenceGC, false, \ "Trace handling of soft/weak/final/phantom references") \ \ develop(bool, TraceFinalizerRegistration, false, \ - "Trace registration of final references") \ + "Trace registration of final references") \ \ notproduct(bool, TraceScavenge, false, \ "Trace scavenge") \ @@ -2330,7 +2350,7 @@ "Print heap layout before and after each GC") \ \ product_rw(bool, PrintHeapAtGCExtended, false, \ - "Prints extended information about the layout of the heap " \ + "Print extended information about the layout of the heap " \ "when -XX:+PrintHeapAtGC is set") \ \ product(bool, PrintHeapAtSIGBREAK, true, \ @@ -2367,45 +2387,45 @@ "Trace actions of the GC task threads") \ \ product(bool, PrintParallelOldGCPhaseTimes, false, \ - "Print the time taken by each parallel old gc phase." \ - "PrintGCDetails must also be enabled.") \ + "Print the time taken by each phase in ParallelOldGC " \ + "(PrintGCDetails must also be enabled)") \ \ develop(bool, TraceParallelOldGCMarkingPhase, false, \ - "Trace parallel old gc marking phase") \ + "Trace marking phase in ParallelOldGC") \ \ develop(bool, TraceParallelOldGCSummaryPhase, false, \ - "Trace parallel old gc summary phase") \ + "Trace summary phase in ParallelOldGC") \ \ develop(bool, TraceParallelOldGCCompactionPhase, false, \ - "Trace parallel old gc compaction phase") \ + "Trace compaction phase in ParallelOldGC") \ \ develop(bool, TraceParallelOldGCDensePrefix, false, \ - "Trace parallel old gc dense prefix computation") \ + "Trace dense prefix computation for ParallelOldGC") \ \ develop(bool, IgnoreLibthreadGPFault, false, \ "Suppress workaround for libthread GP fault") \ \ product(bool, PrintJNIGCStalls, false, \ - "Print diagnostic message when GC is stalled" \ + "Print diagnostic message when GC is stalled " \ "by JNI critical section") \ \ experimental(double, ObjectCountCutOffPercent, 0.5, \ "The percentage of the used heap that the instances of a class " \ - "must occupy for the class to generate a trace event.") \ + "must occupy for the class to generate a trace event") \ \ /* GC log rotation setting */ \ \ product(bool, UseGCLogFileRotation, false, \ - "Prevent large gclog file for long running app. " \ - "Requires -Xloggc:") \ + "Rotate gclog files (for long running applications). It requires "\ + "-Xloggc:") \ \ product(uintx, NumberOfGCLogFiles, 0, \ - "Number of gclog files in rotation, " \ - "Default: 0, no rotation") \ + "Number of gclog files in rotation " \ + "(default: 0, no rotation)") \ \ product(uintx, GCLogFileSize, 0, \ - "GC log file size, Default: 0 bytes, no rotation " \ - "Only valid with UseGCLogFileRotation") \ + "GC log file size (default: 0 bytes, no rotation). 
" \ + "It requires UseGCLogFileRotation") \ \ /* JVMTI heap profiling */ \ \ @@ -2482,40 +2502,40 @@ "Generate range checks for array accesses") \ \ develop_pd(bool, ImplicitNullChecks, \ - "generate code for implicit null checks") \ + "Generate code for implicit null checks") \ \ product(bool, PrintSafepointStatistics, false, \ - "print statistics about safepoint synchronization") \ + "Print statistics about safepoint synchronization") \ \ product(intx, PrintSafepointStatisticsCount, 300, \ - "total number of safepoint statistics collected " \ + "Total number of safepoint statistics collected " \ "before printing them out") \ \ product(intx, PrintSafepointStatisticsTimeout, -1, \ - "print safepoint statistics only when safepoint takes" \ - " more than PrintSafepointSatisticsTimeout in millis") \ + "Print safepoint statistics only when safepoint takes " \ + "more than PrintSafepointSatisticsTimeout in millis") \ \ product(bool, TraceSafepointCleanupTime, false, \ - "print the break down of clean up tasks performed during" \ - " safepoint") \ + "Print the break down of clean up tasks performed during " \ + "safepoint") \ \ product(bool, Inline, true, \ - "enable inlining") \ + "Enable inlining") \ \ product(bool, ClipInlining, true, \ - "clip inlining if aggregate method exceeds DesiredMethodLimit") \ + "Clip inlining if aggregate method exceeds DesiredMethodLimit") \ \ develop(bool, UseCHA, true, \ - "enable CHA") \ + "Enable CHA") \ \ product(bool, UseTypeProfile, true, \ "Check interpreter profile for historically monomorphic calls") \ \ notproduct(bool, TimeCompiler, false, \ - "time the compiler") \ + "Time the compiler") \ \ diagnostic(bool, PrintInlining, false, \ - "prints inlining optimizations") \ + "Print inlining optimizations") \ \ product(bool, UsePopCountInstruction, false, \ "Use population count instruction") \ @@ -2527,57 +2547,59 @@ "Print when methods are replaced do to recompilation") \ \ develop(bool, PrintMethodFlushing, false, \ - "print the nmethods being flushed") \ + "Print the nmethods being flushed") \ \ develop(bool, UseRelocIndex, false, \ - "use an index to speed random access to relocations") \ + "Use an index to speed random access to relocations") \ \ develop(bool, StressCodeBuffers, false, \ - "Exercise code buffer expansion and other rare state changes") \ + "Exercise code buffer expansion and other rare state changes") \ \ diagnostic(bool, DebugNonSafepoints, trueInDebug, \ - "Generate extra debugging info for non-safepoints in nmethods") \ + "Generate extra debugging information for non-safepoints in " \ + "nmethods") \ \ product(bool, PrintVMOptions, false, \ - "Print flags that appeared on the command line") \ + "Print flags that appeared on the command line") \ \ product(bool, IgnoreUnrecognizedVMOptions, false, \ - "Ignore unrecognized VM options") \ + "Ignore unrecognized VM options") \ \ product(bool, PrintCommandLineFlags, false, \ - "Print flags specified on command line or set by ergonomics") \ + "Print flags specified on command line or set by ergonomics") \ \ product(bool, PrintFlagsInitial, false, \ - "Print all VM flags before argument processing and exit VM") \ + "Print all VM flags before argument processing and exit VM") \ \ product(bool, PrintFlagsFinal, false, \ - "Print all VM flags after argument and ergonomic processing") \ + "Print all VM flags after argument and ergonomic processing") \ \ notproduct(bool, PrintFlagsWithComments, false, \ - "Print all VM flags with default values and descriptions and exit")\ + "Print all VM flags 
with default values and descriptions and " \ + "exit") \ \ diagnostic(bool, SerializeVMOutput, true, \ - "Use a mutex to serialize output to tty and LogFile") \ + "Use a mutex to serialize output to tty and LogFile") \ \ diagnostic(bool, DisplayVMOutput, true, \ - "Display all VM output on the tty, independently of LogVMOutput") \ + "Display all VM output on the tty, independently of LogVMOutput") \ \ diagnostic(bool, LogVMOutput, false, \ - "Save VM output to LogFile") \ + "Save VM output to LogFile") \ \ diagnostic(ccstr, LogFile, NULL, \ - "If LogVMOutput or LogCompilation is on, save VM output to " \ - "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)") \ + "If LogVMOutput or LogCompilation is on, save VM output to " \ + "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\ \ product(ccstr, ErrorFile, NULL, \ - "If an error occurs, save the error data to this file " \ - "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ + "If an error occurs, save the error data to this file " \ + "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ \ product(bool, DisplayVMOutputToStderr, false, \ - "If DisplayVMOutput is true, display all VM output to stderr") \ + "If DisplayVMOutput is true, display all VM output to stderr") \ \ product(bool, DisplayVMOutputToStdout, false, \ - "If DisplayVMOutput is true, display all VM output to stdout") \ + "If DisplayVMOutput is true, display all VM output to stdout") \ \ product(bool, UseHeavyMonitors, false, \ "use heavyweight instead of lightweight Java monitors") \ @@ -2601,7 +2623,7 @@ \ notproduct(ccstr, AbortVMOnExceptionMessage, NULL, \ "Call fatal if the exception pointed by AbortVMOnException " \ - "has this message.") \ + "has this message") \ \ develop(bool, DebugVtables, false, \ "add debugging code to vtable dispatch") \ @@ -2666,31 +2688,44 @@ product(bool, AggressiveOpts, false, \ "Enable aggressive optimizations - see arguments.cpp") \ \ + product_pd(uintx, TypeProfileLevel, \ + "=XYZ, with Z: Type profiling of arguments at call; " \ + "Y: Type profiling of return value at call; " \ + "X: Type profiling of parameters to methods; " \ + "X, Y and Z in 0=off; 1=jsr292 only; 2=all methods") \ + \ + product(intx, TypeProfileArgsLimit, 2, \ + "Maximum number of call arguments to consider for type profiling")\ + \ + product(intx, TypeProfileParmsLimit, 2, \ + "Maximum number of incoming parameters to consider for type " \ + "profiling, -1 for all") \ + \ /* statistics */ \ develop(bool, CountCompiledCalls, false, \ - "counts method invocations") \ + "Count method invocations") \ \ notproduct(bool, CountRuntimeCalls, false, \ - "counts VM runtime calls") \ + "Count VM runtime calls") \ \ develop(bool, CountJNICalls, false, \ - "counts jni method invocations") \ + "Count JNI method invocations") \ \ notproduct(bool, CountJVMCalls, false, \ - "counts jvm method invocations") \ + "Count JVM method invocations") \ \ notproduct(bool, CountRemovableExceptions, false, \ - "count exceptions that could be replaced by branches due to " \ + "Count exceptions that could be replaced by branches due to " \ "inlining") \ \ notproduct(bool, ICMissHistogram, false, \ - "produce histogram of IC misses") \ + "Produce histogram of IC misses") \ \ notproduct(bool, PrintClassStatistics, false, \ - "prints class statistics at end of run") \ + "Print class statistics at end of run") \ \ notproduct(bool, PrintMethodStatistics, false, \ - "prints method statistics at end of run") \ + "Print method statistics at end of run") \ \ /* interpreter */
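The TypeProfileLevel flag added above packs three independent settings into one decimal number; a minimal decoding sketch (illustrative only, derived from the flag description; `level` stands in for the flag's value):

    // TypeProfileLevel = XYZ; each digit: 0 = off, 1 = jsr292 only, 2 = all methods
    int z = level % 10;         // Z: profile argument types at calls
    int y = (level / 10) % 10;  // Y: profile return value types at calls
    int x = (level / 100) % 10; // X: profile incoming parameter types
    // Example: 111 profiles arguments, return values and parameters,
    // but only for jsr292 (method handle) call sites.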
\ develop(bool, ClearInterpreterLocals, false, \ @@ -2704,7 +2739,7 @@ "Rewrite frequently used bytecode pairs into a single bytecode") \ \ diagnostic(bool, PrintInterpreter, false, \ - "Prints the generated interpreter code") \ + "Print the generated interpreter code") \ \ product(bool, UseInterpreter, true, \ "Use interpreter for non-compiled methods") \ @@ -2722,8 +2757,8 @@ "Use fast method entry code for accessor methods") \ \ product_pd(bool, UseOnStackReplacement, \ - "Use on stack replacement, calls runtime if invoc. counter " \ - "overflows in loop") \ + "Use on stack replacement, calls runtime if invoc. counter " \ + "overflows in loop") \ \ notproduct(bool, TraceOnStackReplacement, false, \ "Trace on stack replacement") \ @@ -2771,10 +2806,10 @@ "Trace frequency based inlining") \ \ develop_pd(bool, InlineIntrinsics, \ - "Inline intrinsics that can be statically resolved") \ + "Inline intrinsics that can be statically resolved") \ \ product_pd(bool, ProfileInterpreter, \ - "Profile at the bytecode level during interpretation") \ + "Profile at the bytecode level during interpretation") \ \ develop_pd(bool, ProfileTraps, \ "Profile deoptimization traps at the bytecode level") \ @@ -2784,7 +2819,7 @@ "CompileThreshold) before using the method's profile") \ \ develop(bool, PrintMethodData, false, \ - "Print the results of +ProfileInterpreter at end of run") \ + "Print the results of +ProfileInterpreter at end of run") \ \ develop(bool, VerifyDataPointer, trueInDebug, \ "Verify the method data pointer during interpreter profiling") \ @@ -2799,7 +2834,7 @@ \ /* compilation */ \ product(bool, UseCompiler, true, \ - "use compilation") \ + "Use Just-In-Time compilation") \ \ develop(bool, TraceCompilationPolicy, false, \ "Trace compilation policy") \ @@ -2808,20 +2843,21 @@ "Time the compilation policy") \ \ product(bool, UseCounterDecay, true, \ - "adjust recompilation counters") \ + "Adjust recompilation counters") \ \ develop(intx, CounterHalfLifeTime, 30, \ - "half-life time of invocation counters (in secs)") \ + "Half-life time of invocation counters (in seconds)") \ \ develop(intx, CounterDecayMinIntervalLength, 500, \ - "Min. ms. between invocation of CounterDecay") \ + "The minimum interval (in milliseconds) between invocation of " \ + "CounterDecay") \ \ product(bool, AlwaysCompileLoopMethods, false, \ - "when using recompilation, never interpret methods " \ + "When using recompilation, never interpret methods " \ "containing loops") \ \ product(bool, DontCompileHugeMethods, true, \ - "don't compile methods > HugeMethodLimit") \ + "Do not compile methods > HugeMethodLimit") \ \ /* Bytecode escape analysis estimation. 
*/ \ product(bool, EstimateArgEscape, true, \ @@ -2831,10 +2867,10 @@ "How much tracing to do of bytecode escape analysis estimates") \ \ product(intx, MaxBCEAEstimateLevel, 5, \ - "Maximum number of nested calls that are analyzed by BC EA.") \ + "Maximum number of nested calls that are analyzed by BC EA") \ \ product(intx, MaxBCEAEstimateSize, 150, \ - "Maximum bytecode size of a method to be analyzed by BC EA.") \ + "Maximum bytecode size of a method to be analyzed by BC EA") \ \ product(intx, AllocatePrefetchStyle, 1, \ "0 = no prefetch, " \ @@ -2849,7 +2885,8 @@ "Number of lines to prefetch ahead of array allocation pointer") \ \ product(intx, AllocateInstancePrefetchLines, 1, \ - "Number of lines to prefetch ahead of instance allocation pointer") \ + "Number of lines to prefetch ahead of instance allocation " \ + "pointer") \ \ product(intx, AllocatePrefetchStepSize, 16, \ "Step size in bytes of sequential prefetch instructions") \ @@ -2872,8 +2909,8 @@ "(0 means off)") \ \ product(intx, MaxJavaStackTraceDepth, 1024, \ - "Max. no. of lines in the stack trace for Java exceptions " \ - "(0 means all)") \ + "The maximum number of lines in the stack trace for Java " \ + "exceptions (0 means all)") \ \ NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \ "Guarantee a safepoint (at least) every so many milliseconds " \ @@ -2897,10 +2934,10 @@ "result in more aggressive sweeping") \ \ notproduct(bool, LogSweeper, false, \ - "Keep a ring buffer of sweeper activity") \ + "Keep a ring buffer of sweeper activity") \ \ notproduct(intx, SweeperLogEntries, 1024, \ - "Number of records in the ring buffer of sweeper activity") \ + "Number of records in the ring buffer of sweeper activity") \ \ notproduct(intx, MemProfilingInterval, 500, \ "Time between each invocation of the MemProfiler") \ @@ -2938,39 +2975,43 @@ product(intx, MaxRecursiveInlineLevel, 1, \ "maximum number of nested recursive calls that are inlined") \ \ + develop(intx, MaxForceInlineLevel, 100, \ + "maximum number of nested @ForceInline calls that are inlined") \ + \ product_pd(intx, InlineSmallCode, \ "Only inline already compiled methods if their code size is " \ "less than this") \ \ product(intx, MaxInlineSize, 35, \ - "maximum bytecode size of a method to be inlined") \ + "The maximum bytecode size of a method to be inlined") \ \ product_pd(intx, FreqInlineSize, \ - "maximum bytecode size of a frequent method to be inlined") \ + "The maximum bytecode size of a frequent method to be inlined") \ \ product(intx, MaxTrivialSize, 6, \ - "maximum bytecode size of a trivial method to be inlined") \ + "The maximum bytecode size of a trivial method to be inlined") \ \ product(intx, MinInliningThreshold, 250, \ - "min. invocation count a method needs to have to be inlined") \ + "The minimum invocation count a method needs to have to be " \ + "inlined") \ \ develop(intx, MethodHistogramCutoff, 100, \ - "cutoff value for method invoc. 
histogram (+CountCalls)") \ + "The cutoff value for method invocation histogram (+CountCalls)") \ \ develop(intx, ProfilerNumberOfInterpretedMethods, 25, \ - "# of interpreted methods to show in profile") \ + "Number of interpreted methods to show in profile") \ \ develop(intx, ProfilerNumberOfCompiledMethods, 25, \ - "# of compiled methods to show in profile") \ + "Number of compiled methods to show in profile") \ \ develop(intx, ProfilerNumberOfStubMethods, 25, \ - "# of stub methods to show in profile") \ + "Number of stub methods to show in profile") \ \ develop(intx, ProfilerNumberOfRuntimeStubNodes, 25, \ - "# of runtime stub nodes to show in profile") \ + "Number of runtime stub nodes to show in profile") \ \ product(intx, ProfileIntervalsTicks, 100, \ - "# of ticks between printing of interval profile " \ + "Number of ticks between printing of interval profile " \ "(+ProfileIntervals)") \ \ notproduct(intx, ScavengeALotInterval, 1, \ @@ -2991,7 +3032,7 @@ \ develop(intx, MinSleepInterval, 1, \ "Minimum sleep() interval (milliseconds) when " \ - "ConvertSleepToYield is off (used for SOLARIS)") \ + "ConvertSleepToYield is off (used for Solaris)") \ \ develop(intx, ProfilerPCTickThreshold, 15, \ "Number of ticks in a PC buckets to be a hotspot") \ @@ -3002,29 +3043,26 @@ notproduct(intx, ZombieALotInterval, 5, \ "Number of exits until ZombieALot kicks in") \ \ - develop(bool, StressNonEntrant, false, \ - "Mark nmethods non-entrant at registration") \ - \ diagnostic(intx, MallocVerifyInterval, 0, \ - "if non-zero, verify C heap after every N calls to " \ + "If non-zero, verify C heap after every N calls to " \ "malloc/realloc/free") \ \ diagnostic(intx, MallocVerifyStart, 0, \ - "if non-zero, start verifying C heap after Nth call to " \ + "If non-zero, start verifying C heap after Nth call to " \ "malloc/realloc/free") \ \ diagnostic(uintx, MallocMaxTestWords, 0, \ - "if non-zero, max # of Words that malloc/realloc can allocate " \ - "(for testing only)") \ - \ - product_pd(intx, TypeProfileWidth, \ - "number of receiver types to record in call/cast profile") \ + "If non-zero, maximum number of words that malloc/realloc can " \ + "allocate (for testing only)") \ + \ + product(intx, TypeProfileWidth, 2, \ + "Number of receiver types to record in call/cast profile") \ \ product_pd(intx, MethodProfileWidth, \ "number of methods to record in call profile") \ \ develop(intx, BciProfileWidth, 2, \ - "number of return bci's to record in ret profile") \ + "Number of return bci's to record in ret profile") \ \ product(intx, PerMethodRecompilationCutoff, 400, \ "After recompiling N times, stay in the interpreter (-1=>'Inf')") \ @@ -3091,7 +3129,7 @@ "Percentage of Eden that can be wasted") \ \ product(uintx, TLABRefillWasteFraction, 64, \ - "Max TLAB waste at a refill (internal fragmentation)") \ + "Maximum TLAB waste at a refill (internal fragmentation)") \ \ product(uintx, TLABWasteIncrement, 4, \ "Increment allowed waste at slow allocation") \ @@ -3100,7 +3138,7 @@ "Ratio of eden/survivor space size") \ \ product(uintx, NewRatio, 2, \ - "Ratio of new/old generation sizes") \ + "Ratio of old/new generation sizes") \ \ product_pd(uintx, NewSizeThreadIncrease, \ "Additional size added to desired new generation size per " \ @@ -3117,28 +3155,34 @@ "class pointers are used") \ \ product(uintx, MinHeapFreeRatio, 40, \ - "Min percentage of heap free after GC to avoid expansion") \ + "The minimum percentage of heap free after GC to avoid expansion."\ + " For most GCs this applies to the old 
generation. In G1 it" \ + " applies to the whole heap. Not supported by ParallelGC.") \ \ product(uintx, MaxHeapFreeRatio, 70, \ - "Max percentage of heap free after GC to avoid shrinking") \ + "The maximum percentage of heap free after GC to avoid shrinking."\ + " For most GCs this applies to the old generation. In G1 it" \ + " applies to the whole heap. Not supported by ParallelGC.") \ \ product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ "Number of milliseconds per MB of free space in the heap") \ \ product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K), \ - "Min change in heap space due to GC (in bytes)") \ + "The minimum change in heap space due to GC (in bytes)") \ \ product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K), \ - "Min expansion of Metaspace (in bytes)") \ + "The minimum expansion of Metaspace (in bytes)") \ \ product(uintx, MinMetaspaceFreeRatio, 40, \ - "Min percentage of Metaspace free after GC to avoid expansion") \ + "The minimum percentage of Metaspace free after GC to avoid " \ + "expansion") \ \ product(uintx, MaxMetaspaceFreeRatio, 70, \ - "Max percentage of Metaspace free after GC to avoid shrinking") \ + "The maximum percentage of Metaspace free after GC to avoid " \ + "shrinking") \ \ product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \ - "Max expansion of Metaspace without full GC (in bytes)") \ + "The maximum expansion of Metaspace without full GC (in bytes)") \ \ product(uintx, QueuedAllocationWarningCount, 0, \ "Number of times an allocation that queues behind a GC " \ @@ -3160,13 +3204,14 @@ "Desired percentage of survivor space used after scavenge") \ \ product(uintx, MarkSweepDeadRatio, 5, \ - "Percentage (0-100) of the old gen allowed as dead wood." \ - "Serial mark sweep treats this as both the min and max value." \ - "CMS uses this value only if it falls back to mark sweep." \ - "Par compact uses a variable scale based on the density of the" \ - "generation and treats this as the max value when the heap is" \ - "either completely full or completely empty. Par compact also" \ - "has a smaller default value; see arguments.cpp.") \ + "Percentage (0-100) of the old gen allowed as dead wood. " \ + "Serial mark sweep treats this as both the minimum and maximum " \ + "value. " \ + "CMS uses this value only if it falls back to mark sweep. " \ + "Par compact uses a variable scale based on the density of the " \ + "generation and treats this as the maximum value when the heap " \ + "is either completely full or completely empty. 
Par compact " \ + "also has a smaller default value; see arguments.cpp.") \ \ product(uintx, MarkSweepAlwaysCompactCount, 4, \ "How often should we fully compact the heap (ignoring the dead " \ @@ -3185,27 +3230,27 @@ "Census for CMS' FreeListSpace") \ \ develop(uintx, GCExpandToAllocateDelayMillis, 0, \ - "Delay in ms between expansion and allocation") \ + "Delay between expansion and allocation (in milliseconds)") \ \ develop(uintx, GCWorkerDelayMillis, 0, \ - "Delay in ms in scheduling GC workers") \ + "Delay in scheduling GC workers (in milliseconds)") \ \ product(intx, DeferThrSuspendLoopCount, 4000, \ "(Unstable) Number of times to iterate in safepoint loop " \ - " before blocking VM threads ") \ + "before blocking VM threads ") \ \ product(intx, DeferPollingPageLoopCount, -1, \ "(Unsafe,Unstable) Number of iterations in safepoint loop " \ "before changing safepoint polling page to RO ") \ \ - product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ + product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ \ product(bool, PSChunkLargeArrays, true, \ - "true: process large arrays in chunks") \ + "Process large arrays in chunks") \ \ product(uintx, GCDrainStackTargetSize, 64, \ - "how many entries we'll try to leave on the stack during " \ - "parallel GC") \ + "Number of entries we will try to leave on the stack " \ + "during parallel gc") \ \ /* stack parameters */ \ product_pd(intx, StackYellowPages, \ @@ -3215,8 +3260,8 @@ "Number of red zone (unrecoverable overflows) pages") \ \ product_pd(intx, StackShadowPages, \ - "Number of shadow zone (for overflow checking) pages" \ - " this should exceed the depth of the VM and native call stack") \ + "Number of shadow zone (for overflow checking) pages " \ + "this should exceed the depth of the VM and native call stack") \ \ product_pd(intx, ThreadStackSize, \ "Thread Stack Size (in Kbytes)") \ @@ -3256,19 +3301,19 @@ "Reserved code cache size (in bytes) - maximum code cache size") \ \ product(uintx, CodeCacheMinimumFreeSpace, 500*K, \ - "When less than X space left, we stop compiling.") \ + "When less than X space left, we stop compiling") \ \ product_pd(uintx, CodeCacheExpansionSize, \ "Code cache expansion size (in bytes)") \ \ develop_pd(uintx, CodeCacheMinBlockLength, \ - "Minimum number of segments in a code cache block.") \ + "Minimum number of segments in a code cache block") \ \ notproduct(bool, ExitOnFullCodeCache, false, \ - "Exit the VM if we fill the code cache.") \ + "Exit the VM if we fill the code cache") \ \ product(bool, UseCodeCacheFlushing, true, \ - "Attempt to clean the code cache before shutting off compiler") \ + "Remove cold/old nmethods from the code cache") \ \ /* interpreter debugging */ \ develop(intx, BinarySwitchThreshold, 5, \ @@ -3276,31 +3321,31 @@ "switch") \ \ develop(intx, StopInterpreterAt, 0, \ - "Stops interpreter execution at specified bytecode number") \ + "Stop interpreter execution at specified bytecode number") \ \ develop(intx, TraceBytecodesAt, 0, \ - "Traces bytecodes starting with specified bytecode number") \ + "Trace bytecodes starting with specified bytecode number") \ \ /* compiler interface */ \ develop(intx, CIStart, 0, \ - "the id of the first compilation to permit") \ + "The id of the first compilation to permit") \ \ develop(intx, CIStop, -1, \ - "the id of the last compilation to permit") \ + "The id of the last compilation to permit") \ \ develop(intx, CIStartOSR, 0, \ - "the id of the first osr compilation to permit " \ + "The id of the first osr compilation to permit " 
\ "(CICountOSR must be on)") \ \ develop(intx, CIStopOSR, -1, \ - "the id of the last osr compilation to permit " \ + "The id of the last osr compilation to permit " \ "(CICountOSR must be on)") \ \ develop(intx, CIBreakAtOSR, -1, \ - "id of osr compilation to break at") \ + "The id of osr compilation to break at") \ \ develop(intx, CIBreakAt, -1, \ - "id of compilation to break at") \ + "The id of compilation to break at") \ \ product(ccstrlist, CompileOnly, "", \ "List of methods (pkg/class.name) to restrict compilation to") \ @@ -3319,11 +3364,11 @@ "[default: ./replay_pid%p.log] (%p replaced with pid)") \ \ develop(intx, ReplaySuppressInitializers, 2, \ - "Controls handling of class initialization during replay" \ - "0 - don't do anything special" \ - "1 - treat all class initializers as empty" \ - "2 - treat class initializers for application classes as empty" \ - "3 - allow all class initializers to run during bootstrap but" \ + "Control handling of class initialization during replay: " \ + "0 - don't do anything special; " \ + "1 - treat all class initializers as empty; " \ + "2 - treat class initializers for application classes as empty; " \ + "3 - allow all class initializers to run during bootstrap but " \ " pretend they are empty after starting replay") \ \ develop(bool, ReplayIgnoreInitErrors, false, \ @@ -3352,14 +3397,15 @@ "0 : Normal. "\ " VM chooses priorities that are appropriate for normal "\ " applications. On Solaris NORM_PRIORITY and above are mapped "\ - " to normal native priority. Java priorities below NORM_PRIORITY"\ - " map to lower native priority values. On Windows applications"\ - " are allowed to use higher native priorities. However, with "\ - " ThreadPriorityPolicy=0, VM will not use the highest possible"\ - " native priority, THREAD_PRIORITY_TIME_CRITICAL, as it may "\ - " interfere with system threads. On Linux thread priorities "\ - " are ignored because the OS does not support static priority "\ - " in SCHED_OTHER scheduling class which is the only choice for"\ + " to normal native priority. Java priorities below " \ + " NORM_PRIORITY map to lower native priority values. On "\ + " Windows applications are allowed to use higher native "\ + " priorities. However, with ThreadPriorityPolicy=0, VM will "\ + " not use the highest possible native priority, "\ + " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\ + " system threads. On Linux thread priorities are ignored "\ + " because the OS does not support static priority in "\ + " SCHED_OTHER scheduling class which is the only choice for "\ " non-root, non-realtime applications. "\ "1 : Aggressive. 
"\ " Java thread priorities map over to the entire range of "\ @@ -3390,16 +3436,35 @@ product(bool, VMThreadHintNoPreempt, false, \ "(Solaris only) Give VM thread an extra quanta") \ \ - product(intx, JavaPriority1_To_OSPriority, -1, "Map Java priorities to OS priorities") \ - product(intx, JavaPriority2_To_OSPriority, -1, "Map Java priorities to OS priorities") \ - product(intx, JavaPriority3_To_OSPriority, -1, "Map Java priorities to OS priorities") \ - product(intx, JavaPriority4_To_OSPriority, -1, "Map Java priorities to OS priorities") \ - product(intx, JavaPriority5_To_OSPriority, -1, "Map Java priorities to OS priorities") \ - product(intx, JavaPriority6_To_OSPriority, -1, "Map Java priorities to OS priorities") \ - product(intx, JavaPriority7_To_OSPriority, -1, "Map Java priorities to OS priorities") \ - product(intx, JavaPriority8_To_OSPriority, -1, "Map Java priorities to OS priorities") \ - product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \ - product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \ + product(intx, JavaPriority1_To_OSPriority, -1, \ + "Map Java priorities to OS priorities") \ + \ + product(intx, JavaPriority2_To_OSPriority, -1, \ + "Map Java priorities to OS priorities") \ + \ + product(intx, JavaPriority3_To_OSPriority, -1, \ + "Map Java priorities to OS priorities") \ + \ + product(intx, JavaPriority4_To_OSPriority, -1, \ + "Map Java priorities to OS priorities") \ + \ + product(intx, JavaPriority5_To_OSPriority, -1, \ + "Map Java priorities to OS priorities") \ + \ + product(intx, JavaPriority6_To_OSPriority, -1, \ + "Map Java priorities to OS priorities") \ + \ + product(intx, JavaPriority7_To_OSPriority, -1, \ + "Map Java priorities to OS priorities") \ + \ + product(intx, JavaPriority8_To_OSPriority, -1, \ + "Map Java priorities to OS priorities") \ + \ + product(intx, JavaPriority9_To_OSPriority, -1, \ + "Map Java priorities to OS priorities") \ + \ + product(intx, JavaPriority10_To_OSPriority,-1, \ + "Map Java priorities to OS priorities") \ \ experimental(bool, UseCriticalJavaThreadPriority, false, \ "Java thread priority 10 maps to critical scheduling priority") \ @@ -3430,37 +3495,38 @@ "Used with +TraceLongCompiles") \ \ product(intx, StarvationMonitorInterval, 200, \ - "Pause between each check in ms") \ + "Pause between each check (in milliseconds)") \ \ /* recompilation */ \ product_pd(intx, CompileThreshold, \ "number of interpreted method invocations before (re-)compiling") \ \ product_pd(intx, BackEdgeThreshold, \ - "Interpreter Back edge threshold at which an OSR compilation is invoked")\ + "Interpreter Back edge threshold at which an OSR compilation is " \ + "invoked") \ \ product(intx, Tier0InvokeNotifyFreqLog, 7, \ - "Interpreter (tier 0) invocation notification frequency.") \ + "Interpreter (tier 0) invocation notification frequency") \ \ product(intx, Tier2InvokeNotifyFreqLog, 11, \ - "C1 without MDO (tier 2) invocation notification frequency.") \ + "C1 without MDO (tier 2) invocation notification frequency") \ \ product(intx, Tier3InvokeNotifyFreqLog, 10, \ "C1 with MDO profiling (tier 3) invocation notification " \ - "frequency.") \ + "frequency") \ \ product(intx, Tier23InlineeNotifyFreqLog, 20, \ "Inlinee invocation (tiers 2 and 3) notification frequency") \ \ product(intx, Tier0BackedgeNotifyFreqLog, 10, \ - "Interpreter (tier 0) invocation notification frequency.") \ + "Interpreter (tier 0) invocation notification frequency") \ \ product(intx, 
Tier2BackedgeNotifyFreqLog, 14, \ - "C1 without MDO (tier 2) invocation notification frequency.") \ + "C1 without MDO (tier 2) invocation notification frequency") \ \ product(intx, Tier3BackedgeNotifyFreqLog, 13, \ "C1 with MDO profiling (tier 3) invocation notification " \ - "frequency.") \ + "frequency") \ \ product(intx, Tier2CompileThreshold, 0, \ "threshold at which tier 2 compilation is invoked") \ @@ -3477,7 +3543,7 @@ \ product(intx, Tier3CompileThreshold, 2000, \ "Threshold at which tier 3 compilation is invoked (invocation " \ - "minimum must be satisfied.") \ + "minimum must be satisfied") \ \ product(intx, Tier3BackEdgeThreshold, 60000, \ "Back edge threshold at which tier 3 OSR compilation is invoked") \ @@ -3491,7 +3557,7 @@ \ product(intx, Tier4CompileThreshold, 15000, \ "Threshold at which tier 4 compilation is invoked (invocation " \ - "minimum must be satisfied.") \ + "minimum must be satisfied") \ \ product(intx, Tier4BackEdgeThreshold, 40000, \ "Back edge threshold at which tier 4 OSR compilation is invoked") \ @@ -3520,12 +3586,12 @@ "Stop at given compilation level") \ \ product(intx, Tier0ProfilingStartPercentage, 200, \ - "Start profiling in interpreter if the counters exceed tier 3" \ + "Start profiling in interpreter if the counters exceed tier 3 " \ "thresholds by the specified percentage") \ \ product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \ - "Increase the compile threshold for C1 compilation if the code" \ - "cache is filled by the specified percentage.") \ + "Increase the compile threshold for C1 compilation if the code " \ + "cache is filled by the specified percentage") \ \ product(intx, TieredRateUpdateMinTime, 1, \ "Minimum rate sampling interval (in milliseconds)") \ @@ -3540,24 +3606,26 @@ "Print tiered events notifications") \ \ product_pd(intx, OnStackReplacePercentage, \ - "NON_TIERED number of method invocations/branches (expressed as %"\ - "of CompileThreshold) before (re-)compiling OSR code") \ + "NON_TIERED number of method invocations/branches (expressed as " \ + "% of CompileThreshold) before (re-)compiling OSR code") \ \ product(intx, InterpreterProfilePercentage, 33, \ - "NON_TIERED number of method invocations/branches (expressed as %"\ - "of CompileThreshold) before profiling in the interpreter") \ + "NON_TIERED number of method invocations/branches (expressed as " \ + "% of CompileThreshold) before profiling in the interpreter") \ \ develop(intx, MaxRecompilationSearchLength, 10, \ - "max. # frames to inspect searching for recompilee") \ + "The maximum number of frames to inspect when searching for " \ + "recompilee") \ \ develop(intx, MaxInterpretedSearchLength, 3, \ - "max. # interp. frames to skip when searching for recompilee") \ + "The maximum number of interpreted frames to skip when searching "\ + "for recompilee") \ \ develop(intx, DesiredMethodLimit, 8000, \ - "desired max. method size (in bytecodes) after inlining") \ + "The desired maximum method size (in bytecodes) after inlining") \ \ develop(intx, HugeMethodLimit, 8000, \ - "don't compile methods larger than this if " \ + "Don't compile methods larger than this if " \ "+DontCompileHugeMethods") \ \ /* New JDK 1.4 reflection implementation */ \ @@ -3579,7 +3647,7 @@ "in InvocationTargetException. 
See 6531596") \ \ develop(bool, VerifyLambdaBytecodes, false, \ - "Force verification of jdk 8 lambda metafactory bytecodes.") \ + "Force verification of jdk 8 lambda metafactory bytecodes") \ \ develop(intx, FastSuperclassLimit, 8, \ "Depth of hardwired instanceof accelerator array") \ @@ -3603,18 +3671,19 @@ /* flags for performance data collection */ \ \ product(bool, UsePerfData, falseInEmbedded, \ - "Flag to disable jvmstat instrumentation for performance testing" \ - "and problem isolation purposes.") \ + "Flag to disable jvmstat instrumentation for performance testing "\ + "and problem isolation purposes") \ \ product(bool, PerfDataSaveToFile, false, \ "Save PerfData memory to hsperfdata_ file on exit") \ \ product(ccstr, PerfDataSaveFile, NULL, \ - "Save PerfData memory to the specified absolute pathname," \ - "%p in the file name if present will be replaced by pid") \ - \ - product(intx, PerfDataSamplingInterval, 50 /*ms*/, \ - "Data sampling interval in milliseconds") \ + "Save PerfData memory to the specified absolute pathname. " \ + "The string %p in the file name (if present) " \ + "will be replaced by pid") \ + \ + product(intx, PerfDataSamplingInterval, 50, \ + "Data sampling interval (in milliseconds)") \ \ develop(bool, PerfTraceDataCreation, false, \ "Trace creation of Performance Data Entries") \ @@ -3639,7 +3708,7 @@ "Bypass Win32 file system criteria checks (Windows Only)") \ \ product(intx, UnguardOnExecutionViolation, 0, \ - "Unguard page and retry on no-execute fault (Win32 only)" \ + "Unguard page and retry on no-execute fault (Win32 only) " \ "0=off, 1=conservative, 2=aggressive") \ \ /* Serviceability Support */ \ @@ -3648,7 +3717,7 @@ "Create JMX Management Server") \ \ product(bool, DisableAttachMechanism, false, \ - "Disable mechanism that allows tools to attach to this VM") \ + "Disable mechanism that allows tools to attach to this VM") \ \ product(bool, StartAttachListener, false, \ "Always start Attach Listener at VM startup") \ @@ -3671,9 +3740,9 @@ "Require shared spaces for metadata") \ \ product(bool, DumpSharedSpaces, false, \ - "Special mode: JVM reads a class list, loads classes, builds " \ - "shared spaces, and dumps the shared spaces to a file to be " \ - "used in future JVM runs.") \ + "Special mode: JVM reads a class list, loads classes, builds " \ + "shared spaces, and dumps the shared spaces to a file to be " \ + "used in future JVM runs") \ \ product(bool, PrintSharedSpaces, false, \ "Print usage of shared spaces") \ @@ -3746,11 +3815,14 @@ "Relax the access control checks in the verifier") \ \ diagnostic(bool, PrintDTraceDOF, false, \ - "Print the DTrace DOF passed to the system for JSDT probes") \ + "Print the DTrace DOF passed to the system for JSDT probes") \ \ product(uintx, StringTableSize, defaultStringTableSize, \ "Number of buckets in the interned String table") \ \ + experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \ + "Number of buckets in the JVM internal Symbol table") \ + \ develop(bool, TraceDefaultMethods, false, \ "Trace the default method processing steps") \ \ @@ -3759,8 +3831,8 @@ \ product(bool, UseVMInterruptibleIO, false, \ "(Unstable, Solaris-specific) Thread interrupt before or with " \ - "EINTR for I/O operations results in OS_INTRPT. The default value"\ - " of this flag is true for JDK 6 and earlier") \ + "EINTR for I/O operations results in OS_INTRPT. 
The default " \ + "value of this flag is true for JDK 6 and earlier") \ \ diagnostic(bool, WhiteBoxAPI, false, \ "Enable internal testing APIs") \ @@ -3787,10 +3859,10 @@ \ product(bool, EnableTracing, false, \ "Enable event-based tracing") \ + \ product(bool, UseLockedTracing, false, \ "Use locked-tracing when doing event-based tracing") - /* * Macros for factoring of globals */ diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/handles.cpp --- a/src/share/vm/runtime/handles.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/handles.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -45,7 +45,7 @@ oop* HandleArea::allocate_handle(oop obj) { assert(_handle_mark_nesting > 1, "memory leak: allocating handle outside HandleMark"); assert(_no_handle_mark_nesting == 0, "allocating handle inside NoHandleMark"); - assert(obj->is_oop(), "sanity check"); + assert(obj->is_oop(), err_msg("not an oop: " INTPTR_FORMAT, (intptr_t*) obj)); return real_allocate_handle(obj); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/java.cpp --- a/src/share/vm/runtime/java.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/java.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -186,6 +186,7 @@ collected_profiled_methods->sort(&compare_methods); int count = collected_profiled_methods->length(); + int total_size = 0; if (count > 0) { for (int index = 0; index < count; index++) { Method* m = collected_profiled_methods->at(index); @@ -193,10 +194,18 @@ tty->print_cr("------------------------------------------------------------------------"); //m->print_name(tty); m->print_invocation_count(); + tty->print_cr(" mdo size: %d bytes", m->method_data()->size_in_bytes()); tty->cr(); + // Dump data on parameters if any + if (m->method_data() != NULL && m->method_data()->parameters_type_data() != NULL) { + tty->fill_to(2); + m->method_data()->parameters_type_data()->print_data_on(tty); + } m->print_codes(); + total_size += m->method_data()->size_in_bytes(); } tty->print_cr("------------------------------------------------------------------------"); + tty->print_cr("Total MDO size: %d bytes", total_size); } } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/reflectionUtils.cpp --- a/src/share/vm/runtime/reflectionUtils.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/reflectionUtils.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,8 +27,11 @@ #include "memory/universe.inline.hpp" #include "runtime/reflectionUtils.hpp" -KlassStream::KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only) { - _klass = klass; +KlassStream::KlassStream(instanceKlassHandle klass, bool local_only, + bool classes_only, bool walk_defaults) { + _klass = _base_klass = klass; + _base_class_search_defaults = false; + _defaults_checked = false; if (classes_only) { _interfaces = Universe::the_empty_klass_array(); } else { @@ -37,6 +40,7 @@ _interface_index = _interfaces->length(); _local_only = local_only; _classes_only = classes_only; + _walk_defaults = walk_defaults; } bool KlassStream::eos() { @@ -45,7 +49,13 @@ if (!_klass->is_interface() && _klass->super() != NULL) { // go up superclass chain (not for interfaces) _klass = _klass->super(); + // Next for method walks, walk default methods + } else if (_walk_defaults && (_defaults_checked == false) && (_base_klass->default_methods() != NULL)) { + _base_class_search_defaults = true; + _klass = _base_klass; + _defaults_checked = true; } else { + // Next walk transitive interfaces if (_interface_index > 0) { _klass = _interfaces->at(--_interface_index); } else { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/reflectionUtils.hpp --- a/src/share/vm/runtime/reflectionUtils.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/reflectionUtils.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -38,7 +38,7 @@ // and (super)interfaces. Streaming is done in reverse order (subclasses first, // interfaces last). // -// for (KlassStream st(k, false, false); !st.eos(); st.next()) { +// for (KlassStream st(k, false, false, false); !st.eos(); st.next()) { // Klass* k = st.klass(); // ... 
// } @@ -46,17 +46,21 @@ class KlassStream VALUE_OBJ_CLASS_SPEC { protected: instanceKlassHandle _klass; // current klass/interface iterated over - Array* _interfaces; // transitive interfaces for initial class + instanceKlassHandle _base_klass; // initial klass/interface to iterate over + Array* _interfaces; // transitive interfaces for initial class int _interface_index; // current interface being processed bool _local_only; // process initial class/interface only bool _classes_only; // process classes only (no interfaces) + bool _walk_defaults; // process default methods + bool _base_class_search_defaults; // time to process default methods + bool _defaults_checked; // already checked for default methods int _index; - virtual int length() const = 0; + virtual int length() = 0; public: // constructor - KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only); + KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only, bool walk_defaults); // testing bool eos(); @@ -67,6 +71,8 @@ // accessors instanceKlassHandle klass() const { return _klass; } int index() const { return _index; } + bool base_class_search_defaults() const { return _base_class_search_defaults; } + void base_class_search_defaults(bool b) { _base_class_search_defaults = b; } }; @@ -81,17 +87,24 @@ class MethodStream : public KlassStream { private: - int length() const { return methods()->length(); } - Array* methods() const { return _klass->methods(); } + int length() { return methods()->length(); } + Array* methods() { + if (base_class_search_defaults()) { + base_class_search_defaults(false); + return _klass->default_methods(); + } else { + return _klass->methods(); + } + } public: MethodStream(instanceKlassHandle klass, bool local_only, bool classes_only) - : KlassStream(klass, local_only, classes_only) { + : KlassStream(klass, local_only, classes_only, true) { _index = length(); next(); } void next() { _index--; } - Method* method() const { return methods()->at(index()); } + Method* method() { return methods()->at(index()); } }; @@ -107,13 +120,13 @@ class FieldStream : public KlassStream { private: - int length() const { return _klass->java_fields_count(); } + int length() { return _klass->java_fields_count(); } fieldDescriptor _fd_buf; public: FieldStream(instanceKlassHandle klass, bool local_only, bool classes_only) - : KlassStream(klass, local_only, classes_only) { + : KlassStream(klass, local_only, classes_only, false) { _index = length(); next(); } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/signature.cpp --- a/src/share/vm/runtime/signature.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/signature.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -378,6 +378,16 @@ return result; } +int SignatureStream::reference_parameter_count() { + int args_count = 0; + for ( ; !at_return_type(); next()) { + if (is_object()) { + args_count++; + } + } + return args_count; +} + bool SignatureVerifier::is_valid_signature(Symbol* sig) { const char* signature = (const char*)sig->bytes(); ssize_t len = sig->utf8_length(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/signature.hpp --- a/src/share/vm/runtime/signature.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/signature.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -401,6 +401,9 @@ // return same as_symbol except allocation of new symbols is avoided. 
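A usage sketch for the SignatureStream::reference_parameter_count() helper introduced above (hedged: it assumes SignatureStream's existing constructor from a method signature Symbol, and that is_object() is true for both object and array types, as elsewhere in HotSpot):

    // For the signature (Ljava/lang/String;I[J)V this yields 2:
    // the String and the long[] are references, the int is not.
    SignatureStream ss(method->signature());
    int ref_args = ss.reference_parameter_count();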
Symbol* as_symbol_or_null(); + + // count the number of references in the signature + int reference_parameter_count(); }; class SignatureVerifier : public StackObj { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/sweeper.cpp --- a/src/share/vm/runtime/sweeper.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/sweeper.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -112,14 +112,13 @@ if (_records != NULL) { _records[_sweep_index].traversal = _traversals; _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark; - _records[_sweep_index].invocation = _invocations; + _records[_sweep_index].invocation = _sweep_fractions_left; _records[_sweep_index].compile_id = nm->compile_id(); _records[_sweep_index].kind = nm->compile_kind(); _records[_sweep_index].state = nm->_state; _records[_sweep_index].vep = nm->verified_entry_point(); _records[_sweep_index].uep = nm->entry_point(); _records[_sweep_index].line = line; - _sweep_index = (_sweep_index + 1) % SweeperLogEntries; } } @@ -127,26 +126,29 @@ #define SWEEP(nm) #endif -nmethod* NMethodSweeper::_current = NULL; // Current nmethod -long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed -int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache -int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep -int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep -int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep - -volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we are completed with this pass -volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress. +nmethod* NMethodSweeper::_current = NULL; // Current nmethod +long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID. +long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper +long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened +int NMethodSweeper::_seen = 0; // Nof. nmethod we have currently processed in current pass of CodeCache +int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep +int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep +int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep -jint NMethodSweeper::_locked_seen = 0; -jint NMethodSweeper::_not_entrant_seen_on_stack = 0; -bool NMethodSweeper::_request_mark_phase = false; +volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper +volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. 
invocations left until we are completed with this pass +volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper +volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from: + // 1) alive -> not_entrant + // 2) not_entrant -> zombie + // 3) zombie -> marked_for_reclamation -int NMethodSweeper::_total_nof_methods_reclaimed = 0; -jlong NMethodSweeper::_total_time_sweeping = 0; -jlong NMethodSweeper::_total_time_this_sweep = 0; -jlong NMethodSweeper::_peak_sweep_time = 0; -jlong NMethodSweeper::_peak_sweep_fraction_time = 0; -int NMethodSweeper::_hotness_counter_reset_val = 0; +int NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed +jlong NMethodSweeper::_total_time_sweeping = 0; // Accumulated time sweeping +jlong NMethodSweeper::_total_time_this_sweep = 0; // Total time this sweep +jlong NMethodSweeper::_peak_sweep_time = 0; // Peak time for a full sweep +jlong NMethodSweeper::_peak_sweep_fraction_time = 0; // Peak time sweeping one fraction +int NMethodSweeper::_hotness_counter_reset_val = 0; class MarkActivationClosure: public CodeBlobClosure { @@ -197,13 +199,16 @@ return; } + // Increase time so that we can estimate when to invoke the sweeper again. + _time_counter++; + // Check for restart assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid"); - if (!sweep_in_progress() && need_marking_phase()) { - _seen = 0; - _invocations = NmethodSweepFraction; - _current = CodeCache::first_nmethod(); - _traversals += 1; + if (!sweep_in_progress()) { + _seen = 0; + _sweep_fractions_left = NmethodSweepFraction; + _current = CodeCache::first_nmethod(); + _traversals += 1; _total_time_this_sweep = 0; if (PrintMethodFlushing) { @@ -211,10 +216,6 @@ } Threads::nmethods_do(&mark_activation_closure); - // reset the flags since we started a scan from the beginning. - reset_nmethod_marking(); - _locked_seen = 0; - _not_entrant_seen_on_stack = 0; } else { // Only set hotness counter Threads::nmethods_do(&set_hotness_closure); @@ -222,14 +223,48 @@ OrderAccess::storestore(); } - +/** + * This function invokes the sweeper if at least one of the three conditions is met: + * (1) The code cache is getting full + * (2) There are sufficient state changes in/since the last sweep. + * (3) We have not been sweeping for 'some time' + */ void NMethodSweeper::possibly_sweep() { assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode"); if (!MethodFlushing || !sweep_in_progress()) { return; } - if (_invocations > 0) { + // If there was no state change while nmethod sweeping, 'should_sweep' will be false. + // This is one of the two places where should_sweep can be set to true. The general + // idea is as follows: If there is enough free space in the code cache, there is no + // need to invoke the sweeper. The following formula (which determines whether to invoke + // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes + // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore, + // the formula considers how much space in the code cache is currently used. Here are + // some examples that will (hopefully) help in understanding. + // + // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since + // the result of the division is 0. This + // keeps the used code cache size small + // (important for embedded Java) + // Large ReservedCodeCacheSize: (e.g., 256M + code cache is 10% full). The formula + // computes: (256 / 16) - 1 = 15 + // As a result, we invoke the sweeper after + // 15 invocations of 'mark_active_nmethods'. + // Large ReservedCodeCacheSize: (e.g., 256M + code cache is 90% full). The formula + // computes: (256 / 16) - 10 = 6. + if (!_should_sweep) { + int time_since_last_sweep = _time_counter - _last_sweep; + double wait_until_next_sweep = (ReservedCodeCacheSize / (16 * M)) - time_since_last_sweep - + CodeCache::reverse_free_ratio(); + + if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) { + _should_sweep = true; + } + } + + if (_should_sweep && _sweep_fractions_left > 0) { // Only one thread at a time will sweep jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 ); if (old != 0) { @@ -242,31 +277,46 @@ memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries); } #endif - if (_invocations > 0) { + + if (_sweep_fractions_left > 0) { sweep_code_cache(); - _invocations--; + _sweep_fractions_left--; + } + + // We are done with sweeping the code cache once. + if (_sweep_fractions_left == 0) { + _last_sweep = _time_counter; + // Reset flag; temporarily disables sweeper + _should_sweep = false; + // If there was enough state change, 'possibly_enable_sweeper()' + // sets '_should_sweep' to true + possibly_enable_sweeper(); + // Reset _bytes_changed only if there was enough state change. _bytes_changed + // can further increase by calls to 'report_state_change'. + if (_should_sweep) { + _bytes_changed = 0; + } } _sweep_started = 0; } } void NMethodSweeper::sweep_code_cache() { - jlong sweep_start_counter = os::elapsed_counter(); - _flushed_count = 0; - _zombified_count = 0; - _marked_count = 0; + _flushed_count = 0; + _zombified_count = 0; + _marked_for_reclamation_count = 0; if (PrintMethodFlushing && Verbose) { - tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations); + tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left); } if (!CompileBroker::should_compile_new_jobs()) { // If we have turned off compilations we might as well do full sweeps // in order to reach the clean state faster. Otherwise the sleeping compiler // threads will slow down sweeping. - _invocations = 1; + _sweep_fractions_left = 1; } // We want to visit all nmethods after NmethodSweepFraction @@ -274,7 +324,7 @@ // remaining number of invocations. This is only an estimate since // the number of nmethods changes during the sweep so the final // stage must iterate until there are no more nmethods.
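To make the gating arithmetic above concrete, a worked example with illustrative numbers (it assumes, as the comment's own examples do, that reverse_free_ratio() approaches max_capacity / unallocated_capacity as the cache fills):

    // ReservedCodeCacheSize = 256M, code cache 50% full:
    //   reverse_free_ratio()  ~ 1 / 0.5 = 2
    //   wait_until_next_sweep = (256M / 16M) - time_since_last_sweep - 2
    //                         = 14 - time_since_last_sweep
    // The sweeper is therefore re-armed after 14 stack scans; as the cache
    // fills, reverse_free_ratio() rises and the wait shrinks toward zero.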
- int todo = (CodeCache::nof_nmethods() - _seen) / _invocations; + int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left; int swept_count = 0; @@ -286,11 +336,11 @@ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); // The last invocation iterates until there are no more nmethods - for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) { + for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) { swept_count++; if (SafepointSynchronize::is_synchronizing()) { // Safepoint request if (PrintMethodFlushing && Verbose) { - tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations); + tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left); } MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); @@ -314,19 +364,7 @@ } } - assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache"); - - if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) { - // we've completed a scan without making progress but there were - // nmethods we were unable to process either because they were - // locked or were still on stack. We don't have to aggressively - // clean them up so just stop scanning. We could scan once more - // but that complicates the control logic and it's unlikely to - // matter much. - if (PrintMethodFlushing) { - tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep"); - } - } + assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache"); jlong sweep_end_counter = os::elapsed_counter(); jlong sweep_time = sweep_end_counter - sweep_start_counter; @@ -340,21 +378,21 @@ event.set_starttime(sweep_start_counter); event.set_endtime(sweep_end_counter); event.set_sweepIndex(_traversals); - event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1); + event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1); event.set_sweptCount(swept_count); event.set_flushedCount(_flushed_count); - event.set_markedCount(_marked_count); + event.set_markedCount(_marked_for_reclamation_count); event.set_zombifiedCount(_zombified_count); event.commit(); } #ifdef ASSERT if(PrintMethodFlushing) { - tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time); + tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time); } #endif - if (_invocations == 1) { + if (_sweep_fractions_left == 1) { _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); log_sweep("finished"); } @@ -368,12 +406,37 @@ // it only makes sense to re-enable compilation if we have actually freed memory. // Note that typically several kB are released for sweeping 16MB of the code // cache. As a result, 'freed_memory' > 0 to restart the compiler. - if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) { + if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) { CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); log_sweep("restart_compiler"); } } +/** + * This function updates the sweeper statistics that keep track of nmethods + * state changes. If there is 'enough' state change, the sweeper is invoked + * as soon as possible. There can be data races on _bytes_changed. 
The data + * races are benign, since it does not matter if we lose a couple of bytes. + * In the worst case we call the sweeper a little later. Also, we are guaranteed + * to invoke the sweeper if the code cache gets full. + */ +void NMethodSweeper::report_state_change(nmethod* nm) { + _bytes_changed += nm->total_size(); + possibly_enable_sweeper(); +} + +/** + * This function determines whether there was 'enough' state change in the code cache to invoke + * the sweeper again. Currently, we determine 'enough' as more than 1% state change in + * the code cache since the last sweep. + */ +void NMethodSweeper::possibly_enable_sweeper() { + double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100; + if (percent_changed > 1.0) { + _should_sweep = true; + } +} + class NMethodMarker: public StackObj { private: #ifdef GRAAL @@ -432,9 +495,6 @@ MutexLocker cl(CompiledIC_lock); nm->cleanup_inline_caches(); SWEEP(nm); - } else { - _locked_seen++; - SWEEP(nm); } return freed_memory; } @@ -456,8 +516,9 @@ tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm); } nm->mark_for_reclamation(); - request_nmethod_marking(); - _marked_count++; + // Keep track of code cache state change + _bytes_changed += nm->total_size(); + _marked_for_reclamation_count++; SWEEP(nm); } } else if (nm->is_not_entrant()) { @@ -467,18 +528,14 @@ if (PrintMethodFlushing && Verbose) { tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm); } + // Code cache state change is tracked in make_zombie() nm->make_zombie(); - request_nmethod_marking(); _zombified_count++; SWEEP(nm); } else { // Still alive, clean up its inline caches MutexLocker cl(CompiledIC_lock); nm->cleanup_inline_caches(); - // we coudn't transition this nmethod so don't immediately - // request a rescan. If this method stays on the stack for a - // long time we don't want to keep rescanning the code cache. - _not_entrant_seen_on_stack++; SWEEP(nm); } } else if (nm->is_unloaded()) { @@ -493,8 +550,8 @@ release_nmethod(nm); _flushed_count++; } else { + // Code cache state change is tracked in make_zombie() nm->make_zombie(); - request_nmethod_marking(); _zombified_count++; SWEEP(nm); } @@ -522,7 +579,11 @@ // The second condition ensures that methods are not immediately made not-entrant // after compilation. nm->make_not_entrant(); - request_nmethod_marking(); + // Code cache state change is tracked in make_not_entrant() + if (PrintMethodFlushing && Verbose) { + tty->print_cr("### Nmethod %d/" PTR_FORMAT " made not-entrant: hotness counter %d/%d threshold %f", + nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold); + } } } } diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/sweeper.hpp --- a/src/share/vm/runtime/sweeper.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/sweeper.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -53,22 +53,22 @@ // is full. class NMethodSweeper : public AllStatic { - static long _traversals; // Stack scan count, also sweep ID. - static nmethod* _current; // Current nmethod - static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache - static int _flushed_count; // Nof. nmethods flushed in current sweep - static int _zombified_count; // Nof. nmethods made zombie in current sweep - static int _marked_count; // Nof. nmethods marked for reclaim in current sweep + static long _traversals; // Stack scan count, also sweep ID.
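The 1% threshold in possibly_enable_sweeper() above is easy to check by hand: with a 256M ReservedCodeCacheSize, about 2.6M of accumulated nmethod state change re-arms the sweeper before its periodic wait elapses. Below is a minimal standalone C++ sketch of just that predicate; the names are illustrative, not the patch's code.

    #include <cstdio>

    // Same computation as possibly_enable_sweeper(), in isolation.
    static bool enough_state_change(long bytes_changed, long reserved_code_cache_size) {
      double percent_changed = ((double)bytes_changed / (double)reserved_code_cache_size) * 100;
      return percent_changed > 1.0;
    }

    int main() {
      const long reserved = 256L * 1024 * 1024;                          // 256M code cache
      printf("%d\n", enough_state_change(2L * 1024 * 1024, reserved));   // prints 0: 2M is ~0.8%
      printf("%d\n", enough_state_change(3L * 1024 * 1024, reserved));   // prints 1: 3M is ~1.2%
      return 0;
    }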
+ static long _time_counter; // Virtual time used to periodically invoke sweeper + static long _last_sweep; // Value of _time_counter when the last sweep happened + static nmethod* _current; // Current nmethod + static int _seen; // Nof. nmethods we have currently processed in current pass of CodeCache + static int _flushed_count; // Nof. nmethods flushed in current sweep + static int _zombified_count; // Nof. nmethods made zombie in current sweep + static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep - static volatile int _invocations; // No. of invocations left until we are completed with this pass - static volatile int _sweep_started; // Flag to control conc sweeper - - //The following are reset in mark_active_nmethods and synchronized by the safepoint - static bool _request_mark_phase; // Indicates that a change has happend and we need another mark pahse, - // always checked and reset at a safepoint so memory will be in sync. - static int _locked_seen; // Number of locked nmethods encountered during the scan - static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack - + static volatile int _sweep_fractions_left; // Nof. invocations left until this pass is completed + static volatile int _sweep_started; // Flag to control conc sweeper + static volatile bool _should_sweep; // Indicates if we should invoke the sweeper + static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from: + // 1) alive -> not_entrant + // 2) not_entrant -> zombie + // 3) zombie -> marked_for_reclamation // Stat counters static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed static jlong _total_time_sweeping; // Accumulated time sweeping @@ -81,9 +81,6 @@ static bool sweep_in_progress(); static void sweep_code_cache(); - static void request_nmethod_marking() { _request_mark_phase = true; } - static void reset_nmethod_marking() { _request_mark_phase = false; } - static bool need_marking_phase() { return _request_mark_phase; } static int _hotness_counter_reset_val; @@ -109,13 +106,8 @@ static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2); static int hotness_counter_reset_val(); - - static void notify() { - // Request a new sweep of the code cache from the beginning. No - // need to synchronize the setting of this flag since it only - // changes to false at safepoint so we can never overwrite it with false. - request_nmethod_marking(); - } + static void report_state_change(nmethod* nm); + static void possibly_enable_sweeper(); }; #endif // SHARE_VM_RUNTIME_SWEEPER_HPP diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/thread.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1101,7 +1101,7 @@ // General purpose hook into Java code, run once when the VM is initialized. // The Java library method itself may be changed independently from the VM.
static void call_postVMInitHook(TRAPS) { - Klass* k = SystemDictionary::PostVMInitHook_klass(); + Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_misc_PostVMInitHook(), THREAD); instanceKlassHandle klass (THREAD, k); if (klass.not_null()) { JavaValue result(T_VOID); @@ -1481,6 +1481,8 @@ _stack_guard_state = stack_guard_unused; #ifdef GRAAL _graal_alternate_call_target = NULL; + _graal_implicit_exception_pc = NULL; + _graal_compiling = false; #if GRAAL_COUNTERS_SIZE > 0 for (int i = 0; i < GRAAL_COUNTERS_SIZE; i++) { _graal_counters[i] = 0; @@ -1497,7 +1499,6 @@ _interp_only_mode = 0; _special_runtime_exit_condition = _no_async_condition; _pending_async_exception = NULL; - _is_compiling = false; _thread_stat = NULL; _thread_stat = new ThreadStatistics(); _blocked_on_compilation = false; @@ -1867,7 +1868,8 @@ // Call Thread.exit(). We try 3 times in case we got another Thread.stop during // the execution of the method. If that is not enough, then we don't really care. Thread.stop // is deprecated anyhow. - { int count = 3; + if (!is_Compiler_thread()) { + int count = 3; while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) { EXCEPTION_MARK; JavaValue result(T_VOID); @@ -1880,7 +1882,6 @@ CLEAR_PENDING_EXCEPTION; } } - // notify JVMTI if (JvmtiExport::should_post_thread_life()) { JvmtiExport::post_thread_end(this); @@ -3296,12 +3297,26 @@ _task = NULL; _queue = queue; _counters = counters; + _buffer_blob = NULL; + _scanned_nmethod = NULL; + _compiler = NULL; #ifndef PRODUCT _ideal_graph_printer = NULL; #endif } +void CompilerThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { + JavaThread::oops_do(f, cld_f, cf); + if (_scanned_nmethod != NULL && cf != NULL) { + // Safepoints can occur when the sweeper is scanning an nmethod so + // process it here to make sure it isn't unloaded in the middle of + // a scan. + cf->do_code_blob(_scanned_nmethod); + } +} + + // ======= Threads ======== // The Threads class links together all active threads, and provides @@ -3322,8 +3337,6 @@ // All JavaThreads #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next()) -void os_stream(); - // All JavaThreads + all non-JavaThreads (i.e., every thread in the system) void Threads::threads_do(ThreadClosure* tc) { assert_locked_or_safepoint(Threads_lock); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/thread.hpp --- a/src/share/vm/runtime/thread.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/thread.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -920,6 +920,7 @@ #ifdef GRAAL address _graal_alternate_call_target; address _graal_implicit_exception_pc; // pc at which the most recent implicit exception occurred + bool _graal_compiling; // number of counters, increase as needed. 0 == disabled #define GRAAL_COUNTERS_SIZE (0) @@ -946,9 +947,6 @@ volatile address _exception_handler_pc; // PC for handler of exception volatile int _is_method_handle_return; // true (== 1) if the current exception PC is a MethodHandle call site. 
- // support for compilation - bool _is_compiling; // is true if a compilation is active inthis thread (one compilation per thread possible) - // support for JNI critical regions jint _jni_active_critical; // count of entries into JNI critical region @@ -1038,10 +1036,6 @@ // Testers virtual bool is_Java_thread() const { return true; } - // compilation - void set_is_compiling(bool f) { _is_compiling = f; } - bool is_compiling() const { return _is_compiling; } - // Thread chain operations JavaThread* next() const { return _next; } void set_next(JavaThread* p) { _next = p; } @@ -1308,6 +1302,8 @@ #ifdef GRAAL void set_graal_alternate_call_target(address a) { _graal_alternate_call_target = a; } void set_graal_implicit_exception_pc(address a) { _graal_implicit_exception_pc = a; } + bool is_graal_compiling() { return _graal_compiling; } + void set_is_graal_compiling(bool b) { _graal_compiling = b; } #endif // Exception handling for compiled methods @@ -1321,6 +1317,11 @@ void set_exception_handler_pc(address a) { _exception_handler_pc = a; } void set_is_method_handle_return(bool value) { _is_method_handle_return = value ? 1 : 0; } + void clear_exception_oop_and_pc() { + set_exception_oop(NULL); + set_exception_pc(NULL); + } + // Stack overflow support inline size_t stack_available(address cur_sp); address stack_yellow_zone_base() @@ -1858,11 +1859,14 @@ private: CompilerCounters* _counters; - ciEnv* _env; - CompileLog* _log; - CompileTask* _task; - CompileQueue* _queue; + ciEnv* _env; + CompileLog* _log; + CompileTask* _task; + CompileQueue* _queue; + BufferBlob* _buffer_blob; + nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper + AbstractCompiler* _compiler; public: @@ -1874,13 +1878,19 @@ // Hide this compiler thread from external view. bool is_hidden_from_external_view() const { return true; } - CompileQueue* queue() { return _queue; } - CompilerCounters* counters() { return _counters; } + void set_compiler(AbstractCompiler* c) { _compiler = c; } + AbstractCompiler* compiler() const { return _compiler; } + + CompileQueue* queue() const { return _queue; } + CompilerCounters* counters() const { return _counters; } // Get/set the thread's compilation environment. ciEnv* env() { return _env; } void set_env(ciEnv* env) { _env = env; } + BufferBlob* get_buffer_blob() const { return _buffer_blob; } + void set_buffer_blob(BufferBlob* b) { _buffer_blob = b; }; + // Get/set the thread's logging information CompileLog* log() { return _log; } void init_log(CompileLog* log) { @@ -1889,6 +1899,11 @@ _log = log; } + // GC support + // Apply "f->do_oop" to all root oops in "this". 
+ // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames + void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf); + #ifndef PRODUCT private: IdealGraphPrinter *_ideal_graph_printer; @@ -1901,6 +1916,11 @@ CompileTask* task() { return _task; } void set_task(CompileTask* task) { _task = task; } + // Track the nmethod currently being scanned by the sweeper + void set_scanned_nmethod(nmethod* nm) { + assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value"); + _scanned_nmethod = nm; + } }; inline CompilerThread* CompilerThread::current() { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/virtualspace.cpp --- a/src/share/vm/runtime/virtualspace.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/virtualspace.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -368,8 +368,15 @@ bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) { + const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1); + return initialize_with_granularity(rs, committed_size, max_commit_granularity); +} + +bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) { if(!rs.is_reserved()) return false; // allocation failed. assert(_low_boundary == NULL, "VirtualSpace already initialized"); + assert(max_commit_granularity > 0, "Granularity must be non-zero."); + _low_boundary = rs.base(); _high_boundary = low_boundary() + rs.size(); @@ -390,7 +397,7 @@ // No attempt is made to force large page alignment at the very top and // bottom of the space if they are not aligned so already. _lower_alignment = os::vm_page_size(); - _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1); + _middle_alignment = max_commit_granularity; _upper_alignment = os::vm_page_size(); // End of each region @@ -966,17 +973,52 @@ class TestVirtualSpace : AllStatic { + enum TestLargePages { + Default, + Disable, + Reserve, + Commit + }; + + static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) { + switch(mode) { + default: + case Default: + case Reserve: + return ReservedSpace(reserve_size_aligned); + case Disable: + case Commit: + return ReservedSpace(reserve_size_aligned, + os::vm_allocation_granularity(), + /* large */ false, /* exec */ false); + } + } + + static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) { + switch(mode) { + default: + case Default: + case Reserve: + return vs.initialize(rs, 0); + case Disable: + return vs.initialize_with_granularity(rs, 0, os::vm_page_size()); + case Commit: + return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1)); + } + } + public: - static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) { + static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size, + TestLargePages mode = Default) { size_t granularity = os::vm_allocation_granularity(); size_t reserve_size_aligned = align_size_up(reserve_size, granularity); - ReservedSpace reserved(reserve_size_aligned); + ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode); assert(reserved.is_reserved(), "Must be"); VirtualSpace vs; - bool initialized = vs.initialize(reserved, 0); + bool initialized = initialize_virtual_space(vs, reserved, mode); assert(initialized, "Failed to initialize VirtualSpace"); vs.expand_by(commit_size, false); @@ -986,7 +1028,10 @@ } else { 
assert_ge(vs.actual_committed_size(), commit_size); // Approximate the commit granularity. - size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size(); + // Make sure that we don't commit using large pages + // if large pages have been disabled for this VirtualSpace. + size_t commit_granularity = (mode == Disable || !UseLargePages) ? + os::vm_page_size() : os::large_page_size(); assert_lt(vs.actual_committed_size(), commit_size + commit_granularity); } @@ -1042,9 +1087,40 @@ test_virtual_space_actual_committed_space(10 * M, 10 * M); } + static void test_virtual_space_disable_large_pages() { + if (!UseLargePages) { + return; + } + // These test cases verify the behavior when we force VirtualSpace to disable large pages + test_virtual_space_actual_committed_space(10 * M, 0, Disable); + test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable); + test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable); + test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable); + test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable); + test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable); + test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable); + + test_virtual_space_actual_committed_space(10 * M, 0, Reserve); + test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve); + test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve); + test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve); + test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve); + test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve); + test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve); + + test_virtual_space_actual_committed_space(10 * M, 0, Commit); + test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit); + test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit); + test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit); + test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit); + test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit); + test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit); + } + static void test_virtual_space() { test_virtual_space_actual_committed_space(); test_virtual_space_actual_committed_space_one_large_page(); + test_virtual_space_disable_large_pages(); } }; diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/virtualspace.hpp --- a/src/share/vm/runtime/virtualspace.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/virtualspace.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -178,6 +178,7 @@ public: // Initialization VirtualSpace(); + bool initialize_with_granularity(ReservedSpace rs, size_t committed_byte_size, size_t max_commit_granularity); bool initialize(ReservedSpace rs, size_t committed_byte_size); // Destruction diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/runtime/vmStructs.cpp --- a/src/share/vm/runtime/vmStructs.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/runtime/vmStructs.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -27,7 +27,6 @@ #include "classfile/javaClasses.hpp" #include "classfile/loaderConstraints.hpp" #include "classfile/placeholders.hpp" -#include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "ci/ciField.hpp" #include "ci/ciInstance.hpp" @@ -59,7 +58,7 @@ #include "memory/generation.hpp" #include "memory/generationSpec.hpp" #include "memory/heap.hpp" -#include "memory/metablock.hpp" +#include "memory/metachunk.hpp" #include
"memory/referenceType.hpp" #include "memory/space.hpp" #include "memory/tenuredGeneration.hpp" @@ -104,6 +103,9 @@ #include "utilities/globalDefinitions.hpp" #include "utilities/hashtable.hpp" #include "utilities/macros.hpp" +#ifdef GRAAL +# include "graal/vmStructs_graal.hpp" +#endif #ifdef TARGET_ARCH_x86 # include "vmStructs_x86.hpp" #endif @@ -289,6 +291,7 @@ nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \ nonstatic_field(InstanceKlass, _array_klasses, Klass*) \ nonstatic_field(InstanceKlass, _methods, Array*) \ + nonstatic_field(InstanceKlass, _default_methods, Array*) \ nonstatic_field(InstanceKlass, _local_interfaces, Array*) \ nonstatic_field(InstanceKlass, _transitive_interfaces, Array*) \ nonstatic_field(InstanceKlass, _fields, Array*) \ @@ -323,6 +326,7 @@ nonstatic_field(nmethodBucket, _count, int) \ nonstatic_field(nmethodBucket, _next, nmethodBucket*) \ nonstatic_field(InstanceKlass, _method_ordering, Array*) \ + nonstatic_field(InstanceKlass, _default_vtable_indices, Array*) \ nonstatic_field(Klass, _super_check_offset, juint) \ nonstatic_field(Klass, _secondary_super_cache, Klass*) \ nonstatic_field(Klass, _secondary_supers, Array*) \ @@ -717,11 +721,17 @@ nonstatic_field(PlaceholderEntry, _loader_data, ClassLoaderData*) \ \ /**************************/ \ - /* ProctectionDomainEntry */ \ + /* ProtectionDomainEntry */ \ /**************************/ \ \ nonstatic_field(ProtectionDomainEntry, _next, ProtectionDomainEntry*) \ - nonstatic_field(ProtectionDomainEntry, _protection_domain, oop) \ + nonstatic_field(ProtectionDomainEntry, _pd_cache, ProtectionDomainCacheEntry*) \ + \ + /*******************************/ \ + /* ProtectionDomainCacheEntry */ \ + /*******************************/ \ + \ + nonstatic_field(ProtectionDomainCacheEntry, _literal, oop) \ \ /*************************/ \ /* LoaderConstraintEntry */ \ @@ -869,7 +879,18 @@ nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \ nonstatic_field(nmethod, _marked_for_deoptimization, bool) \ \ - unchecked_c2_static_field(Deoptimization, _trap_reason_name, void*) \ + unchecked_c2_static_field(Deoptimization, _trap_reason_name, void*) \ + \ + nonstatic_field(Deoptimization::UnrollBlock, _size_of_deoptimized_frame, int) \ + nonstatic_field(Deoptimization::UnrollBlock, _caller_adjustment, int) \ + nonstatic_field(Deoptimization::UnrollBlock, _number_of_frames, int) \ + nonstatic_field(Deoptimization::UnrollBlock, _total_frame_sizes, int) \ + nonstatic_field(Deoptimization::UnrollBlock, _frame_sizes, intptr_t*) \ + nonstatic_field(Deoptimization::UnrollBlock, _frame_pcs, address*) \ + nonstatic_field(Deoptimization::UnrollBlock, _register_block, intptr_t*) \ + nonstatic_field(Deoptimization::UnrollBlock, _return_type, BasicType) \ + nonstatic_field(Deoptimization::UnrollBlock, _initial_info, intptr_t) \ + nonstatic_field(Deoptimization::UnrollBlock, _caller_actual_parameters, int) \ \ /********************************/ \ /* JavaCalls (NOTE: incomplete) */ \ @@ -913,7 +934,6 @@ volatile_nonstatic_field(JavaThread, _exception_oop, oop) \ volatile_nonstatic_field(JavaThread, _exception_pc, address) \ volatile_nonstatic_field(JavaThread, _is_method_handle_return, int) \ - nonstatic_field(JavaThread, _is_compiling, bool) \ nonstatic_field(JavaThread, _special_runtime_exit_condition, JavaThread::AsyncRequests) \ nonstatic_field(JavaThread, _saved_exception_pc, address) \ volatile_nonstatic_field(JavaThread, _thread_state, JavaThreadState) \ @@ -1246,7 +1266,6 @@ static_field(java_lang_Class, 
_array_klass_offset, int) \ static_field(java_lang_Class, _oop_size_offset, int) \ static_field(java_lang_Class, _static_oop_field_count_offset, int) \ - GRAAL_ONLY(static_field(java_lang_Class, _graal_mirror_offset, int)) \ \ /************************/ \ /* Miscellaneous fields */ \ @@ -1285,7 +1304,6 @@ nonstatic_field(FreeList, _count, ssize_t) \ nonstatic_field(MetablockTreeDictionary, _total_size, size_t) - //-------------------------------------------------------------------------------- // VM_TYPES // @@ -1462,6 +1480,7 @@ declare_toplevel_type(CheckedExceptionElement) \ declare_toplevel_type(LocalVariableTableElement) \ declare_toplevel_type(ExceptionTableElement) \ + declare_toplevel_type(MethodParametersElement) \ \ declare_toplevel_type(ClassLoaderData) \ declare_toplevel_type(ClassLoaderDataGraph) \ @@ -1568,6 +1587,7 @@ declare_toplevel_type(SystemDictionary) \ declare_toplevel_type(vmSymbols) \ declare_toplevel_type(ProtectionDomainEntry) \ + declare_toplevel_type(ProtectionDomainCacheEntry) \ \ declare_toplevel_type(GenericGrowableArray) \ declare_toplevel_type(GrowableArray) \ @@ -1656,6 +1676,7 @@ declare_toplevel_type(Dependencies) \ declare_toplevel_type(CompileTask) \ declare_toplevel_type(Deoptimization) \ + declare_toplevel_type(Deoptimization::UnrollBlock) \ \ /************************/ \ /* OopMap and OopMapSet */ \ @@ -1937,7 +1958,13 @@ declare_c2_type(CmpDNode, CmpNode) \ declare_c2_type(CmpD3Node, CmpDNode) \ declare_c2_type(MathExactNode, MultiNode) \ - declare_c2_type(AddExactINode, MathExactNode) \ + declare_c2_type(MathExactINode, MathExactNode) \ + declare_c2_type(AddExactINode, MathExactINode) \ + declare_c2_type(AddExactLNode, MathExactLNode) \ + declare_c2_type(SubExactINode, MathExactINode) \ + declare_c2_type(SubExactLNode, MathExactLNode) \ + declare_c2_type(NegExactINode, MathExactINode) \ + declare_c2_type(MulExactINode, MathExactINode) \ declare_c2_type(FlagsProjNode, ProjNode) \ declare_c2_type(BoolNode, Node) \ declare_c2_type(AbsNode, Node) \ @@ -2253,12 +2280,6 @@ declare_preprocessor_constant("PERFDATA_BIG_ENDIAN", PERFDATA_BIG_ENDIAN) \ declare_preprocessor_constant("PERFDATA_LITTLE_ENDIAN", PERFDATA_LITTLE_ENDIAN) \ \ - /***************/ \ - /* SymbolTable */ \ - /***************/ \ - \ - declare_constant(SymbolTable::symbol_table_size) \ - \ /***********************************/ \ /* LoaderConstraintTable constants */ \ /***********************************/ \ @@ -2341,6 +2362,7 @@ declare_constant(ConstMethod::_has_localvariable_table) \ declare_constant(ConstMethod::_has_exception_table) \ declare_constant(ConstMethod::_has_generic_signature) \ + declare_constant(ConstMethod::_has_method_parameters) \ declare_constant(ConstMethod::_has_method_annotations) \ declare_constant(ConstMethod::_has_parameter_annotations) \ declare_constant(ConstMethod::_has_default_annotations) \ @@ -2493,6 +2515,11 @@ declare_constant(Deoptimization::Action_make_not_compilable) \ declare_constant(Deoptimization::Action_LIMIT) \ \ + declare_constant(Deoptimization::Unpack_deopt) \ + declare_constant(Deoptimization::Unpack_exception) \ + declare_constant(Deoptimization::Unpack_uncommon_trap) \ + declare_constant(Deoptimization::Unpack_reexecute) \ + \ /*********************/ \ /* Matcher (C2 only) */ \ /*********************/ \ @@ -2888,6 +2915,11 @@ GENERATE_C1_UNCHECKED_STATIC_VM_STRUCT_ENTRY, GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY) +#ifdef GRAAL + VM_STRUCTS_GRAAL(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, + GENERATE_STATIC_VM_STRUCT_ENTRY) +#endif + #if 
INCLUDE_ALL_GCS VM_STRUCTS_PARALLELGC(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, GENERATE_STATIC_VM_STRUCT_ENTRY) @@ -2932,6 +2964,11 @@ GENERATE_C2_VM_TYPE_ENTRY, GENERATE_C2_TOPLEVEL_VM_TYPE_ENTRY) +#ifdef GRAAL + VM_TYPES_GRAAL(GENERATE_VM_TYPE_ENTRY, + GENERATE_TOPLEVEL_VM_TYPE_ENTRY) +#endif + #if INCLUDE_ALL_GCS VM_TYPES_PARALLELGC(GENERATE_VM_TYPE_ENTRY, GENERATE_TOPLEVEL_VM_TYPE_ENTRY) diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/services/diagnosticCommand.cpp --- a/src/share/vm/services/diagnosticCommand.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/services/diagnosticCommand.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -48,7 +48,7 @@ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); #if INCLUDE_SERVICES // Heap dumping/inspection supported - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(DCmd_Source_Internal | DCmd_Source_AttachAPI, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); #endif // INCLUDE_SERVICES @@ -505,7 +505,11 @@ _jdp_pause ("jdp.pause", - "set com.sun.management.jdp.pause", "INT", false) + "set com.sun.management.jdp.pause", "INT", false), + + _jdp_name + ("jdp.name", + "set com.sun.management.jdp.name", "STRING", false) { _dcmdparser.add_dcmd_option(&_config_file); @@ -527,6 +531,7 @@ _dcmdparser.add_dcmd_option(&_jdp_source_addr); _dcmdparser.add_dcmd_option(&_jdp_ttl); _dcmdparser.add_dcmd_option(&_jdp_pause); + _dcmdparser.add_dcmd_option(&_jdp_name); } @@ -596,6 +601,7 @@ PUT_OPTION(_jdp_source_addr); PUT_OPTION(_jdp_ttl); PUT_OPTION(_jdp_pause); + PUT_OPTION(_jdp_name); #undef PUT_OPTION diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/services/diagnosticCommand.hpp --- a/src/share/vm/services/diagnosticCommand.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/services/diagnosticCommand.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -302,6 +302,7 @@ DCmdArgument _jdp_source_addr; DCmdArgument _jdp_ttl; DCmdArgument _jdp_pause; + DCmdArgument _jdp_name; public: JMXStartRemoteDCmd(outputStream *output, bool heap_allocated); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/services/heapDumper.cpp --- a/src/share/vm/services/heapDumper.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/services/heapDumper.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1545,7 +1545,9 @@ // writes a HPROF_GC_CLASS_DUMP record for the given class void VM_HeapDumper::do_class_dump(Klass* k) { - DumperSupport::dump_class_and_array_classes(writer(), k); + if (k->oop_is_instance()) { + DumperSupport::dump_class_and_array_classes(writer(), k); + } } // writes a HPROF_GC_CLASS_DUMP records for a given basic type @@ -1722,7 +1724,7 @@ SymbolTable::symbols_do(&sym_dumper); // write HPROF_LOAD_CLASS records - SystemDictionary::classes_do(&do_load_class); + ClassLoaderDataGraph::classes_do(&do_load_class); Universe::basic_type_classes_do(&do_load_class); // write HPROF_FRAME and HPROF_TRACE records @@ -1733,7 +1735,7 @@ write_dump_header(); // Writes HPROF_GC_CLASS_DUMP records - SystemDictionary::classes_do(&do_class_dump); + ClassLoaderDataGraph::classes_do(&do_class_dump); Universe::basic_type_classes_do(&do_basic_type_array_class_dump); check_segment_length(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/services/jmm.h --- a/src/share/vm/services/jmm.h Thu Nov 21 
15:04:26 2013 +0100 +++ b/src/share/vm/services/jmm.h Thu Nov 21 15:04:54 2013 +0100 @@ -78,6 +78,7 @@ JMM_COMPILE_TOTAL_TIME_MS = 8, /* Total accumulated time spent in compilation */ JMM_GC_TIME_MS = 9, /* Total accumulated time spent in collection */ JMM_GC_COUNT = 10, /* Total number of collections */ + JMM_JVM_UPTIME_MS = 11, /* The JVM uptime in milliseconds */ JMM_INTERNAL_ATTRIBUTE_INDEX = 100, JMM_CLASS_LOADED_BYTES = 101, /* Number of bytes loaded instance classes */ diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/services/management.cpp --- a/src/share/vm/services/management.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/services/management.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1032,6 +1032,9 @@ case JMM_JVM_INIT_DONE_TIME_MS: return Management::vm_init_done_time(); + case JMM_JVM_UPTIME_MS: + return Management::ticks_to_ms(os::elapsed_counter()); + case JMM_COMPILE_TOTAL_TIME_MS: return Management::ticks_to_ms(CompileBroker::total_compilation_ticks()); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/services/memoryService.hpp --- a/src/share/vm/services/memoryService.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/services/memoryService.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -148,6 +148,12 @@ static void track_code_cache_memory_usage() { track_memory_pool_usage(_code_heap_pool); } + static void track_metaspace_memory_usage() { + track_memory_pool_usage(_metaspace_pool); + } + static void track_compressed_class_memory_usage() { + track_memory_pool_usage(_compressed_class_pool); + } static void track_memory_pool_usage(MemoryPool* pool); static void gc_begin(bool fullGC, bool recordGCBeginTime, diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/services/runtimeService.cpp --- a/src/share/vm/services/runtimeService.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/services/runtimeService.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -119,7 +119,7 @@ #endif /* USDT2 */ // Print the time interval in which the app was executing - if (PrintGCApplicationConcurrentTime) { + if (PrintGCApplicationConcurrentTime && _app_timer.is_updated()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("Application time: %3.7f seconds", diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/shark/sharkCompiler.cpp --- a/src/share/vm/shark/sharkCompiler.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/shark/sharkCompiler.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -133,11 +133,10 @@ exit(1); } - execution_engine()->addModule( - _native_context->module()); + execution_engine()->addModule(_native_context->module()); // All done - mark_initialized(); + set_state(initialized); } void SharkCompiler::initialize() { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/shark/sharkCompiler.hpp --- a/src/share/vm/shark/sharkCompiler.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/shark/sharkCompiler.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -50,10 +50,6 @@ return ! 
(method->is_method_handle_intrinsic() || method->is_compiled_lambda_form()); } - // Customization - bool needs_adapters() { return false; } - bool needs_stubs() { return false; } - // Initialization void initialize(); diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/trace/traceEventClasses.xsl --- a/src/share/vm/trace/traceEventClasses.xsl Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/trace/traceEventClasses.xsl Thu Nov 21 15:04:54 2013 +0100 @@ -23,8 +23,8 @@ --> + - diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/trace/traceEventIds.xsl --- a/src/share/vm/trace/traceEventIds.xsl Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/trace/traceEventIds.xsl Thu Nov 21 15:04:54 2013 +0100 @@ -23,8 +23,8 @@ --> + - diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/trace/traceTypes.xsl --- a/src/share/vm/trace/traceTypes.xsl Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/trace/traceTypes.xsl Thu Nov 21 15:04:54 2013 +0100 @@ -23,8 +23,8 @@ --> + - diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/utilities/constantTag.cpp --- a/src/share/vm/utilities/constantTag.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/utilities/constantTag.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -51,7 +51,9 @@ case JVM_CONSTANT_ClassIndex : case JVM_CONSTANT_StringIndex : case JVM_CONSTANT_MethodHandle : + case JVM_CONSTANT_MethodHandleInError : case JVM_CONSTANT_MethodType : + case JVM_CONSTANT_MethodTypeInError : return T_OBJECT; default: ShouldNotReachHere(); @@ -60,6 +62,19 @@ } +jbyte constantTag::non_error_value() const { + switch (_tag) { + case JVM_CONSTANT_UnresolvedClassInError: + return JVM_CONSTANT_UnresolvedClass; + case JVM_CONSTANT_MethodHandleInError: + return JVM_CONSTANT_MethodHandle; + case JVM_CONSTANT_MethodTypeInError: + return JVM_CONSTANT_MethodType; + default: + return _tag; + } +} + const char* constantTag::internal_name() const { switch (_tag) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/utilities/constantTag.hpp --- a/src/share/vm/utilities/constantTag.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/utilities/constantTag.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -108,7 +108,8 @@ _tag = tag; } - jbyte value() { return _tag; } + jbyte value() const { return _tag; } + jbyte non_error_value() const; BasicType basic_type() const; // if used with ldc, what kind of value gets pushed? diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/utilities/globalDefinitions.hpp --- a/src/share/vm/utilities/globalDefinitions.hpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/utilities/globalDefinitions.hpp Thu Nov 21 15:04:54 2013 +0100 @@ -326,12 +326,18 @@ const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. 
section 4.8.1 (p.134) +// Default ProtectionDomainCacheSize values + +const int defaultProtectionDomainCacheSize = NOT_LP64(137) LP64_ONLY(2017); //---------------------------------------------------------------------------------------------------- // Default and minimum StringTableSize values const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013); -const int minimumStringTableSize=1009; +const int minimumStringTableSize = 1009; + +const int defaultSymbolTableSize = 20011; +const int minimumSymbolTableSize = 1009; //---------------------------------------------------------------------------------------------------- @@ -362,8 +368,6 @@ // Klass encoding metaspace max size const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes; -const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000000)); // 32*G - // Machine dependent stuff #ifdef TARGET_ARCH_x86 @@ -452,6 +456,13 @@ return (void*) align_size_up_((uintptr_t)addr, size); } +// Align down with a lower bound. If the aligning results in 0, return 'alignment'. + +inline size_t align_size_down_bounded(size_t size, size_t alignment) { + size_t aligned_size = align_size_down_(size, alignment); + return aligned_size > 0 ? aligned_size : alignment; +} + // Clamp an address to be within a specific page // 1. If addr is on the page it is returned as is // 2. If addr is above the page_address the start of the *next* page will be returned diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/utilities/ostream.cpp --- a/src/share/vm/utilities/ostream.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/utilities/ostream.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -465,7 +465,7 @@ } // log_name comes from -XX:LogFile=log_name or -Xloggc:log_name -// in log_name, %p => pipd1234 and +// in log_name, %p => pid1234 and // %t => YYYY-MM-DD_HH-MM-SS static const char* make_log_name(const char* log_name, const char* force_directory) { char timestr[32]; @@ -792,7 +792,7 @@ void defaultStream::init_log() { // %%% Need a MutexLocker? - const char* log_name = LogFile != NULL ? LogFile : "hotspot_pid%p.log"; + const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log"; const char* try_name = make_log_name(log_name, NULL); fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name); if (!file->is_open()) { diff -r 790ebab62d23 -r f9f4503a4ab5 src/share/vm/utilities/vmError.cpp --- a/src/share/vm/utilities/vmError.cpp Thu Nov 21 15:04:26 2013 +0100 +++ b/src/share/vm/utilities/vmError.cpp Thu Nov 21 15:04:54 2013 +0100 @@ -1050,7 +1050,7 @@ FILE* replay_data_file = os::open(fd, "w"); if (replay_data_file != NULL) { fileStream replay_data_stream(replay_data_file, /*need_close=*/true); - env->dump_replay_data(&replay_data_stream); + env->dump_replay_data_unsafe(&replay_data_stream); out.print_raw("#\n# Compiler replay data is saved as:\n# "); out.print_raw_cr(buffer); } else { diff -r 790ebab62d23 -r f9f4503a4ab5 test/TEST.groups --- a/test/TEST.groups Thu Nov 21 15:04:26 2013 +0100 +++ b/test/TEST.groups Thu Nov 21 15:04:54 2013 +0100 @@ -27,7 +27,7 @@ # - compact1, compact2, compact3, full JRE, JDK # # In addition they support testing of the minimal VM on compact1 and compact2. -# Essentially this defines groups based around the specified API's and VM +# Essentially this defines groups based around the specified API's and VM # services available in the runtime. 
# # The groups are defined hierarchically in two forms: @@ -44,9 +44,9 @@ # by listing the top-level test directories. # # To use a group simply list it on the jtreg command line eg: -# jtreg :jdk +# jtreg :jdk # runs all tests. While -# jtreg :compact2 +# jtreg :compact2 # runs those tests that only require compact1 and compact2 API's. # @@ -64,12 +64,13 @@ gc/TestG1ZeroPGCTJcmdThreadPrint.java \ gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \ gc/metaspace/TestMetaspacePerfCounters.java \ + gc/metaspace/TestPerfCountersAndMemoryPools.java \ runtime/6819213/TestBootNativeLibraryPath.java \ - runtime/6878713/Test6878713.sh \ runtime/6925573/SortMethodsTest.java \ runtime/7107135/Test7107135.sh \ runtime/7158988/FieldMonitor.java \ runtime/7194254/Test7194254.java \ + runtime/8026365/InvokeSpecialAnonTest.java \ runtime/jsig/Test8017498.sh \ runtime/Metaspace/FragmentMetaspace.java \ runtime/NMT/BaselineWithParameter.java \ @@ -85,7 +86,9 @@ runtime/NMT/VirtualAllocTestType.java \ runtime/RedefineObject/TestRedefineObject.java \ runtime/XCheckJniJsig/XCheckJSig.java \ - serviceability/attach/AttachWithStalePidFile.java + serviceability/attach/AttachWithStalePidFile.java \ + serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java + # JRE adds further tests to compact3 # @@ -99,7 +102,9 @@ needs_jre = \ compiler/6852078/Test6852078.java \ compiler/7047069/Test7047069.java \ - runtime/6294277/SourceDebugExtension.java + runtime/6294277/SourceDebugExtension.java \ + runtime/ClassFile/JsrRewriting.java \ + runtime/ClassFile/OomWhileParsingRepeatedJsr.java # Compact 3 adds further tests to compact2 # @@ -123,7 +128,7 @@ compiler/whitebox/IsMethodCompilableTest.java \ gc/6581734/Test6581734.java \ gc/7072527/TestFullGCCount.java \ - gc/7168848/HumongousAlloc.java \ + gc/g1/TestHumongousAllocInitialMark.java \ gc/arguments/TestG1HeapRegionSize.java \ gc/metaspace/TestMetaspaceMemoryPool.java \ runtime/InternalApi/ThreadCpuTimesDeadlock.java \ @@ -139,7 +144,7 @@ -:needs_jdk # Tests that require compact2 API's and a full VM -# +# needs_full_vm_compact2 = # Compact 1 adds full VM tests diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/8013496/Test8013496.sh --- a/test/compiler/8013496/Test8013496.sh Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,55 +0,0 @@ -#!/bin/sh -# -# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# -# @test -# @bug 8013496 -# @summary Test checks that the order in which ReversedCodeCacheSize and -# InitialCodeCacheSize are passed to the VM is irrelevant. 
-# @run shell Test8013496.sh -# -# -## some tests require path to find test source dir -if [ "${TESTSRC}" = "" ] -then - TESTSRC=${PWD} - echo "TESTSRC not set. Using "${TESTSRC}" as default" -fi -echo "TESTSRC=${TESTSRC}" -## Adding common setup Variables for running shell tests. -. ${TESTSRC}/../../test_env.sh -set -x - -${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:ReservedCodeCacheSize=2m -XX:InitialCodeCacheSize=500K -version > 1.out 2>&1 -${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:InitialCodeCacheSize=500K -XX:ReservedCodeCacheSize=2m -version > 2.out 2>&1 - -diff 1.out 2.out - -result=$? -if [ $result -eq 0 ] ; then - echo "Test Passed" - exit 0 -else - echo "Test Failed" - exit 1 -fi diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8013496 + * @summary Test checks that the order in which ReservedCodeCacheSize and + * InitialCodeCacheSize are passed to the VM is irrelevant. + * @library /testlibrary + * + */ +import com.oracle.java.testlibrary.*; + +public class CheckReservedInitialCodeCacheSizeArgOrder { + public static void main(String[] args) throws Exception { + ProcessBuilder pb1, pb2; + OutputAnalyzer out1, out2; + + pb1 = ProcessTools.createJavaProcessBuilder("-XX:InitialCodeCacheSize=4m", "-XX:ReservedCodeCacheSize=8m", "-version"); + pb2 = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=8m", "-XX:InitialCodeCacheSize=4m", "-version"); + + out1 = new OutputAnalyzer(pb1.start()); + out2 = new OutputAnalyzer(pb2.start()); + + // Check that the outputs are equal + if (out1.getStdout().compareTo(out2.getStdout()) != 0) { + throw new RuntimeException("Test failed"); + } + + out1.shouldHaveExitValue(0); + out2.shouldHaveExitValue(0); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/inlining/InlineDefaultMethod.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/inlining/InlineDefaultMethod.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026735 + * @summary CHA in C1 should make correct decisions about default methods + * @run main/othervm -Xcomp -XX:CompileOnly=InlineDefaultMethod::test -XX:TieredStopAtLevel=1 InlineDefaultMethod + */ + + +interface InterfaceWithDefaultMethod0 { + default public int defaultMethod() { + return 1; + } +} + +interface InterfaceWithDefaultMethod1 extends InterfaceWithDefaultMethod0 { } + +abstract class Subtype implements InterfaceWithDefaultMethod1 { } + +class Decoy extends Subtype { + public int defaultMethod() { + return 2; + } +} + +class Instance extends Subtype { } + +public class InlineDefaultMethod { + public static int test(InterfaceWithDefaultMethod1 x) { + return x.defaultMethod(); + } + public static void main(String[] args) { + InterfaceWithDefaultMethod1 a = new Decoy(); + InterfaceWithDefaultMethod1 b = new Instance(); + if (test(a) != 2 || + test(b) != 1) { + System.err.println("FAILED"); + System.exit(97); + } + System.err.println("PASSED"); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/AddExactICondTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/AddExactICondTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8024924 + * @summary Test non constant addExact + * @compile AddExactICondTest.java + * @run main AddExactICondTest + * + */ + +public class AddExactICondTest { + public static int result = 0; + + public static void main(String[] args) { + for (int i = 0; i < 50000; ++i) { + runTest(); + } + } + + public static void runTest() { + int i = 7; + while (java.lang.Math.addExact(i, result) < 89361) { + if ((java.lang.Math.addExact(i, i) & 1) == 1) { + i += 3; + } else if ((i & 5) == 4) { + i += 7; + } else if ((i & 0xf) == 6) { + i += 2; + } else { + i += 1; + } + result += 2; + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/AddExactIConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/AddExactIConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8024924 + * @summary Test constant addExact + * @compile AddExactIConstantTest.java Verify.java + * @run main AddExactIConstantTest + * + */ + +public class AddExactIConstantTest { + public static void main(String[] args) { + Verify.ConstantTest.verify(new Verify.AddExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/AddExactILoadTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/AddExactILoadTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8024924 + * @summary Test non constant addExact + * @compile AddExactILoadTest.java Verify.java + * @run main AddExactILoadTest + * + */ + +public class AddExactILoadTest { + public static void main(String[] args) { + Verify.LoadTest.init(); + Verify.LoadTest.verify(new Verify.AddExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/AddExactILoopDependentTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/AddExactILoopDependentTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8024924 + * @summary Test non constant addExact + * @compile AddExactILoopDependentTest.java Verify.java + * @run main AddExactILoopDependentTest + * + */ + +public class AddExactILoopDependentTest { + public static void main(String[] args) { + Verify.LoopDependentTest.verify(new Verify.AddExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/AddExactINonConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/AddExactINonConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8024924 + * @summary Test non constant addExact + * @compile AddExactINonConstantTest.java Verify.java + * @run main AddExactINonConstantTest + * + */ + +public class AddExactINonConstantTest { + public static void main(String[] args) { + Verify.NonConstantTest.verify(new Verify.AddExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/AddExactIRepeatTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/AddExactIRepeatTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8025657 + * @summary Test repeating addExact + * @compile AddExactIRepeatTest.java Verify.java + * @run main AddExactIRepeatTest + * + */ + +public class AddExactIRepeatTest { + public static void main(String[] args) { + runTest(new Verify.AddExactI()); + } + + public static int nonExact(int x, int y, Verify.BinaryMethod method) { + int result = method.unchecked(x, y); + result += method.unchecked(x, y); + result += method.unchecked(x, y); + result += method.unchecked(x, y); + return result; + } + + public static void runTest(Verify.BinaryMethod method) { + java.util.Random rnd = new java.util.Random(); + for (int i = 0; i < 50000; ++i) { + int x = Integer.MAX_VALUE - 10; + int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5); + + int c = rnd.nextInt() / 2; + int d = rnd.nextInt() / 2; + + int a = catchingExact(x, y, method); + + if (a != 36) { + throw new RuntimeException("a != 36 : " + a); + } + + int b = nonExact(c, d, method); + int n = exact(c, d, method); + + + if (n != b) { + throw new RuntimeException("n != b : " + n + " != " + b); + } + } + } + + public static int exact(int x, int y, Verify.BinaryMethod method) { + int result = 0; + result += method.checkMethod(x, y); + result += method.checkMethod(x, y); + result += method.checkMethod(x, y); + result += method.checkMethod(x, y); + return result; + } + + public static int catchingExact(int x, int y, Verify.BinaryMethod method) { + int result = 0; + try { + result += 5; + result = method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 1; + } + try { + result += 6; + + result += method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 2; + } + try { + result += 7; + result += method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 3; + } + try { + result += 8; + result += method.checkMethod(x, y); + } catch (ArithmeticException e) { 
+ result += 4; + } + return result; + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/AddExactLConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/AddExactLConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test constant addExact + * @compile AddExactLConstantTest.java Verify.java + * @run main AddExactLConstantTest + * + */ + +public class AddExactLConstantTest { + public static void main(String[] args) { + Verify.ConstantLongTest.verify(new Verify.AddExactL()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/AddExactLNonConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/AddExactLNonConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8026844 + * @summary Test non constant addExact + * @compile AddExactLNonConstantTest.java Verify.java + * @run main AddExactLNonConstantTest + * + */ + +public class AddExactLNonConstantTest { + public static void main(String[] args) { + Verify.NonConstantLongTest.verify(new Verify.AddExactL()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/CompareTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/CompareTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026722 + * @summary Verify that the compare after addExact is a signed compare + * @compile CompareTest.java + * @run main CompareTest + * + */ + +public class CompareTest { + public static long store = 0; + public static long addValue = 1231; + + public static void main(String[] args) { + for (int i = 0; i < 20000; ++i) { + runTest(i, i); + runTest(i-1, i); + } + } + + public static long create(long value, int v) { + if ((value | v) == 0) { + return 0; + } + + // C2 turned this test into unsigned test when a control edge was set on the Cmp + if (value < -31557014167219200L || value > 31556889864403199L) { + throw new RuntimeException("error"); + } + + return value; + } + + public static void runTest(long value, int value2) { + long res = Math.addExact(value, addValue); + store = create(res, Math.floorMod(value2, 100000)); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/CondTest.java --- a/test/compiler/intrinsics/mathexact/CondTest.java Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
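Background on CompareTest above: Math.addExact (new in JDK 8) returns the exact sum or throws ArithmeticException on overflow, and the bounds check in create() must stay a signed comparison. A minimal standalone illustration, independent of the test harness (the class name AddExactCompareDemo is illustrative only, not part of this patch):

    public class AddExactCompareDemo {
        public static void main(String[] args) {
            long res = Math.addExact(1231L, 42L);          // exact sum, no overflow
            // signed range check, as in CompareTest.create()
            if (res < -31557014167219200L || res > 31556889864403199L) {
                throw new RuntimeException("error");
            }
            try {
                Math.addExact(Long.MAX_VALUE, 1L);         // overflows long
            } catch (ArithmeticException e) {
                System.out.println("caught: " + e.getMessage());
            }
            System.out.println("res = " + res);
        }
    }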
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test - * @bug 8024924 - * @summary Test non constant addExact - * @compile CondTest.java Verify.java - * @run main CondTest - * - */ - -import java.lang.ArithmeticException; - -public class CondTest { - public static int result = 0; - - public static void main(String[] args) { - for (int i = 0; i < 50000; ++i) { - runTest(); - } - } - - public static void runTest() { - int i = 7; - while (java.lang.Math.addExact(i, result) < 89361) { - if ((java.lang.Math.addExact(i, i) & 1) == 1) { - i += 3; - } else if ((i & 5) == 4) { - i += 7; - } else if ((i & 0xf) == 6) { - i += 2; - } else { - i += 1; - } - result += 2; - } - } -} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/ConstantTest.java --- a/test/compiler/intrinsics/mathexact/ConstantTest.java Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test - * @bug 8024924 - * @summary Test constant addExact - * @compile ConstantTest.java Verify.java - * @run main ConstantTest - * - */ - -import java.lang.ArithmeticException; - -public class ConstantTest { - public static void main(String[] args) { - for (int i = 0; i < 50000; ++i) { - Verify.verify(5, 7); - Verify.verify(Integer.MAX_VALUE, 1); - Verify.verify(Integer.MIN_VALUE, -1); - Verify.verify(Integer.MAX_VALUE, -1); - Verify.verify(Integer.MIN_VALUE, 1); - Verify.verify(Integer.MAX_VALUE / 2, Integer.MAX_VALUE / 2); - Verify.verify(Integer.MAX_VALUE / 2, (Integer.MAX_VALUE / 2) + 3); - } - } -} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/DecExactITest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/DecExactITest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test decrementExact + * @compile DecExactITest.java Verify.java + * @run main DecExactITest + * + */ + +public class DecExactITest { + public static int[] values = {1, 1, 1, 1}; + public static int[] minvalues = {Integer.MIN_VALUE, Integer.MIN_VALUE}; + + public static void main(String[] args) { + runTest(new Verify.DecExactI()); + } + + public static void runTest(Verify.UnaryMethod method) { + for (int i = 0; i < 20000; ++i) { + Verify.verifyUnary(Integer.MIN_VALUE, method); + Verify.verifyUnary(minvalues[0], method); + Verify.verifyUnary(Integer.MIN_VALUE - values[2], method); + Verify.verifyUnary(0, method); + Verify.verifyUnary(values[2], method); + Verify.verifyUnary(Integer.MAX_VALUE, method); + Verify.verifyUnary(Integer.MIN_VALUE - values[0] + values[3], method); + Verify.verifyUnary(Integer.MIN_VALUE + 1 - values[0], method); + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/DecExactLTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/DecExactLTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
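The DecExact tests exercise Math.decrementExact at the edges of the value range; operands are read from the values/minvalues arrays so the JIT cannot constant-fold the calls away. The underlying JDK 8 behavior being verified, as a minimal sketch (DecExactDemo is an illustrative name, not part of the patch):

    public class DecExactDemo {
        public static void main(String[] args) {
            System.out.println(Math.decrementExact(0));    // prints -1
            try {
                Math.decrementExact(Integer.MIN_VALUE);    // underflows int
            } catch (ArithmeticException e) {
                System.out.println("caught: " + e.getMessage());
            }
        }
    }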
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test decrementExact
+ * @compile DecExactLTest.java Verify.java
+ * @run main DecExactLTest
+ *
+ */
+
+public class DecExactLTest {
+    public static long[] values = {1, 1, 1, 1};
+    public static long[] minvalues = {Long.MIN_VALUE, Long.MIN_VALUE};
+
+    public static void main(String[] args) {
+        runTest(new Verify.DecExactL());
+    }
+
+    public static void runTest(Verify.UnaryLongMethod method) {
+        for (int i = 0; i < 20000; ++i) {
+            Verify.verifyUnary(Long.MIN_VALUE, method);
+            Verify.verifyUnary(minvalues[0], method);
+            Verify.verifyUnary(Long.MIN_VALUE - values[2], method);
+            Verify.verifyUnary(0, method);
+            Verify.verifyUnary(values[2], method);
+            Verify.verifyUnary(Long.MAX_VALUE, method);
+            Verify.verifyUnary(Long.MIN_VALUE - values[0] + values[3], method);
+            Verify.verifyUnary(Long.MIN_VALUE + 1 - values[0], method);
+        }
+    }
+}
diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/GVNTest.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/GVNTest.java Thu Nov 21 15:04:54 2013 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8028207
+ * @summary Verify that GVN doesn't mess up the two addExacts
+ * @compile GVNTest.java
+ * @run main GVNTest
+ *
+ */
+
+public class GVNTest {
+    public static int result = 0;
+    public static int value = 93;
+    public static void main(String[] args) {
+        for (int i = 0; i < 50000; ++i) {
+            result = runTest(value + i);
+            result = runTest(value + i);
+            result = runTest(value + i);
+            result = runTest(value + i);
+            result = runTest(value + i);
+        }
+    }
+
+    public static int runTest(int value) {
+        int v = value + value;
+        int sum = 0;
+        if (v < 4032) {
+            for (int i = 0; i < 1023; ++i) {
+                sum += Math.addExact(value, value);
+            }
+        } else {
+            for (int i = 0; i < 321; ++i) {
+                sum += Math.addExact(value, value);
+            }
+        }
+        return sum + v;
+    }
+}
diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/IncExactITest.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/IncExactITest.java Thu Nov 21 15:04:54 2013 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test incrementExact + * @compile IncExactITest.java Verify.java + * @run main IncExactITest + * + */ + + +public class IncExactITest { + public static int[] values = {1, 1, 1, 1}; + public static void main(String[] args) { + runTest(new Verify.IncExactI()); + } + + public static void runTest(Verify.UnaryMethod method) { + for (int i = 0; i < 20000; ++i) { + Verify.verifyUnary(Integer.MIN_VALUE, method); + Verify.verifyUnary(Integer.MAX_VALUE - 1, method); + Verify.verifyUnary(0, method); + Verify.verifyUnary(values[1], method); + Verify.verifyUnary(Integer.MAX_VALUE, method); + Verify.verifyUnary(Integer.MAX_VALUE - values[0] + values[3], method); + Verify.verifyUnary(Integer.MAX_VALUE - 1 + values[0], method); + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/IncExactLTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/IncExactLTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8026844 + * @summary Test incrementExact + * @compile IncExactLTest.java Verify.java + * @run main IncExactLTest + * + */ + +public class IncExactLTest { + public static long[] values = {1, 1, 1, 1}; + public static void main(String[] args) { + runTest(new Verify.IncExactL()); + } + + public static void runTest(Verify.UnaryLongMethod method) { + for (int i = 0; i < 20000; ++i) { + Verify.verifyUnary(Long.MIN_VALUE, method); + Verify.verifyUnary(Long.MAX_VALUE - 1, method); + Verify.verifyUnary(0, method); + Verify.verifyUnary(values[1], method); + Verify.verifyUnary(Long.MAX_VALUE, method); + Verify.verifyUnary(Long.MAX_VALUE - values[0] + values[3], method); + Verify.verifyUnary(Long.MAX_VALUE - 1 + values[0], method); + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/LoadTest.java --- a/test/compiler/intrinsics/mathexact/LoadTest.java Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test - * @bug 8024924 - * @summary Test non constant addExact - * @compile LoadTest.java Verify.java - * @run main LoadTest - * - */ - -import java.lang.ArithmeticException; - -public class LoadTest { - public static java.util.Random rnd = new java.util.Random(); - public static int[] values = new int[256]; - - public static void main(String[] args) { - for (int i = 0; i < values.length; ++i) { - values[i] = rnd.nextInt(); - } - - for (int i = 0; i < 50000; ++i) { - Verify.verify(values[i & 255], values[i & 255] - i); - Verify.verify(values[i & 255] + i, values[i & 255] - i); - Verify.verify(values[i & 255], values[i & 255]); - if ((i & 1) == 1 && i > 5) { - Verify.verify(values[i & 255] + i, values[i & 255] - i); - } else { - Verify.verify(values[i & 255] - i, values[i & 255] + i); - } - } - } -} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/LoopDependentTest.java --- a/test/compiler/intrinsics/mathexact/LoopDependentTest.java Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test - * @bug 8024924 - * @summary Test non constant addExact - * @compile LoopDependentTest.java Verify.java - * @run main LoopDependentTest - * - */ - -import java.lang.ArithmeticException; - -public class LoopDependentTest { - public static java.util.Random rnd = new java.util.Random(); - - public static void main(String[] args) { - int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt(); - for (int i = 0; i < 50000; ++i) { - Verify.verify(rnd1 + i, rnd2 + i); - Verify.verify(rnd1 + i, rnd2 + (i & 0xff)); - Verify.verify(rnd1 - i, rnd2 - (i & 0xff)); - Verify.verify(rnd1 + i + 1, rnd2 + i + 2); - Verify.verify(rnd1 + i * 2, rnd2 + i); - } - } -} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/MulExactICondTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/MulExactICondTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8026844 + * @summary Test multiplyExact as condition + * @compile MulExactICondTest.java + * @run main MulExactICondTest + * + */ + +public class MulExactICondTest { + public static int result = 0; + + public static void main(String[] args) { + for (int i = 0; i < 50000; ++i) { + runTest(); + } + } + + public static void runTest() { + int i = 7; + while (java.lang.Math.multiplyExact(i, result) < 89361) { + if ((java.lang.Math.multiplyExact(i, i) & 1) == 1) { + i += 3; + } else if ((i & 5) == 4) { + i += 7; + } else if ((i & 0xf) == 6) { + i += 2; + } else { + i += 1; + } + result += 2; + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/MulExactIConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/MulExactIConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test constant multiplyExact + * @compile MulExactIConstantTest.java Verify.java + * @run main MulExactIConstantTest + * + */ + +public class MulExactIConstantTest { + public static void main(String[] args) { + Verify.ConstantTest.verify(new Verify.MulExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/MulExactILoadTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/MulExactILoadTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
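In the *CondTest variants, the exact operation sits directly in loop and branch conditions, so the intrinsic's overflow check is woven into existing control flow rather than standing alone. The multiplyExact semantics assumed throughout are the standard JDK 8 ones; a minimal sketch (MulExactDemo is illustrative, not part of the patch):

    public class MulExactDemo {
        public static void main(String[] args) {
            // 46341 * 46340 = 2147441940, still below Integer.MAX_VALUE
            System.out.println(Math.multiplyExact(46341, 46340));
            try {
                Math.multiplyExact(46341, 46341);  // 2147488281 overflows int
            } catch (ArithmeticException e) {
                System.out.println("caught: " + e.getMessage());
            }
        }
    }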
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test multiplyExact + * @compile MulExactILoadTest.java Verify.java + * @run main MulExactILoadTest + * + */ + +public class MulExactILoadTest { + public static void main(String[] args) { + Verify.LoadTest.init(); + Verify.LoadTest.verify(new Verify.MulExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/MulExactILoopDependentTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/MulExactILoopDependentTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test loop dependent multiplyExact + * @compile MulExactILoopDependentTest.java Verify.java + * @run main MulExactILoopDependentTest + * + */ +public class MulExactILoopDependentTest { + public static void main(String[] args) { + Verify.LoopDependentTest.verify(new Verify.MulExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/MulExactINonConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/MulExactINonConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8026844 + * @summary Test non constant multiplyExact + * @compile MulExactINonConstantTest.java Verify.java + * @run main MulExactINonConstantTest + * + */ + +public class MulExactINonConstantTest { + public static void main(String[] args) { + Verify.NonConstantTest.verify(new Verify.MulExactI()); + Verify.LoadTest.verify(new Verify.MulExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/MulExactIRepeatTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/MulExactIRepeatTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test repeating multiplyExact + * @compile MulExactIRepeatTest.java Verify.java + * @run main MulExactIRepeatTest + * + */ + +public class MulExactIRepeatTest { + public static void main(String[] args) { + runTest(new Verify.MulExactI()); + } + + public static int nonExact(int x, int y, Verify.BinaryMethod method) { + int result = method.unchecked(x, y); + result += method.unchecked(x, y); + result += method.unchecked(x, y); + result += method.unchecked(x, y); + return result; + } + + public static void runTest(Verify.BinaryMethod method) { + java.util.Random rnd = new java.util.Random(); + for (int i = 0; i < 50000; ++i) { + int x = Integer.MAX_VALUE - 10; + int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5); + + int c = rnd.nextInt() / 10; + int d = rnd.nextInt(9); + + int a = catchingExact(x, y, method); + + if (a != 36) { + throw new RuntimeException("a != 36 : " + a); + } + + int b = nonExact(c, d, method); + int n = exact(c, d, method); + + + if (n != b) { + throw new RuntimeException("n != b : " + n + " != " + b); + } + } + } + + public static int exact(int x, int y, Verify.BinaryMethod method) { + int result = 0; + result += method.checkMethod(x, y); + result += method.checkMethod(x, y); + result += method.checkMethod(x, y); + result += method.checkMethod(x, y); + return result; + } + + public static int catchingExact(int x, int y, Verify.BinaryMethod method) { + int result = 0; + try { + result += 5; + result = method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 1; + } + try { + result += 6; + + result += method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 2; + } + try { + result += 7; + result += method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 3; + } + try { + result += 8; + result += 
method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 4; + } + return result; + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/MulExactLConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/MulExactLConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test constant mulExact + * @compile MulExactLConstantTest.java Verify.java + * @run main MulExactLConstantTest + * + */ + +public class MulExactLConstantTest { + public static void main(String[] args) { + Verify.ConstantLongTest.verify(new Verify.MulExactL()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/MulExactLNonConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/MulExactLNonConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
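MulExactIRepeatTest above follows the same shape as AddExactIRepeatTest, with operands scaled for multiplication: |c| <= 2^31 / 10 = 214,748,364 and 0 <= d <= 8, so |c * d| <= 1,717,986,912 < Integer.MAX_VALUE = 2,147,483,647. checkMethod therefore never throws for (c, d), the fourfold accumulation wraps identically in exact() and nonExact(), and (x, y) always overflows, yielding the same expected value of 36 as in the addExact variant.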
+ */ + +/* + * @test + * @bug 8026844 + * @summary Test non constant mulExact + * @compile MulExactLNonConstantTest.java Verify.java + * @run main MulExactLNonConstantTest + * + */ + +public class MulExactLNonConstantTest { + public static void main(String[] args) { + Verify.NonConstantLongTest.verify(new Verify.MulExactL()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/NegExactIConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/NegExactIConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test constant negExact + * @compile NegExactIConstantTest.java Verify.java + * @run main NegExactIConstantTest + * + */ + +public class NegExactIConstantTest { + public static void main(String[] args) { + Verify.ConstantTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI())); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/NegExactILoadTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/NegExactILoadTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8026844 + * @summary Test negExact + * @compile NegExactILoadTest.java Verify.java + * @run main NegExactILoadTest + * + */ + +public class NegExactILoadTest { + public static void main(String[] args) { + Verify.LoadTest.init(); + Verify.LoadTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI())); + } + +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/NegExactILoopDependentTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/NegExactILoopDependentTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test negExact loop dependent + * @compile NegExactILoopDependentTest.java Verify.java + * @run main NegExactILoopDependentTest + * + */ +public class NegExactILoopDependentTest { + public static void main(String[] args) { + Verify.LoopDependentTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI())); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/NegExactINonConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/NegExactINonConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8026844 + * @summary Test non constant negExact + * @compile NegExactINonConstantTest.java Verify.java + * @run main NegExactINonConstantTest + * + */ + +public class NegExactINonConstantTest { + public static void main(String[] args) { + Verify.NonConstantTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI())); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/NegExactLConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/NegExactLConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test constant negExact + * @compile NegExactLConstantTest.java Verify.java + * @run main NegExactLConstantTest + * + */ + +public class NegExactLConstantTest { + public static void main(String[] args) { + Verify.ConstantLongTest.verify(new Verify.UnaryToBinaryLong(new Verify.NegExactL())); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/NegExactLNonConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/NegExactLNonConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test non constant negExact
+ * @compile NegExactLNonConstantTest.java Verify.java
+ * @run main NegExactLNonConstantTest
+ *
+ */
+
+public class NegExactLNonConstantTest {
+    public static void main(String[] args) {
+        Verify.NonConstantLongTest.verify(new Verify.UnaryToBinaryLong(new Verify.NegExactL()));
+    }
+}
diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/NestedMathExactTest.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/NestedMathExactTest.java Thu Nov 21 15:04:54 2013 +0100
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027444
+ * @summary Test nested loops
+ * @compile NestedMathExactTest.java
+ * @run main NestedMathExactTest
+ *
+ */
+
+public class NestedMathExactTest {
+    public static final int LIMIT = 100;
+    public static int[] result = new int[LIMIT];
+    public static int value = 17;
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 100; ++i) {
+            result[i] = runTest();
+        }
+    }
+
+    public static int runTest() {
+        int sum = 0;
+        for (int j = 0; j < 100000; j = Math.addExact(j, 1)) {
+            sum = 1;
+            for (int i = 0; i < 5; ++i) {
+                sum *= value;
+            }
+        }
+        return sum;
+    }
+}
diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/NonConstantTest.java
--- a/test/compiler/intrinsics/mathexact/NonConstantTest.java Thu Nov 21 15:04:26 2013 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
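In NestedMathExactTest above, the inner loop leaves sum = 17^5 = 1,419,857 on every outer iteration; the point of the test is that the outer loop's induction update runs through Math.addExact, so the intrinsic's overflow check must survive the nested-loop optimizations without corrupting the loop structure.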
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test - * @bug 8024924 - * @summary Test non constant addExact - * @compile NonConstantTest.java Verify.java - * @run main NonConstantTest - * - */ - -import java.lang.ArithmeticException; - -public class NonConstantTest { - public static java.util.Random rnd = new java.util.Random(); - - public static void main(String[] args) { - for (int i = 0; i < 50000; ++i) { - int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt(); - Verify.verify(rnd1, rnd2); - Verify.verify(rnd1, rnd2 + 1); - Verify.verify(rnd1 + 1, rnd2); - Verify.verify(rnd1 - 1, rnd2); - Verify.verify(rnd1, rnd2 - 1); - } - } -} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/SplitThruPhiTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SplitThruPhiTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8028198 + * @summary Verify that split through phi does the right thing + * @compile SplitThruPhiTest.java + * @run main SplitThruPhiTest + * + */ + +public class SplitThruPhiTest { + public static volatile int value = 19; + public static int store = 0; + public static void main(String[] args) { + for (int i = 0; i < 150000; ++i) { + store = runTest(value); + } + } + + public static int runTest(int val) { + int result = Math.addExact(val, 1); + int total = 0; + for (int i = val; i < 200; i = Math.addExact(i, 1)) { + total += i; + } + return total; + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/SubExactICondTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactICondTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test subtractExact as condition + * @compile SubExactICondTest.java Verify.java + * @run main SubExactICondTest + * + */ + +public class SubExactICondTest { + public static int result = 0; + + public static void main(String[] args) { + for (int i = 0; i < 50000; ++i) { + runTest(); + } + } + + public static void runTest() { + int i = 7; + while (java.lang.Math.subtractExact(i, result) > -31361) { + if ((java.lang.Math.subtractExact(i, i) & 1) == 1) { + i -= 3; + } else if ((i & 5) == 4) { + i -= 7; + } else if ((i & 0xf) == 6) { + i -= 2; + } else { + i -= 1; + } + result += 2; + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/SubExactIConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactIConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test constant subtractExact + * @compile SubExactIConstantTest.java Verify.java + * @run main SubExactIConstantTest + * + */ + +public class SubExactIConstantTest { + public static void main(String[] args) { + Verify.ConstantTest.verify(new Verify.SubExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/SubExactILoadTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactILoadTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test non constant subtractExact + * @compile SubExactILoadTest.java Verify.java + * @run main SubExactILoadTest + * + */ + +public class SubExactILoadTest { + public static void main(String[] args) { + Verify.LoadTest.init(); + Verify.LoadTest.verify(new Verify.SubExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/SubExactILoopDependentTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactILoopDependentTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test non constant subtractExact + * @compile SubExactILoopDependentTest.java Verify.java + * @run main SubExactILoopDependentTest + * + */ + +public class SubExactILoopDependentTest { + public static void main(String[] args) { + Verify.LoopDependentTest.verify(new Verify.SubExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/SubExactINonConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactINonConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @summary Test non constant subtractExact + * @compile SubExactINonConstantTest.java Verify.java + * @run main SubExactINonConstantTest + * + */ + +public class SubExactINonConstantTest { + public static void main(String[] args) { + Verify.NonConstantTest.verify(new Verify.SubExactI()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/SubExactIRepeatTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactIRepeatTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8026844 + * @summary Test repeating subtractExact + * @compile SubExactIRepeatTest.java Verify.java + * @run main SubExactIRepeatTest + * + */ + +import java.lang.ArithmeticException; + +public class SubExactIRepeatTest { + public static void main(String[] args) { + runTest(new Verify.SubExactI()); + } + + public static int nonExact(int x, int y, Verify.BinaryMethod method) { + int result = method.unchecked(x, y); + result += method.unchecked(x, y); + result += method.unchecked(x, y); + result += method.unchecked(x, y); + return result; + } + + public static void runTest(Verify.BinaryMethod method) { + java.util.Random rnd = new java.util.Random(); + for (int i = 0; i < 50000; ++i) { + int x = Integer.MIN_VALUE + 10; + int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5); + + int c = rnd.nextInt() / 2; + int d = rnd.nextInt() / 2; + + int a = catchingExact(x, y, method); + + if (a != 36) { + throw new RuntimeException("a != 36 : " + a); + } + + int b = nonExact(c, d, method); + int n = exact(c, d, method); + + + if (n != b) { + throw new RuntimeException("n != b : " + n + " != " + b); + } + } + } + + public static int exact(int x, int y, Verify.BinaryMethod method) { + int result = 0; + result += method.checkMethod(x, y); + result += method.checkMethod(x, y); + result += method.checkMethod(x, y); + result += method.checkMethod(x, y); + return result; + } + + public static int catchingExact(int x, int y, Verify.BinaryMethod method) { + int result = 0; + try { + result += 5; + result = method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 1; + } + try { + result += 6; + + result += method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 2; + } + try { + result += 7; + result += method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 3; + } + try { + result += 8; + result += method.checkMethod(x, y); + } catch (ArithmeticException e) { + result += 4; + } + return result; + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/SubExactLConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
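// Why catchingExact above must return 36: with x = Integer.MIN_VALUE + 10
// and y within a few units of Integer.MAX_VALUE - 10, every checkMethod(x, y)
// overflows, so each of the four try blocks contributes its pre-increment
// plus its catch increment:
//     (5 + 1) + (6 + 2) + (7 + 3) + (8 + 4) == 36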
+ */ + +/* + * @test + * @bug 8026844 + * @bug 8027353 + * @summary Test constant subtractExact + * @compile SubExactLConstantTest.java Verify.java + * @run main SubExactLConstantTest + * + */ + +public class SubExactLConstantTest { + public static void main(String[] args) { + Verify.ConstantLongTest.verify(new Verify.SubExactL()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026844 + * @bug 8027353 + * @summary Test non constant subtractExact + * @compile SubExactLNonConstantTest.java Verify.java + * @run main SubExactLNonConstantTest + * + */ + +public class SubExactLNonConstantTest { + public static void main(String[] args) { + Verify.NonConstantLongTest.verify(new Verify.SubExactL()); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/mathexact/Verify.java --- a/test/compiler/intrinsics/mathexact/Verify.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/compiler/intrinsics/mathexact/Verify.java Thu Nov 21 15:04:54 2013 +0100 @@ -22,47 +22,641 @@ */ public class Verify { - public static String throwWord(boolean threw) { - return (threw ? "threw" : "didn't throw"); - } + public static String throwWord(boolean threw) { + return (threw ? 
"threw" : "didn't throw"); + } + + public static void verifyResult(UnaryMethod method, int result1, int result2, boolean exception1, boolean exception2, int value) { + if (exception1 != exception2) { + throw new RuntimeException("Intrinsic version [" + method.name() + "]" + throwWord(exception1) + " exception, NonIntrinsic version" + throwWord(exception2) + " for: " + value); + } + if (result1 != result2) { + throw new RuntimeException("Intrinsic version [" + method.name() + "] returned: " + result1 + " while NonIntrinsic version returned: " + result2); + } + } + + public static void verifyResult(UnaryLongMethod method, long result1, long result2, boolean exception1, boolean exception2, long value) { + if (exception1 != exception2) { + throw new RuntimeException("Intrinsic version [" + method.name() + "]" + throwWord(exception1) + " exception, NonIntrinsic version" + throwWord(exception2) + " for: " + value); + } + if (result1 != result2) { + throw new RuntimeException("Intrinsic version [" + method.name() + "] returned: " + result1 + " while NonIntrinsic version returned: " + result2); + } + } + + private static void verifyResult(BinaryMethod method, int result1, int result2, boolean exception1, boolean exception2, int a, int b) { + if (exception1 != exception2) { + throw new RuntimeException("Intrinsic version [" + method.name() + "]" + throwWord(exception1) + " exception, NonIntrinsic version " + throwWord(exception2) + " for: " + a + " + " + b); + } + if (result1 != result2) { + throw new RuntimeException("Intrinsic version [" + method.name() + "] returned: " + result1 + " while NonIntrinsic version returned: " + result2); + } + } + + private static void verifyResult(BinaryLongMethod method, long result1, long result2, boolean exception1, boolean exception2, long a, long b) { + if (exception1 != exception2) { + throw new RuntimeException("Intrinsic version [" + method.name() + "]" + throwWord(exception1) + " exception, NonIntrinsic version " + throwWord(exception2) + " for: " + a + " + " + b); + } + if (result1 != result2) { + throw new RuntimeException("Intrinsic version [" + method.name() + "] returned: " + result1 + " while NonIntrinsic version returned: " + result2); + } + } + + + public static void verifyUnary(int a, UnaryMethod method) { + boolean exception1 = false, exception2 = false; + int result1 = 0, result2 = 0; + try { + result1 = method.checkMethod(a); + } catch (ArithmeticException e) { + exception1 = true; + } + try { + result2 = method.safeMethod(a); + } catch (ArithmeticException e) { + exception2 = true; + } + + verifyResult(method, result1, result2, exception1, exception2, a); + } + + public static void verifyUnary(long a, UnaryLongMethod method) { + boolean exception1 = false, exception2 = false; + long result1 = 0, result2 = 0; + try { + result1 = method.checkMethod(a); + } catch (ArithmeticException e) { + exception1 = true; + } + try { + result2 = method.safeMethod(a); + } catch (ArithmeticException e) { + exception2 = true; + } + + verifyResult(method, result1, result2, exception1, exception2, a); + } + + + public static void verifyBinary(int a, int b, BinaryMethod method) { + boolean exception1 = false, exception2 = false; + int result1 = 0, result2 = 0; + try { + result1 = method.checkMethod(a, b); + } catch (ArithmeticException e) { + exception1 = true; + } + try { + result2 = method.safeMethod(a, b); + } catch (ArithmeticException e) { + exception2 = true; + } + + verifyResult(method, result1, result2, exception1, exception2, a, b); + } + + public static 
void verifyBinary(long a, long b, BinaryLongMethod method) { + boolean exception1 = false, exception2 = false; + long result1 = 0, result2 = 0; + try { + result1 = method.checkMethod(a, b); + } catch (ArithmeticException e) { + exception1 = true; + } + try { + result2 = method.safeMethod(a, b); + } catch (ArithmeticException e) { + exception2 = true; + } + + verifyResult(method, result1, result2, exception1, exception2, a, b); + } + + + public static class LoadTest { + public static java.util.Random rnd = new java.util.Random(); + public static int[] values = new int[256]; + + public static void init() { + for (int i = 0; i < values.length; ++i) { + values[i] = rnd.nextInt(); + } + } + + public static void verify(BinaryMethod method) { + for (int i = 0; i < 50000; ++i) { + Verify.verifyBinary(values[i & 255], values[i & 255] - i, method); + Verify.verifyBinary(values[i & 255] + i, values[i & 255] - i, method); + Verify.verifyBinary(values[i & 255], values[i & 255], method); + if ((i & 1) == 1 && i > 5) { + Verify.verifyBinary(values[i & 255] + i, values[i & 255] - i, method); + } else { + Verify.verifyBinary(values[i & 255] - i, values[i & 255] + i, method); + } + Verify.verifyBinary(values[i & 255], values[(i + 1) & 255], method); + } + } + } + + public static class NonConstantTest { + public static java.util.Random rnd = new java.util.Random(); + + public static void verify(BinaryMethod method) { + for (int i = 0; i < 50000; ++i) { + int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt(); + Verify.verifyBinary(rnd1, rnd2, method); + Verify.verifyBinary(rnd1, rnd2 + 1, method); + Verify.verifyBinary(rnd1 + 1, rnd2, method); + Verify.verifyBinary(rnd1 - 1, rnd2, method); + Verify.verifyBinary(rnd1, rnd2 - 1, method); + } + } + } - public static void verify(int a, int b) { - boolean exception1 = false, exception2 = false; - int result1 = 0, result2 = 0; - try { - result1 = testIntrinsic(a, b); - } catch (ArithmeticException e) { - exception1 = true; + public static class NonConstantLongTest { + public static long[] values = { Long.MIN_VALUE, Long.MAX_VALUE, 0, Long.MAX_VALUE - 1831 }; + public static java.util.Random rnd = new java.util.Random(); + + public static void verify(BinaryLongMethod method) { + for (int i = 0; i < 50000; ++i) { + long rnd1 = rnd.nextLong(), rnd2 = rnd.nextLong(); + Verify.verifyBinary(rnd1, rnd2, method); + Verify.verifyBinary(rnd1, rnd2 + 1, method); + Verify.verifyBinary(rnd1 + 1, rnd2, method); + Verify.verifyBinary(rnd1 - 1, rnd2, method); + Verify.verifyBinary(rnd1, rnd2 - 1, method); + Verify.verifyBinary(rnd1 + Long.MAX_VALUE - rnd2, rnd2 + 1, method); + Verify.verifyBinary(values[0], values[2], method); + Verify.verifyBinary(values[1], values[2], method); + Verify.verifyBinary(values[3], 74L, method); + } + } + } + + public static class LoopDependentTest { + public static java.util.Random rnd = new java.util.Random(); + + public static void verify(BinaryMethod method) { + int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt(); + runTest(rnd1, rnd2, method); + } + + private static void runTest(int rnd1, int rnd2, BinaryMethod method) { + for (int i = 0; i < 50000; ++i) { + Verify.verifyBinary(rnd1 + i, rnd2 + i, method); + Verify.verifyBinary(rnd1 + i, rnd2 + (i & 0xff), method); + Verify.verifyBinary(rnd1 - i, rnd2 - (i & 0xff), method); + Verify.verifyBinary(rnd1 + i + 1, rnd2 + i + 2, method); + Verify.verifyBinary(rnd1 + i * 2, rnd2 + i, method); + } + } + } + + public static class ConstantTest { + public static void verify(BinaryMethod method) { + for (int i = 0; i < 
50000; ++i) { + Verify.verifyBinary(5, 7, method); + Verify.verifyBinary(Integer.MAX_VALUE, 1, method); + Verify.verifyBinary(Integer.MIN_VALUE, -1, method); + Verify.verifyBinary(Integer.MAX_VALUE, -1, method); + Verify.verifyBinary(Integer.MIN_VALUE, 1, method); + Verify.verifyBinary(Integer.MAX_VALUE / 2, Integer.MAX_VALUE / 2, method); + Verify.verifyBinary(Integer.MAX_VALUE / 2, (Integer.MAX_VALUE / 2) + 3, method); + Verify.verifyBinary(Integer.MAX_VALUE, Integer.MIN_VALUE, method); + } + } + } + + public static class ConstantLongTest { + public static void verify(BinaryLongMethod method) { + for (int i = 0; i < 50000; ++i) { + Verify.verifyBinary(5, 7, method); + Verify.verifyBinary(Long.MAX_VALUE, 1, method); + Verify.verifyBinary(Long.MIN_VALUE, -1, method); + Verify.verifyBinary(Long.MAX_VALUE, -1, method); + Verify.verifyBinary(Long.MIN_VALUE, 1, method); + Verify.verifyBinary(Long.MAX_VALUE / 2, Long.MAX_VALUE / 2, method); + Verify.verifyBinary(Long.MAX_VALUE / 2, (Long.MAX_VALUE / 2) + 3, method); + Verify.verifyBinary(Long.MAX_VALUE, Long.MIN_VALUE, method); + } + } + } + + public static interface BinaryMethod { + int safeMethod(int a, int b); + int checkMethod(int a, int b); + int unchecked(int a, int b); + String name(); } - try { - result2 = testNonIntrinsic(a, b); - } catch (ArithmeticException e) { - exception2 = true; + + public static interface UnaryMethod { + int safeMethod(int value); + int checkMethod(int value); + int unchecked(int value); + String name(); + } + + public static interface BinaryLongMethod { + long safeMethod(long a, long b); + long checkMethod(long a, long b); + long unchecked(long a, long b); + String name(); + } + + public static interface UnaryLongMethod { + long safeMethod(long value); + long checkMethod(long value); + long unchecked(long value); + String name(); + } + + public static class UnaryToBinary implements BinaryMethod { + private final UnaryMethod method; + public UnaryToBinary(UnaryMethod method) { + this.method = method; + } + + @Override + public int safeMethod(int a, int b) { + return method.safeMethod(a); + } + + @Override + public int checkMethod(int a, int b) { + return method.checkMethod(a); + } + + @Override + public int unchecked(int a, int b) { + return method.unchecked(a); + + } + + @Override + public String name() { + return method.name(); + } + } + + public static class UnaryToBinaryLong implements BinaryLongMethod { + private final UnaryLongMethod method; + public UnaryToBinaryLong(UnaryLongMethod method) { + this.method = method; + } + + @Override + public long safeMethod(long a, long b) { + return method.safeMethod(a); + } + + @Override + public long checkMethod(long a, long b) { + return method.checkMethod(a); + } + + @Override + public long unchecked(long a, long b) { + return method.unchecked(a); + + } + + @Override + public String name() { + return method.name(); + } } - if (exception1 != exception2) { - throw new RuntimeException("Intrinsic version " + throwWord(exception1) + " exception, NonIntrinsic version " + throwWord(exception2) + " for: " + a + " + " + b); + + public static class AddExactI implements BinaryMethod { + @Override + public int safeMethod(int x, int y) { + int r = x + y; + // HD 2-12 Overflow iff both arguments have the opposite sign of the result + if (((x ^ r) & (y ^ r)) < 0) { + throw new ArithmeticException("integer overflow"); + } + return r; + + } + + @Override + public int checkMethod(int a, int b) { + return Math.addExact(a, b); + } + + @Override + public String name() { + return 
"addExact"; + } + + @Override + public int unchecked(int a, int b) { + return a + b; + } } - if (result1 != result2) { - throw new RuntimeException("Intrinsic version returned: " + a + " while NonIntrinsic version returned: " + b); + + public static class AddExactL implements BinaryLongMethod { + @Override + public long safeMethod(long x, long y) { + long r = x + y; + // HD 2-12 Overflow iff both arguments have the opposite sign of the result + if (((x ^ r) & (y ^ r)) < 0) { + throw new ArithmeticException("integer overflow"); + } + return r; + + } + + @Override + public long checkMethod(long a, long b) { + return Math.addExact(a, b); + } + + @Override + public String name() { + return "addExactLong"; + } + + @Override + public long unchecked(long a, long b) { + return a + b; + } + } + + public static class MulExactI implements BinaryMethod { + @Override + public int safeMethod(int x, int y) { + long r = (long)x * (long)y; + if ((int)r != r) { + throw new ArithmeticException("integer overflow"); + } + return (int)r; + + } + + @Override + public int checkMethod(int a, int b) { + return Math.multiplyExact(a, b); + } + + @Override + public int unchecked(int a, int b) { + return a * b; + } + + @Override + public String name() { + return "multiplyExact"; + } } - } + + public static class MulExactL implements BinaryLongMethod { + @Override + public long safeMethod(long x, long y) { + long r = x * y; + long ax = Math.abs(x); + long ay = Math.abs(y); + if (((ax | ay) >>> 31 != 0)) { + // Some bits greater than 2^31 that might cause overflow + // Check the result using the divide operator + // and check for the special case of Long.MIN_VALUE * -1 + if (((y != 0) && (r / y != x)) || + (x == Long.MIN_VALUE && y == -1)) { + throw new ArithmeticException("long overflow"); + } + } + return r; + } + + @Override + public long checkMethod(long a, long b) { + return Math.multiplyExact(a, b); + } + + @Override + public long unchecked(long a, long b) { + return a * b; + } + + @Override + public String name() { + return "multiplyExact"; + } + } - public static int testIntrinsic(int a, int b) { - return java.lang.Math.addExact(a, b); - } + public static class NegExactL implements UnaryLongMethod { + @Override + public long safeMethod(long a) { + if (a == Long.MIN_VALUE) { + throw new ArithmeticException("long overflow"); + } + + return -a; + + } + + @Override + public long checkMethod(long value) { + return Math.negateExact(value); + } + + @Override + public long unchecked(long value) { + return -value; + } + + @Override + public String name() { + return "negateExactLong"; + } + } + + public static class NegExactI implements UnaryMethod { + @Override + public int safeMethod(int a) { + if (a == Integer.MIN_VALUE) { + throw new ArithmeticException("integer overflow"); + } + + return -a; + + } + + @Override + public int checkMethod(int value) { + return Math.negateExact(value); + } + + @Override + public int unchecked(int value) { + return -value; + } - public static int testNonIntrinsic(int a, int b) { - return safeAddExact(a, b); - } + @Override + public String name() { + return "negateExact"; + } + } + + public static class SubExactI implements BinaryMethod { + @Override + public int safeMethod(int x, int y) { + int r = x - y; + // HD 2-12 Overflow iff the arguments have different signs and + // the sign of the result is different than the sign of x + if (((x ^ y) & (x ^ r)) < 0) { + throw new ArithmeticException("integer overflow"); + } + return r; + } + + @Override + public int checkMethod(int a, int b) { + 
return Math.subtractExact(a, b); + } + + @Override + public int unchecked(int a, int b) { + return a - b; + } + + @Override + public String name() { + return "subtractExact"; + } + } + + public static class SubExactL implements BinaryLongMethod { + @Override + public long safeMethod(long x, long y) { + long r = x - y; + // HD 2-12 Overflow iff the arguments have different signs and + // the sign of the result is different than the sign of x + if (((x ^ y) & (x ^ r)) < 0) { + throw new ArithmeticException("integer overflow"); + } + return r; + } + + @Override + public long checkMethod(long a, long b) { + return Math.subtractExact(a, b); + } + + @Override + public long unchecked(long a, long b) { + return a - b; + } + + @Override + public String name() { + return "subtractExactLong"; + } + } + + static class IncExactL implements UnaryLongMethod { + @Override + public long safeMethod(long a) { + if (a == Long.MAX_VALUE) { + throw new ArithmeticException("long overflow"); + } + + return a + 1L; + + } + + @Override + public long checkMethod(long value) { + return Math.incrementExact(value); + } + + @Override + public long unchecked(long value) { + return value + 1; + } - // Copied java.lang.Math.addExact to avoid intrinsification - public static int safeAddExact(int x, int y) { - int r = x + y; - // HD 2-12 Overflow iff both arguments have the opposite sign of the result - if (((x ^ r) & (y ^ r)) < 0) { - throw new ArithmeticException("integer overflow"); + @Override + public String name() { + return "incrementExactLong"; + } + } + + static class IncExactI implements UnaryMethod { + @Override + public int safeMethod(int a) { + if (a == Integer.MAX_VALUE) { + throw new ArithmeticException("integer overflow"); + } + + return a + 1; + } + + @Override + public int checkMethod(int value) { + return Math.incrementExact(value); + } + + @Override + public int unchecked(int value) { + return value + 1; + } + + @Override + public String name() { + return "incrementExact"; + } } - return r; - } + + static class DecExactL implements UnaryLongMethod { + @Override + public long safeMethod(long a) { + if (a == Long.MIN_VALUE) { + throw new ArithmeticException("long overflow"); + } + + return a - 1L; + } + + @Override + public long checkMethod(long value) { + return Math.decrementExact(value); + } + + @Override + public long unchecked(long value) { + return value - 1; + } + + @Override + public String name() { + return "decExactLong"; + } + } + + static class DecExactI implements UnaryMethod { + @Override + public int safeMethod(int a) { + if (a == Integer.MIN_VALUE) { + throw new ArithmeticException("integer overflow"); + } + + return a - 1; + } + + @Override + public int checkMethod(int value) { + return Math.decrementExact(value); + } + + @Override + public int unchecked(int value) { + return value - 1; + } + + @Override + public String name() { + return "decrementExact"; + } + } + } diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/intrinsics/stringequals/TestStringEqualsBadLength.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/stringequals/TestStringEqualsBadLength.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
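// Worked example of the "HD 2-12" overflow test used by the safeMethod
// copies above (Hacker's Delight, section 2-12): x + y overflows iff both
// operands have the sign opposite to the result's, i.e.
// ((x ^ r) & (y ^ r)) < 0. For instance:
//     x = Integer.MAX_VALUE = 0x7fffffff, y = 1, r = x + y = 0x80000000
//     x ^ r = 0xffffffff and y ^ r = 0x80000001 are both negative, so their
//     AND is negative and ArithmeticException is thrown, matching addExact.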
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027445 + * @summary String.equals() may be called with a length whose upper bits are not cleared + * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation TestStringEqualsBadLength + * + */ + +import java.util.Arrays; + +public class TestStringEqualsBadLength { + + int v1; + int v2; + + boolean m(String s1) { + int l = v2 - v1; // 0 - (-1) = 1. On 64 bit: 0xffffffff00000001 + char[] arr = new char[l]; + arr[0] = 'a'; + String s2 = new String(arr); + // The string length is not reloaded but the value computed is + // reused so pointer computation must not use + // 0xffffffff00000001 + return s2.equals(s1); + } + + // Same thing with String.compareTo() + int m2(String s1) { + int l = v2 - v1; + char[] arr = new char[l+1]; + arr[0] = 'a'; + arr[1] = 'b'; + String s2 = new String(arr); + return s2.compareTo(s1); + } + + // Same thing with equals() for arrays + boolean m3(char[] arr1) { + int l = v2 - v1; // 0 - (-1) = 1. On 64 bit: 0xffffffff00000001 + char[] arr2 = new char[l]; + arr2[0] = 'a'; + return Arrays.equals(arr2, arr1); + } + + static public void main(String[] args) { + TestStringEqualsBadLength tse = new TestStringEqualsBadLength(); + tse.v1 = -1; + tse.v2 = 0; + char[] arr = new char[1]; + arr[0] = 'a'; + for (int i = 0; i < 20000; i++) { + tse.m("a"); + tse.m2("ab"); + tse.m3(arr); + } + + System.out.println("TEST PASSED"); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
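// The 0xffffffff00000001 in TestStringEqualsBadLength above, spelled out:
// l = v2 - v1 = 0 - (-1) = 1 as an int, but a stale 64-bit register value
// for it whose upper half was never cleared would make any pointer
// arithmetic derived from it wildly wrong. The required widening is simply:
int v1 = -1, v2 = 0;
int l = v2 - v1;          // 1
long offset = (long) l;   // must be 1L, never 0xffffffff00000001L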
+ * + */ + +/** + * @test + * @bug 8026124 + * @summary Javascript file provoked assertion failure in linkResolver.cpp + * + * @run main/othervm CreatesInterfaceDotEqualsCallInfo + */ + +public class CreatesInterfaceDotEqualsCallInfo { + public static void main(String[] args) throws java.io.IOException { + String[] jsargs = { System.getProperty("test.src", ".") + + "/createsInterfaceDotEqualsCallInfo.js" }; + jdk.nashorn.tools.Shell.main(System.in, System.out, System.err, jsargs); + System.out.println("PASS, did not crash running Javascript"); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/jsr292/createsInterfaceDotEqualsCallInfo.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/jsr292/createsInterfaceDotEqualsCallInfo.js Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +var path = new java.io.File("/Users/someone").toPath(); +path.toString(); diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/print/PrintInlining.java --- a/test/compiler/print/PrintInlining.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/compiler/print/PrintInlining.java Thu Nov 21 15:04:54 2013 +0100 @@ -25,7 +25,7 @@ * @test * @bug 8022585 * @summary VM crashes when ran with -XX:+PrintInlining - * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining + * @run main/othervm -Xcomp -XX:+UnlockDiagnosticVMOptions -XX:+PrintInlining PrintInlining * */ diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/profiling/TestUnexpectedProfilingMismatch.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/profiling/TestUnexpectedProfilingMismatch.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
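// Note on the PrintInlining fix above: PrintInlining is a diagnostic flag in
// product builds, so without -XX:+UnlockDiagnosticVMOptions ahead of it the
// VM rejects the option and exits before the test body ever runs.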
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027631 + * @summary profiling of arguments at calls cannot rely on signature of callee for types + * @run main/othervm -XX:-BackgroundCompilation -XX:TieredStopAtLevel=3 -XX:TypeProfileLevel=111 -XX:Tier3InvocationThreshold=200 -XX:Tier0InvokeNotifyFreqLog=7 TestUnexpectedProfilingMismatch + * + */ + +import java.lang.invoke.*; + +public class TestUnexpectedProfilingMismatch { + + static class A { + } + + static class B { + } + + static void mA(A a) { + } + + static void mB(B b) { + } + + static final MethodHandle mhA; + static final MethodHandle mhB; + static { + MethodHandles.Lookup lookup = MethodHandles.lookup(); + MethodType mt = MethodType.methodType(void.class, A.class); + MethodHandle res = null; + try { + res = lookup.findStatic(TestUnexpectedProfilingMismatch.class, "mA", mt); + } catch(NoSuchMethodException ex) { + } catch(IllegalAccessException ex) { + } + mhA = res; + mt = MethodType.methodType(void.class, B.class); + try { + res = lookup.findStatic(TestUnexpectedProfilingMismatch.class, "mB", mt); + } catch(NoSuchMethodException ex) { + } catch(IllegalAccessException ex) { + } + mhB = res; + } + + void m1(A a, boolean doit) throws Throwable { + if (doit) { + mhA.invoke(a); + } + } + + void m2(B b) throws Throwable { + mhB.invoke(b); + } + + static public void main(String[] args) { + TestUnexpectedProfilingMismatch tih = new TestUnexpectedProfilingMismatch(); + A a = new A(); + B b = new B(); + try { + for (int i = 0; i < 256 - 1; i++) { + tih.m1(a, true); + } + // Will trigger the compilation but will also run once + // more interpreted with a non null MDO which it will + // update. Make it skip the body of the method. + tih.m1(a, false); + // Compile this one as well and do the profiling + for (int i = 0; i < 256; i++) { + tih.m2(b); + } + // Will run and see a conflict + tih.m1(a, true); + } catch(Throwable ex) { + ex.printStackTrace(); + } + System.out.println("TEST PASSED"); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/profiling/unloadingconflict/B.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/profiling/unloadingconflict/B.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
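// Rough threshold arithmetic behind the warm-up loops above, assuming the
// flags on the @run line: -XX:Tier0InvokeNotifyFreqLog=7 makes the
// interpreter notify the compilation policy every 2^7 = 128 invocations,
// and -XX:Tier3InvocationThreshold=200 is first exceeded at the second
// notification (256 > 200), so m1 gets queued for a tier-3 compile while
// one last interpreted run still updates its MDO.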
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +public class B { +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/profiling/unloadingconflict/TestProfileConflictClassUnloading.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/profiling/unloadingconflict/TestProfileConflictClassUnloading.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027572 + * @summary class unloading resets profile, method compiled after the profile is first set and before class loading sets unknown bit with not recorded class + * @build B + * @run main/othervm -XX:TypeProfileLevel=222 -XX:-BackgroundCompilation TestProfileConflictClassUnloading + * + */ + +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.nio.file.Paths; + +public class TestProfileConflictClassUnloading { + static class A { + } + + + static void m1(Object o) { + } + + static void m2(Object o) { + m1(o); + } + + static void m3(A a, boolean do_call) { + if (!do_call) { + return; + } + m2(a); + } + + public static ClassLoader newClassLoader() { + try { + return new URLClassLoader(new URL[] { + Paths.get(System.getProperty("test.classes",".")).toUri().toURL(), + }, null); + } catch (MalformedURLException e){ + throw new RuntimeException("Unexpected URL conversion failure", e); + } + } + + public static void main(String[] args) throws Exception { + ClassLoader loader = newClassLoader(); + Object o = loader.loadClass("B").newInstance(); + // collect conflicting profiles + for (int i = 0; i < 5000; i++) { + m2(o); + } + // prepare for conflict + A a = new A(); + for (int i = 0; i < 5000; i++) { + m3(a, false); + } + // unload class in profile + o = null; + loader = null; + System.gc(); + // record the conflict + m3(a, true); + // trigger another GC + System.gc(); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/regalloc/C1ObjectSpillInLogicOp.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/regalloc/C1ObjectSpillInLogicOp.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
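// The newClassLoader() pattern above is what makes B unloadable: the
// URLClassLoader is created with a null parent, so B is reachable only
// through that loader; once both the loader and the B instance are cleared,
// System.gc() may unload the class and reset the entries it left in the
// type profile.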
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027751 + * @summary C1 crashes generating G1 post-barrier in Unsafe.getAndSetObject() intrinsic because of the new value spill + * @run main/othervm -XX:+UseG1GC C1ObjectSpillInLogicOp + * + * G1 barriers use logical operators (xor) on T_OBJECT mixed with T_LONG or T_INT. + * The current implementation of logical operations on x86 in C1 doesn't allow for long operands to be on stack. + * There is a special code in the register allocator that forces long arguments in registers on x86. However T_OBJECT + * can be spilled just fine, and in that case the xor emission will fail. + */ + +import java.util.concurrent.atomic.*; +class C1ObjectSpillInLogicOp { + static public void main(String[] args) { + AtomicReferenceArray x = new AtomicReferenceArray(128); + Integer y = new Integer(0); + for (int i = 0; i < 50000; i++) { + x.getAndSet(i % x.length(), y); + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/startup/SmallCodeCacheStartup.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/startup/SmallCodeCacheStartup.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
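// Background for the xor mentioned in C1ObjectSpillInLogicOp above: G1's
// post-barrier filters out same-region stores by xor-ing the store address
// with the new value and shifting right by the region-size log; a zero
// result means no card mark is needed. C1's x86 back end forces long
// operands of logical ops into registers, but a T_OBJECT operand could
// still be spilled to the stack, and the xor emission crashed on it.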
+ */ + +/* + * @test + * @bug 8023014 + * @summary Test ensures that there is no crash when compiler initialization fails + * @library /testlibrary + * + */ +import com.oracle.java.testlibrary.*; + +public class SmallCodeCacheStartup { + public static void main(String[] args) throws Exception { + ProcessBuilder pb; + OutputAnalyzer out; + + pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m", "-XX:CICompilerCount=64", "-version"); + out = new OutputAnalyzer(pb.start()); + out.shouldContain("no space to run compiler"); + out.shouldHaveExitValue(0); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/startup/StartupOutput.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/startup/StartupOutput.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026949 + * @summary Test ensures correct VM output during startup + * @library ../../testlibrary + * + */ +import com.oracle.java.testlibrary.*; + +public class StartupOutput { + public static void main(String[] args) throws Exception { + ProcessBuilder pb; + OutputAnalyzer out; + + pb = ProcessTools.createJavaProcessBuilder("-Xint", "-XX:+DisplayVMOutputToStdout", "-version"); + out = new OutputAnalyzer(pb.start()); + out.shouldNotContain("no space to run compilers"); + + out.shouldHaveExitValue(0); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/tiered/CompLevelsTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/tiered/CompLevelsTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
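// A minimal sketch of the testlibrary pattern used by SmallCodeCacheStartup
// and StartupOutput above (assumes com.oracle.java.testlibrary.* is on the
// classpath, as the @library tags arrange):
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-version");
OutputAnalyzer out = new OutputAnalyzer(pb.start());
out.shouldHaveExitValue(0); // fails the test if the exit code differs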
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * Abstract class for testing of used compilation levels correctness. + * + * @author igor.ignatyev@oracle.com + */ +public abstract class CompLevelsTest extends CompilerWhiteBoxTest { + protected CompLevelsTest(TestCase testCase) { + super(testCase); + // to prevent inlining of #method + WHITE_BOX.testSetDontInlineMethod(method, true); + } + + /** + * Checks that level is available. + * @param compLevel level to check + */ + protected void testAvailableLevel(int compLevel, int bci) { + if (IS_VERBOSE) { + System.out.printf("testAvailableLevel(level = %d, bci = %d)%n", + compLevel, bci); + } + WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci); + checkCompiled(); + checkLevel(compLevel, getCompLevel()); + deoptimize(); + } + + /** + * Checks that level is unavailable. + * @param compLevel level to check + */ + protected void testUnavailableLevel(int compLevel, int bci) { + if (IS_VERBOSE) { + System.out.printf("testUnavailableLevel(level = %d, bci = %d)%n", + compLevel, bci); + } + WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci); + checkNotCompiled(); + } + + /** + * Checks validity of compilation level. + * @param expected expected level + * @param actual actually level + */ + protected void checkLevel(int expected, int actual) { + if (expected != actual) { + throw new RuntimeException("expected[" + expected + "] != actual[" + + actual + "]"); + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/tiered/NonTieredLevelsTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/tiered/NonTieredLevelsTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.util.function.IntPredicate; + +/** + * @test NonTieredLevelsTest + * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * @build NonTieredLevelsTest + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. 
-XX:-TieredCompilation + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:CompileCommand=compileonly,TestCase$Helper::* + * NonTieredLevelsTest + * @summary Verify that only one level can be used + * @author igor.ignatyev@oracle.com + */ +public class NonTieredLevelsTest extends CompLevelsTest { + private static final int AVAILABLE_COMP_LEVEL; + private static final IntPredicate IS_AVAILABLE_COMPLEVEL; + static { + String vmName = System.getProperty("java.vm.name"); + if (vmName.endsWith(" Server VM")) { + AVAILABLE_COMP_LEVEL = COMP_LEVEL_FULL_OPTIMIZATION; + IS_AVAILABLE_COMPLEVEL = x -> x == COMP_LEVEL_FULL_OPTIMIZATION; + } else if (vmName.endsWith(" Client VM") + || vmName.endsWith(" Minimal VM")) { + AVAILABLE_COMP_LEVEL = COMP_LEVEL_SIMPLE; + IS_AVAILABLE_COMPLEVEL = x -> x >= COMP_LEVEL_SIMPLE + && x <= COMP_LEVEL_FULL_PROFILE; + } else { + throw new RuntimeException("Unknown VM: " + vmName); + } + + } + public static void main(String[] args) throws Exception { + if (TIERED_COMPILATION) { + System.err.println("Test isn't applicable w/ enabled " + + "TieredCompilation. Skip test."); + return; + } + for (TestCase test : TestCase.values()) { + new NonTieredLevelsTest(test).runTest(); + } + } + + private NonTieredLevelsTest(TestCase testCase) { + super(testCase); + // to prevent inlining of #method + WHITE_BOX.testSetDontInlineMethod(method, true); + } + + @Override + protected void test() throws Exception { + checkNotCompiled(); + compile(); + checkCompiled(); + + int compLevel = getCompLevel(); + checkLevel(AVAILABLE_COMP_LEVEL, compLevel); + int bci = WHITE_BOX.getMethodEntryBci(method); + deoptimize(); + if (!testCase.isOsr) { + for (int level = 1; level <= COMP_LEVEL_MAX; ++level) { + if (IS_AVAILABLE_COMPLEVEL.test(level)) { + testAvailableLevel(level, bci); + } else { + testUnavailableLevel(level, bci); + } + } + } else { + System.out.println("skip other levels testing in OSR"); + testAvailableLevel(AVAILABLE_COMP_LEVEL, bci); + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/tiered/TieredLevelsTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/tiered/TieredLevelsTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TieredLevelsTest + * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * @build TieredLevelsTest + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. 
-XX:+TieredCompilation + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:CompileCommand=compileonly,TestCase$Helper::* + * TieredLevelsTest + * @summary Verify that all levels < 'TieredStopAtLevel' can be used + * @author igor.ignatyev@oracle.com + */ +public class TieredLevelsTest extends CompLevelsTest { + public static void main(String[] args) throws Exception { + if (!TIERED_COMPILATION) { + System.err.println("Test isn't applicable w/ disabled " + + "TieredCompilation. Skip test."); + return; + } + for (TestCase test : TestCase.values()) { + new TieredLevelsTest(test).runTest(); + } + } + + private TieredLevelsTest(TestCase testCase) { + super(testCase); + // to prevent inlining of #method + WHITE_BOX.testSetDontInlineMethod(method, true); + } + + @Override + protected void test() throws Exception { + checkNotCompiled(); + compile(); + checkCompiled(); + + int compLevel = getCompLevel(); + if (compLevel > TIERED_STOP_AT_LEVEL) { + throw new RuntimeException("method.compLevel[" + compLevel + + "] > TieredStopAtLevel [" + TIERED_STOP_AT_LEVEL + "]"); + } + int bci = WHITE_BOX.getMethodEntryBci(method); + deoptimize(); + + for (int testedTier = 1; testedTier <= TIERED_STOP_AT_LEVEL; + ++testedTier) { + testAvailableLevel(testedTier, bci); + } + for (int testedTier = TIERED_STOP_AT_LEVEL + 1; + testedTier <= COMP_LEVEL_MAX; ++testedTier) { + testUnavailableLevel(testedTier, bci); + } + } + + + @Override + protected void checkLevel(int expected, int actual) { + if (expected == COMP_LEVEL_FULL_PROFILE + && actual == COMP_LEVEL_LIMITED_PROFILE) { + // for simple method full_profile may be replaced by limited_profile + return; + } + super.checkLevel(expected, actual); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/types/TypeSpeculation.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/types/TypeSpeculation.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,428 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
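// Compilation-level map assumed throughout these tiered tests:
//     0 = interpreter
//     1 = C1, no profiling       (COMP_LEVEL_SIMPLE)
//     2 = C1, limited profiling  (COMP_LEVEL_LIMITED_PROFILE)
//     3 = C1, full profiling     (COMP_LEVEL_FULL_PROFILE)
//     4 = C2                     (COMP_LEVEL_FULL_OPTIMIZATION)
// This is why TieredLevelsTest.checkLevel accepts limited_profile where
// full_profile was requested: for a simple method the policy may substitute
// level 2 for level 3.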
+ */ + +/* + * @test + * @bug 8024070 + * @summary Test that type speculation doesn't cause incorrect execution + * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TypeProfileLevel=222 TypeSpeculation + * + */ + +public class TypeSpeculation { + + interface I { + } + + static class A { + int m() { + return 1; + } + } + + static class B extends A implements I { + int m() { + return 2; + } + } + + static class C extends B { + int m() { + return 3; + } + } + + static int test1_invokevirtual(A a) { + return a.m(); + } + + static int test1_1(A a) { + return test1_invokevirtual(a); + } + + static boolean test1() { + A a = new A(); + B b = new B(); + C c = new C(); + + // pollute profile at test1_invokevirtual to make sure the + // compiler cannot rely on it + for (int i = 0; i < 5000; i++) { + test1_invokevirtual(a); + test1_invokevirtual(b); + test1_invokevirtual(c); + } + + // profiling + speculation should make test1_invokevirtual + // inline A.m() with a guard + for (int i = 0; i < 20000; i++) { + int res = test1_1(b); + if (res != b.m()) { + System.out.println("test1 failed with class B"); + return false; + } + } + // check that the guard works as expected by passing a + // different type + int res = test1_1(a); + if (res != a.m()) { + System.out.println("test1 failed with class A"); + return false; + } + return true; + } + + static int test2_invokevirtual(A a) { + return a.m(); + } + + static int test2_1(A a, boolean t) { + A aa; + if (t) { + aa = (B)a; + } else { + aa = a; + } + // if a of type B is passed to test2_1, the static type of aa + // here is no better than A but the profiled type is B so this + // should inline + return test2_invokevirtual(aa); + } + + static boolean test2() { + A a = new A(); + B b = new B(); + C c = new C(); + + // pollute profile at test2_invokevirtual to make sure the + // compiler cannot rely on it + for (int i = 0; i < 5000; i++) { + test2_invokevirtual(a); + test2_invokevirtual(b); + test2_invokevirtual(c); + } + + // profiling + speculation should make test2_invokevirtual + // inline A.m() with a guard + for (int i = 0; i < 20000; i++) { + int res = test2_1(b, (i % 2) == 0); + if (res != b.m()) { + System.out.println("test2 failed with class B"); + return false; + } + } + // check that the guard works as expected by passing a + // different type + int res = test2_1(a, false); + if (res != a.m()) { + System.out.println("test2 failed with class A"); + return false; + } + return true; + } + + static int test3_invokevirtual(A a) { + return a.m(); + } + + static void test3_2(A a) { + } + + static int test3_1(A a, int i) { + if (i == 0) { + return 0; + } + // If we come here and a is of type B but parameter profiling + // is polluted, both branches of the if below should have + // profiling that tell us and inlining of the virtual call + // should happen + if (i == 1) { + test3_2(a); + } else { + test3_2(a); + } + return test3_invokevirtual(a); + } + + static boolean test3() { + A a = new A(); + B b = new B(); + C c = new C(); + + // pollute profile at test3_invokevirtual and test3_1 to make + // sure the compiler cannot rely on it + for (int i = 0; i < 3000; i++) { + test3_invokevirtual(a); + test3_invokevirtual(b); + test3_invokevirtual(c); + test3_1(a, 0); + test3_1(b, 0); + } + + // profiling + speculation should make test3_invokevirtual + // inline A.m() with a guard + for (int i = 0; i < 20000; i++) { + int res = test3_1(b, (i % 2) + 1); + if (res != b.m()) { + System.out.println("test3 failed with class B"); + return false; 
+ } + } + // check that the guard works as expected by passing a + // different type + int res = test3_1(a, 1); + if (res != a.m()) { + System.out.println("test3 failed with class A"); + return false; + } + return true; + } + + // Mix 2 incompatible profiled types + static int test4_invokevirtual(A a) { + return a.m(); + } + + static void test4_2(A a) { + } + + static int test4_1(A a, boolean b) { + if (b) { + test4_2(a); + } else { + test4_2(a); + } + // shouldn't inline + return test4_invokevirtual(a); + } + + static boolean test4() { + A a = new A(); + B b = new B(); + C c = new C(); + + // pollute profile at test3_invokevirtual and test3_1 to make + // sure the compiler cannot rely on it + for (int i = 0; i < 3000; i++) { + test4_invokevirtual(a); + test4_invokevirtual(b); + test4_invokevirtual(c); + } + + for (int i = 0; i < 20000; i++) { + if ((i % 2) == 0) { + int res = test4_1(a, true); + if (res != a.m()) { + System.out.println("test4 failed with class A"); + return false; + } + } else { + int res = test4_1(b, false); + if (res != b.m()) { + System.out.println("test4 failed with class B"); + return false; + } + } + } + return true; + } + + // Mix one profiled type with an incompatible type + static int test5_invokevirtual(A a) { + return a.m(); + } + + static void test5_2(A a) { + } + + static int test5_1(A a, boolean b) { + if (b) { + test5_2(a); + } else { + A aa = (B)a; + } + // shouldn't inline + return test5_invokevirtual(a); + } + + static boolean test5() { + A a = new A(); + B b = new B(); + C c = new C(); + + // pollute profile at test3_invokevirtual and test3_1 to make + // sure the compiler cannot rely on it + for (int i = 0; i < 3000; i++) { + test5_invokevirtual(a); + test5_invokevirtual(b); + test5_invokevirtual(c); + } + + for (int i = 0; i < 20000; i++) { + if ((i % 2) == 0) { + int res = test5_1(a, true); + if (res != a.m()) { + System.out.println("test5 failed with class A"); + return false; + } + } else { + int res = test5_1(b, false); + if (res != b.m()) { + System.out.println("test5 failed with class B"); + return false; + } + } + } + return true; + } + + // Mix incompatible profiled types + static void test6_2(Object o) { + } + + static Object test6_1(Object o, boolean b) { + if (b) { + test6_2(o); + } else { + test6_2(o); + } + return o; + } + + static boolean test6() { + A a = new A(); + A[] aa = new A[10]; + + for (int i = 0; i < 20000; i++) { + if ((i % 2) == 0) { + test6_1(a, true); + } else { + test6_1(aa, false); + } + } + return true; + } + + // Mix a profiled type with an incompatible type + static void test7_2(Object o) { + } + + static Object test7_1(Object o, boolean b) { + if (b) { + test7_2(o); + } else { + Object oo = (A[])o; + } + return o; + } + + static boolean test7() { + A a = new A(); + A[] aa = new A[10]; + + for (int i = 0; i < 20000; i++) { + if ((i % 2) == 0) { + test7_1(a, true); + } else { + test7_1(aa, false); + } + } + return true; + } + + // Mix a profiled type with an interface + static void test8_2(Object o) { + } + + static I test8_1(Object o) { + test8_2(o); + return (I)o; + } + + static boolean test8() { + A a = new A(); + B b = new B(); + C c = new C(); + + for (int i = 0; i < 20000; i++) { + test8_1(b); + } + return true; + } + + // Mix a profiled type with a constant + static void test9_2(Object o) { + } + + static Object test9_1(Object o, boolean b) { + Object oo; + if (b) { + test9_2(o); + oo = o; + } else { + oo = "some string"; + } + return oo; + } + + static boolean test9() { + A a = new A(); + + for (int i = 0; i < 
20000; i++) { + if ((i % 2) == 0) { + test9_1(a, true); + } else { + test9_1(a, false); + } + } + return true; + } + + static public void main(String[] args) { + boolean success = true; + + success = test1() && success; + + success = test2() && success; + + success = test3() && success; + + success = test4() && success; + + success = test5() && success; + + success = test6() && success; + + success = test7() && success; + + success = test8() && success; + + success = test9() && success; + + if (success) { + System.out.println("TEST PASSED"); + } else { + throw new RuntimeException("TEST FAILED: erroneous bound check elimination"); + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/uncommontrap/UncommonTrapStackBang.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/uncommontrap/UncommonTrapStackBang.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,10908 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 8026775 + * @summary Uncommon trap blob did not bang all the stack shadow pages + * + * @run main/othervm -server -XX:+IgnoreUnrecognizedVMOptions -XX:+TieredCompilation UncommonTrapStackBang + * @run main/othervm -server -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation UncommonTrapStackBang + * + * + * Note: This test does not reproduce the problem with absolute + * certainty. Empirically the bug reproduces on Windows some 80+% of + * the time. Setting everything up to fail in 100% of the cases turns + * out to be tricky at best. + * + * + * The goal of this test is to set up the following stack: + * + * doIt() + * eatStack() + * ... + * eatStack() + * run() + * + * + * When doIt() gets executed it will hit an uncommon trap and expand + * into a huge interpreter frame. The doIt method then calls the + * compiled version of StringBuilder.() which does a single + * stack bang StackShadowPages down. + * + * The uncommon trap blob is supposed to prepare for the interpreter + * by pre-touching stack pages. The bug was that it didn't correctly + * pre-touch all the stack shadow pages, leaving a "hole" on the stack + * which raises an exception on Windows when the stack bang in + * StringBuilder is performed. 
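+ *
+ * A sketch of the idea, added here for clarity and not part of the
+ * original test comment: a correct trap blob has to write to every
+ * page of the shadow region, lowest page included, roughly
+ *
+ *    for (int page = 0; page < SHADOW_PAGES; page++) {
+ *        touch(sp - page * PAGE_SIZE); // one write per page, so no hole
+ *    }
+ *
+ * where touch(), sp, PAGE_SIZE and SHADOW_PAGES are placeholder names
+ * for the platform-specific mechanism, not HotSpot symbols.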
+ */
+public class UncommonTrapStackBang extends Thread {
+    class Foo { }
+
+    public static void main(String[] args) throws Exception {
+        doWarmup();
+
+        // Fork off a new thread to increase odds of the stack being unmapped
+        UncommonTrapStackBang htsb = new UncommonTrapStackBang();
+        htsb.run();
+        htsb.join();
+    }
+
+    // Make sure:
+    //
+    // a) StringBuilder.<init>()V is compiled
+    // b) doIt() is compiled with an uncommon trap in the unlikely path
+    public static void doWarmup() {
+        for (int i = 0; i < 100_000; i++) {
+            new StringBuilder();
+
+            doIt(false);
+        }
+    }
+
+    public void run() {
+        eatStack(200);
+    }
+
+    // Consume some stack to get down to some unused/unmapped pages,
+    // then call doIt and provoke the uncommon trap/deoptimization
+    private void eatStack(int n) {
+        if (n <= 0) {
+            doIt(true);
+            return;
+        }
+
+        eatStack(n - 1);
+    }
+
+    public static void doIt(boolean unlikely) {
+ int i0; + int i1; + int i2; + int i3; + int i4; + int i5; + int i6; + int i7; + int i8; + int i9; + int i10; + int i11; + int i12; + int i13; + int i14; + int i15; + int i16; + int i17; + int i18; + int i19; + int i20; + int i21; + int i22; + int i23; + int i24; + int i25; + int i26; + int i27; + int i28; + int i29; + int i30; + int i31; + int i32; + int i33; + int i34; + int i35; + int i36; + int i37; + int i38; + int i39; + int i40; + int i41; + int i42; + int i43; + int i44; + int i45; + int i46; + int i47; + int i48; + int i49; + int i50; + int i51; + int i52; + int i53; + int i54; + int i55; + int i56; + int i57; + int i58; + int i59; + int i60; + int i61; + int i62; + int i63; + int i64; + int i65; + int i66; + int i67; + int i68; + int i69; + int i70; + int i71; + int i72; + int i73; + int i74; + int i75; + int i76; + int i77; + int i78; + int i79; + int i80; + int i81; + int i82; + int i83; + int i84; + int i85; + int i86; + int i87; + int i88; + int i89; + int i90; + int i91; + int i92; + int i93; + int i94; + int i95; + int i96; + int i97; + int i98; + int i99; + int i100; + int i101; + int i102; + int i103; + int i104; + int i105; + int i106; + int i107; + int i108; + int i109; + int i110; + int i111; + int i112; + int i113; + int i114; + int i115; + int i116; + int i117; + int i118; + int i119; + int i120; + int i121; + int i122; + int i123; + int i124; + int i125; + int i126; + int i127; + int i128; + int i129; + int i130; + int i131; + int i132; + int i133; + int i134; + int i135; + int i136; + int i137; + int i138; + int i139; + int i140; + int i141; + int i142; + int i143; + int i144; + int i145; + int i146; + int i147; + int i148; + int i149; + int i150; + int i151; + int i152; + int i153; + int i154; + int i155; + int i156; + int i157; + int i158; + int i159; + int i160; + int i161; + int i162; + int i163; + int i164; + int i165; + int i166; + int i167; + int i168; + int i169; + int i170; + int i171; + int i172; + int i173; + int i174; + int i175; + int i176; + int i177; + int i178; + int i179; + int i180; + int i181; + int i182; + int i183; + int i184; + int i185; + int i186; + int i187; + int i188; + int i189; + int i190; + int i191; + int i192; + int i193; + int i194; + int i195; + int i196; + int i197; + int i198; + int i199; + int i200; + int i201; + int i202; + int i203; + int i204; + int i205; + int i206; + int i207; + int i208; + int i209; + int i210; + int i211; + int i212; + int i213; + int i214; + int i215; + int i216; + int i217; + int i218; + int i219; + int i220; + int i221; + int i222; + int i223; + int i224; + int i225; + int i226; + int i227; + int i228; + int i229; + int
i230; + int i231; + int i232; + int i233; + int i234; + int i235; + int i236; + int i237; + int i238; + int i239; + int i240; + int i241; + int i242; + int i243; + int i244; + int i245; + int i246; + int i247; + int i248; + int i249; + int i250; + int i251; + int i252; + int i253; + int i254; + int i255; + int i256; + int i257; + int i258; + int i259; + int i260; + int i261; + int i262; + int i263; + int i264; + int i265; + int i266; + int i267; + int i268; + int i269; + int i270; + int i271; + int i272; + int i273; + int i274; + int i275; + int i276; + int i277; + int i278; + int i279; + int i280; + int i281; + int i282; + int i283; + int i284; + int i285; + int i286; + int i287; + int i288; + int i289; + int i290; + int i291; + int i292; + int i293; + int i294; + int i295; + int i296; + int i297; + int i298; + int i299; + int i300; + int i301; + int i302; + int i303; + int i304; + int i305; + int i306; + int i307; + int i308; + int i309; + int i310; + int i311; + int i312; + int i313; + int i314; + int i315; + int i316; + int i317; + int i318; + int i319; + int i320; + int i321; + int i322; + int i323; + int i324; + int i325; + int i326; + int i327; + int i328; + int i329; + int i330; + int i331; + int i332; + int i333; + int i334; + int i335; + int i336; + int i337; + int i338; + int i339; + int i340; + int i341; + int i342; + int i343; + int i344; + int i345; + int i346; + int i347; + int i348; + int i349; + int i350; + int i351; + int i352; + int i353; + int i354; + int i355; + int i356; + int i357; + int i358; + int i359; + int i360; + int i361; + int i362; + int i363; + int i364; + int i365; + int i366; + int i367; + int i368; + int i369; + int i370; + int i371; + int i372; + int i373; + int i374; + int i375; + int i376; + int i377; + int i378; + int i379; + int i380; + int i381; + int i382; + int i383; + int i384; + int i385; + int i386; + int i387; + int i388; + int i389; + int i390; + int i391; + int i392; + int i393; + int i394; + int i395; + int i396; + int i397; + int i398; + int i399; + int i400; + int i401; + int i402; + int i403; + int i404; + int i405; + int i406; + int i407; + int i408; + int i409; + int i410; + int i411; + int i412; + int i413; + int i414; + int i415; + int i416; + int i417; + int i418; + int i419; + int i420; + int i421; + int i422; + int i423; + int i424; + int i425; + int i426; + int i427; + int i428; + int i429; + int i430; + int i431; + int i432; + int i433; + int i434; + int i435; + int i436; + int i437; + int i438; + int i439; + int i440; + int i441; + int i442; + int i443; + int i444; + int i445; + int i446; + int i447; + int i448; + int i449; + int i450; + int i451; + int i452; + int i453; + int i454; + int i455; + int i456; + int i457; + int i458; + int i459; + int i460; + int i461; + int i462; + int i463; + int i464; + int i465; + int i466; + int i467; + int i468; + int i469; + int i470; + int i471; + int i472; + int i473; + int i474; + int i475; + int i476; + int i477; + int i478; + int i479; + int i480; + int i481; + int i482; + int i483; + int i484; + int i485; + int i486; + int i487; + int i488; + int i489; + int i490; + int i491; + int i492; + int i493; + int i494; + int i495; + int i496; + int i497; + int i498; + int i499; + int i500; + int i501; + int i502; + int i503; + int i504; + int i505; + int i506; + int i507; + int i508; + int i509; + int i510; + int i511; + int i512; + int i513; + int i514; + int i515; + int i516; + int i517; + int i518; + int i519; + int i520; + int i521; + int i522; + int i523; + int i524; + int i525; + int 
i526; + int i527; + int i528; + int i529; + int i530; + int i531; + int i532; + int i533; + int i534; + int i535; + int i536; + int i537; + int i538; + int i539; + int i540; + int i541; + int i542; + int i543; + int i544; + int i545; + int i546; + int i547; + int i548; + int i549; + int i550; + int i551; + int i552; + int i553; + int i554; + int i555; + int i556; + int i557; + int i558; + int i559; + int i560; + int i561; + int i562; + int i563; + int i564; + int i565; + int i566; + int i567; + int i568; + int i569; + int i570; + int i571; + int i572; + int i573; + int i574; + int i575; + int i576; + int i577; + int i578; + int i579; + int i580; + int i581; + int i582; + int i583; + int i584; + int i585; + int i586; + int i587; + int i588; + int i589; + int i590; + int i591; + int i592; + int i593; + int i594; + int i595; + int i596; + int i597; + int i598; + int i599; + int i600; + int i601; + int i602; + int i603; + int i604; + int i605; + int i606; + int i607; + int i608; + int i609; + int i610; + int i611; + int i612; + int i613; + int i614; + int i615; + int i616; + int i617; + int i618; + int i619; + int i620; + int i621; + int i622; + int i623; + int i624; + int i625; + int i626; + int i627; + int i628; + int i629; + int i630; + int i631; + int i632; + int i633; + int i634; + int i635; + int i636; + int i637; + int i638; + int i639; + int i640; + int i641; + int i642; + int i643; + int i644; + int i645; + int i646; + int i647; + int i648; + int i649; + int i650; + int i651; + int i652; + int i653; + int i654; + int i655; + int i656; + int i657; + int i658; + int i659; + int i660; + int i661; + int i662; + int i663; + int i664; + int i665; + int i666; + int i667; + int i668; + int i669; + int i670; + int i671; + int i672; + int i673; + int i674; + int i675; + int i676; + int i677; + int i678; + int i679; + int i680; + int i681; + int i682; + int i683; + int i684; + int i685; + int i686; + int i687; + int i688; + int i689; + int i690; + int i691; + int i692; + int i693; + int i694; + int i695; + int i696; + int i697; + int i698; + int i699; + int i700; + int i701; + int i702; + int i703; + int i704; + int i705; + int i706; + int i707; + int i708; + int i709; + int i710; + int i711; + int i712; + int i713; + int i714; + int i715; + int i716; + int i717; + int i718; + int i719; + int i720; + int i721; + int i722; + int i723; + int i724; + int i725; + int i726; + int i727; + int i728; + int i729; + int i730; + int i731; + int i732; + int i733; + int i734; + int i735; + int i736; + int i737; + int i738; + int i739; + int i740; + int i741; + int i742; + int i743; + int i744; + int i745; + int i746; + int i747; + int i748; + int i749; + int i750; + int i751; + int i752; + int i753; + int i754; + int i755; + int i756; + int i757; + int i758; + int i759; + int i760; + int i761; + int i762; + int i763; + int i764; + int i765; + int i766; + int i767; + int i768; + int i769; + int i770; + int i771; + int i772; + int i773; + int i774; + int i775; + int i776; + int i777; + int i778; + int i779; + int i780; + int i781; + int i782; + int i783; + int i784; + int i785; + int i786; + int i787; + int i788; + int i789; + int i790; + int i791; + int i792; + int i793; + int i794; + int i795; + int i796; + int i797; + int i798; + int i799; + int i800; + int i801; + int i802; + int i803; + int i804; + int i805; + int i806; + int i807; + int i808; + int i809; + int i810; + int i811; + int i812; + int i813; + int i814; + int i815; + int i816; + int i817; + int i818; + int i819; + int i820; + int i821; + int 
i822; + int i823; + int i824; + int i825; + int i826; + int i827; + int i828; + int i829; + int i830; + int i831; + int i832; + int i833; + int i834; + int i835; + int i836; + int i837; + int i838; + int i839; + int i840; + int i841; + int i842; + int i843; + int i844; + int i845; + int i846; + int i847; + int i848; + int i849; + int i850; + int i851; + int i852; + int i853; + int i854; + int i855; + int i856; + int i857; + int i858; + int i859; + int i860; + int i861; + int i862; + int i863; + int i864; + int i865; + int i866; + int i867; + int i868; + int i869; + int i870; + int i871; + int i872; + int i873; + int i874; + int i875; + int i876; + int i877; + int i878; + int i879; + int i880; + int i881; + int i882; + int i883; + int i884; + int i885; + int i886; + int i887; + int i888; + int i889; + int i890; + int i891; + int i892; + int i893; + int i894; + int i895; + int i896; + int i897; + int i898; + int i899; + int i900; + int i901; + int i902; + int i903; + int i904; + int i905; + int i906; + int i907; + int i908; + int i909; + int i910; + int i911; + int i912; + int i913; + int i914; + int i915; + int i916; + int i917; + int i918; + int i919; + int i920; + int i921; + int i922; + int i923; + int i924; + int i925; + int i926; + int i927; + int i928; + int i929; + int i930; + int i931; + int i932; + int i933; + int i934; + int i935; + int i936; + int i937; + int i938; + int i939; + int i940; + int i941; + int i942; + int i943; + int i944; + int i945; + int i946; + int i947; + int i948; + int i949; + int i950; + int i951; + int i952; + int i953; + int i954; + int i955; + int i956; + int i957; + int i958; + int i959; + int i960; + int i961; + int i962; + int i963; + int i964; + int i965; + int i966; + int i967; + int i968; + int i969; + int i970; + int i971; + int i972; + int i973; + int i974; + int i975; + int i976; + int i977; + int i978; + int i979; + int i980; + int i981; + int i982; + int i983; + int i984; + int i985; + int i986; + int i987; + int i988; + int i989; + int i990; + int i991; + int i992; + int i993; + int i994; + int i995; + int i996; + int i997; + int i998; + int i999; + int i1000; + int i1001; + int i1002; + int i1003; + int i1004; + int i1005; + int i1006; + int i1007; + int i1008; + int i1009; + int i1010; + int i1011; + int i1012; + int i1013; + int i1014; + int i1015; + int i1016; + int i1017; + int i1018; + int i1019; + int i1020; + int i1021; + int i1022; + int i1023; + int i1024; + int i1025; + int i1026; + int i1027; + int i1028; + int i1029; + int i1030; + int i1031; + int i1032; + int i1033; + int i1034; + int i1035; + int i1036; + int i1037; + int i1038; + int i1039; + int i1040; + int i1041; + int i1042; + int i1043; + int i1044; + int i1045; + int i1046; + int i1047; + int i1048; + int i1049; + int i1050; + int i1051; + int i1052; + int i1053; + int i1054; + int i1055; + int i1056; + int i1057; + int i1058; + int i1059; + int i1060; + int i1061; + int i1062; + int i1063; + int i1064; + int i1065; + int i1066; + int i1067; + int i1068; + int i1069; + int i1070; + int i1071; + int i1072; + int i1073; + int i1074; + int i1075; + int i1076; + int i1077; + int i1078; + int i1079; + int i1080; + int i1081; + int i1082; + int i1083; + int i1084; + int i1085; + int i1086; + int i1087; + int i1088; + int i1089; + int i1090; + int i1091; + int i1092; + int i1093; + int i1094; + int i1095; + int i1096; + int i1097; + int i1098; + int i1099; + int i1100; + int i1101; + int i1102; + int i1103; + int i1104; + int i1105; + int i1106; + int i1107; + int i1108; + int 
i1109; + int i1110; + int i1111; + int i1112; + int i1113; + int i1114; + int i1115; + int i1116; + int i1117; + int i1118; + int i1119; + int i1120; + int i1121; + int i1122; + int i1123; + int i1124; + int i1125; + int i1126; + int i1127; + int i1128; + int i1129; + int i1130; + int i1131; + int i1132; + int i1133; + int i1134; + int i1135; + int i1136; + int i1137; + int i1138; + int i1139; + int i1140; + int i1141; + int i1142; + int i1143; + int i1144; + int i1145; + int i1146; + int i1147; + int i1148; + int i1149; + int i1150; + int i1151; + int i1152; + int i1153; + int i1154; + int i1155; + int i1156; + int i1157; + int i1158; + int i1159; + int i1160; + int i1161; + int i1162; + int i1163; + int i1164; + int i1165; + int i1166; + int i1167; + int i1168; + int i1169; + int i1170; + int i1171; + int i1172; + int i1173; + int i1174; + int i1175; + int i1176; + int i1177; + int i1178; + int i1179; + int i1180; + int i1181; + int i1182; + int i1183; + int i1184; + int i1185; + int i1186; + int i1187; + int i1188; + int i1189; + int i1190; + int i1191; + int i1192; + int i1193; + int i1194; + int i1195; + int i1196; + int i1197; + int i1198; + int i1199; + int i1200; + int i1201; + int i1202; + int i1203; + int i1204; + int i1205; + int i1206; + int i1207; + int i1208; + int i1209; + int i1210; + int i1211; + int i1212; + int i1213; + int i1214; + int i1215; + int i1216; + int i1217; + int i1218; + int i1219; + int i1220; + int i1221; + int i1222; + int i1223; + int i1224; + int i1225; + int i1226; + int i1227; + int i1228; + int i1229; + int i1230; + int i1231; + int i1232; + int i1233; + int i1234; + int i1235; + int i1236; + int i1237; + int i1238; + int i1239; + int i1240; + int i1241; + int i1242; + int i1243; + int i1244; + int i1245; + int i1246; + int i1247; + int i1248; + int i1249; + int i1250; + int i1251; + int i1252; + int i1253; + int i1254; + int i1255; + int i1256; + int i1257; + int i1258; + int i1259; + int i1260; + int i1261; + int i1262; + int i1263; + int i1264; + int i1265; + int i1266; + int i1267; + int i1268; + int i1269; + int i1270; + int i1271; + int i1272; + int i1273; + int i1274; + int i1275; + int i1276; + int i1277; + int i1278; + int i1279; + int i1280; + int i1281; + int i1282; + int i1283; + int i1284; + int i1285; + int i1286; + int i1287; + int i1288; + int i1289; + int i1290; + int i1291; + int i1292; + int i1293; + int i1294; + int i1295; + int i1296; + int i1297; + int i1298; + int i1299; + int i1300; + int i1301; + int i1302; + int i1303; + int i1304; + int i1305; + int i1306; + int i1307; + int i1308; + int i1309; + int i1310; + int i1311; + int i1312; + int i1313; + int i1314; + int i1315; + int i1316; + int i1317; + int i1318; + int i1319; + int i1320; + int i1321; + int i1322; + int i1323; + int i1324; + int i1325; + int i1326; + int i1327; + int i1328; + int i1329; + int i1330; + int i1331; + int i1332; + int i1333; + int i1334; + int i1335; + int i1336; + int i1337; + int i1338; + int i1339; + int i1340; + int i1341; + int i1342; + int i1343; + int i1344; + int i1345; + int i1346; + int i1347; + int i1348; + int i1349; + int i1350; + int i1351; + int i1352; + int i1353; + int i1354; + int i1355; + int i1356; + int i1357; + int i1358; + int i1359; + int i1360; + int i1361; + int i1362; + int i1363; + int i1364; + int i1365; + int i1366; + int i1367; + int i1368; + int i1369; + int i1370; + int i1371; + int i1372; + int i1373; + int i1374; + int i1375; + int i1376; + int i1377; + int i1378; + int i1379; + int i1380; + int i1381; + int 
i1382; + int i1383; + int i1384; + int i1385; + int i1386; + int i1387; + int i1388; + int i1389; + int i1390; + int i1391; + int i1392; + int i1393; + int i1394; + int i1395; + int i1396; + int i1397; + int i1398; + int i1399; + int i1400; + int i1401; + int i1402; + int i1403; + int i1404; + int i1405; + int i1406; + int i1407; + int i1408; + int i1409; + int i1410; + int i1411; + int i1412; + int i1413; + int i1414; + int i1415; + int i1416; + int i1417; + int i1418; + int i1419; + int i1420; + int i1421; + int i1422; + int i1423; + int i1424; + int i1425; + int i1426; + int i1427; + int i1428; + int i1429; + int i1430; + int i1431; + int i1432; + int i1433; + int i1434; + int i1435; + int i1436; + int i1437; + int i1438; + int i1439; + int i1440; + int i1441; + int i1442; + int i1443; + int i1444; + int i1445; + int i1446; + int i1447; + int i1448; + int i1449; + int i1450; + int i1451; + int i1452; + int i1453; + int i1454; + int i1455; + int i1456; + int i1457; + int i1458; + int i1459; + int i1460; + int i1461; + int i1462; + int i1463; + int i1464; + int i1465; + int i1466; + int i1467; + int i1468; + int i1469; + int i1470; + int i1471; + int i1472; + int i1473; + int i1474; + int i1475; + int i1476; + int i1477; + int i1478; + int i1479; + int i1480; + int i1481; + int i1482; + int i1483; + int i1484; + int i1485; + int i1486; + int i1487; + int i1488; + int i1489; + int i1490; + int i1491; + int i1492; + int i1493; + int i1494; + int i1495; + int i1496; + int i1497; + int i1498; + int i1499; + int i1500; + int i1501; + int i1502; + int i1503; + int i1504; + int i1505; + int i1506; + int i1507; + int i1508; + int i1509; + int i1510; + int i1511; + int i1512; + int i1513; + int i1514; + int i1515; + int i1516; + int i1517; + int i1518; + int i1519; + int i1520; + int i1521; + int i1522; + int i1523; + int i1524; + int i1525; + int i1526; + int i1527; + int i1528; + int i1529; + int i1530; + int i1531; + int i1532; + int i1533; + int i1534; + int i1535; + int i1536; + int i1537; + int i1538; + int i1539; + int i1540; + int i1541; + int i1542; + int i1543; + int i1544; + int i1545; + int i1546; + int i1547; + int i1548; + int i1549; + int i1550; + int i1551; + int i1552; + int i1553; + int i1554; + int i1555; + int i1556; + int i1557; + int i1558; + int i1559; + int i1560; + int i1561; + int i1562; + int i1563; + int i1564; + int i1565; + int i1566; + int i1567; + int i1568; + int i1569; + int i1570; + int i1571; + int i1572; + int i1573; + int i1574; + int i1575; + int i1576; + int i1577; + int i1578; + int i1579; + int i1580; + int i1581; + int i1582; + int i1583; + int i1584; + int i1585; + int i1586; + int i1587; + int i1588; + int i1589; + int i1590; + int i1591; + int i1592; + int i1593; + int i1594; + int i1595; + int i1596; + int i1597; + int i1598; + int i1599; + int i1600; + int i1601; + int i1602; + int i1603; + int i1604; + int i1605; + int i1606; + int i1607; + int i1608; + int i1609; + int i1610; + int i1611; + int i1612; + int i1613; + int i1614; + int i1615; + int i1616; + int i1617; + int i1618; + int i1619; + int i1620; + int i1621; + int i1622; + int i1623; + int i1624; + int i1625; + int i1626; + int i1627; + int i1628; + int i1629; + int i1630; + int i1631; + int i1632; + int i1633; + int i1634; + int i1635; + int i1636; + int i1637; + int i1638; + int i1639; + int i1640; + int i1641; + int i1642; + int i1643; + int i1644; + int i1645; + int i1646; + int i1647; + int i1648; + int i1649; + int i1650; + int i1651; + int i1652; + int i1653; + int i1654; + int 
i1655; + int i1656; + int i1657; + int i1658; + int i1659; + int i1660; + int i1661; + int i1662; + int i1663; + int i1664; + int i1665; + int i1666; + int i1667; + int i1668; + int i1669; + int i1670; + int i1671; + int i1672; + int i1673; + int i1674; + int i1675; + int i1676; + int i1677; + int i1678; + int i1679; + int i1680; + int i1681; + int i1682; + int i1683; + int i1684; + int i1685; + int i1686; + int i1687; + int i1688; + int i1689; + int i1690; + int i1691; + int i1692; + int i1693; + int i1694; + int i1695; + int i1696; + int i1697; + int i1698; + int i1699; + int i1700; + int i1701; + int i1702; + int i1703; + int i1704; + int i1705; + int i1706; + int i1707; + int i1708; + int i1709; + int i1710; + int i1711; + int i1712; + int i1713; + int i1714; + int i1715; + int i1716; + int i1717; + int i1718; + int i1719; + int i1720; + int i1721; + int i1722; + int i1723; + int i1724; + int i1725; + int i1726; + int i1727; + int i1728; + int i1729; + int i1730; + int i1731; + int i1732; + int i1733; + int i1734; + int i1735; + int i1736; + int i1737; + int i1738; + int i1739; + int i1740; + int i1741; + int i1742; + int i1743; + int i1744; + int i1745; + int i1746; + int i1747; + int i1748; + int i1749; + int i1750; + int i1751; + int i1752; + int i1753; + int i1754; + int i1755; + int i1756; + int i1757; + int i1758; + int i1759; + int i1760; + int i1761; + int i1762; + int i1763; + int i1764; + int i1765; + int i1766; + int i1767; + int i1768; + int i1769; + int i1770; + int i1771; + int i1772; + int i1773; + int i1774; + int i1775; + int i1776; + int i1777; + int i1778; + int i1779; + int i1780; + int i1781; + int i1782; + int i1783; + int i1784; + int i1785; + int i1786; + int i1787; + int i1788; + int i1789; + int i1790; + int i1791; + int i1792; + int i1793; + int i1794; + int i1795; + int i1796; + int i1797; + int i1798; + int i1799; + int i1800; + int i1801; + int i1802; + int i1803; + int i1804; + int i1805; + int i1806; + int i1807; + int i1808; + int i1809; + int i1810; + int i1811; + int i1812; + int i1813; + int i1814; + int i1815; + int i1816; + int i1817; + int i1818; + int i1819; + int i1820; + int i1821; + int i1822; + int i1823; + int i1824; + int i1825; + int i1826; + int i1827; + int i1828; + int i1829; + int i1830; + int i1831; + int i1832; + int i1833; + int i1834; + int i1835; + int i1836; + int i1837; + int i1838; + int i1839; + int i1840; + int i1841; + int i1842; + int i1843; + int i1844; + int i1845; + int i1846; + int i1847; + int i1848; + int i1849; + int i1850; + int i1851; + int i1852; + int i1853; + int i1854; + int i1855; + int i1856; + int i1857; + int i1858; + int i1859; + int i1860; + int i1861; + int i1862; + int i1863; + int i1864; + int i1865; + int i1866; + int i1867; + int i1868; + int i1869; + int i1870; + int i1871; + int i1872; + int i1873; + int i1874; + int i1875; + int i1876; + int i1877; + int i1878; + int i1879; + int i1880; + int i1881; + int i1882; + int i1883; + int i1884; + int i1885; + int i1886; + int i1887; + int i1888; + int i1889; + int i1890; + int i1891; + int i1892; + int i1893; + int i1894; + int i1895; + int i1896; + int i1897; + int i1898; + int i1899; + int i1900; + int i1901; + int i1902; + int i1903; + int i1904; + int i1905; + int i1906; + int i1907; + int i1908; + int i1909; + int i1910; + int i1911; + int i1912; + int i1913; + int i1914; + int i1915; + int i1916; + int i1917; + int i1918; + int i1919; + int i1920; + int i1921; + int i1922; + int i1923; + int i1924; + int i1925; + int i1926; + int i1927; + int 
i1928; + int i1929; + int i1930; + int i1931; + int i1932; + int i1933; + int i1934; + int i1935; + int i1936; + int i1937; + int i1938; + int i1939; + int i1940; + int i1941; + int i1942; + int i1943; + int i1944; + int i1945; + int i1946; + int i1947; + int i1948; + int i1949; + int i1950; + int i1951; + int i1952; + int i1953; + int i1954; + int i1955; + int i1956; + int i1957; + int i1958; + int i1959; + int i1960; + int i1961; + int i1962; + int i1963; + int i1964; + int i1965; + int i1966; + int i1967; + int i1968; + int i1969; + int i1970; + int i1971; + int i1972; + int i1973; + int i1974; + int i1975; + int i1976; + int i1977; + int i1978; + int i1979; + int i1980; + int i1981; + int i1982; + int i1983; + int i1984; + int i1985; + int i1986; + int i1987; + int i1988; + int i1989; + int i1990; + int i1991; + int i1992; + int i1993; + int i1994; + int i1995; + int i1996; + int i1997; + int i1998; + int i1999; + int i2000; + int i2001; + int i2002; + int i2003; + int i2004; + int i2005; + int i2006; + int i2007; + int i2008; + int i2009; + int i2010; + int i2011; + int i2012; + int i2013; + int i2014; + int i2015; + int i2016; + int i2017; + int i2018; + int i2019; + int i2020; + int i2021; + int i2022; + int i2023; + int i2024; + int i2025; + int i2026; + int i2027; + int i2028; + int i2029; + int i2030; + int i2031; + int i2032; + int i2033; + int i2034; + int i2035; + int i2036; + int i2037; + int i2038; + int i2039; + int i2040; + int i2041; + int i2042; + int i2043; + int i2044; + int i2045; + int i2046; + int i2047; + int i2048; + int i2049; + int i2050; + int i2051; + int i2052; + int i2053; + int i2054; + int i2055; + int i2056; + int i2057; + int i2058; + int i2059; + int i2060; + int i2061; + int i2062; + int i2063; + int i2064; + int i2065; + int i2066; + int i2067; + int i2068; + int i2069; + int i2070; + int i2071; + int i2072; + int i2073; + int i2074; + int i2075; + int i2076; + int i2077; + int i2078; + int i2079; + int i2080; + int i2081; + int i2082; + int i2083; + int i2084; + int i2085; + int i2086; + int i2087; + int i2088; + int i2089; + int i2090; + int i2091; + int i2092; + int i2093; + int i2094; + int i2095; + int i2096; + int i2097; + int i2098; + int i2099; + int i2100; + int i2101; + int i2102; + int i2103; + int i2104; + int i2105; + int i2106; + int i2107; + int i2108; + int i2109; + int i2110; + int i2111; + int i2112; + int i2113; + int i2114; + int i2115; + int i2116; + int i2117; + int i2118; + int i2119; + int i2120; + int i2121; + int i2122; + int i2123; + int i2124; + int i2125; + int i2126; + int i2127; + int i2128; + int i2129; + int i2130; + int i2131; + int i2132; + int i2133; + int i2134; + int i2135; + int i2136; + int i2137; + int i2138; + int i2139; + int i2140; + int i2141; + int i2142; + int i2143; + int i2144; + int i2145; + int i2146; + int i2147; + int i2148; + int i2149; + int i2150; + int i2151; + int i2152; + int i2153; + int i2154; + int i2155; + int i2156; + int i2157; + int i2158; + int i2159; + int i2160; + int i2161; + int i2162; + int i2163; + int i2164; + int i2165; + int i2166; + int i2167; + int i2168; + int i2169; + int i2170; + int i2171; + int i2172; + int i2173; + int i2174; + int i2175; + int i2176; + int i2177; + int i2178; + int i2179; + int i2180; + int i2181; + int i2182; + int i2183; + int i2184; + int i2185; + int i2186; + int i2187; + int i2188; + int i2189; + int i2190; + int i2191; + int i2192; + int i2193; + int i2194; + int i2195; + int i2196; + int i2197; + int i2198; + int i2199; + int i2200; + int 
i2201; + int i2202; + int i2203; + int i2204; + int i2205; + int i2206; + int i2207; + int i2208; + int i2209; + int i2210; + int i2211; + int i2212; + int i2213; + int i2214; + int i2215; + int i2216; + int i2217; + int i2218; + int i2219; + int i2220; + int i2221; + int i2222; + int i2223; + int i2224; + int i2225; + int i2226; + int i2227; + int i2228; + int i2229; + int i2230; + int i2231; + int i2232; + int i2233; + int i2234; + int i2235; + int i2236; + int i2237; + int i2238; + int i2239; + int i2240; + int i2241; + int i2242; + int i2243; + int i2244; + int i2245; + int i2246; + int i2247; + int i2248; + int i2249; + int i2250; + int i2251; + int i2252; + int i2253; + int i2254; + int i2255; + int i2256; + int i2257; + int i2258; + int i2259; + int i2260; + int i2261; + int i2262; + int i2263; + int i2264; + int i2265; + int i2266; + int i2267; + int i2268; + int i2269; + int i2270; + int i2271; + int i2272; + int i2273; + int i2274; + int i2275; + int i2276; + int i2277; + int i2278; + int i2279; + int i2280; + int i2281; + int i2282; + int i2283; + int i2284; + int i2285; + int i2286; + int i2287; + int i2288; + int i2289; + int i2290; + int i2291; + int i2292; + int i2293; + int i2294; + int i2295; + int i2296; + int i2297; + int i2298; + int i2299; + int i2300; + int i2301; + int i2302; + int i2303; + int i2304; + int i2305; + int i2306; + int i2307; + int i2308; + int i2309; + int i2310; + int i2311; + int i2312; + int i2313; + int i2314; + int i2315; + int i2316; + int i2317; + int i2318; + int i2319; + int i2320; + int i2321; + int i2322; + int i2323; + int i2324; + int i2325; + int i2326; + int i2327; + int i2328; + int i2329; + int i2330; + int i2331; + int i2332; + int i2333; + int i2334; + int i2335; + int i2336; + int i2337; + int i2338; + int i2339; + int i2340; + int i2341; + int i2342; + int i2343; + int i2344; + int i2345; + int i2346; + int i2347; + int i2348; + int i2349; + int i2350; + int i2351; + int i2352; + int i2353; + int i2354; + int i2355; + int i2356; + int i2357; + int i2358; + int i2359; + int i2360; + int i2361; + int i2362; + int i2363; + int i2364; + int i2365; + int i2366; + int i2367; + int i2368; + int i2369; + int i2370; + int i2371; + int i2372; + int i2373; + int i2374; + int i2375; + int i2376; + int i2377; + int i2378; + int i2379; + int i2380; + int i2381; + int i2382; + int i2383; + int i2384; + int i2385; + int i2386; + int i2387; + int i2388; + int i2389; + int i2390; + int i2391; + int i2392; + int i2393; + int i2394; + int i2395; + int i2396; + int i2397; + int i2398; + int i2399; + int i2400; + int i2401; + int i2402; + int i2403; + int i2404; + int i2405; + int i2406; + int i2407; + int i2408; + int i2409; + int i2410; + int i2411; + int i2412; + int i2413; + int i2414; + int i2415; + int i2416; + int i2417; + int i2418; + int i2419; + int i2420; + int i2421; + int i2422; + int i2423; + int i2424; + int i2425; + int i2426; + int i2427; + int i2428; + int i2429; + int i2430; + int i2431; + int i2432; + int i2433; + int i2434; + int i2435; + int i2436; + int i2437; + int i2438; + int i2439; + int i2440; + int i2441; + int i2442; + int i2443; + int i2444; + int i2445; + int i2446; + int i2447; + int i2448; + int i2449; + int i2450; + int i2451; + int i2452; + int i2453; + int i2454; + int i2455; + int i2456; + int i2457; + int i2458; + int i2459; + int i2460; + int i2461; + int i2462; + int i2463; + int i2464; + int i2465; + int i2466; + int i2467; + int i2468; + int i2469; + int i2470; + int i2471; + int i2472; + int i2473; + int 
i2474; + int i2475; + int i2476; + int i2477; + int i2478; + int i2479; + int i2480; + int i2481; + int i2482; + int i2483; + int i2484; + int i2485; + int i2486; + int i2487; + int i2488; + int i2489; + int i2490; + int i2491; + int i2492; + int i2493; + int i2494; + int i2495; + int i2496; + int i2497; + int i2498; + int i2499; + int i2500; + int i2501; + int i2502; + int i2503; + int i2504; + int i2505; + int i2506; + int i2507; + int i2508; + int i2509; + int i2510; + int i2511; + int i2512; + int i2513; + int i2514; + int i2515; + int i2516; + int i2517; + int i2518; + int i2519; + int i2520; + int i2521; + int i2522; + int i2523; + int i2524; + int i2525; + int i2526; + int i2527; + int i2528; + int i2529; + int i2530; + int i2531; + int i2532; + int i2533; + int i2534; + int i2535; + int i2536; + int i2537; + int i2538; + int i2539; + int i2540; + int i2541; + int i2542; + int i2543; + int i2544; + int i2545; + int i2546; + int i2547; + int i2548; + int i2549; + int i2550; + int i2551; + int i2552; + int i2553; + int i2554; + int i2555; + int i2556; + int i2557; + int i2558; + int i2559; + int i2560; + int i2561; + int i2562; + int i2563; + int i2564; + int i2565; + int i2566; + int i2567; + int i2568; + int i2569; + int i2570; + int i2571; + int i2572; + int i2573; + int i2574; + int i2575; + int i2576; + int i2577; + int i2578; + int i2579; + int i2580; + int i2581; + int i2582; + int i2583; + int i2584; + int i2585; + int i2586; + int i2587; + int i2588; + int i2589; + int i2590; + int i2591; + int i2592; + int i2593; + int i2594; + int i2595; + int i2596; + int i2597; + int i2598; + int i2599; + int i2600; + int i2601; + int i2602; + int i2603; + int i2604; + int i2605; + int i2606; + int i2607; + int i2608; + int i2609; + int i2610; + int i2611; + int i2612; + int i2613; + int i2614; + int i2615; + int i2616; + int i2617; + int i2618; + int i2619; + int i2620; + int i2621; + int i2622; + int i2623; + int i2624; + int i2625; + int i2626; + int i2627; + int i2628; + int i2629; + int i2630; + int i2631; + int i2632; + int i2633; + int i2634; + int i2635; + int i2636; + int i2637; + int i2638; + int i2639; + int i2640; + int i2641; + int i2642; + int i2643; + int i2644; + int i2645; + int i2646; + int i2647; + int i2648; + int i2649; + int i2650; + int i2651; + int i2652; + int i2653; + int i2654; + int i2655; + int i2656; + int i2657; + int i2658; + int i2659; + int i2660; + int i2661; + int i2662; + int i2663; + int i2664; + int i2665; + int i2666; + int i2667; + int i2668; + int i2669; + int i2670; + int i2671; + int i2672; + int i2673; + int i2674; + int i2675; + int i2676; + int i2677; + int i2678; + int i2679; + int i2680; + int i2681; + int i2682; + int i2683; + int i2684; + int i2685; + int i2686; + int i2687; + int i2688; + int i2689; + int i2690; + int i2691; + int i2692; + int i2693; + int i2694; + int i2695; + int i2696; + int i2697; + int i2698; + int i2699; + int i2700; + int i2701; + int i2702; + int i2703; + int i2704; + int i2705; + int i2706; + int i2707; + int i2708; + int i2709; + int i2710; + int i2711; + int i2712; + int i2713; + int i2714; + int i2715; + int i2716; + int i2717; + int i2718; + int i2719; + int i2720; + int i2721; + int i2722; + int i2723; + int i2724; + int i2725; + int i2726; + int i2727; + int i2728; + int i2729; + int i2730; + int i2731; + int i2732; + int i2733; + int i2734; + int i2735; + int i2736; + int i2737; + int i2738; + int i2739; + int i2740; + int i2741; + int i2742; + int i2743; + int i2744; + int i2745; + int i2746; + int 
i2747; + int i2748; + int i2749; + int i2750; + int i2751; + int i2752; + int i2753; + int i2754; + int i2755; + int i2756; + int i2757; + int i2758; + int i2759; + int i2760; + int i2761; + int i2762; + int i2763; + int i2764; + int i2765; + int i2766; + int i2767; + int i2768; + int i2769; + int i2770; + int i2771; + int i2772; + int i2773; + int i2774; + int i2775; + int i2776; + int i2777; + int i2778; + int i2779; + int i2780; + int i2781; + int i2782; + int i2783; + int i2784; + int i2785; + int i2786; + int i2787; + int i2788; + int i2789; + int i2790; + int i2791; + int i2792; + int i2793; + int i2794; + int i2795; + int i2796; + int i2797; + int i2798; + int i2799; + int i2800; + int i2801; + int i2802; + int i2803; + int i2804; + int i2805; + int i2806; + int i2807; + int i2808; + int i2809; + int i2810; + int i2811; + int i2812; + int i2813; + int i2814; + int i2815; + int i2816; + int i2817; + int i2818; + int i2819; + int i2820; + int i2821; + int i2822; + int i2823; + int i2824; + int i2825; + int i2826; + int i2827; + int i2828; + int i2829; + int i2830; + int i2831; + int i2832; + int i2833; + int i2834; + int i2835; + int i2836; + int i2837; + int i2838; + int i2839; + int i2840; + int i2841; + int i2842; + int i2843; + int i2844; + int i2845; + int i2846; + int i2847; + int i2848; + int i2849; + int i2850; + int i2851; + int i2852; + int i2853; + int i2854; + int i2855; + int i2856; + int i2857; + int i2858; + int i2859; + int i2860; + int i2861; + int i2862; + int i2863; + int i2864; + int i2865; + int i2866; + int i2867; + int i2868; + int i2869; + int i2870; + int i2871; + int i2872; + int i2873; + int i2874; + int i2875; + int i2876; + int i2877; + int i2878; + int i2879; + int i2880; + int i2881; + int i2882; + int i2883; + int i2884; + int i2885; + int i2886; + int i2887; + int i2888; + int i2889; + int i2890; + int i2891; + int i2892; + int i2893; + int i2894; + int i2895; + int i2896; + int i2897; + int i2898; + int i2899; + int i2900; + int i2901; + int i2902; + int i2903; + int i2904; + int i2905; + int i2906; + int i2907; + int i2908; + int i2909; + int i2910; + int i2911; + int i2912; + int i2913; + int i2914; + int i2915; + int i2916; + int i2917; + int i2918; + int i2919; + int i2920; + int i2921; + int i2922; + int i2923; + int i2924; + int i2925; + int i2926; + int i2927; + int i2928; + int i2929; + int i2930; + int i2931; + int i2932; + int i2933; + int i2934; + int i2935; + int i2936; + int i2937; + int i2938; + int i2939; + int i2940; + int i2941; + int i2942; + int i2943; + int i2944; + int i2945; + int i2946; + int i2947; + int i2948; + int i2949; + int i2950; + int i2951; + int i2952; + int i2953; + int i2954; + int i2955; + int i2956; + int i2957; + int i2958; + int i2959; + int i2960; + int i2961; + int i2962; + int i2963; + int i2964; + int i2965; + int i2966; + int i2967; + int i2968; + int i2969; + int i2970; + int i2971; + int i2972; + int i2973; + int i2974; + int i2975; + int i2976; + int i2977; + int i2978; + int i2979; + int i2980; + int i2981; + int i2982; + int i2983; + int i2984; + int i2985; + int i2986; + int i2987; + int i2988; + int i2989; + int i2990; + int i2991; + int i2992; + int i2993; + int i2994; + int i2995; + int i2996; + int i2997; + int i2998; + int i2999; + int i3000; + int i3001; + int i3002; + int i3003; + int i3004; + int i3005; + int i3006; + int i3007; + int i3008; + int i3009; + int i3010; + int i3011; + int i3012; + int i3013; + int i3014; + int i3015; + int i3016; + int i3017; + int i3018; + int i3019; + int 
i3020; + int i3021; + int i3022; + int i3023; + int i3024; + int i3025; + int i3026; + int i3027; + int i3028; + int i3029; + int i3030; + int i3031; + int i3032; + int i3033; + int i3034; + int i3035; + int i3036; + int i3037; + int i3038; + int i3039; + int i3040; + int i3041; + int i3042; + int i3043; + int i3044; + int i3045; + int i3046; + int i3047; + int i3048; + int i3049; + int i3050; + int i3051; + int i3052; + int i3053; + int i3054; + int i3055; + int i3056; + int i3057; + int i3058; + int i3059; + int i3060; + int i3061; + int i3062; + int i3063; + int i3064; + int i3065; + int i3066; + int i3067; + int i3068; + int i3069; + int i3070; + int i3071; + int i3072; + int i3073; + int i3074; + int i3075; + int i3076; + int i3077; + int i3078; + int i3079; + int i3080; + int i3081; + int i3082; + int i3083; + int i3084; + int i3085; + int i3086; + int i3087; + int i3088; + int i3089; + int i3090; + int i3091; + int i3092; + int i3093; + int i3094; + int i3095; + int i3096; + int i3097; + int i3098; + int i3099; + int i3100; + int i3101; + int i3102; + int i3103; + int i3104; + int i3105; + int i3106; + int i3107; + int i3108; + int i3109; + int i3110; + int i3111; + int i3112; + int i3113; + int i3114; + int i3115; + int i3116; + int i3117; + int i3118; + int i3119; + int i3120; + int i3121; + int i3122; + int i3123; + int i3124; + int i3125; + int i3126; + int i3127; + int i3128; + int i3129; + int i3130; + int i3131; + int i3132; + int i3133; + int i3134; + int i3135; + int i3136; + int i3137; + int i3138; + int i3139; + int i3140; + int i3141; + int i3142; + int i3143; + int i3144; + int i3145; + int i3146; + int i3147; + int i3148; + int i3149; + int i3150; + int i3151; + int i3152; + int i3153; + int i3154; + int i3155; + int i3156; + int i3157; + int i3158; + int i3159; + int i3160; + int i3161; + int i3162; + int i3163; + int i3164; + int i3165; + int i3166; + int i3167; + int i3168; + int i3169; + int i3170; + int i3171; + int i3172; + int i3173; + int i3174; + int i3175; + int i3176; + int i3177; + int i3178; + int i3179; + int i3180; + int i3181; + int i3182; + int i3183; + int i3184; + int i3185; + int i3186; + int i3187; + int i3188; + int i3189; + int i3190; + int i3191; + int i3192; + int i3193; + int i3194; + int i3195; + int i3196; + int i3197; + int i3198; + int i3199; + int i3200; + int i3201; + int i3202; + int i3203; + int i3204; + int i3205; + int i3206; + int i3207; + int i3208; + int i3209; + int i3210; + int i3211; + int i3212; + int i3213; + int i3214; + int i3215; + int i3216; + int i3217; + int i3218; + int i3219; + int i3220; + int i3221; + int i3222; + int i3223; + int i3224; + int i3225; + int i3226; + int i3227; + int i3228; + int i3229; + int i3230; + int i3231; + int i3232; + int i3233; + int i3234; + int i3235; + int i3236; + int i3237; + int i3238; + int i3239; + int i3240; + int i3241; + int i3242; + int i3243; + int i3244; + int i3245; + int i3246; + int i3247; + int i3248; + int i3249; + int i3250; + int i3251; + int i3252; + int i3253; + int i3254; + int i3255; + int i3256; + int i3257; + int i3258; + int i3259; + int i3260; + int i3261; + int i3262; + int i3263; + int i3264; + int i3265; + int i3266; + int i3267; + int i3268; + int i3269; + int i3270; + int i3271; + int i3272; + int i3273; + int i3274; + int i3275; + int i3276; + int i3277; + int i3278; + int i3279; + int i3280; + int i3281; + int i3282; + int i3283; + int i3284; + int i3285; + int i3286; + int i3287; + int i3288; + int i3289; + int i3290; + int i3291; + int i3292; + int 
i3293;
+        int i3294;
+        int i3295;
+        int i3296;
[... ~5,700 more generated declarations, int i3297; through int i9023;, one per added line in the same pattern ...]
+        int i9024;
+        int i9025;
+        int
i9026; + int i9027; + int i9028; + int i9029; + int i9030; + int i9031; + int i9032; + int i9033; + int i9034; + int i9035; + int i9036; + int i9037; + int i9038; + int i9039; + int i9040; + int i9041; + int i9042; + int i9043; + int i9044; + int i9045; + int i9046; + int i9047; + int i9048; + int i9049; + int i9050; + int i9051; + int i9052; + int i9053; + int i9054; + int i9055; + int i9056; + int i9057; + int i9058; + int i9059; + int i9060; + int i9061; + int i9062; + int i9063; + int i9064; + int i9065; + int i9066; + int i9067; + int i9068; + int i9069; + int i9070; + int i9071; + int i9072; + int i9073; + int i9074; + int i9075; + int i9076; + int i9077; + int i9078; + int i9079; + int i9080; + int i9081; + int i9082; + int i9083; + int i9084; + int i9085; + int i9086; + int i9087; + int i9088; + int i9089; + int i9090; + int i9091; + int i9092; + int i9093; + int i9094; + int i9095; + int i9096; + int i9097; + int i9098; + int i9099; + int i9100; + int i9101; + int i9102; + int i9103; + int i9104; + int i9105; + int i9106; + int i9107; + int i9108; + int i9109; + int i9110; + int i9111; + int i9112; + int i9113; + int i9114; + int i9115; + int i9116; + int i9117; + int i9118; + int i9119; + int i9120; + int i9121; + int i9122; + int i9123; + int i9124; + int i9125; + int i9126; + int i9127; + int i9128; + int i9129; + int i9130; + int i9131; + int i9132; + int i9133; + int i9134; + int i9135; + int i9136; + int i9137; + int i9138; + int i9139; + int i9140; + int i9141; + int i9142; + int i9143; + int i9144; + int i9145; + int i9146; + int i9147; + int i9148; + int i9149; + int i9150; + int i9151; + int i9152; + int i9153; + int i9154; + int i9155; + int i9156; + int i9157; + int i9158; + int i9159; + int i9160; + int i9161; + int i9162; + int i9163; + int i9164; + int i9165; + int i9166; + int i9167; + int i9168; + int i9169; + int i9170; + int i9171; + int i9172; + int i9173; + int i9174; + int i9175; + int i9176; + int i9177; + int i9178; + int i9179; + int i9180; + int i9181; + int i9182; + int i9183; + int i9184; + int i9185; + int i9186; + int i9187; + int i9188; + int i9189; + int i9190; + int i9191; + int i9192; + int i9193; + int i9194; + int i9195; + int i9196; + int i9197; + int i9198; + int i9199; + int i9200; + int i9201; + int i9202; + int i9203; + int i9204; + int i9205; + int i9206; + int i9207; + int i9208; + int i9209; + int i9210; + int i9211; + int i9212; + int i9213; + int i9214; + int i9215; + int i9216; + int i9217; + int i9218; + int i9219; + int i9220; + int i9221; + int i9222; + int i9223; + int i9224; + int i9225; + int i9226; + int i9227; + int i9228; + int i9229; + int i9230; + int i9231; + int i9232; + int i9233; + int i9234; + int i9235; + int i9236; + int i9237; + int i9238; + int i9239; + int i9240; + int i9241; + int i9242; + int i9243; + int i9244; + int i9245; + int i9246; + int i9247; + int i9248; + int i9249; + int i9250; + int i9251; + int i9252; + int i9253; + int i9254; + int i9255; + int i9256; + int i9257; + int i9258; + int i9259; + int i9260; + int i9261; + int i9262; + int i9263; + int i9264; + int i9265; + int i9266; + int i9267; + int i9268; + int i9269; + int i9270; + int i9271; + int i9272; + int i9273; + int i9274; + int i9275; + int i9276; + int i9277; + int i9278; + int i9279; + int i9280; + int i9281; + int i9282; + int i9283; + int i9284; + int i9285; + int i9286; + int i9287; + int i9288; + int i9289; + int i9290; + int i9291; + int i9292; + int i9293; + int i9294; + int i9295; + int i9296; + int i9297; + int i9298; + int 
i9299; + int i9300; + int i9301; + int i9302; + int i9303; + int i9304; + int i9305; + int i9306; + int i9307; + int i9308; + int i9309; + int i9310; + int i9311; + int i9312; + int i9313; + int i9314; + int i9315; + int i9316; + int i9317; + int i9318; + int i9319; + int i9320; + int i9321; + int i9322; + int i9323; + int i9324; + int i9325; + int i9326; + int i9327; + int i9328; + int i9329; + int i9330; + int i9331; + int i9332; + int i9333; + int i9334; + int i9335; + int i9336; + int i9337; + int i9338; + int i9339; + int i9340; + int i9341; + int i9342; + int i9343; + int i9344; + int i9345; + int i9346; + int i9347; + int i9348; + int i9349; + int i9350; + int i9351; + int i9352; + int i9353; + int i9354; + int i9355; + int i9356; + int i9357; + int i9358; + int i9359; + int i9360; + int i9361; + int i9362; + int i9363; + int i9364; + int i9365; + int i9366; + int i9367; + int i9368; + int i9369; + int i9370; + int i9371; + int i9372; + int i9373; + int i9374; + int i9375; + int i9376; + int i9377; + int i9378; + int i9379; + int i9380; + int i9381; + int i9382; + int i9383; + int i9384; + int i9385; + int i9386; + int i9387; + int i9388; + int i9389; + int i9390; + int i9391; + int i9392; + int i9393; + int i9394; + int i9395; + int i9396; + int i9397; + int i9398; + int i9399; + int i9400; + int i9401; + int i9402; + int i9403; + int i9404; + int i9405; + int i9406; + int i9407; + int i9408; + int i9409; + int i9410; + int i9411; + int i9412; + int i9413; + int i9414; + int i9415; + int i9416; + int i9417; + int i9418; + int i9419; + int i9420; + int i9421; + int i9422; + int i9423; + int i9424; + int i9425; + int i9426; + int i9427; + int i9428; + int i9429; + int i9430; + int i9431; + int i9432; + int i9433; + int i9434; + int i9435; + int i9436; + int i9437; + int i9438; + int i9439; + int i9440; + int i9441; + int i9442; + int i9443; + int i9444; + int i9445; + int i9446; + int i9447; + int i9448; + int i9449; + int i9450; + int i9451; + int i9452; + int i9453; + int i9454; + int i9455; + int i9456; + int i9457; + int i9458; + int i9459; + int i9460; + int i9461; + int i9462; + int i9463; + int i9464; + int i9465; + int i9466; + int i9467; + int i9468; + int i9469; + int i9470; + int i9471; + int i9472; + int i9473; + int i9474; + int i9475; + int i9476; + int i9477; + int i9478; + int i9479; + int i9480; + int i9481; + int i9482; + int i9483; + int i9484; + int i9485; + int i9486; + int i9487; + int i9488; + int i9489; + int i9490; + int i9491; + int i9492; + int i9493; + int i9494; + int i9495; + int i9496; + int i9497; + int i9498; + int i9499; + int i9500; + int i9501; + int i9502; + int i9503; + int i9504; + int i9505; + int i9506; + int i9507; + int i9508; + int i9509; + int i9510; + int i9511; + int i9512; + int i9513; + int i9514; + int i9515; + int i9516; + int i9517; + int i9518; + int i9519; + int i9520; + int i9521; + int i9522; + int i9523; + int i9524; + int i9525; + int i9526; + int i9527; + int i9528; + int i9529; + int i9530; + int i9531; + int i9532; + int i9533; + int i9534; + int i9535; + int i9536; + int i9537; + int i9538; + int i9539; + int i9540; + int i9541; + int i9542; + int i9543; + int i9544; + int i9545; + int i9546; + int i9547; + int i9548; + int i9549; + int i9550; + int i9551; + int i9552; + int i9553; + int i9554; + int i9555; + int i9556; + int i9557; + int i9558; + int i9559; + int i9560; + int i9561; + int i9562; + int i9563; + int i9564; + int i9565; + int i9566; + int i9567; + int i9568; + int i9569; + int i9570; + int i9571; + int 
i9572; + int i9573; + int i9574; + int i9575; + int i9576; + int i9577; + int i9578; + int i9579; + int i9580; + int i9581; + int i9582; + int i9583; + int i9584; + int i9585; + int i9586; + int i9587; + int i9588; + int i9589; + int i9590; + int i9591; + int i9592; + int i9593; + int i9594; + int i9595; + int i9596; + int i9597; + int i9598; + int i9599; + int i9600; + int i9601; + int i9602; + int i9603; + int i9604; + int i9605; + int i9606; + int i9607; + int i9608; + int i9609; + int i9610; + int i9611; + int i9612; + int i9613; + int i9614; + int i9615; + int i9616; + int i9617; + int i9618; + int i9619; + int i9620; + int i9621; + int i9622; + int i9623; + int i9624; + int i9625; + int i9626; + int i9627; + int i9628; + int i9629; + int i9630; + int i9631; + int i9632; + int i9633; + int i9634; + int i9635; + int i9636; + int i9637; + int i9638; + int i9639; + int i9640; + int i9641; + int i9642; + int i9643; + int i9644; + int i9645; + int i9646; + int i9647; + int i9648; + int i9649; + int i9650; + int i9651; + int i9652; + int i9653; + int i9654; + int i9655; + int i9656; + int i9657; + int i9658; + int i9659; + int i9660; + int i9661; + int i9662; + int i9663; + int i9664; + int i9665; + int i9666; + int i9667; + int i9668; + int i9669; + int i9670; + int i9671; + int i9672; + int i9673; + int i9674; + int i9675; + int i9676; + int i9677; + int i9678; + int i9679; + int i9680; + int i9681; + int i9682; + int i9683; + int i9684; + int i9685; + int i9686; + int i9687; + int i9688; + int i9689; + int i9690; + int i9691; + int i9692; + int i9693; + int i9694; + int i9695; + int i9696; + int i9697; + int i9698; + int i9699; + int i9700; + int i9701; + int i9702; + int i9703; + int i9704; + int i9705; + int i9706; + int i9707; + int i9708; + int i9709; + int i9710; + int i9711; + int i9712; + int i9713; + int i9714; + int i9715; + int i9716; + int i9717; + int i9718; + int i9719; + int i9720; + int i9721; + int i9722; + int i9723; + int i9724; + int i9725; + int i9726; + int i9727; + int i9728; + int i9729; + int i9730; + int i9731; + int i9732; + int i9733; + int i9734; + int i9735; + int i9736; + int i9737; + int i9738; + int i9739; + int i9740; + int i9741; + int i9742; + int i9743; + int i9744; + int i9745; + int i9746; + int i9747; + int i9748; + int i9749; + int i9750; + int i9751; + int i9752; + int i9753; + int i9754; + int i9755; + int i9756; + int i9757; + int i9758; + int i9759; + int i9760; + int i9761; + int i9762; + int i9763; + int i9764; + int i9765; + int i9766; + int i9767; + int i9768; + int i9769; + int i9770; + int i9771; + int i9772; + int i9773; + int i9774; + int i9775; + int i9776; + int i9777; + int i9778; + int i9779; + int i9780; + int i9781; + int i9782; + int i9783; + int i9784; + int i9785; + int i9786; + int i9787; + int i9788; + int i9789; + int i9790; + int i9791; + int i9792; + int i9793; + int i9794; + int i9795; + int i9796; + int i9797; + int i9798; + int i9799; + int i9800; + int i9801; + int i9802; + int i9803; + int i9804; + int i9805; + int i9806; + int i9807; + int i9808; + int i9809; + int i9810; + int i9811; + int i9812; + int i9813; + int i9814; + int i9815; + int i9816; + int i9817; + int i9818; + int i9819; + int i9820; + int i9821; + int i9822; + int i9823; + int i9824; + int i9825; + int i9826; + int i9827; + int i9828; + int i9829; + int i9830; + int i9831; + int i9832; + int i9833; + int i9834; + int i9835; + int i9836; + int i9837; + int i9838; + int i9839; + int i9840; + int i9841; + int i9842; + int i9843; + int i9844; + int 
i9845; + int i9846; + int i9847; + int i9848; + int i9849; + int i9850; + int i9851; + int i9852; + int i9853; + int i9854; + int i9855; + int i9856; + int i9857; + int i9858; + int i9859; + int i9860; + int i9861; + int i9862; + int i9863; + int i9864; + int i9865; + int i9866; + int i9867; + int i9868; + int i9869; + int i9870; + int i9871; + int i9872; + int i9873; + int i9874; + int i9875; + int i9876; + int i9877; + int i9878; + int i9879; + int i9880; + int i9881; + int i9882; + int i9883; + int i9884; + int i9885; + int i9886; + int i9887; + int i9888; + int i9889; + int i9890; + int i9891; + int i9892; + int i9893; + int i9894; + int i9895; + int i9896; + int i9897; + int i9898; + int i9899; + int i9900; + int i9901; + int i9902; + int i9903; + int i9904; + int i9905; + int i9906; + int i9907; + int i9908; + int i9909; + int i9910; + int i9911; + int i9912; + int i9913; + int i9914; + int i9915; + int i9916; + int i9917; + int i9918; + int i9919; + int i9920; + int i9921; + int i9922; + int i9923; + int i9924; + int i9925; + int i9926; + int i9927; + int i9928; + int i9929; + int i9930; + int i9931; + int i9932; + int i9933; + int i9934; + int i9935; + int i9936; + int i9937; + int i9938; + int i9939; + int i9940; + int i9941; + int i9942; + int i9943; + int i9944; + int i9945; + int i9946; + int i9947; + int i9948; + int i9949; + int i9950; + int i9951; + int i9952; + int i9953; + int i9954; + int i9955; + int i9956; + int i9957; + int i9958; + int i9959; + int i9960; + int i9961; + int i9962; + int i9963; + int i9964; + int i9965; + int i9966; + int i9967; + int i9968; + int i9969; + int i9970; + int i9971; + int i9972; + int i9973; + int i9974; + int i9975; + int i9976; + int i9977; + int i9978; + int i9979; + int i9980; + int i9981; + int i9982; + int i9983; + int i9984; + int i9985; + int i9986; + int i9987; + int i9988; + int i9989; + int i9990; + int i9991; + int i9992; + int i9993; + int i9994; + int i9995; + int i9996; + int i9997; + int i9998; + int i9999; + int i10000; + int i10001; + int i10002; + int i10003; + int i10004; + int i10005; + int i10006; + int i10007; + int i10008; + int i10009; + int i10010; + int i10011; + int i10012; + int i10013; + int i10014; + int i10015; + int i10016; + int i10017; + int i10018; + int i10019; + int i10020; + int i10021; + int i10022; + int i10023; + int i10024; + int i10025; + int i10026; + int i10027; + int i10028; + int i10029; + int i10030; + int i10031; + int i10032; + int i10033; + int i10034; + int i10035; + int i10036; + int i10037; + int i10038; + int i10039; + int i10040; + int i10041; + int i10042; + int i10043; + int i10044; + int i10045; + int i10046; + int i10047; + int i10048; + int i10049; + int i10050; + int i10051; + int i10052; + int i10053; + int i10054; + int i10055; + int i10056; + int i10057; + int i10058; + int i10059; + int i10060; + int i10061; + int i10062; + int i10063; + int i10064; + int i10065; + int i10066; + int i10067; + int i10068; + int i10069; + int i10070; + int i10071; + int i10072; + int i10073; + int i10074; + int i10075; + int i10076; + int i10077; + int i10078; + int i10079; + int i10080; + int i10081; + int i10082; + int i10083; + int i10084; + int i10085; + int i10086; + int i10087; + int i10088; + int i10089; + int i10090; + int i10091; + int i10092; + int i10093; + int i10094; + int i10095; + int i10096; + int i10097; + int i10098; + int i10099; + int i10100; + int i10101; + int i10102; + int i10103; + int i10104; + int i10105; + int i10106; + int i10107; + int i10108; + int i10109; + int 
i10110; + int i10111; + int i10112; + int i10113; + int i10114; + int i10115; + int i10116; + int i10117; + int i10118; + int i10119; + int i10120; + int i10121; + int i10122; + int i10123; + int i10124; + int i10125; + int i10126; + int i10127; + int i10128; + int i10129; + int i10130; + int i10131; + int i10132; + int i10133; + int i10134; + int i10135; + int i10136; + int i10137; + int i10138; + int i10139; + int i10140; + int i10141; + int i10142; + int i10143; + int i10144; + int i10145; + int i10146; + int i10147; + int i10148; + int i10149; + int i10150; + int i10151; + int i10152; + int i10153; + int i10154; + int i10155; + int i10156; + int i10157; + int i10158; + int i10159; + int i10160; + int i10161; + int i10162; + int i10163; + int i10164; + int i10165; + int i10166; + int i10167; + int i10168; + int i10169; + int i10170; + int i10171; + int i10172; + int i10173; + int i10174; + int i10175; + int i10176; + int i10177; + int i10178; + int i10179; + int i10180; + int i10181; + int i10182; + int i10183; + int i10184; + int i10185; + int i10186; + int i10187; + int i10188; + int i10189; + int i10190; + int i10191; + int i10192; + int i10193; + int i10194; + int i10195; + int i10196; + int i10197; + int i10198; + int i10199; + int i10200; + int i10201; + int i10202; + int i10203; + int i10204; + int i10205; + int i10206; + int i10207; + int i10208; + int i10209; + int i10210; + int i10211; + int i10212; + int i10213; + int i10214; + int i10215; + int i10216; + int i10217; + int i10218; + int i10219; + int i10220; + int i10221; + int i10222; + int i10223; + int i10224; + int i10225; + int i10226; + int i10227; + int i10228; + int i10229; + int i10230; + int i10231; + int i10232; + int i10233; + int i10234; + int i10235; + int i10236; + int i10237; + int i10238; + int i10239; + int i10240; + int i10241; + int i10242; + int i10243; + int i10244; + int i10245; + int i10246; + int i10247; + int i10248; + int i10249; + int i10250; + int i10251; + int i10252; + int i10253; + int i10254; + int i10255; + int i10256; + int i10257; + int i10258; + int i10259; + int i10260; + int i10261; + int i10262; + int i10263; + int i10264; + int i10265; + int i10266; + int i10267; + int i10268; + int i10269; + int i10270; + int i10271; + int i10272; + int i10273; + int i10274; + int i10275; + int i10276; + int i10277; + int i10278; + int i10279; + int i10280; + int i10281; + int i10282; + int i10283; + int i10284; + int i10285; + int i10286; + int i10287; + int i10288; + int i10289; + int i10290; + int i10291; + int i10292; + int i10293; + int i10294; + int i10295; + int i10296; + int i10297; + int i10298; + int i10299; + int i10300; + int i10301; + int i10302; + int i10303; + int i10304; + int i10305; + int i10306; + int i10307; + int i10308; + int i10309; + int i10310; + int i10311; + int i10312; + int i10313; + int i10314; + int i10315; + int i10316; + int i10317; + int i10318; + int i10319; + int i10320; + int i10321; + int i10322; + int i10323; + int i10324; + int i10325; + int i10326; + int i10327; + int i10328; + int i10329; + int i10330; + int i10331; + int i10332; + int i10333; + int i10334; + int i10335; + int i10336; + int i10337; + int i10338; + int i10339; + int i10340; + int i10341; + int i10342; + int i10343; + int i10344; + int i10345; + int i10346; + int i10347; + int i10348; + int i10349; + int i10350; + int i10351; + int i10352; + int i10353; + int i10354; + int i10355; + int i10356; + int i10357; + int i10358; + int i10359; + int i10360; + int i10361; + int i10362; + int i10363; + 
int i10364; + int i10365; + int i10366; + int i10367; + int i10368; + int i10369; + int i10370; + int i10371; + int i10372; + int i10373; + int i10374; + int i10375; + int i10376; + int i10377; + int i10378; + int i10379; + int i10380; + int i10381; + int i10382; + int i10383; + int i10384; + int i10385; + int i10386; + int i10387; + int i10388; + int i10389; + int i10390; + int i10391; + int i10392; + int i10393; + int i10394; + int i10395; + int i10396; + int i10397; + int i10398; + int i10399; + int i10400; + int i10401; + int i10402; + int i10403; + int i10404; + int i10405; + int i10406; + int i10407; + int i10408; + int i10409; + int i10410; + int i10411; + int i10412; + int i10413; + int i10414; + int i10415; + int i10416; + int i10417; + int i10418; + int i10419; + int i10420; + int i10421; + int i10422; + int i10423; + int i10424; + int i10425; + int i10426; + int i10427; + int i10428; + int i10429; + int i10430; + int i10431; + int i10432; + int i10433; + int i10434; + int i10435; + int i10436; + int i10437; + int i10438; + int i10439; + int i10440; + int i10441; + int i10442; + int i10443; + int i10444; + int i10445; + int i10446; + int i10447; + int i10448; + int i10449; + int i10450; + int i10451; + int i10452; + int i10453; + int i10454; + int i10455; + int i10456; + int i10457; + int i10458; + int i10459; + int i10460; + int i10461; + int i10462; + int i10463; + int i10464; + int i10465; + int i10466; + int i10467; + int i10468; + int i10469; + int i10470; + int i10471; + int i10472; + int i10473; + int i10474; + int i10475; + int i10476; + int i10477; + int i10478; + int i10479; + int i10480; + int i10481; + int i10482; + int i10483; + int i10484; + int i10485; + int i10486; + int i10487; + int i10488; + int i10489; + int i10490; + int i10491; + int i10492; + int i10493; + int i10494; + int i10495; + int i10496; + int i10497; + int i10498; + int i10499; + int i10500; + int i10501; + int i10502; + int i10503; + int i10504; + int i10505; + int i10506; + int i10507; + int i10508; + int i10509; + int i10510; + int i10511; + int i10512; + int i10513; + int i10514; + int i10515; + int i10516; + int i10517; + int i10518; + int i10519; + int i10520; + int i10521; + int i10522; + int i10523; + int i10524; + int i10525; + int i10526; + int i10527; + int i10528; + int i10529; + int i10530; + int i10531; + int i10532; + int i10533; + int i10534; + int i10535; + int i10536; + int i10537; + int i10538; + int i10539; + int i10540; + int i10541; + int i10542; + int i10543; + int i10544; + int i10545; + int i10546; + int i10547; + int i10548; + int i10549; + int i10550; + int i10551; + int i10552; + int i10553; + int i10554; + int i10555; + int i10556; + int i10557; + int i10558; + int i10559; + int i10560; + int i10561; + int i10562; + int i10563; + int i10564; + int i10565; + int i10566; + int i10567; + int i10568; + int i10569; + int i10570; + int i10571; + int i10572; + int i10573; + int i10574; + int i10575; + int i10576; + int i10577; + int i10578; + int i10579; + int i10580; + int i10581; + int i10582; + int i10583; + int i10584; + int i10585; + int i10586; + int i10587; + int i10588; + int i10589; + int i10590; + int i10591; + int i10592; + int i10593; + int i10594; + int i10595; + int i10596; + int i10597; + int i10598; + int i10599; + int i10600; + int i10601; + int i10602; + int i10603; + int i10604; + int i10605; + int i10606; + int i10607; + int i10608; + int i10609; + int i10610; + int i10611; + int i10612; + int i10613; + int i10614; + int i10615; + int i10616; + int i10617; 
+ int i10618; + int i10619; + int i10620; + int i10621; + int i10622; + int i10623; + int i10624; + int i10625; + int i10626; + int i10627; + int i10628; + int i10629; + int i10630; + int i10631; + int i10632; + int i10633; + int i10634; + int i10635; + int i10636; + int i10637; + int i10638; + int i10639; + int i10640; + int i10641; + int i10642; + int i10643; + int i10644; + int i10645; + int i10646; + int i10647; + int i10648; + int i10649; + int i10650; + int i10651; + int i10652; + int i10653; + int i10654; + int i10655; + int i10656; + int i10657; + int i10658; + int i10659; + int i10660; + int i10661; + int i10662; + int i10663; + int i10664; + int i10665; + int i10666; + int i10667; + int i10668; + int i10669; + int i10670; + int i10671; + int i10672; + int i10673; + int i10674; + int i10675; + int i10676; + int i10677; + int i10678; + int i10679; + int i10680; + int i10681; + int i10682; + int i10683; + int i10684; + int i10685; + int i10686; + int i10687; + int i10688; + int i10689; + int i10690; + int i10691; + int i10692; + int i10693; + int i10694; + int i10695; + int i10696; + int i10697; + int i10698; + int i10699; + int i10700; + int i10701; + int i10702; + int i10703; + int i10704; + int i10705; + int i10706; + int i10707; + int i10708; + int i10709; + int i10710; + int i10711; + int i10712; + int i10713; + int i10714; + int i10715; + int i10716; + int i10717; + int i10718; + int i10719; + int i10720; + int i10721; + int i10722; + int i10723; + int i10724; + int i10725; + int i10726; + int i10727; + int i10728; + int i10729; + int i10730; + int i10731; + int i10732; + int i10733; + int i10734; + int i10735; + int i10736; + int i10737; + int i10738; + int i10739; + int i10740; + int i10741; + int i10742; + int i10743; + int i10744; + int i10745; + int i10746; + int i10747; + int i10748; + int i10749; + int i10750; + int i10751; + int i10752; + int i10753; + int i10754; + int i10755; + int i10756; + int i10757; + int i10758; + int i10759; + int i10760; + int i10761; + int i10762; + int i10763; + int i10764; + int i10765; + int i10766; + int i10767; + int i10768; + int i10769; + int i10770; + int i10771; + int i10772; + int i10773; + int i10774; + int i10775; + int i10776; + int i10777; + int i10778; + int i10779; + int i10780; + int i10781; + int i10782; + int i10783; + int i10784; + int i10785; + int i10786; + int i10787; + int i10788; + int i10789; + int i10790; + int i10791; + int i10792; + int i10793; + int i10794; + int i10795; + int i10796; + int i10797; + int i10798; + int i10799; + + if (unlikely) { + // Since Foo is not loaded this will turn into an uncommon trap + Class c = Foo.class; + + StringBuilder sb = new StringBuilder(); + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/compiler/whitebox/CompilerWhiteBoxTest.java --- a/test/compiler/whitebox/CompilerWhiteBoxTest.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -80,8 +80,7 @@ static { if (TIERED_COMPILATION) { - THRESHOLD = 150000; - BACKEDGE_THRESHOLD = 0xFFFFFFFFL; + BACKEDGE_THRESHOLD = THRESHOLD = 150000; } else { THRESHOLD = COMPILE_THRESHOLD; BACKEDGE_THRESHOLD = COMPILE_THRESHOLD * Long.parseLong(getVMOption( @@ -364,7 +363,7 @@ /** OSR constructor test case */ OSR_CONSTRUCTOR_TEST(Helper.OSR_CONSTRUCTOR, Helper.OSR_CONSTRUCTOR_CALLABLE, true), - /** OSR method test case */ + /** OSR method test case */ OSR_METOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true), /** OSR static method test case */ 
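The cold branch that closes the generated test above leans on a standard deoptimization idiom: because Foo has never been loaded, resolving Foo.class inside the never-taken branch makes the JIT plant an uncommon trap there, so the first time the branch runs, the huge compiled frame with all those locals is deoptimized. A minimal sketch of the same idiom, with hypothetical names (Foo here stands in for any class that is still unloaded when the branch first fires):

    public class UncommonTrapSketch {
        static class Foo { } // loaded lazily, only once the cold branch executes

        static boolean unlikely = false; // stays false while the method is compiled

        static void guarded() {
            if (unlikely) {
                // First execution resolves Foo, triggering class loading and an
                // uncommon trap that drops back to the interpreter.
                Class<?> c = Foo.class;
                StringBuilder sb = new StringBuilder(c.getName());
                System.out.println(sb);
            }
        }

        public static void main(String[] args) {
            for (int i = 0; i < 200000; i++) {
                guarded(); // warm up until the method is JIT-compiled
            }
            unlikely = true;
            guarded(); // trap fires; execution continues interpreted
        }
    }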
OSR_STATIC_TEST(Helper.OSR_STATIC, Helper.OSR_STATIC_CALLABLE, true); @@ -373,7 +372,7 @@ final Executable executable; /** object to invoke {@linkplain #executable} */ final Callable callable; - /** flag for OSR test case */ + /** flag for OSR test case */ final boolean isOsr; private TestCase(Executable executable, Callable callable, diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/7168848/HumongousAlloc.java --- a/test/gc/7168848/HumongousAlloc.java Thu Nov 21 15:04:26 2013 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test Humongous.java - * @bug 7168848 - * @summary G1: humongous object allocations should initiate marking cycles when necessary - * @run main/othervm -Xms100m -Xmx100m -XX:+PrintGC -XX:G1HeapRegionSize=1m -XX:+UseG1GC HumongousAlloc - * - */ -import java.lang.management.GarbageCollectorMXBean; -import java.lang.management.ManagementFactory; -import java.util.List; - -public class HumongousAlloc { - - public static byte[] dummy; - private static int sleepFreq = 40; - private static int sleepTime = 1000; - private static double size = 0.75; - private static int iterations = 50; - private static int MB = 1024 * 1024; - - public static void allocate(int size, int sleepTime, int sleepFreq) throws InterruptedException { - System.out.println("Will allocate objects of size: " + size - + " bytes and sleep for " + sleepTime - + " ms after every " + sleepFreq + "th allocation."); - int count = 0; - while (count < iterations) { - for (int i = 0; i < sleepFreq; i++) { - dummy = new byte[size - 16]; - } - Thread.sleep(sleepTime); - count++; - } - } - - public static void main(String[] args) throws InterruptedException { - allocate((int) (size * MB), sleepTime, sleepFreq); - List collectors = ManagementFactory.getGarbageCollectorMXBeans(); - for (GarbageCollectorMXBean collector : collectors) { - if (collector.getName().contains("G1 Old")) { - long count = collector.getCollectionCount(); - if (count > 0) { - throw new RuntimeException("Failed: FullGCs should not have happened. The number of FullGC run is " + count); - } - else { - System.out.println("Passed."); - } - } - } - } -} - diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/TestSystemGC.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/TestSystemGC.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,46 @@ +/* +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. +*/ + +/* + * @test TestSystemGC + * @key gc + * @summary Runs System.gc() with different flags. + * @run main/othervm TestSystemGC + * @run main/othervm -XX:+UseSerialGC TestSystemGC + * @run main/othervm -XX:+UseParNewGC TestSystemGC + * @run main/othervm -XX:+UseParallelGC TestSystemGC + * @run main/othervm -XX:+UseParallelGC -XX:-UseParallelOldGC TestSystemGC + * @run main/othervm -XX:+UseConcMarkSweepGC TestSystemGC + * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC + * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent -XX:-UseParNewGC TestSystemGC + * @run main/othervm -XX:+UseG1GC TestSystemGC + * @run main/othervm -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent TestSystemGC + * @run main/othervm -XX:+UseLargePages TestSystemGC + * @run main/othervm -XX:+UseLargePages -XX:+UseLargePagesInMetaspace TestSystemGC + */ + +public class TestSystemGC { + public static void main(String args[]) throws Exception { + System.gc(); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/arguments/TestHeapFreeRatio.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/arguments/TestHeapFreeRatio.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
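TestSystemGC itself is deliberately trivial; the coverage comes from the @run matrix, which drives the same System.gc() call through every collector, with and without -XX:+ExplicitGCInvokesConcurrent (under CMS and G1 that flag turns an explicit full GC into a concurrent cycle). A hedged single-process sketch of what each run amounts to, using only standard APIs:

    public class SystemGCProbe {
        public static void main(String[] args) {
            // Create some short-lived garbage first so the collection has work to do.
            for (int i = 0; i < 64; i++) {
                byte[] garbage = new byte[1024 * 1024];
            }
            long before = Runtime.getRuntime().freeMemory();
            // A stop-the-world full collection by default; a concurrent cycle
            // under -XX:+ExplicitGCInvokesConcurrent with CMS or G1.
            System.gc();
            long after = Runtime.getRuntime().freeMemory();
            System.out.println("free before=" + before + ", after=" + after);
        }
    }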
+ */ + +/* + * @test TestHeapFreeRatio + * @key gc + * @bug 8025661 + * @summary Test parsing of -Xminf and -Xmaxf + * @library /testlibrary + * @run main/othervm TestHeapFreeRatio + */ + +import com.oracle.java.testlibrary.*; + +public class TestHeapFreeRatio { + + enum Validation { + VALID, + MIN_INVALID, + MAX_INVALID, + COMBINATION_INVALID + } + + private static void testMinMaxFreeRatio(String min, String max, Validation type) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-Xminf" + min, + "-Xmaxf" + max, + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + switch (type) { + case VALID: + output.shouldNotContain("Error"); + output.shouldHaveExitValue(0); + break; + case MIN_INVALID: + output.shouldContain("Bad min heap free percentage size: -Xminf" + min); + output.shouldContain("Error"); + output.shouldHaveExitValue(1); + break; + case MAX_INVALID: + output.shouldContain("Bad max heap free percentage size: -Xmaxf" + max); + output.shouldContain("Error"); + output.shouldHaveExitValue(1); + break; + case COMBINATION_INVALID: + output.shouldContain("must be less than or equal to MaxHeapFreeRatio"); + output.shouldContain("Error"); + output.shouldHaveExitValue(1); + break; + default: + throw new IllegalStateException("Must specify expected validation type"); + } + + System.out.println(output.getOutput()); + } + + public static void main(String args[]) throws Exception { + testMinMaxFreeRatio( "0.1", "0.5", Validation.VALID); + testMinMaxFreeRatio( ".1", ".5", Validation.VALID); + testMinMaxFreeRatio( "0.5", "0.5", Validation.VALID); + + testMinMaxFreeRatio("-0.1", "0.5", Validation.MIN_INVALID); + testMinMaxFreeRatio( "1.1", "0.5", Validation.MIN_INVALID); + testMinMaxFreeRatio("=0.1", "0.5", Validation.MIN_INVALID); + testMinMaxFreeRatio("0.1f", "0.5", Validation.MIN_INVALID); + testMinMaxFreeRatio( + "INVALID", "0.5", Validation.MIN_INVALID); + testMinMaxFreeRatio( + "2147483647", "0.5", Validation.MIN_INVALID); + + testMinMaxFreeRatio( "0.1", "-0.5", Validation.MAX_INVALID); + testMinMaxFreeRatio( "0.1", "1.5", Validation.MAX_INVALID); + testMinMaxFreeRatio( "0.1", "0.5f", Validation.MAX_INVALID); + testMinMaxFreeRatio( "0.1", "=0.5", Validation.MAX_INVALID); + testMinMaxFreeRatio( + "0.1", "INVALID", Validation.MAX_INVALID); + testMinMaxFreeRatio( + "0.1", "2147483647", Validation.MAX_INVALID); + + testMinMaxFreeRatio( "0.5", "0.1", Validation.COMBINATION_INVALID); + testMinMaxFreeRatio( ".5", ".10", Validation.COMBINATION_INVALID); + testMinMaxFreeRatio("0.12","0.100", Validation.COMBINATION_INVALID); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/arguments/TestMaxHeapSizeTools.java --- a/test/gc/arguments/TestMaxHeapSizeTools.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/gc/arguments/TestMaxHeapSizeTools.java Thu Nov 21 15:04:54 2013 +0100 @@ -64,32 +64,29 @@ long newPlusOldSize = values[0] + values[1]; long smallValue = newPlusOldSize / 2; long largeValue = newPlusOldSize * 2; + long maxHeapSize = largeValue + (2 * 1024 * 1024); // -Xms is not set - checkErgonomics(new String[] { gcflag, "-Xmx16M" }, values, -1, -1); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-XX:InitialHeapSize=" + largeValue }, values, -1, largeValue); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-XX:InitialHeapSize=0" }, values, -1, -1); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize }, 
values, -1, -1); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-XX:InitialHeapSize=" + smallValue }, values, -1, smallValue); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-XX:InitialHeapSize=" + largeValue }, values, -1, largeValue); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-XX:InitialHeapSize=0" }, values, -1, -1); // -Xms is set to zero - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms0" }, values, -1, -1); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms0", "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms0", "-XX:InitialHeapSize=" + largeValue }, values, -1, largeValue); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms0", "-XX:InitialHeapSize=0" }, values, -1, -1); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms0" }, values, -1, -1); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms0", "-XX:InitialHeapSize=" + smallValue }, values, -1, smallValue); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms0", "-XX:InitialHeapSize=" + largeValue }, values, -1, largeValue); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms0", "-XX:InitialHeapSize=0" }, values, -1, -1); // -Xms is set to small value - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + smallValue }, values, -1, -1); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + smallValue, "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + smallValue, "-XX:InitialHeapSize=" + largeValue }, values, smallValue, largeValue); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + smallValue, "-XX:InitialHeapSize=0" }, values, smallValue, -1); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + smallValue }, values, -1, -1); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + smallValue, "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + smallValue, "-XX:InitialHeapSize=" + largeValue }, values, smallValue, largeValue); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + smallValue, "-XX:InitialHeapSize=0" }, values, smallValue, -1); // -Xms is set to large value - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + largeValue }, values, largeValue, largeValue); - // the next case has already been checked elsewhere and gives an error - // checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + largeValue, "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue); - // the next case has already been checked elsewhere too - // checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + largeValue, "-XX:InitialHeapSize=" + largeValue }, values, values[0], largeValue); - checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + largeValue, "-XX:InitialHeapSize=0" }, values, largeValue, -1); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + largeValue }, values, largeValue, largeValue); + checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + largeValue, "-XX:InitialHeapSize=0" }, values, largeValue, -1); } private static long align_up(long value, long alignment) { diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/arguments/TestMaxNewSize.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ 
b/test/gc/arguments/TestMaxNewSize.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,122 @@ +/* +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. +*/ + +/* + * @test TestMaxNewSize + * @key gc + * @bug 7057939 + * @summary Make sure that MaxNewSize always has a useful value after argument + * processing. + * @library /testlibrary + * @build TestMaxNewSize + * @run main TestMaxNewSize -XX:+UseSerialGC + * @run main TestMaxNewSize -XX:+UseParallelGC + * @run main TestMaxNewSize -XX:+UseConcMarkSweepGC + * @run main TestMaxNewSize -XX:+UseG1GC + * @author thomas.schatzl@oracle.com, jesper.wilhelmsson@oracle.com + */ + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import java.math.BigInteger; + +import java.util.ArrayList; +import java.util.Arrays; + +import com.oracle.java.testlibrary.*; + +public class TestMaxNewSize { + + private static void checkMaxNewSize(String[] flags, int heapsize) throws Exception { + BigInteger actual = new BigInteger(getMaxNewSize(flags)); + System.out.println(actual); + if (actual.compareTo(new BigInteger((new Long(heapsize)).toString())) == 1) { + throw new RuntimeException("MaxNewSize value set to \"" + actual + + "\", expected otherwise when running with the following flags: " + Arrays.asList(flags).toString()); + } + } + + private static void checkIncompatibleNewSize(String[] flags) throws Exception { + ArrayList finalargs = new ArrayList(); + finalargs.addAll(Arrays.asList(flags)); + finalargs.add("-version"); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0])); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Initial young gen size set larger than the maximum young gen size"); + } + + private static boolean isRunningG1(String[] args) { + for (int i = 0; i < args.length; i++) { + if (args[i].contains("+UseG1GC")) { + return true; + } + } + return false; + } + + private static String getMaxNewSize(String[] flags) throws Exception { + ArrayList finalargs = new ArrayList(); + finalargs.addAll(Arrays.asList(flags)); + if (isRunningG1(flags)) { + finalargs.add("-XX:G1HeapRegionSize=1M"); + } + finalargs.add("-XX:+PrintFlagsFinal"); + finalargs.add("-version"); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0])); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + String stdout = output.getStdout(); + //System.out.println(stdout); + return getFlagValue("MaxNewSize", stdout); + } + + private static String 
getFlagValue(String flag, String where) { + Matcher m = Pattern.compile(flag + "\\s+:?=\\s+\\d+").matcher(where); + if (!m.find()) { + throw new RuntimeException("Could not find value for flag " + flag + " in output string"); + } + String match = m.group(); + return match.substring(match.lastIndexOf(" ") + 1, match.length()); + } + + public static void main(String args[]) throws Exception { + String gcName = args[0]; + final int M32 = 32 * 1024 * 1024; + final int M64 = 64 * 1024 * 1024; + final int M96 = 96 * 1024 * 1024; + final int M128 = 128 * 1024 * 1024; + checkMaxNewSize(new String[] { gcName, "-Xmx128M" }, M128); + checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:NewRatio=5" }, M128); + checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:NewSize=32M" }, M128); + checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:OldSize=96M" }, M128); + checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:MaxNewSize=32M" }, M32); + checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:NewSize=32M", "-XX:MaxNewSize=32M" }, M32); + checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:NewRatio=6", "-XX:MaxNewSize=32M" }, M32); + checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-Xms96M" }, M128); + checkMaxNewSize(new String[] { gcName, "-Xmx96M", "-Xms96M" }, M96); + checkMaxNewSize(new String[] { gcName, "-XX:NewSize=128M", "-XX:MaxNewSize=50M"}, M128); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/g1/TestHumongousAllocInitialMark.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/g1/TestHumongousAllocInitialMark.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
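The Pattern in getFlagValue above accepts both separators that -XX:+PrintFlagsFinal emits: "=" for default values and ":=" for values changed on the command line or by ergonomics. A small self-contained check of that parsing against a sample output line (the flag value below is made up):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class FlagValueSketch {
        public static void main(String[] args) {
            String sample = "    uintx MaxNewSize          := 87031808    {product}";
            Matcher m = Pattern.compile("MaxNewSize" + "\\s+:?=\\s+\\d+").matcher(sample);
            if (!m.find()) {
                throw new RuntimeException("Could not find value for flag MaxNewSize");
            }
            String match = m.group();
            // Same extraction step as getFlagValue: keep everything after the last space.
            System.out.println(match.substring(match.lastIndexOf(" ") + 1)); // 87031808
        }
    }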
+ */ + +/* + * @test TestHumongousAllocInitialMark + * @bug 7168848 + * @summary G1: humongous object allocations should initiate marking cycles when necessary + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.*; + +public class TestHumongousAllocInitialMark { + private static final int heapSize = 200; // MB + private static final int heapRegionSize = 1; // MB + private static final int initiatingHeapOccupancyPercent = 50; // % + + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UseG1GC", + "-Xms" + heapSize + "m", + "-Xmx" + heapSize + "m", + "-XX:G1HeapRegionSize=" + heapRegionSize + "m", + "-XX:InitiatingHeapOccupancyPercent=" + initiatingHeapOccupancyPercent, + "-XX:+PrintGC", + HumongousObjectAllocator.class.getName()); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("GC pause (G1 Humongous Allocation) (young) (initial-mark)"); + output.shouldNotContain("Full GC"); + output.shouldHaveExitValue(0); + } + + static class HumongousObjectAllocator { + private static byte[] dummy; + + public static void main(String [] args) throws Exception { + // Make object size 75% of region size + final int humongousObjectSize = + (int)(heapRegionSize * 1024 * 1024 * 0.75); + + // Number of objects to allocate to go above IHOP + final int humongousObjectAllocations = + (int)((heapSize * initiatingHeapOccupancyPercent / 100.0) / heapRegionSize) + 1; + + // Allocate + for (int i = 1; i <= humongousObjectAllocations; i++) { + System.out.println("Allocating humongous object " + i + "/" + humongousObjectAllocations + + " of size " + humongousObjectSize + " bytes"); + dummy = new byte[humongousObjectSize]; + } + } + } +} + diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/g1/TestHumongousCodeCacheRoots.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/g1/TestHumongousCodeCacheRoots.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
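The sizing arithmetic in HumongousObjectAllocator deserves a worked example. With the constants above (200 MB heap, 1 MB regions, 50% IHOP), each 0.75 MB object exceeds half a region and is therefore humongous, occupying a whole region; the 101 allocations thus cover roughly 101 MB of regions, just over the 100 MB initiating-occupancy threshold. A sketch that simply evaluates the same expressions:

    public class HumongousSizingSketch {
        public static void main(String[] args) {
            final int heapSize = 200;     // MB, as in the test
            final int heapRegionSize = 1; // MB
            final int ihop = 50;          // percent

            // 75% of a region: humongous, since it exceeds half a region.
            final int humongousObjectSize = (int) (heapRegionSize * 1024 * 1024 * 0.75);

            // Enough whole-region allocations to cross the IHOP threshold.
            final int humongousObjectAllocations =
                (int) ((heapSize * ihop / 100.0) / heapRegionSize) + 1;

            System.out.println(humongousObjectSize + " bytes per object");   // 786432
            System.out.println(humongousObjectAllocations + " allocations"); // 101
        }
    }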
+ */ + +/* + * @test + * @key regression + * @key gc + * @bug 8027756 + * @library /testlibrary /testlibrary/whitebox + * @build TestHumongousCodeCacheRoots + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @summary Humongous objects may have references from the code cache + * @run main TestHumongousCodeCacheRoots +*/ + +import com.oracle.java.testlibrary.*; +import sun.hotspot.WhiteBox; + +import java.util.ArrayList; +import java.util.Arrays; + +class TestHumongousCodeCacheRootsHelper { + + static final int n = 1000000; + static final int[] AA = new int[n]; + static final int[] BB = new int[n]; + + public static void main(String args[]) throws Exception { + // do some work so that the compiler compiles this method, inlining the + // reference to the integer array (which is a humongous object) into + // the code cache. + for(int i = 0; i < n; i++) { + AA[i] = 0; + BB[i] = 0; + } + // trigger a GC that checks that the verification code allows humongous + // objects with code cache roots; objects should all be live here. + System.gc(); + + // deoptimize everything: this should make all compiled code zombies. + WhiteBox wb = WhiteBox.getWhiteBox(); + wb.deoptimizeAll(); + + // trigger a GC that checks that the verification code allows humongous + // objects with code cache roots; objects should all be live here. + System.gc(); + + // wait a little for the code cache sweeper to try to clean up zombie nmethods + // and unregister the code roots. + try { Thread.sleep(5000); } catch (InterruptedException ex) { } + + // do some work on the arrays to make sure that they need to be live after the GCs + for(int i = 0; i < n; i++) { + AA[i] = 1; + BB[i] = 10; + } + + System.out.println(); + } +} + +public class TestHumongousCodeCacheRoots { + + /** + * Executes a class in a new VM process with the given parameters. + * @param vmargs Arguments to the VM to run + * @param classname Name of the class to run + * @param arguments Arguments to the class + * @param useTestDotJavaDotOpts Use test.java.opts as part of the VM argument string + * @return The OutputAnalyzer with the results for the invocation.
+ */ + public static OutputAnalyzer runWhiteBoxTest(String[] vmargs, String classname, String[] arguments, boolean useTestDotJavaDotOpts) throws Exception { + ArrayList<String> finalargs = new ArrayList<String>(); + + String[] whiteboxOpts = new String[] { + "-Xbootclasspath/a:.", + "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI", + "-cp", System.getProperty("java.class.path"), + }; + + if (useTestDotJavaDotOpts) { + // System.getProperty("test.java.opts") is '' if no options are set; + // we need to skip such a result + String[] externalVMOpts = new String[0]; + if (System.getProperty("test.java.opts") != null && System.getProperty("test.java.opts").length() != 0) { + externalVMOpts = System.getProperty("test.java.opts").split(" "); + } + finalargs.addAll(Arrays.asList(externalVMOpts)); + } + + finalargs.addAll(Arrays.asList(vmargs)); + finalargs.addAll(Arrays.asList(whiteboxOpts)); + finalargs.add(classname); + finalargs.addAll(Arrays.asList(arguments)); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0])); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + return output; + } + + public static void runTest(String compiler, String[] other) throws Exception { + ArrayList<String> joined = new ArrayList<String>(); + joined.add(compiler); + joined.addAll(Arrays.asList(other)); + runWhiteBoxTest(joined.toArray(new String[0]), TestHumongousCodeCacheRootsHelper.class.getName(), + new String[] {}, false); + } + + public static void main(String[] args) throws Exception { + final String[] baseArguments = new String[] { + "-XX:+UseG1GC", "-XX:G1HeapRegionSize=1M", "-Xmx100M", // make sure we get a humongous region + "-XX:+UnlockDiagnosticVMOptions", + "-XX:InitiatingHeapOccupancyPercent=1", // strong code root marking + "-XX:+G1VerifyHeapRegionCodeRoots", "-XX:+VerifyAfterGC", // make sure that verification is run + "-XX:NmethodSweepFraction=1", "-XX:NmethodSweepCheckInterval=1", // make the code cache sweep more predictable + }; + runTest("-client", baseArguments); + runTest("-server", baseArguments); + } +} + diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/startup_warnings/TestCMS.java --- a/test/gc/startup_warnings/TestCMS.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/gc/startup_warnings/TestCMS.java Thu Nov 21 15:04:54 2013 +0100 @@ -38,7 +38,7 @@ public static void main(String args[]) throws Exception { ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldNotContain("warning"); + output.shouldNotContain("deprecated"); output.shouldNotContain("error"); output.shouldHaveExitValue(0); } diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/startup_warnings/TestCMSForegroundFlags.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/startup_warnings/TestCMSForegroundFlags.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,52 @@ +/* +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. +*/ + +/* +* @test TestCMSForegroundFlags +* @key gc +* @bug 8027132 +* @summary Test that the deprecated CMS foreground collector flags print warning messages +* @library /testlibrary +* @run main TestCMSForegroundFlags -XX:-UseCMSCompactAtFullCollection UseCMSCompactAtFullCollection +* @run main TestCMSForegroundFlags -XX:CMSFullGCsBeforeCompaction=4 CMSFullGCsBeforeCompaction +* @run main TestCMSForegroundFlags -XX:-UseCMSCollectionPassing UseCMSCollectionPassing +*/ + +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.ProcessTools; + +public class TestCMSForegroundFlags { + public static void main(String[] args) throws Exception { + if (args.length != 2) { + throw new Exception("Expected two arguments: flagValue and flagName"); + } + String flagValue = args[0]; + String flagName = args[1]; + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flagValue, "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("warning: " + flagName + " is deprecated and will likely be removed in a future release."); + output.shouldNotContain("error"); + output.shouldHaveExitValue(0); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/startup_warnings/TestCMSNoIncrementalMode.java --- a/test/gc/startup_warnings/TestCMSNoIncrementalMode.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/gc/startup_warnings/TestCMSNoIncrementalMode.java Thu Nov 21 15:04:54 2013 +0100 @@ -37,7 +37,7 @@ public static void main(String args[]) throws Exception { ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:-CMSIncrementalMode", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldNotContain("warning"); + output.shouldNotContain("deprecated"); output.shouldNotContain("error"); output.shouldHaveExitValue(0); } diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/startup_warnings/TestG1.java --- a/test/gc/startup_warnings/TestG1.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/gc/startup_warnings/TestG1.java Thu Nov 21 15:04:54 2013 +0100 @@ -37,7 +37,7 @@ public static void main(String args[]) throws Exception { ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldNotContain("warning"); + output.shouldNotContain("deprecated"); output.shouldNotContain("error"); output.shouldHaveExitValue(0); } diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/startup_warnings/TestParNewCMS.java --- a/test/gc/startup_warnings/TestParNewCMS.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/gc/startup_warnings/TestParNewCMS.java Thu Nov 21 15:04:54 2013 +0100 @@ -38,7 +38,7 @@ public static void main(String args[]) throws Exception { ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldNotContain("warning"); + output.shouldNotContain("deprecated");
output.shouldNotContain("error"); output.shouldHaveExitValue(0); } diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/startup_warnings/TestParallelGC.java --- a/test/gc/startup_warnings/TestParallelGC.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/gc/startup_warnings/TestParallelGC.java Thu Nov 21 15:04:54 2013 +0100 @@ -38,7 +38,7 @@ public static void main(String args[]) throws Exception { ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParallelGC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldNotContain("warning"); + output.shouldNotContain("deprecated"); output.shouldNotContain("error"); output.shouldHaveExitValue(0); } diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/startup_warnings/TestParallelScavengeSerialOld.java --- a/test/gc/startup_warnings/TestParallelScavengeSerialOld.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/gc/startup_warnings/TestParallelScavengeSerialOld.java Thu Nov 21 15:04:54 2013 +0100 @@ -38,7 +38,7 @@ public static void main(String args[]) throws Exception { ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParallelGC", "-XX:-UseParallelOldGC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldNotContain("warning"); + output.shouldNotContain("deprecated"); output.shouldNotContain("error"); output.shouldHaveExitValue(0); } diff -r 790ebab62d23 -r f9f4503a4ab5 test/gc/startup_warnings/TestSerialGC.java --- a/test/gc/startup_warnings/TestSerialGC.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/gc/startup_warnings/TestSerialGC.java Thu Nov 21 15:04:54 2013 +0100 @@ -38,7 +38,7 @@ public static void main(String args[]) throws Exception { ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseSerialGC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldNotContain("warning"); + output.shouldNotContain("deprecated"); output.shouldNotContain("error"); output.shouldHaveExitValue(0); } diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/6888954/vmerrors.sh --- a/test/runtime/6888954/vmerrors.sh Thu Nov 21 15:04:26 2013 +0100 +++ b/test/runtime/6888954/vmerrors.sh Thu Nov 21 15:04:54 2013 +0100 @@ -1,3 +1,25 @@ +# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + # @test # @bug 6888954 # @bug 8015884 @@ -63,6 +85,7 @@ [ $i -lt 10 ] && i2=0$i "$TESTJAVA/bin/java" $TESTVMOPTS -XX:+IgnoreUnrecognizedVMOptions \ + -XX:-TransmitErrorReport \ -XX:ErrorHandlerTest=${i} -version > ${i2}.out 2>&1 # If ErrorHandlerTest is ignored (product build), stop. 
diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/8024804/RegisterNatives.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/8024804/RegisterNatives.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8024804 + * @summary registerNatives() interface resolution should receive IAE + * @run main RegisterNatives + */ +public class RegisterNatives { + interface I { void registerNatives(); } + interface J extends I {} + static class B implements J { public void registerNatives() { System.out.println("B"); } } + public static void main(String... args) { + System.out.println("Regression test for JDK-8024804, crash when InterfaceMethodref resolves to Object.registerNatives\n"); + J val = new B(); + try { + val.registerNatives(); + } catch (IllegalAccessError e) { + System.out.println("TEST PASSES - according to current JVM spec, IAE expected\n"); + return; + } + System.out.println("TEST FAILS - no IAE resulted\n"); + System.exit(1); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/8026365/InvokeSpecialAnonTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/8026365/InvokeSpecialAnonTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8026365 + * @summary Test invokespecial of host class method from an anonymous class + * @author Robert Field + * @library /testlibrary + * @compile -XDignore.symbol.file InvokeSpecialAnonTest.java + * @run main ClassFileInstaller InvokeSpecialAnonTest AnonTester + * @run main/othervm -Xbootclasspath/a:. -Xverify:all InvokeSpecialAnonTest + */ +import jdk.internal.org.objectweb.asm.*; +import java.lang.reflect.Constructor; +import sun.misc.Unsafe; + +public class InvokeSpecialAnonTest implements Opcodes { + + static byte[] anonClassBytes() throws Exception { + ClassWriter cw = new ClassWriter(0); + MethodVisitor mv; + + cw.visit(V1_8, ACC_FINAL + ACC_SUPER, "Anon", null, "java/lang/Object", null); + + { + mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null); + mv.visitCode(); + mv.visitVarInsn(ALOAD, 0); + mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V"); + mv.visitInsn(RETURN); + mv.visitMaxs(2, 2); + mv.visitEnd(); + } + { + mv = cw.visitMethod(ACC_PUBLIC, "m", "(LInvokeSpecialAnonTest;)I", null, null); + mv.visitCode(); + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitMethodInsn(INVOKESPECIAL, "InvokeSpecialAnonTest", "privMethod", "()I"); + mv.visitInsn(IRETURN); + mv.visitMaxs(2, 3); + mv.visitEnd(); + } + cw.visitEnd(); + + return cw.toByteArray(); + } + + private int privMethod() { return 1234; } + + public static void main(String[] args) throws Exception { + Class<?> klass = InvokeSpecialAnonTest.class; + try { + Class<?> result = AnonTester.defineTest(klass, anonClassBytes()); + System.out.println("Passed."); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } +} + + +class AnonTester { + private static final Unsafe UNSAFE = Unsafe.getUnsafe(); + + public static Class<?> defineTest(Class<?> targetClass, byte[] classBytes) throws Exception { + return UNSAFE.defineAnonymousClass(targetClass, classBytes, null); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/8026394/InterfaceObjectTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/8026394/InterfaceObjectTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ */ + +/* + * @test + * @bug 8026394 + * @summary clone() and finalize() interface resolution should not receive IAE + * @run main InterfaceObjectTest + */ +interface IClone extends Cloneable { + void finalize() throws Throwable; + Object clone(); +} + +interface ICloneExtend extends IClone { } + +public class InterfaceObjectTest implements ICloneExtend { + + public Object clone() { + System.out.println("In InterfaceObjectTest's clone() method\n"); + return null; + } + + public void finalize() throws Throwable { + try { + System.out.println("In InterfaceObjectTest's finalize() method\n"); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + public static void tryIt(ICloneExtend o1) { + try { + Object o2 = o1.clone(); + o1.finalize(); + } catch (Throwable t) { + if (t instanceof IllegalAccessError) { + System.out.println("TEST FAILS - IAE resulted\n"); + System.exit(1); + } + } + } + + public static void main(String[] args) { + InterfaceObjectTest o1 = new InterfaceObjectTest(); + tryIt(o1); + System.out.println("TEST PASSES - no IAE resulted\n"); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/CommandLine/PrintGCApplicationConcurrentTime.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/CommandLine/PrintGCApplicationConcurrentTime.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + /* + * @test + * @bug 8026041 + * @run main/othervm -XX:+PrintGCApplicationConcurrentTime -Xcomp PrintGCApplicationConcurrentTime + */ + +public class PrintGCApplicationConcurrentTime { + + public static void main(String args[]) throws Exception { + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/CompressedOops/CompressedClassPointers.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/CompressedOops/CompressedClassPointers.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8024927 + * @summary Testing address of compressed class pointer space as best as possible. + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.*; + +public class CompressedClassPointers { + + public static void smallHeapTest() throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedBaseAddress=8g", + "-Xmx128m", + "-XX:+PrintCompressedOopsMode", + "-XX:+VerifyBeforeGC", "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Narrow klass base: 0x0000000000000000"); + output.shouldHaveExitValue(0); + } + + public static void smallHeapTestWith3G() throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:CompressedClassSpaceSize=3g", + "-Xmx128m", + "-XX:+PrintCompressedOopsMode", + "-XX:+VerifyBeforeGC", "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Narrow klass base: 0x0000000000000000, Narrow klass shift: 3"); + output.shouldHaveExitValue(0); + } + + public static void largeHeapTest() throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-Xmx30g", + "-XX:+PrintCompressedOopsMode", + "-XX:+VerifyBeforeGC", "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("Narrow klass base: 0x0000000000000000"); + output.shouldContain("Narrow klass shift: 0"); + output.shouldHaveExitValue(0); + } + + public static void largePagesTest() throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-Xmx128m", + "-XX:+UseLargePages", + "-XX:+PrintCompressedOopsMode", + "-XX:+VerifyBeforeGC", "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Narrow klass base:"); + output.shouldHaveExitValue(0); + } + + public static void sharingTest() throws Exception { + // Test small heaps + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./sample.jsa", + "-Xmx128m", + "-XX:SharedBaseAddress=8g", + "-XX:+PrintCompressedOopsMode", + "-XX:+VerifyBeforeGC", + "-Xshare:dump"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + try { + output.shouldContain("Loading classes to share"); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./sample.jsa", + "-Xmx128m", + "-XX:SharedBaseAddress=8g", + "-XX:+PrintCompressedOopsMode", + "-Xshare:on", + "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("sharing"); + output.shouldHaveExitValue(0); + + } catch (RuntimeException e) { + output.shouldContain("Unable to use shared archive"); + output.shouldHaveExitValue(1); + } + } + + public static void main(String[] args) throws Exception { + 
if (!Platform.is64bit()) { + // Can't test this on 32 bit, just pass + System.out.println("Skipping test on 32bit"); + return; + } + // Solaris 10 can't mmap compressed oops space without a base + if (Platform.isSolaris()) { + String name = System.getProperty("os.version"); + if (name.equals("5.10")) { + System.out.println("Skipping test on Solaris 10"); + return; + } + } + smallHeapTest(); + smallHeapTestWith3G(); + largeHeapTest(); + largePagesTest(); + sharingTest(); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/memory/LargePages/TestLargePagesFlags.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/memory/LargePages/TestLargePagesFlags.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* @test TestLargePagesFlags + * @summary Tests how large pages are chosen depending on the given large pages flag combinations. + * @library /testlibrary + * @run main TestLargePagesFlags + */ + +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.Platform; +import com.oracle.java.testlibrary.ProcessTools; +import java.util.ArrayList; + +public class TestLargePagesFlags { + + public static void main(String [] args) throws Exception { + if (!Platform.isLinux()) { + System.out.println("Skipping. TestLargePagesFlags has only been implemented for Linux."); + return; + } + + testUseTransparentHugePages(); + testUseHugeTLBFS(); + testUseSHM(); + testCombinations(); + } + + public static void testUseTransparentHugePages() throws Exception { + if (!canUse(UseTransparentHugePages(true))) { + System.out.println("Skipping testUseTransparentHugePages"); + return; + } + + // -XX:-UseLargePages overrides all other flags. + new FlagTester() + .use(UseLargePages(false), + UseTransparentHugePages(true)) + .expect( + UseLargePages(false), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(false)); + + // Explicitly turn on UseTransparentHugePages. + new FlagTester() + .use(UseTransparentHugePages(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(true), + UseHugeTLBFS(false), + UseSHM(false)); + + new FlagTester() + .use(UseLargePages(true), + UseTransparentHugePages(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(true), + UseHugeTLBFS(false), + UseSHM(false)); + + // Setting a specific large pages flag will turn + // off heuristics to choose large pages type.
+ new FlagTester() + .use(UseLargePages(true), + UseTransparentHugePages(false)) + .expect( + UseLargePages(false), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(false)); + + // Don't turn on UseTransparentHugePages + // unless the user explicitly asks for them. + new FlagTester() + .use(UseLargePages(true)) + .expect( + UseTransparentHugePages(false)); + } + + public static void testUseHugeTLBFS() throws Exception { + if (!canUse(UseHugeTLBFS(true))) { + System.out.println("Skipping testUseHugeTLBFS"); + return; + } + + // -XX:-UseLargePages overrides all other flags. + new FlagTester() + .use(UseLargePages(false), + UseHugeTLBFS(true)) + .expect( + UseLargePages(false), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(false)); + + // Explicitly turn on UseHugeTLBFS. + new FlagTester() + .use(UseHugeTLBFS(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(false), + UseHugeTLBFS(true), + UseSHM(false)); + + new FlagTester() + .use(UseLargePages(true), + UseHugeTLBFS(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(false), + UseHugeTLBFS(true), + UseSHM(false)); + + // Setting a specific large pages flag will turn + // off heuristics to choose large pages type. + new FlagTester() + .use(UseLargePages(true), + UseHugeTLBFS(false)) + .expect( + UseLargePages(false), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(false)); + + // Using UseLargePages will default to UseHugeTLBFS large pages. + new FlagTester() + .use(UseLargePages(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(false), + UseHugeTLBFS(true), + UseSHM(false)); + } + + public static void testUseSHM() throws Exception { + if (!canUse(UseSHM(true))) { + System.out.println("Skipping testUseSHM"); + return; + } + + // -XX:-UseLargePages overrides all other flags. + new FlagTester() + .use(UseLargePages(false), + UseSHM(true)) + .expect( + UseLargePages(false), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(false)); + + // Explicitly turn on UseSHM. + new FlagTester() + .use(UseSHM(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(true)) ; + + new FlagTester() + .use(UseLargePages(true), + UseSHM(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(true)) ; + + // Setting a specific large pages flag will turn + // off heuristics to choose large pages type. + new FlagTester() + .use(UseLargePages(true), + UseSHM(false)) + .expect( + UseLargePages(false), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(false)); + + // Setting UseLargePages can allow the system to choose + // UseHugeTLBFS instead of UseSHM, but never UseTransparentHugePages. + new FlagTester() + .use(UseLargePages(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(false)); + } + + public static void testCombinations() throws Exception { + if (!canUse(UseSHM(true)) || !canUse(UseHugeTLBFS(true))) { + System.out.println("Skipping testUseHugeTLBFSAndUseSHMCombination"); + return; + } + + // UseHugeTLBFS takes precedence over SHM. 
+ + new FlagTester() + .use(UseLargePages(true), + UseHugeTLBFS(true), + UseSHM(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(false), + UseHugeTLBFS(true), + UseSHM(false)); + + new FlagTester() + .use(UseLargePages(true), + UseHugeTLBFS(false), + UseSHM(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(true)); + + new FlagTester() + .use(UseLargePages(true), + UseHugeTLBFS(true), + UseSHM(false)) + .expect( + UseLargePages(true), + UseTransparentHugePages(false), + UseHugeTLBFS(true), + UseSHM(false)); + + new FlagTester() + .use(UseLargePages(true), + UseHugeTLBFS(false), + UseSHM(false)) + .expect( + UseLargePages(false), + UseTransparentHugePages(false), + UseHugeTLBFS(false), + UseSHM(false)); + + + if (!canUse(UseTransparentHugePages(true))) { + return; + } + + // UseTransparentHugePages takes precedence. + + new FlagTester() + .use(UseLargePages(true), + UseTransparentHugePages(true), + UseHugeTLBFS(true), + UseSHM(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(true), + UseHugeTLBFS(false), + UseSHM(false)); + + new FlagTester() + .use(UseTransparentHugePages(true), + UseHugeTLBFS(true), + UseSHM(true)) + .expect( + UseLargePages(true), + UseTransparentHugePages(true), + UseHugeTLBFS(false), + UseSHM(false)); + } + + private static class FlagTester { + private Flag [] useFlags; + + public FlagTester use(Flag... useFlags) { + this.useFlags = useFlags; + return this; + } + + public void expect(Flag... expectedFlags) throws Exception { + if (useFlags == null) { + throw new IllegalStateException("Must run use() before expect()"); + } + + OutputAnalyzer output = executeNewJVM(useFlags); + + for (Flag flag : expectedFlags) { + System.out.println("Looking for: " + flag.flagString()); + String strValue = output.firstMatch(".* " + flag.name() + " .* :?= (\\S+).*", 1); + + if (strValue == null) { + throw new RuntimeException("Flag " + flag.name() + " couldn't be found"); + } + + if (!flag.value().equals(strValue)) { + throw new RuntimeException("Wrong value for: " + flag.name() + + " expected: " + flag.value() + + " got: " + strValue); + } + } + + output.shouldHaveExitValue(0); + } + } + + private static OutputAnalyzer executeNewJVM(Flag... flags) throws Exception { + ArrayList<String> args = new ArrayList<>(); + for (Flag flag : flags) { + args.add(flag.flagString()); + } + args.add("-XX:+PrintFlagsFinal"); + args.add("-version"); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[args.size()])); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + return output; + } + + private static boolean canUse(Flag flag) { + try { + new FlagTester().use(flag).expect(flag); + } catch (Exception e) { + return false; + } + + return true; + } + + private static Flag UseLargePages(boolean value) { + return new BooleanFlag("UseLargePages", value); + } + + private static Flag UseTransparentHugePages(boolean value) { + return new BooleanFlag("UseTransparentHugePages", value); + } + + private static Flag UseHugeTLBFS(boolean value) { + return new BooleanFlag("UseHugeTLBFS", value); + } + + private static Flag UseSHM(boolean value) { + return new BooleanFlag("UseSHM", value); + } + + private static class BooleanFlag implements Flag { + private String name; + private boolean value; + + BooleanFlag(String name, boolean value) { + this.name = name; + this.value = value; + } + + public String flagString() { + return "-XX:" + (value ?
"+" : "-") + name; + } + + public String name() { + return name; + } + + public String value() { + return Boolean.toString(value); + } + } + + private static interface Flag { + public String flagString(); + public String name(); + public String value(); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/memory/ReadFromNoaccessArea.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/memory/ReadFromNoaccessArea.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test that touching noaccess area in class ReservedHeapSpace results in SIGSEGV/ACCESS_VIOLATION + * @library /testlibrary /testlibrary/whitebox + * @build ReadFromNoaccessArea + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main ReadFromNoaccessArea + */ + +import com.oracle.java.testlibrary.*; +import sun.hotspot.WhiteBox; + +public class ReadFromNoaccessArea { + + public static void main(String args[]) throws Exception { + if (!Platform.is64bit()) { + System.out.println("ReadFromNoaccessArea tests is useful only on 64bit architecture. Passing silently."); + return; + } + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-Xbootclasspath/a:.", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+WhiteBoxAPI", + "-XX:+UseCompressedOops", + "-XX:HeapBaseMinAddress=33G", + DummyClassWithMainTryingToReadFromNoaccessArea.class.getName()); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + System.out.println("******* Printing stdout for analysis in case of failure *******"); + System.out.println(output.getStdout()); + System.out.println("******* Printing stderr for analysis in case of failure *******"); + System.out.println(output.getStderr()); + System.out.println("***************************************************************"); + if (output.getStdout() != null && output.getStdout().contains("WB_ReadFromNoaccessArea method is useless")) { + // Test conditions broken. There is no protected page in ReservedHeapSpace in these circumstances. Silently passing test. 
+ return; + } + if (Platform.isWindows()) { + output.shouldContain("EXCEPTION_ACCESS_VIOLATION"); + } else if (Platform.isOSX()) { + output.shouldContain("SIGBUS"); + } else { + output.shouldContain("SIGSEGV"); + } + } + + public static class DummyClassWithMainTryingToReadFromNoaccessArea { + + // This method calls the whitebox method that reads from the noaccess area + public static void main(String args[]) throws Exception { + WhiteBox.getWhiteBox().readFromNoaccessArea(); + throw new Exception("Call of readFromNoaccessArea succeeded! This is wrong. Crash expected. Test failed."); + } + } + +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/memory/ReserveMemory.java --- a/test/runtime/memory/ReserveMemory.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/runtime/memory/ReserveMemory.java Thu Nov 21 15:04:54 2013 +0100 @@ -56,6 +56,7 @@ "-Xbootclasspath/a:.", "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI", + "-XX:-TransmitErrorReport", "ReserveMemory", "test"); diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/memory/RunUnitTestsConcurrently.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/memory/RunUnitTestsConcurrently.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test launches unit tests inside the VM concurrently + * @library /testlibrary /testlibrary/whitebox + * @build RunUnitTestsConcurrently + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:.
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI RunUnitTestsConcurrently 30 15000 + */ + +import com.oracle.java.testlibrary.*; +import sun.hotspot.WhiteBox; + +public class RunUnitTestsConcurrently { + + private static WhiteBox wb; + private static long timeout; + private static long timeStamp; + + public static class Worker implements Runnable { + @Override + public void run() { + while (System.currentTimeMillis() - timeStamp < timeout) { + WhiteBox.getWhiteBox().runMemoryUnitTests(); + } + } + } + + public static void main(String[] args) throws InterruptedException { + if (!Platform.isDebugBuild() || !Platform.is64bit()) { + return; + } + wb = WhiteBox.getWhiteBox(); + System.out.println("Starting threads"); + + int threads = Integer.valueOf(args[0]); + timeout = Long.valueOf(args[1]); + + timeStamp = System.currentTimeMillis(); + + Thread[] threadsArray = new Thread[threads]; + for (int i = 0; i < threads; i++) { + threadsArray[i] = new Thread(new Worker()); + threadsArray[i].start(); + } + for (int i = 0; i < threads; i++) { + threadsArray[i].join(); + } + + System.out.println("Quitting test."); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/runtime/memory/StressVirtualSpaceResize.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/memory/StressVirtualSpaceResize.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Stress test that expands/shrinks VirtualSpace + * @library /testlibrary /testlibrary/whitebox + * @build StressVirtualSpaceResize + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI StressVirtualSpaceResize + */ + +import sun.hotspot.WhiteBox; + +public class StressVirtualSpaceResize { + + public static void main(String args[]) throws Exception { + if (WhiteBox.getWhiteBox().stressVirtualSpaceResize(1000, 0xffffL, 0xffffL) != 0) + throw new RuntimeException("Whitebox method stressVirtualSpaceResize returned a non-zero exit code"); + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapProc.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapProc.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.management.ManagementFactory; +import java.lang.management.RuntimeMXBean; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.List; + +import sun.management.VMManagement; + +public class JMapHProfLargeHeapProc { + private static final List<byte[]> heapGarbage = new ArrayList<>(); + + public static void main(String[] args) throws Exception { + + buildLargeHeap(args); + + // Print our pid on stdout + System.out.println("PID[" + getProcessId() + "]"); + + // Wait for input before termination + System.in.read(); + } + + private static void buildLargeHeap(String[] args) { + for (long i = 0; i < Integer.parseInt(args[0]); i++) { + heapGarbage.add(new byte[1024]); + } + } + + public static int getProcessId() throws Exception { + + // Get the current process id using a reflection hack + RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean(); + Field jvm = runtime.getClass().getDeclaredField("jvm"); + + jvm.setAccessible(true); + VMManagement mgmt = (sun.management.VMManagement) jvm.get(runtime); + + Method pid_method = mgmt.getClass().getDeclaredMethod("getProcessId"); + + pid_method.setAccessible(true); + + int pid = (Integer) pid_method.invoke(mgmt); + + return pid; + } + +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java Thu Nov 21 15:04:54 2013 +0100 @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ */ + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.io.Reader; +import java.nio.CharBuffer; +import java.util.Arrays; +import java.util.Scanner; + +import com.oracle.java.testlibrary.Asserts; +import com.oracle.java.testlibrary.JDKToolFinder; +import com.oracle.java.testlibrary.JDKToolLauncher; +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.Platform; +import com.oracle.java.testlibrary.ProcessTools; + +/* + * @test + * @bug 6313383 + * @key regression + * @summary Regression test for hprof export issue due to large heaps (>2G) + * @library /testlibrary + * @compile JMapHProfLargeHeapProc.java + * @run main JMapHProfLargeHeapTest + */ + +public class JMapHProfLargeHeapTest { + private static final String HEAP_DUMP_FILE_NAME = "heap.hprof"; + private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1"; + private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2"; + private static final long M = 1024L; + private static final long G = 1024L * M; + + public static void main(String[] args) throws Exception { + // If we are on MacOSX, test if the jmap tool is signed; otherwise return, + // since the test will fail with a privilege error. + if (Platform.isOSX()) { + String jmapToolPath = JDKToolFinder.getTestJDKTool("jmap"); + ProcessBuilder codesignProcessBuilder = new ProcessBuilder( + "codesign", "-v", jmapToolPath); + Process codesignProcess = codesignProcessBuilder.start(); + OutputAnalyzer analyser = new OutputAnalyzer(codesignProcess); + try { + analyser.shouldNotContain("code object is not signed at all"); + System.out.println("Signed jmap found at: " + jmapToolPath); + } catch (Exception e) { + // Abort since we can't know if the test will work + System.out + .println("Test aborted since we are on MacOSX and the jmap tool is not signed."); + return; + } + } + + // Small heap 22 megabytes, should create 1.0.1 file format + testHProfFileFormat("-Xmx1g", 22 * M, HPROF_HEADER_1_0_1); + + /** + * This test was deliberately commented out since the test system lacks + * support to handle the requirements for this kind of heap size in a + * good way. If or when it becomes possible to run this kind of test in + * the test environment the test should be enabled again.
+ * */ + // Large heap 2.2 gigabytes, should create 1.0.2 file format + // testHProfFileFormat("-Xmx4g", 2 * G + 2 * M, HPROF_HEADER_1_0_2); + } + + private static void testHProfFileFormat(String vmArgs, long heapSize, + String expectedFormat) throws Exception, IOException, + InterruptedException, FileNotFoundException { + ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder( + vmArgs, "JMapHProfLargeHeapProc", String.valueOf(heapSize)); + procBuilder.redirectError(ProcessBuilder.Redirect.INHERIT); + Process largeHeapProc = procBuilder.start(); + + try (Scanner largeHeapScanner = new Scanner( + largeHeapProc.getInputStream());) { + String pidstring = null; + while ((pidstring = largeHeapScanner.findInLine("PID\\[[0-9].*\\]")) == null) { + Thread.sleep(500); + } + int pid = Integer.parseInt(pidstring.substring(4, + pidstring.length() - 1)); + System.out.println("Extracted pid: " + pid); + + JDKToolLauncher jMapLauncher = JDKToolLauncher + .createUsingTestJDK("jmap"); + jMapLauncher.addToolArg("-dump:format=b,file=" + pid + "-" + + HEAP_DUMP_FILE_NAME); + jMapLauncher.addToolArg(String.valueOf(pid)); + + ProcessBuilder jMapProcessBuilder = new ProcessBuilder( + jMapLauncher.getCommand()); + System.out.println("jmap command: " + + Arrays.toString(jMapLauncher.getCommand())); + + Process jMapProcess = jMapProcessBuilder.start(); + OutputAnalyzer analyzer = new OutputAnalyzer(jMapProcess); + analyzer.shouldHaveExitValue(0); + analyzer.shouldContain(pid + "-" + HEAP_DUMP_FILE_NAME); + analyzer.shouldContain("Heap dump file created"); + + largeHeapProc.getOutputStream().write('\n'); + + File dumpFile = new File(pid + "-" + HEAP_DUMP_FILE_NAME); + Asserts.assertTrue(dumpFile.exists(), "Heap dump file not found."); + + try (Reader reader = new BufferedReader(new FileReader(dumpFile))) { + CharBuffer buf = CharBuffer.allocate(expectedFormat.length()); + reader.read(buf); + buf.clear(); + Asserts.assertEQ(buf.toString(), expectedFormat, + "Wrong file format. Expected '" + expectedFormat + + "', but found '" + buf.toString() + "'"); + } + + System.out.println("Success!"); + + } finally { + largeHeapProc.destroyForcibly(); + } + } +} diff -r 790ebab62d23 -r f9f4503a4ab5 test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java --- a/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java Thu Nov 21 15:04:26 2013 +0100 +++ b/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java Thu Nov 21 15:04:54 2013 +0100 @@ -23,20 +23,17 @@ package com.oracle.java.testlibrary; -import java.util.List; import java.util.ArrayList; import java.util.Arrays; - -import com.oracle.java.testlibrary.JDKToolFinder; -import com.oracle.java.testlibrary.ProcessTools; +import java.util.List; /** * A utility for constructing command lines for starting JDK tool processes. * * The JDKToolLauncher can in particular be combined with a - * java.lang.ProcessBuilder to easily run a JDK tool. For example, the - * following code run {@code jmap -heap} against a process with GC logging - * turned on for the {@code jmap} process: + * java.lang.ProcessBuilder to easily run a JDK tool. For example, the following + * code runs {@code jmap -heap} against a process with GC logging turned on for + * the {@code jmap} process: + *
      * <pre>
      * {@code
    @@ -55,19 +52,37 @@
         private final List<String> vmArgs = new ArrayList<String>();
         private final List<String> toolArgs = new ArrayList<String>();
     
    -    private JDKToolLauncher(String tool) {
    -        executable = JDKToolFinder.getJDKTool(tool);
    +    private JDKToolLauncher(String tool, boolean useCompilerJDK) {
    +        if (useCompilerJDK) {
    +            executable = JDKToolFinder.getJDKTool(tool);
    +        } else {
    +            executable = JDKToolFinder.getTestJDKTool(tool);
    +        }
             vmArgs.addAll(Arrays.asList(ProcessTools.getPlatformSpecificVMArgs()));
         }
     
         /**
    -     * Creates a new JDKToolLauncher for the specified tool.
    +     * Creates a new JDKToolLauncher for the specified tool, using the
    +     * tools path from the compiler JDK.
          *
    -     * @param tool The name of the tool
    +     * @param tool
    +     *            The name of the tool
          * @return A new JDKToolLauncher
          */
         public static JDKToolLauncher create(String tool) {
    -        return new JDKToolLauncher(tool);
    +        return new JDKToolLauncher(tool, true);
    +    }
    +
    +    /**
    +     * Creates a new JDKToolLauncher for the specified tool in the tested JDK.
    +     *
    +     * @param tool
    +     *            The name of the tool
    +     *
    +     * @return A new JDKToolLauncher
    +     */
    +    public static JDKToolLauncher createUsingTestJDK(String tool) {
    +        return new JDKToolLauncher(tool, false);
         }
     
         /**
    @@ -80,18 +95,20 @@
          * automatically added.
          *
          *
    -     * @param arg The argument to VM running the tool
    +     * @param arg
    +     *            The argument to the VM running the tool
          * @return The JDKToolLauncher instance
          */
         public JDKToolLauncher addVMArg(String arg) {
    -        vmArgs.add("-J" + arg);
    +        vmArgs.add(arg);
             return this;
         }
     
         /**
          * Adds an argument to the tool.
          *
    -     * @param arg The argument to the tool
    +     * @param arg
    +     *            The argument to the tool
          * @return The JDKToolLauncher instance
          */
         public JDKToolLauncher addToolArg(String arg) {
    @@ -107,7 +124,10 @@
         public String[] getCommand() {
             List<String> command = new ArrayList<String>();
             command.add(executable);
    -        command.addAll(vmArgs);
    +        // Add -J in front of all vmArgs
    +        for (String arg : vmArgs) {
    +            command.add("-J" + arg);
    +        }
             command.addAll(toolArgs);
             return command.toArray(new String[command.size()]);
         }
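
For illustration, a minimal usage sketch of the revised launcher (hypothetical code, not part of this changeset; the tool name "jmap" and the pid value are assumptions): createUsingTestJDK() resolves the tool from the JDK under test, addVMArg() now stores the argument bare, and getCommand() prepends "-J" to each stored VM argument when the final command line is built.

    // Hypothetical usage sketch, not part of this changeset; "jmap" and pid
    // are illustrative assumptions.
    JDKToolLauncher jmap = JDKToolLauncher.createUsingTestJDK("jmap");
    jmap.addVMArg("-Xmx128m");            // stored as-is; getCommand() adds the "-J" prefix
    jmap.addToolArg("-heap");
    jmap.addToolArg(String.valueOf(pid)); // pid of the target process, assumed known
    ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
    OutputAnalyzer output = new OutputAnalyzer(pb.start());
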
    diff -r 790ebab62d23 -r f9f4503a4ab5 test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
    --- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Nov 21 15:04:26 2013 +0100
    +++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Nov 21 15:04:54 2013 +0100
    @@ -144,4 +144,10 @@
     
       // force Full GC
       public native void fullGC();
    +
    +  // Tests on ReservedSpace/VirtualSpace classes
    +  public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
    +  public native void runMemoryUnitTests();
    +  public native void readFromNoaccessArea();
    +
     }
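
As a usage note, the WhiteBox entry points added above are exercised by the memory tests earlier in this changeset; a minimal sketch, assuming the VM is started with -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI as those tests do:

    // Minimal sketch mirroring StressVirtualSpaceResize and
    // RunUnitTestsConcurrently above; assumes the WhiteBox API is enabled.
    WhiteBox wb = WhiteBox.getWhiteBox();
    wb.runMemoryUnitTests();
    if (wb.stressVirtualSpaceResize(1000, 0xffffL, 0xffffL) != 0) {
        throw new RuntimeException("stressVirtualSpaceResize returned a non-zero exit code");
    }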