comparison src/os/linux/vm/os_linux.hpp @ 12176:88c255656030

8016155: SIGBUS when running Kitchensink with ParallelScavenge and ParallelOld. Summary: When using NUMA and large pages we need to ease the requirement on which node the memory should be allocated on. To avoid the SIGBUS we now use the memory policy MPOL_PREFERRED, which prefers a certain node, instead of MPOL_BIND, which requires a certain node. Reviewed-by: jmasa, pliden. Contributed-by: stefan.johansson@oracle.com
author mgerdin
date Thu, 22 Aug 2013 10:50:41 +0200
parents a837fa3d3f86
children 0d59407e7e09
comparison
equal deleted inserted replaced
12115:ec145d04eda8 12176:88c255656030
219 typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen); 219 typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
220 typedef int (*numa_max_node_func_t)(void); 220 typedef int (*numa_max_node_func_t)(void);
221 typedef int (*numa_available_func_t)(void); 221 typedef int (*numa_available_func_t)(void);
222 typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node); 222 typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
223 typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask); 223 typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
224 typedef void (*numa_set_bind_policy_func_t)(int policy);
224 225
225 static sched_getcpu_func_t _sched_getcpu; 226 static sched_getcpu_func_t _sched_getcpu;
226 static numa_node_to_cpus_func_t _numa_node_to_cpus; 227 static numa_node_to_cpus_func_t _numa_node_to_cpus;
227 static numa_max_node_func_t _numa_max_node; 228 static numa_max_node_func_t _numa_max_node;
228 static numa_available_func_t _numa_available; 229 static numa_available_func_t _numa_available;
229 static numa_tonode_memory_func_t _numa_tonode_memory; 230 static numa_tonode_memory_func_t _numa_tonode_memory;
230 static numa_interleave_memory_func_t _numa_interleave_memory; 231 static numa_interleave_memory_func_t _numa_interleave_memory;
232 static numa_set_bind_policy_func_t _numa_set_bind_policy;
231 static unsigned long* _numa_all_nodes; 233 static unsigned long* _numa_all_nodes;
232 234
233 static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; } 235 static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
234 static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; } 236 static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
235 static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; } 237 static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
236 static void set_numa_available(numa_available_func_t func) { _numa_available = func; } 238 static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
237 static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; } 239 static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
238 static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; } 240 static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
241 static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
239 static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; } 242 static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
240 static int sched_getcpu_syscall(void); 243 static int sched_getcpu_syscall(void);
241 public: 244 public:
242 static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; } 245 static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
243 static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) { 246 static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
249 return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1; 252 return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
250 } 253 }
251 static void numa_interleave_memory(void *start, size_t size) { 254 static void numa_interleave_memory(void *start, size_t size) {
252 if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) { 255 if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
253 _numa_interleave_memory(start, size, _numa_all_nodes); 256 _numa_interleave_memory(start, size, _numa_all_nodes);
257 }
258 }
259 static void numa_set_bind_policy(int policy) {
260 if (_numa_set_bind_policy != NULL) {
261 _numa_set_bind_policy(policy);
254 } 262 }
255 } 263 }
256 static int get_node_by_cpu(int cpu_id); 264 static int get_node_by_cpu(int cpu_id);
257 }; 265 };
258 266