1 /*
2 * Copyright © 2009 CNRS
3 * Copyright © 2009-2016 Inria. All rights reserved.
4 * Copyright © 2009-2010, 2012 Université Bordeaux
5 * Copyright © 2011-2015 Cisco Systems, Inc. All rights reserved.
6 * See COPYING in top-level directory.
7 */
8
9 #include <private/autogen/config.h>
10 #include <hwloc.h>
11 #include <private/private.h>
12 #include <hwloc/helper.h>
13 #ifdef HAVE_SYS_MMAN_H
14 # include <sys/mman.h>
15 #endif
16 /* <malloc.h> is only needed if we don't have posix_memalign() */
17 #if defined(hwloc_getpagesize) && !defined(HAVE_POSIX_MEMALIGN) && defined(HAVE_MEMALIGN) && defined(HAVE_MALLOC_H)
18 #include <malloc.h>
19 #endif
20 #ifdef HAVE_UNISTD_H
21 #include <unistd.h>
22 #endif
23 #include <stdlib.h>
24 #include <errno.h>
25
26 /* TODO: HWLOC_GNU_SYS, HWLOC_IRIX_SYS,
27 *
 * IRIX: see MP_MUSTRUN / _DSM_MUSTRUN, pthread_setrunon_np, /hw, process_cpulink, numa_create
29 *
30 * We could use glibc's sched_setaffinity generically when it is available
31 *
32 * Darwin and OpenBSD don't seem to have binding facilities.
33 */
34
35 static hwloc_const_bitmap_t
hwloc_fix_cpubind(hwloc_topology_t topology,hwloc_const_bitmap_t set)36 hwloc_fix_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t set)
37 {
38 hwloc_const_bitmap_t topology_set = hwloc_topology_get_topology_cpuset(topology);
39 hwloc_const_bitmap_t complete_set = hwloc_topology_get_complete_cpuset(topology);
40
41 if (!topology_set) {
42 /* The topology is composed of several systems, the cpuset is ambiguous. */
43 errno = EXDEV;
44 return NULL;
45 }
46
47 if (hwloc_bitmap_iszero(set)) {
48 errno = EINVAL;
49 return NULL;
50 }
51
52 if (!hwloc_bitmap_isincluded(set, complete_set)) {
53 errno = EINVAL;
54 return NULL;
55 }
56
57 if (hwloc_bitmap_isincluded(topology_set, set))
58 set = complete_set;
59
60 return set;
61 }
62
63 int
hwloc_set_cpubind(hwloc_topology_t topology,hwloc_const_bitmap_t set,int flags)64 hwloc_set_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t set, int flags)
65 {
66 set = hwloc_fix_cpubind(topology, set);
67 if (!set)
68 return -1;
69
70 if (flags & HWLOC_CPUBIND_PROCESS) {
71 if (topology->binding_hooks.set_thisproc_cpubind)
72 return topology->binding_hooks.set_thisproc_cpubind(topology, set, flags);
73 } else if (flags & HWLOC_CPUBIND_THREAD) {
74 if (topology->binding_hooks.set_thisthread_cpubind)
75 return topology->binding_hooks.set_thisthread_cpubind(topology, set, flags);
76 } else {
77 if (topology->binding_hooks.set_thisproc_cpubind) {
78 int err = topology->binding_hooks.set_thisproc_cpubind(topology, set, flags);
79 if (err >= 0 || errno != ENOSYS)
80 return err;
81 /* ENOSYS, fallback */
82 }
83 if (topology->binding_hooks.set_thisthread_cpubind)
84 return topology->binding_hooks.set_thisthread_cpubind(topology, set, flags);
85 }
86
87 errno = ENOSYS;
88 return -1;
89 }
90
91 int
hwloc_get_cpubind(hwloc_topology_t topology,hwloc_bitmap_t set,int flags)92 hwloc_get_cpubind(hwloc_topology_t topology, hwloc_bitmap_t set, int flags)
93 {
94 if (flags & HWLOC_CPUBIND_PROCESS) {
95 if (topology->binding_hooks.get_thisproc_cpubind)
96 return topology->binding_hooks.get_thisproc_cpubind(topology, set, flags);
97 } else if (flags & HWLOC_CPUBIND_THREAD) {
98 if (topology->binding_hooks.get_thisthread_cpubind)
99 return topology->binding_hooks.get_thisthread_cpubind(topology, set, flags);
100 } else {
101 if (topology->binding_hooks.get_thisproc_cpubind) {
102 int err = topology->binding_hooks.get_thisproc_cpubind(topology, set, flags);
103 if (err >= 0 || errno != ENOSYS)
104 return err;
105 /* ENOSYS, fallback */
106 }
107 if (topology->binding_hooks.get_thisthread_cpubind)
108 return topology->binding_hooks.get_thisthread_cpubind(topology, set, flags);
109 }
110
111 errno = ENOSYS;
112 return -1;
113 }
114
115 int
hwloc_set_proc_cpubind(hwloc_topology_t topology,hwloc_pid_t pid,hwloc_const_bitmap_t set,int flags)116 hwloc_set_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_bitmap_t set, int flags)
117 {
118 set = hwloc_fix_cpubind(topology, set);
119 if (!set)
120 return -1;
121
122 if (topology->binding_hooks.set_proc_cpubind)
123 return topology->binding_hooks.set_proc_cpubind(topology, pid, set, flags);
124
125 errno = ENOSYS;
126 return -1;
127 }
128
129 int
hwloc_get_proc_cpubind(hwloc_topology_t topology,hwloc_pid_t pid,hwloc_bitmap_t set,int flags)130 hwloc_get_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t set, int flags)
131 {
132 if (topology->binding_hooks.get_proc_cpubind)
133 return topology->binding_hooks.get_proc_cpubind(topology, pid, set, flags);
134
135 errno = ENOSYS;
136 return -1;
137 }
138
#ifdef hwloc_thread_t
/* Bind thread tid to the CPUs in set.
 * Returns 0 on success, -1 with errno set (ENOSYS when unsupported). */
int
hwloc_set_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t tid, hwloc_const_bitmap_t set, int flags)
{
  set = hwloc_fix_cpubind(topology, set);
  if (!set)
    return -1;

  if (!topology->binding_hooks.set_thread_cpubind) {
    errno = ENOSYS;
    return -1;
  }
  return topology->binding_hooks.set_thread_cpubind(topology, tid, set, flags);
}

/* Retrieve the cpu binding of thread tid into set.
 * Returns 0 on success, -1 with errno set (ENOSYS when unsupported). */
int
hwloc_get_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t tid, hwloc_bitmap_t set, int flags)
{
  if (!topology->binding_hooks.get_thread_cpubind) {
    errno = ENOSYS;
    return -1;
  }
  return topology->binding_hooks.get_thread_cpubind(topology, tid, set, flags);
}
#endif
164
165 int
hwloc_get_last_cpu_location(hwloc_topology_t topology,hwloc_bitmap_t set,int flags)166 hwloc_get_last_cpu_location(hwloc_topology_t topology, hwloc_bitmap_t set, int flags)
167 {
168 if (flags & HWLOC_CPUBIND_PROCESS) {
169 if (topology->binding_hooks.get_thisproc_last_cpu_location)
170 return topology->binding_hooks.get_thisproc_last_cpu_location(topology, set, flags);
171 } else if (flags & HWLOC_CPUBIND_THREAD) {
172 if (topology->binding_hooks.get_thisthread_last_cpu_location)
173 return topology->binding_hooks.get_thisthread_last_cpu_location(topology, set, flags);
174 } else {
175 if (topology->binding_hooks.get_thisproc_last_cpu_location) {
176 int err = topology->binding_hooks.get_thisproc_last_cpu_location(topology, set, flags);
177 if (err >= 0 || errno != ENOSYS)
178 return err;
179 /* ENOSYS, fallback */
180 }
181 if (topology->binding_hooks.get_thisthread_last_cpu_location)
182 return topology->binding_hooks.get_thisthread_last_cpu_location(topology, set, flags);
183 }
184
185 errno = ENOSYS;
186 return -1;
187 }
188
189 int
hwloc_get_proc_last_cpu_location(hwloc_topology_t topology,hwloc_pid_t pid,hwloc_bitmap_t set,int flags)190 hwloc_get_proc_last_cpu_location(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t set, int flags)
191 {
192 if (topology->binding_hooks.get_proc_last_cpu_location)
193 return topology->binding_hooks.get_proc_last_cpu_location(topology, pid, set, flags);
194
195 errno = ENOSYS;
196 return -1;
197 }
198
199 #define HWLOC_MEMBIND_ALLFLAGS (HWLOC_MEMBIND_PROCESS|HWLOC_MEMBIND_THREAD|HWLOC_MEMBIND_STRICT|HWLOC_MEMBIND_MIGRATE|HWLOC_MEMBIND_NOCPUBIND|HWLOC_MEMBIND_BYNODESET)
200
201 static hwloc_const_nodeset_t
hwloc_fix_membind(hwloc_topology_t topology,hwloc_const_nodeset_t nodeset)202 hwloc_fix_membind(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset)
203 {
204 hwloc_const_bitmap_t topology_nodeset = hwloc_topology_get_topology_nodeset(topology);
205 hwloc_const_bitmap_t complete_nodeset = hwloc_topology_get_complete_nodeset(topology);
206
207 if (!hwloc_topology_get_topology_cpuset(topology)) {
208 /* The topology is composed of several systems, the nodeset is thus
209 * ambiguous. */
210 errno = EXDEV;
211 return NULL;
212 }
213
214 if (!complete_nodeset) {
215 /* There is no NUMA node */
216 errno = ENODEV;
217 return NULL;
218 }
219
220 if (hwloc_bitmap_iszero(nodeset)) {
221 errno = EINVAL;
222 return NULL;
223 }
224
225 if (!hwloc_bitmap_isincluded(nodeset, complete_nodeset)) {
226 errno = EINVAL;
227 return NULL;
228 }
229
230 if (hwloc_bitmap_isincluded(topology_nodeset, nodeset))
231 return complete_nodeset;
232
233 return nodeset;
234 }
235
236 static int
hwloc_fix_membind_cpuset(hwloc_topology_t topology,hwloc_nodeset_t nodeset,hwloc_const_cpuset_t cpuset)237 hwloc_fix_membind_cpuset(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_const_cpuset_t cpuset)
238 {
239 hwloc_const_bitmap_t topology_set = hwloc_topology_get_topology_cpuset(topology);
240 hwloc_const_bitmap_t complete_set = hwloc_topology_get_complete_cpuset(topology);
241 hwloc_const_bitmap_t complete_nodeset = hwloc_topology_get_complete_nodeset(topology);
242
243 if (!topology_set) {
244 /* The topology is composed of several systems, the cpuset is thus
245 * ambiguous. */
246 errno = EXDEV;
247 return -1;
248 }
249
250 if (!complete_nodeset) {
251 /* There is no NUMA node */
252 errno = ENODEV;
253 return -1;
254 }
255
256 if (hwloc_bitmap_iszero(cpuset)) {
257 errno = EINVAL;
258 return -1;
259 }
260
261 if (!hwloc_bitmap_isincluded(cpuset, complete_set)) {
262 errno = EINVAL;
263 return -1;
264 }
265
266 if (hwloc_bitmap_isincluded(topology_set, cpuset)) {
267 hwloc_bitmap_copy(nodeset, complete_nodeset);
268 return 0;
269 }
270
271 hwloc_cpuset_to_nodeset(topology, cpuset, nodeset);
272 return 0;
273 }
274
275 int
hwloc_set_membind_nodeset(hwloc_topology_t topology,hwloc_const_nodeset_t nodeset,hwloc_membind_policy_t policy,int flags)276 hwloc_set_membind_nodeset(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
277 {
278 nodeset = hwloc_fix_membind(topology, nodeset);
279 if (!nodeset)
280 return -1;
281
282 if (flags & HWLOC_MEMBIND_PROCESS) {
283 if (topology->binding_hooks.set_thisproc_membind)
284 return topology->binding_hooks.set_thisproc_membind(topology, nodeset, policy, flags);
285 } else if (flags & HWLOC_MEMBIND_THREAD) {
286 if (topology->binding_hooks.set_thisthread_membind)
287 return topology->binding_hooks.set_thisthread_membind(topology, nodeset, policy, flags);
288 } else {
289 if (topology->binding_hooks.set_thisproc_membind) {
290 int err = topology->binding_hooks.set_thisproc_membind(topology, nodeset, policy, flags);
291 if (err >= 0 || errno != ENOSYS)
292 return err;
293 /* ENOSYS, fallback */
294 }
295 if (topology->binding_hooks.set_thisthread_membind)
296 return topology->binding_hooks.set_thisthread_membind(topology, nodeset, policy, flags);
297 }
298
299 errno = ENOSYS;
300 return -1;
301 }
302
303 int
hwloc_set_membind(hwloc_topology_t topology,hwloc_const_bitmap_t set,hwloc_membind_policy_t policy,int flags)304 hwloc_set_membind(hwloc_topology_t topology, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags)
305 {
306 int ret;
307
308 if (flags & HWLOC_MEMBIND_BYNODESET) {
309 ret = hwloc_set_membind_nodeset(topology, set, policy, flags);
310 } else {
311 hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
312 if (hwloc_fix_membind_cpuset(topology, nodeset, set))
313 ret = -1;
314 else
315 ret = hwloc_set_membind_nodeset(topology, nodeset, policy, flags);
316 hwloc_bitmap_free(nodeset);
317 }
318 return ret;
319 }
320
321 int
hwloc_get_membind_nodeset(hwloc_topology_t topology,hwloc_nodeset_t nodeset,hwloc_membind_policy_t * policy,int flags)322 hwloc_get_membind_nodeset(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags)
323 {
324 if (flags & HWLOC_MEMBIND_PROCESS) {
325 if (topology->binding_hooks.get_thisproc_membind)
326 return topology->binding_hooks.get_thisproc_membind(topology, nodeset, policy, flags);
327 } else if (flags & HWLOC_MEMBIND_THREAD) {
328 if (topology->binding_hooks.get_thisthread_membind)
329 return topology->binding_hooks.get_thisthread_membind(topology, nodeset, policy, flags);
330 } else {
331 if (topology->binding_hooks.get_thisproc_membind) {
332 int err = topology->binding_hooks.get_thisproc_membind(topology, nodeset, policy, flags);
333 if (err >= 0 || errno != ENOSYS)
334 return err;
335 /* ENOSYS, fallback */
336 }
337 if (topology->binding_hooks.get_thisthread_membind)
338 return topology->binding_hooks.get_thisthread_membind(topology, nodeset, policy, flags);
339 }
340
341 errno = ENOSYS;
342 return -1;
343 }
344
345 int
hwloc_get_membind(hwloc_topology_t topology,hwloc_bitmap_t set,hwloc_membind_policy_t * policy,int flags)346 hwloc_get_membind(hwloc_topology_t topology, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags)
347 {
348 int ret;
349
350 if (flags & HWLOC_MEMBIND_BYNODESET) {
351 ret = hwloc_get_membind_nodeset(topology, set, policy, flags);
352 } else {
353 hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
354 ret = hwloc_get_membind_nodeset(topology, nodeset, policy, flags);
355 if (!ret)
356 hwloc_cpuset_from_nodeset(topology, set, nodeset);
357 hwloc_bitmap_free(nodeset);
358 }
359
360 return ret;
361 }
362
363 int
hwloc_set_proc_membind_nodeset(hwloc_topology_t topology,hwloc_pid_t pid,hwloc_const_nodeset_t nodeset,hwloc_membind_policy_t policy,int flags)364 hwloc_set_proc_membind_nodeset(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
365 {
366 nodeset = hwloc_fix_membind(topology, nodeset);
367 if (!nodeset)
368 return -1;
369
370 if (topology->binding_hooks.set_proc_membind)
371 return topology->binding_hooks.set_proc_membind(topology, pid, nodeset, policy, flags);
372
373 errno = ENOSYS;
374 return -1;
375 }
376
377
378 int
hwloc_set_proc_membind(hwloc_topology_t topology,hwloc_pid_t pid,hwloc_const_bitmap_t set,hwloc_membind_policy_t policy,int flags)379 hwloc_set_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags)
380 {
381 int ret;
382
383 if (flags & HWLOC_MEMBIND_BYNODESET) {
384 ret = hwloc_set_proc_membind_nodeset(topology, pid, set, policy, flags);
385 } else {
386 hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
387 if (hwloc_fix_membind_cpuset(topology, nodeset, set))
388 ret = -1;
389 else
390 ret = hwloc_set_proc_membind_nodeset(topology, pid, nodeset, policy, flags);
391 hwloc_bitmap_free(nodeset);
392 }
393
394 return ret;
395 }
396
397 int
hwloc_get_proc_membind_nodeset(hwloc_topology_t topology,hwloc_pid_t pid,hwloc_nodeset_t nodeset,hwloc_membind_policy_t * policy,int flags)398 hwloc_get_proc_membind_nodeset(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags)
399 {
400 if (topology->binding_hooks.get_proc_membind)
401 return topology->binding_hooks.get_proc_membind(topology, pid, nodeset, policy, flags);
402
403 errno = ENOSYS;
404 return -1;
405 }
406
407 int
hwloc_get_proc_membind(hwloc_topology_t topology,hwloc_pid_t pid,hwloc_bitmap_t set,hwloc_membind_policy_t * policy,int flags)408 hwloc_get_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags)
409 {
410 int ret;
411
412 if (flags & HWLOC_MEMBIND_BYNODESET) {
413 ret = hwloc_get_proc_membind_nodeset(topology, pid, set, policy, flags);
414 } else {
415 hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
416 ret = hwloc_get_proc_membind_nodeset(topology, pid, nodeset, policy, flags);
417 if (!ret)
418 hwloc_cpuset_from_nodeset(topology, set, nodeset);
419 hwloc_bitmap_free(nodeset);
420 }
421
422 return ret;
423 }
424
425 int
hwloc_set_area_membind_nodeset(hwloc_topology_t topology,const void * addr,size_t len,hwloc_const_nodeset_t nodeset,hwloc_membind_policy_t policy,int flags)426 hwloc_set_area_membind_nodeset(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
427 {
428 if (!len)
429 /* nothing to do */
430 return 0;
431
432 nodeset = hwloc_fix_membind(topology, nodeset);
433 if (!nodeset)
434 return -1;
435
436 if (topology->binding_hooks.set_area_membind)
437 return topology->binding_hooks.set_area_membind(topology, addr, len, nodeset, policy, flags);
438
439 errno = ENOSYS;
440 return -1;
441 }
442
443 int
hwloc_set_area_membind(hwloc_topology_t topology,const void * addr,size_t len,hwloc_const_bitmap_t set,hwloc_membind_policy_t policy,int flags)444 hwloc_set_area_membind(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags)
445 {
446 int ret;
447
448 if (flags & HWLOC_MEMBIND_BYNODESET) {
449 ret = hwloc_set_area_membind_nodeset(topology, addr, len, set, policy, flags);
450 } else {
451 hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
452 if (hwloc_fix_membind_cpuset(topology, nodeset, set))
453 ret = -1;
454 else
455 ret = hwloc_set_area_membind_nodeset(topology, addr, len, nodeset, policy, flags);
456 hwloc_bitmap_free(nodeset);
457 }
458
459 return ret;
460 }
461
462 int
hwloc_get_area_membind_nodeset(hwloc_topology_t topology,const void * addr,size_t len,hwloc_nodeset_t nodeset,hwloc_membind_policy_t * policy,int flags)463 hwloc_get_area_membind_nodeset(hwloc_topology_t topology, const void *addr, size_t len, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags)
464 {
465 if (!len) {
466 /* nothing to query */
467 errno = EINVAL;
468 return -1;
469 }
470
471 if (topology->binding_hooks.get_area_membind)
472 return topology->binding_hooks.get_area_membind(topology, addr, len, nodeset, policy, flags);
473
474 errno = ENOSYS;
475 return -1;
476 }
477
478 int
hwloc_get_area_membind(hwloc_topology_t topology,const void * addr,size_t len,hwloc_bitmap_t set,hwloc_membind_policy_t * policy,int flags)479 hwloc_get_area_membind(hwloc_topology_t topology, const void *addr, size_t len, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags)
480 {
481 int ret;
482
483 if (flags & HWLOC_MEMBIND_BYNODESET) {
484 ret = hwloc_get_area_membind_nodeset(topology, addr, len, set, policy, flags);
485 } else {
486 hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
487 ret = hwloc_get_area_membind_nodeset(topology, addr, len, nodeset, policy, flags);
488 if (!ret)
489 hwloc_cpuset_from_nodeset(topology, set, nodeset);
490 hwloc_bitmap_free(nodeset);
491 }
492
493 return ret;
494 }
495
496 static int
hwloc_get_area_memlocation_by_nodeset(hwloc_topology_t topology,const void * addr,size_t len,hwloc_nodeset_t nodeset,int flags)497 hwloc_get_area_memlocation_by_nodeset(hwloc_topology_t topology, const void *addr, size_t len, hwloc_nodeset_t nodeset, int flags)
498 {
499 if (flags & ~HWLOC_MEMBIND_ALLFLAGS) {
500 errno = EINVAL;
501 return -1;
502 }
503
504 if (!len)
505 /* nothing to do */
506 return 0;
507
508 if (topology->binding_hooks.get_area_memlocation)
509 return topology->binding_hooks.get_area_memlocation(topology, addr, len, nodeset, flags);
510
511 errno = ENOSYS;
512 return -1;
513 }
514
515 int
hwloc_get_area_memlocation(hwloc_topology_t topology,const void * addr,size_t len,hwloc_cpuset_t set,int flags)516 hwloc_get_area_memlocation(hwloc_topology_t topology, const void *addr, size_t len, hwloc_cpuset_t set, int flags)
517 {
518 int ret;
519
520 if (flags & HWLOC_MEMBIND_BYNODESET) {
521 ret = hwloc_get_area_memlocation_by_nodeset(topology, addr, len, set, flags);
522 } else {
523 hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
524 ret = hwloc_get_area_memlocation_by_nodeset(topology, addr, len, nodeset, flags);
525 if (!ret)
526 hwloc_cpuset_from_nodeset(topology, set, nodeset);
527 hwloc_bitmap_free(nodeset);
528 }
529
530 return ret;
531 }
532
/* Allocate len bytes from the heap, page-aligned when an aligned
 * allocator is available.  Returns NULL on failure. */
void *
hwloc_alloc_heap(hwloc_topology_t topology __hwloc_attribute_unused, size_t len)
{
  void *p = NULL;
#if defined(hwloc_getpagesize) && defined(HAVE_POSIX_MEMALIGN)
  /* posix_memalign() returns the error code instead of setting errno,
   * so store it into errno for callers. */
  errno = posix_memalign(&p, hwloc_getpagesize(), len);
  if (errno)
    p = NULL;
#elif defined(hwloc_getpagesize) && defined(HAVE_MEMALIGN)
  p = memalign(hwloc_getpagesize(), len);
#else
  /* No aligned allocator available, fall back to plain malloc(). */
  p = malloc(len);
#endif
  return p;
}
548
#ifdef MAP_ANONYMOUS
/* Allocate len bytes of fresh anonymous memory with mmap().
 * Returns NULL on failure (errno set by mmap). */
void *
hwloc_alloc_mmap(hwloc_topology_t topology __hwloc_attribute_unused, size_t len)
{
  void *p = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return NULL;
  return p;
}
#endif
557
/* Release memory obtained from hwloc_alloc_heap().  Always succeeds. */
int
hwloc_free_heap(hwloc_topology_t topology __hwloc_attribute_unused, void *addr, size_t len __hwloc_attribute_unused)
{
  free(addr);
  return 0;
}
564
#ifdef MAP_ANONYMOUS
/* Release memory obtained from hwloc_alloc_mmap().  NULL is a no-op.
 * Returns munmap()'s result (0 on success, -1 with errno set). */
int
hwloc_free_mmap(hwloc_topology_t topology __hwloc_attribute_unused, void *addr, size_t len)
{
  return addr ? munmap(addr, len) : 0;
}
#endif
574
575 void *
hwloc_alloc(hwloc_topology_t topology,size_t len)576 hwloc_alloc(hwloc_topology_t topology, size_t len)
577 {
578 if (topology->binding_hooks.alloc)
579 return topology->binding_hooks.alloc(topology, len);
580 return hwloc_alloc_heap(topology, len);
581 }
582
/* Allocate len bytes bound to the NUMA nodes in nodeset with the given
 * policy.  Prefers the alloc_membind hook; otherwise allocates and then
 * binds the area.  Unless HWLOC_MEMBIND_STRICT is given, failures fall
 * back to an unbound allocation.  Returns NULL on failure. */
void *
hwloc_alloc_membind_nodeset(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
{
  void *p;
  nodeset = hwloc_fix_membind(topology, nodeset);
  if (!nodeset)
    goto fallback;
  if (flags & HWLOC_MEMBIND_MIGRATE) {
    /* MIGRATE makes no sense for a freshly-allocated buffer. */
    errno = EINVAL;
    goto fallback;
  }

  if (topology->binding_hooks.alloc_membind)
    return topology->binding_hooks.alloc_membind(topology, len, nodeset, policy, flags);
  else if (topology->binding_hooks.set_area_membind) {
    /* No dedicated allocator hook: allocate first, then bind the area. */
    p = hwloc_alloc(topology, len);
    if (!p)
      return NULL;
    if (topology->binding_hooks.set_area_membind(topology, p, len, nodeset, policy, flags) && flags & HWLOC_MEMBIND_STRICT) {
      /* Preserve the binding error across free() before reporting it. */
      int error = errno;
      free(p);
      errno = error;
      return NULL;
    }
    return p;
  } else {
    errno = ENOSYS;
  }

 fallback:
  if (flags & HWLOC_MEMBIND_STRICT)
    /* Report error */
    return NULL;
  /* Never mind, allocate anyway */
  return hwloc_alloc(topology, len);
}
619
620 void *
hwloc_alloc_membind(hwloc_topology_t topology,size_t len,hwloc_const_bitmap_t set,hwloc_membind_policy_t policy,int flags)621 hwloc_alloc_membind(hwloc_topology_t topology, size_t len, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags)
622 {
623 void *ret;
624
625 if (flags & HWLOC_MEMBIND_BYNODESET) {
626 ret = hwloc_alloc_membind_nodeset(topology, len, set, policy, flags);
627 } else {
628 hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
629 if (hwloc_fix_membind_cpuset(topology, nodeset, set)) {
630 if (flags & HWLOC_MEMBIND_STRICT)
631 ret = NULL;
632 else
633 ret = hwloc_alloc(topology, len);
634 } else
635 ret = hwloc_alloc_membind_nodeset(topology, len, nodeset, policy, flags);
636 hwloc_bitmap_free(nodeset);
637 }
638
639 return ret;
640 }
641
642 int
hwloc_free(hwloc_topology_t topology,void * addr,size_t len)643 hwloc_free(hwloc_topology_t topology, void *addr, size_t len)
644 {
645 if (topology->binding_hooks.free_membind)
646 return topology->binding_hooks.free_membind(topology, addr, len);
647 return hwloc_free_heap(topology, addr, len);
648 }
649
650 /*
651 * Empty binding hooks always returning success
652 */
653
/* Helper for the no-op "get" hooks: copy the topology's complete cpuset
 * into set.  Returns 0 on success, -1 when no complete cpuset exists.
 * Fix: reuse the cached pointer instead of calling
 * hwloc_topology_get_complete_cpuset() a second time. */
static int dontset_return_complete_cpuset(hwloc_topology_t topology, hwloc_cpuset_t set)
{
  hwloc_const_cpuset_t cpuset = hwloc_topology_get_complete_cpuset(topology);
  if (!cpuset)
    return -1;
  hwloc_bitmap_copy(set, cpuset);
  return 0;
}
663
/* No-op cpubind hooks: setters pretend to succeed, getters report the
 * complete cpuset.  Installed when no real binding support exists. */
static int dontset_thisthread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, int flags __hwloc_attribute_unused)
{
  return 0;
}
static int dontget_thisthread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_bitmap_t set, int flags __hwloc_attribute_unused)
{
  return dontset_return_complete_cpuset(topology, set);
}
static int dontset_thisproc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, int flags __hwloc_attribute_unused)
{
  return 0;
}
static int dontget_thisproc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_bitmap_t set, int flags __hwloc_attribute_unused)
{
  return dontset_return_complete_cpuset(topology, set);
}
static int dontset_proc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t pid __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, int flags __hwloc_attribute_unused)
{
  return 0;
}
static int dontget_proc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t pid __hwloc_attribute_unused, hwloc_bitmap_t cpuset, int flags __hwloc_attribute_unused)
{
  return dontset_return_complete_cpuset(topology, cpuset);
}
#ifdef hwloc_thread_t
/* No-op per-thread cpubind hooks (same convention as above). */
static int dontset_thread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_thread_t tid __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, int flags __hwloc_attribute_unused)
{
  return 0;
}
static int dontget_thread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_thread_t tid __hwloc_attribute_unused, hwloc_bitmap_t cpuset, int flags __hwloc_attribute_unused)
{
  return dontset_return_complete_cpuset(topology, cpuset);
}
#endif
698
/* Helper for the no-op membind "get" hooks: copy the topology's complete
 * nodeset into set and report the DEFAULT policy.  Returns 0 on success,
 * -1 when no complete nodeset exists.
 * Fix: reuse the cached pointer instead of calling
 * hwloc_topology_get_complete_nodeset() a second time. */
static int dontset_return_complete_nodeset(hwloc_topology_t topology, hwloc_nodeset_t set, hwloc_membind_policy_t *policy)
{
  hwloc_const_nodeset_t nodeset = hwloc_topology_get_complete_nodeset(topology);
  if (!nodeset)
    return -1;
  hwloc_bitmap_copy(set, nodeset);
  *policy = HWLOC_MEMBIND_DEFAULT;
  return 0;
}
709
/* No-op membind hooks: setters pretend to succeed, getters report the
 * complete nodeset with the DEFAULT policy. */
static int dontset_thisproc_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused)
{
  return 0;
}
static int dontget_thisproc_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags __hwloc_attribute_unused)
{
  return dontset_return_complete_nodeset(topology, set, policy);
}

static int dontset_thisthread_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused)
{
  return 0;
}
static int dontget_thisthread_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags __hwloc_attribute_unused)
{
  return dontset_return_complete_nodeset(topology, set, policy);
}

static int dontset_proc_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t pid __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused)
{
  return 0;
}
static int dontget_proc_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t pid __hwloc_attribute_unused, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags __hwloc_attribute_unused)
{
  return dontset_return_complete_nodeset(topology, set, policy);
}
736
/* No-op area membind/memlocation hooks (same convention as above). */
static int dontset_area_membind(hwloc_topology_t topology __hwloc_attribute_unused, const void *addr __hwloc_attribute_unused, size_t size __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused)
{
  return 0;
}
static int dontget_area_membind(hwloc_topology_t topology __hwloc_attribute_unused, const void *addr __hwloc_attribute_unused, size_t size __hwloc_attribute_unused, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags __hwloc_attribute_unused)
{
  return dontset_return_complete_nodeset(topology, set, policy);
}
static int dontget_area_memlocation(hwloc_topology_t topology __hwloc_attribute_unused, const void *addr __hwloc_attribute_unused, size_t size __hwloc_attribute_unused, hwloc_bitmap_t set, int flags __hwloc_attribute_unused)
{
  /* The dummy policy is discarded; only the nodeset copy matters. */
  hwloc_membind_policy_t policy;
  return dontset_return_complete_nodeset(topology, set, &policy);
}
750
dontalloc_membind(hwloc_topology_t topology __hwloc_attribute_unused,size_t size __hwloc_attribute_unused,hwloc_const_bitmap_t set __hwloc_attribute_unused,hwloc_membind_policy_t policy __hwloc_attribute_unused,int flags __hwloc_attribute_unused)751 static void * dontalloc_membind(hwloc_topology_t topology __hwloc_attribute_unused, size_t size __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused)
752 {
753 return malloc(size);
754 }
/* Dummy free_membind hook matching dontalloc_membind(): release memory with
 * plain free().  Always succeeds.
 * Fix: `addr` was tagged __hwloc_attribute_unused although it is passed to
 * free(); the misleading attribute is removed ( `size` really is unused,
 * since free() does not need a length). */
static int dontfree_membind(hwloc_topology_t topology __hwloc_attribute_unused, void *addr, size_t size __hwloc_attribute_unused)
{
  free(addr);
  return 0;
}
760
/* Install the dummy binding hooks used when the topology does not describe
 * the current system: "set" hooks succeed without doing anything and "get"
 * hooks report the complete cpuset/nodeset, so callers never see ENOSYS.
 * `support` is not modified here: dummy hooks are intentionally not reported
 * as supported (see hwloc_set_binding_hooks below, which only fills the
 * support struct in the is_thissystem case). */
static void hwloc_set_dummy_hooks(struct hwloc_binding_hooks *hooks,
				  struct hwloc_topology_support *support __hwloc_attribute_unused)
{
  hooks->set_thisproc_cpubind = dontset_thisproc_cpubind;
  hooks->get_thisproc_cpubind = dontget_thisproc_cpubind;
  hooks->set_thisthread_cpubind = dontset_thisthread_cpubind;
  hooks->get_thisthread_cpubind = dontget_thisthread_cpubind;
  hooks->set_proc_cpubind = dontset_proc_cpubind;
  hooks->get_proc_cpubind = dontget_proc_cpubind;
#ifdef hwloc_thread_t
  /* Thread-level hooks only exist when the platform defines a thread handle type. */
  hooks->set_thread_cpubind = dontset_thread_cpubind;
  hooks->get_thread_cpubind = dontget_thread_cpubind;
#endif
  hooks->get_thisproc_last_cpu_location = dontget_thisproc_cpubind; /* cpubind instead of last_cpu_location is ok */
  hooks->get_thisthread_last_cpu_location = dontget_thisthread_cpubind; /* cpubind instead of last_cpu_location is ok */
  hooks->get_proc_last_cpu_location = dontget_proc_cpubind; /* cpubind instead of last_cpu_location is ok */
  /* TODO: get_thread_last_cpu_location */
  hooks->set_thisproc_membind = dontset_thisproc_membind;
  hooks->get_thisproc_membind = dontget_thisproc_membind;
  hooks->set_thisthread_membind = dontset_thisthread_membind;
  hooks->get_thisthread_membind = dontget_thisthread_membind;
  hooks->set_proc_membind = dontset_proc_membind;
  hooks->get_proc_membind = dontget_proc_membind;
  hooks->set_area_membind = dontset_area_membind;
  hooks->get_area_membind = dontget_area_membind;
  hooks->get_area_memlocation = dontget_area_memlocation;
  hooks->alloc_membind = dontalloc_membind;
  hooks->free_membind = dontfree_membind;
}
790
/* Install the binding hooks of every OS backend compiled into this build.
 * Each backend fills the entries of `hooks` it implements and flags the
 * corresponding capabilities in `support`.  In a normal build presumably only
 * one HWLOC_*_SYS macro is defined, so only one backend runs — TODO confirm;
 * if several were defined, later calls would override earlier ones. */
void
hwloc_set_native_binding_hooks(struct hwloc_binding_hooks *hooks, struct hwloc_topology_support *support)
{
#    ifdef HWLOC_LINUX_SYS
    hwloc_set_linuxfs_hooks(hooks, support);
#    endif /* HWLOC_LINUX_SYS */

#    ifdef HWLOC_BGQ_SYS
    hwloc_set_bgq_hooks(hooks, support);
#    endif /* HWLOC_BGQ_SYS */

#    ifdef HWLOC_AIX_SYS
    hwloc_set_aix_hooks(hooks, support);
#    endif /* HWLOC_AIX_SYS */

#    ifdef HWLOC_OSF_SYS
    hwloc_set_osf_hooks(hooks, support);
#    endif /* HWLOC_OSF_SYS */

#    ifdef HWLOC_SOLARIS_SYS
    hwloc_set_solaris_hooks(hooks, support);
#    endif /* HWLOC_SOLARIS_SYS */

#    ifdef HWLOC_WIN_SYS
    hwloc_set_windows_hooks(hooks, support);
#    endif /* HWLOC_WIN_SYS */

#    ifdef HWLOC_DARWIN_SYS
    hwloc_set_darwin_hooks(hooks, support);
#    endif /* HWLOC_DARWIN_SYS */

#    ifdef HWLOC_FREEBSD_SYS
    hwloc_set_freebsd_hooks(hooks, support);
#    endif /* HWLOC_FREEBSD_SYS */

#    ifdef HWLOC_NETBSD_SYS
    hwloc_set_netbsd_hooks(hooks, support);
#    endif /* HWLOC_NETBSD_SYS */

#    ifdef HWLOC_DRAGONFLY_SYS
    hwloc_set_dragonfly_hooks(hooks, support);
#    endif /* HWLOC_DRAGONFLY_SYS */

#    ifdef HWLOC_HPUX_SYS
    hwloc_set_hpux_hooks(hooks, support);
#    endif /* HWLOC_HPUX_SYS */
}
838
839 /* If the represented system is actually not this system, use dummy binding hooks. */
/* If the represented system is actually not this system, use dummy binding hooks. */
void
hwloc_set_binding_hooks(struct hwloc_topology *topology)
{
  if (topology->is_thissystem) {
    hwloc_set_native_binding_hooks(&topology->binding_hooks, &topology->support);
    /* every hook not set above will return ENOSYS */
  } else {
    /* not this system, use dummy binding hooks that do nothing (but don't return ENOSYS) */
    hwloc_set_dummy_hooks(&topology->binding_hooks, &topology->support);
  }

  /* if not is_thissystem, set_cpubind is fake
   * and get_cpubind returns the whole system cpuset,
   * so don't report that set/get_cpubind as supported
   */
  if (topology->is_thissystem) {
    /* Advertise support for each operation whose hook was actually installed:
     * DO(which,kind) sets support.{which}bind->kind = 1 iff the hook pointer
     * is non-NULL.  `which` selects the cpubind or membind support struct via
     * token pasting (which##bind). */
#define DO(which,kind) \
    if (topology->binding_hooks.kind) \
      topology->support.which##bind->kind = 1;
    DO(cpu,set_thisproc_cpubind);
    DO(cpu,get_thisproc_cpubind);
    DO(cpu,set_proc_cpubind);
    DO(cpu,get_proc_cpubind);
    DO(cpu,set_thisthread_cpubind);
    DO(cpu,get_thisthread_cpubind);
#ifdef hwloc_thread_t
    DO(cpu,set_thread_cpubind);
    DO(cpu,get_thread_cpubind);
#endif
    DO(cpu,get_thisproc_last_cpu_location);
    DO(cpu,get_proc_last_cpu_location);
    DO(cpu,get_thisthread_last_cpu_location);
    DO(mem,set_thisproc_membind);
    DO(mem,get_thisproc_membind);
    DO(mem,set_thisthread_membind);
    DO(mem,get_thisthread_membind);
    DO(mem,set_proc_membind);
    DO(mem,get_proc_membind);
    DO(mem,set_area_membind);
    DO(mem,get_area_membind);
    DO(mem,get_area_memlocation);
    DO(mem,alloc_membind);
    /* NOTE(review): free_membind has no support flag here — presumably the
     * support struct has no such field; confirm against hwloc.h. */
  }
}
884