/*
 * Copyright © 2009 CNRS
 * Copyright © 2009-2019 Inria. All rights reserved.
 * Copyright © 2009-2011, 2013 Université Bordeaux
 * Copyright © 2011 Cisco Systems, Inc. All rights reserved.
 * See COPYING in top-level directory.
 */

/* TODO: use SIGRECONFIG & dr_reconfig for state change */

#include "private/autogen/config.h"

#include <sys/types.h>
#ifdef HAVE_DIRENT_H
#include <dirent.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "hwloc.h"
#include "private/private.h"
#include "private/misc.h"
#include "private/debug.h"

#include <procinfo.h>
#include <sys/types.h>
#include <sys/rset.h>
#include <sys/processor.h>
#include <sys/thread.h>
#include <sys/mman.h>
#include <sys/systemcfg.h>

#ifndef __power_pc
#define __power_pc() 0
#endif
#ifndef __power_4
#define __power_4() 0
#endif
#ifndef __power_5
#define __power_5() 0
#endif
#ifndef __power_6
#define __power_6() 0
#endif
#ifndef __power_7
#define __power_7() 0
#endif

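/* Generic CPU-binding helper: bind the process or thread designated by
 * (what, who) to the CPUs in hwloc_set, by attaching an AIX resource set
 * built from those CPUs. Binding to the full complete cpuset is
 * implemented by detaching any existing rset instead.
 */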
static int
hwloc_aix_set_sth_cpubind(hwloc_topology_t topology, rstype_t what, rsid_t who, pid_t pid, hwloc_const_bitmap_t hwloc_set, int flags __hwloc_attribute_unused)
{
  rsethandle_t rad;
  int res;
  unsigned cpu;

  if (flags & HWLOC_CPUBIND_NOMEMBIND) {
    errno = ENOSYS;
    return -1;
  }

  /* The resulting binding is always strict */

  if (hwloc_bitmap_isequal(hwloc_set, hwloc_topology_get_complete_cpuset(topology))) {
    if (ra_detachrset(what, who, 0))
      return -1;
    return 0;
  }

  rad = rs_alloc(RS_EMPTY);
  hwloc_bitmap_foreach_begin(cpu, hwloc_set)
    rs_op(RS_ADDRESOURCE, rad, NULL, R_PROCS, cpu);
  hwloc_bitmap_foreach_end();

  res = ra_attachrset(what, who, rad, 0);
  if (res < 0 && errno == EPERM) {
    /* EPERM may mean that one thread has been bound with bindprocessor().
     * Unbind the entire process (we can't unbind individual threads)
     * and try again.
     */
    bindprocessor(BINDPROCESS, pid, PROCESSOR_CLASS_ANY);
    res = ra_attachrset(what, who, rad, 0);
  }

  rs_free(rad);
  return res;
}

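/* Read the CPU binding of (what, who) from its attached resource set.
 * *boundp is set to 1 if at least one of the first R_MAXPROCS CPUs is
 * missing from the rset, i.e. the target is actually restricted; callers
 * use this to decide whether to fall back to the getthrds()-based lookup
 * below, which also sees bindprocessor() bindings.
 */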
static int
hwloc_aix_get_sth_rset_cpubind(hwloc_topology_t topology, rstype_t what, rsid_t who, hwloc_bitmap_t hwloc_set, int flags __hwloc_attribute_unused, int *boundp)
{
  rsethandle_t rset;
  unsigned cpu, maxcpus;
  int res = -1;
  int bound = 0;

  rset = rs_alloc(RS_EMPTY);

  if (ra_getrset(what, who, 0, rset) == -1)
    goto out;

  hwloc_bitmap_zero(hwloc_set);
  maxcpus = rs_getinfo(rset, R_MAXPROCS, 0);
  for (cpu = 0; cpu < maxcpus; cpu++)
    if (rs_op(RS_TESTRESOURCE, rset, NULL, R_PROCS, cpu) == 1)
      hwloc_bitmap_set(hwloc_set, cpu);
    else
      bound = 1;
  hwloc_bitmap_and(hwloc_set, hwloc_set, hwloc_topology_get_complete_cpuset(topology));
  res = 0;
  *boundp = bound;

 out:
  rs_free(rset);
  return res;
}

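/* Fallback for process bindings made with bindprocessor() rather than
 * resource sets: walk the process' kernel thread entries with
 * getthrds64()/getthrds() and accumulate each thread's bound CPU
 * (ti_cpuid); PROCESSOR_CLASS_ANY means the thread is unbound.
 */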
static int
hwloc_aix_get_pid_getthrds_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, pid_t pid, hwloc_bitmap_t hwloc_set, int flags __hwloc_attribute_unused)
{
#if HWLOC_BITS_PER_LONG == 64
  struct thrdentry64 thread_info;
  tid64_t next_thread;
#else
  struct thrdsinfo thread_info;
  tid_t next_thread;
#endif

  next_thread = 0;
  /* TODO: get multiple at once */
#if HWLOC_BITS_PER_LONG == 64
  while (getthrds64 (pid, &thread_info, sizeof (thread_info),
                     &next_thread, 1) == 1) {
#else
  while (getthrds (pid, &thread_info, sizeof (thread_info),
                   &next_thread, 1) == 1) {
#endif
    if (PROCESSOR_CLASS_ANY != thread_info.ti_cpuid)
      hwloc_bitmap_set(hwloc_set, thread_info.ti_cpuid);
    else
      hwloc_bitmap_fill(hwloc_set);
  }
  /* TODO: what if the thread list changes and we get nothing? */

  return 0;
}

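/* Same getthrds()-based fallback, but for a single kernel thread:
 * scan the current process' thread entries until the requested tid
 * is found and report that thread's bound CPU.
 */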
static int
hwloc_aix_get_tid_getthrds_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, tid_t tid, hwloc_bitmap_t hwloc_set, int flags __hwloc_attribute_unused)
{
#if HWLOC_BITS_PER_LONG == 64
  struct thrdentry64 thread_info;
  tid64_t next_thread;
#else
  struct thrdsinfo thread_info;
  tid_t next_thread;
#endif
  pid_t pid = getpid();

  next_thread = 0;
  /* TODO: get multiple at once */
#if HWLOC_BITS_PER_LONG == 64
  while (getthrds64 (pid, &thread_info, sizeof (thread_info),
                     &next_thread, 1) == 1) {
#else
  while (getthrds (pid, &thread_info, sizeof (thread_info),
                   &next_thread, 1) == 1) {
#endif
    if (thread_info.ti_tid == tid) {
      if (PROCESSOR_CLASS_ANY != thread_info.ti_cpuid)
        hwloc_bitmap_set(hwloc_set, thread_info.ti_cpuid);
      else
        hwloc_bitmap_fill(hwloc_set);
      break;
    }
  }
  /* TODO: what if the thread goes away in the meantime? */

  return 0;
}

static int
hwloc_aix_set_thisproc_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t hwloc_set, int flags)
{
  rsid_t who;
  who.at_pid = getpid();
  return hwloc_aix_set_sth_cpubind(topology, R_PROCESS, who, who.at_pid, hwloc_set, flags);
}

static int
hwloc_aix_get_thisproc_cpubind(hwloc_topology_t topology, hwloc_bitmap_t hwloc_set, int flags)
{
  int ret, bound;
  rsid_t who;
  who.at_pid = getpid();
  ret = hwloc_aix_get_sth_rset_cpubind(topology, R_PROCESS, who, hwloc_set, flags, &bound);
  if (!ret && !bound) {
    hwloc_bitmap_zero(hwloc_set);
    ret = hwloc_aix_get_pid_getthrds_cpubind(topology, who.at_pid, hwloc_set, flags);
  }
  return ret;
}

#ifdef R_THREAD
static int
hwloc_aix_set_thisthread_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t hwloc_set, int flags)
{
  rsid_t who;
  who.at_tid = thread_self();
  return hwloc_aix_set_sth_cpubind(topology, R_THREAD, who, getpid(), hwloc_set, flags);
}

static int
hwloc_aix_get_thisthread_cpubind(hwloc_topology_t topology, hwloc_bitmap_t hwloc_set, int flags)
{
  int ret, bound;
  rsid_t who;
  who.at_tid = thread_self();
  ret = hwloc_aix_get_sth_rset_cpubind(topology, R_THREAD, who, hwloc_set, flags, &bound);
  if (!ret && !bound) {
    hwloc_bitmap_zero(hwloc_set);
    ret = hwloc_aix_get_tid_getthrds_cpubind(topology, who.at_tid, hwloc_set, flags);
  }
  return ret;
}
#endif /* R_THREAD */

static int
hwloc_aix_set_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_bitmap_t hwloc_set, int flags)
{
  rsid_t who;
  who.at_pid = pid;
  return hwloc_aix_set_sth_cpubind(topology, R_PROCESS, who, pid, hwloc_set, flags);
}

static int
hwloc_aix_get_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t hwloc_set, int flags)
{
  int ret, bound;
  rsid_t who;
  who.at_pid = pid;
  ret = hwloc_aix_get_sth_rset_cpubind(topology, R_PROCESS, who, hwloc_set, flags, &bound);
  if (!ret && !bound) {
    hwloc_bitmap_zero(hwloc_set);
    ret = hwloc_aix_get_pid_getthrds_cpubind(topology, who.at_pid, hwloc_set, flags);
  }
  return ret;
}

#ifdef R_THREAD
#ifdef HWLOC_HAVE_PTHREAD_GETTHRDS_NP
static int
hwloc_aix_set_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t pthread, hwloc_const_bitmap_t hwloc_set, int flags)
{
  struct __pthrdsinfo info;
  int size;
  if ((errno = pthread_getthrds_np(&pthread, PTHRDSINFO_QUERY_TID, &info, sizeof(info), NULL, &size)))
    return -1;
  {
    rsid_t who;
    who.at_tid = info.__pi_tid;
    return hwloc_aix_set_sth_cpubind(topology, R_THREAD, who, getpid(), hwloc_set, flags);
  }
}

static int
hwloc_aix_get_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t pthread, hwloc_bitmap_t hwloc_set, int flags)
{
  struct __pthrdsinfo info;
  int size;
  if (pthread_getthrds_np(&pthread, PTHRDSINFO_QUERY_TID, &info, sizeof(info), NULL, &size))
    return -1;
  {
    int ret, bound;
    rsid_t who;
    who.at_tid = info.__pi_tid;
    ret = hwloc_aix_get_sth_rset_cpubind(topology, R_THREAD, who, hwloc_set, flags, &bound);
    if (!ret && !bound) {
      hwloc_bitmap_zero(hwloc_set);
      ret = hwloc_aix_get_tid_getthrds_cpubind(topology, who.at_tid, hwloc_set, flags);
    }
    return ret;
  }
}
#endif /* HWLOC_HAVE_PTHREAD_GETTHRDS_NP */
#endif /* R_THREAD */

static int
hwloc_aix_get_thisthread_last_cpu_location(hwloc_topology_t topology, hwloc_bitmap_t hwloc_set, int flags __hwloc_attribute_unused)
{
  cpu_t cpu;

  if (topology->pid) {
    errno = ENOSYS;
    return -1;
  }

  cpu = mycpu();
  if (cpu < 0)
    return -1;

  hwloc_bitmap_only(hwloc_set, cpu);
  return 0;
}

#ifdef P_DEFAULT

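/* Translate a hwloc membind policy into an AIX memory affinity policy
 * for ra_attachrset()/ra_mmap(). Note that BIND maps to P_DEFAULT:
 * the restriction to specific nodes comes from the rset that the
 * policy is applied to, not from the policy value itself.
 */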
static int
hwloc_aix_membind_policy_from_hwloc(uint_t *aix_policy, int policy)
{
  switch (policy) {
  case HWLOC_MEMBIND_DEFAULT:
  case HWLOC_MEMBIND_BIND:
    *aix_policy = P_DEFAULT;
    break;
  case HWLOC_MEMBIND_FIRSTTOUCH:
    *aix_policy = P_FIRST_TOUCH;
    break;
  case HWLOC_MEMBIND_INTERLEAVE:
    *aix_policy = P_BALANCED;
    break;
  default:
    errno = ENOSYS;
    return -1;
  }
  return 0;
}

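/* Build a resource set (*rad) covering the given NUMA nodes: for each
 * node in the nodeset, fetch the corresponding rad at the MCM system
 * detail level (node os_indexes are rad numbers, see look_rset()) and
 * accumulate it into *rad with RS_UNION.
 */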
static int
hwloc_aix_prepare_membind(hwloc_topology_t topology, rsethandle_t *rad, hwloc_const_nodeset_t nodeset, int flags __hwloc_attribute_unused)
{
  rsethandle_t rset, noderad;
  int MCMlevel;
  int node;

  MCMlevel = rs_getinfo(NULL, R_MCMSDL, 0);
  if ((topology->flags & HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED))
    rset = rs_alloc(RS_ALL);
  else
    rset = rs_alloc(RS_PARTITION);
  *rad = rs_alloc(RS_EMPTY);
  noderad = rs_alloc(RS_EMPTY);

  hwloc_bitmap_foreach_begin(node, nodeset)
    /* we used MCMlevel rad number for node->os_index during lookup */
    rs_getrad(rset, noderad, MCMlevel, node, 0);
    rs_op(RS_UNION, noderad, *rad, 0, 0);
  hwloc_bitmap_foreach_end();

  rs_free(rset);
  rs_free(noderad);

  return 0;
}

static int
hwloc_aix_set_sth_membind(hwloc_topology_t topology, rstype_t what, rsid_t who, pid_t pid, hwloc_const_bitmap_t _nodeset, hwloc_membind_policy_t policy, int flags)
{
  hwloc_const_nodeset_t nodeset;
  rsethandle_t rad;
  int res;

  if (flags & HWLOC_MEMBIND_NOCPUBIND) {
    errno = ENOSYS;
    return -1;
  }

  if (policy == HWLOC_MEMBIND_DEFAULT)
    nodeset = hwloc_topology_get_complete_nodeset(topology);
  else
    nodeset = _nodeset;

  switch (policy) {
  case HWLOC_MEMBIND_DEFAULT:
  case HWLOC_MEMBIND_BIND:
    break;
  default:
    errno = ENOSYS;
    return -1;
  }

  if (hwloc_aix_prepare_membind(topology, &rad, nodeset, flags))
    return -1;

  res = ra_attachrset(what, who, rad, 0);
  if (res < 0 && errno == EPERM) {
    /* EPERM may mean that one thread has been bound with bindprocessor().
     * Unbind the entire process (we can't unbind individual threads)
     * and try again.
     */
    bindprocessor(BINDPROCESS, pid, PROCESSOR_CLASS_ANY);
    res = ra_attachrset(what, who, rad, 0);
  }

  rs_free(rad);
  return res;
}

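/* Retrieve the memory binding of (what, who). AIX only reports the
 * attached rset, so convert its CPUs back to NUMA nodes: a node is
 * reported as bound if all of its CPUs belong to the rset. The policy
 * itself is not retrievable and is always reported as BIND.
 */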
static int
hwloc_aix_get_sth_membind(hwloc_topology_t topology, rstype_t what, rsid_t who, hwloc_bitmap_t nodeset, hwloc_membind_policy_t *policy, int flags __hwloc_attribute_unused)
{
  hwloc_bitmap_t hwloc_set;
  rsethandle_t rset;
  unsigned cpu, maxcpus;
  int res = -1;
  int n, i;

  n = hwloc_get_nbobjs_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE);

  rset = rs_alloc(RS_EMPTY);

  if (ra_getrset(what, who, 0, rset) == -1)
    goto out;

  hwloc_set = hwloc_bitmap_alloc();

  maxcpus = rs_getinfo(rset, R_MAXPROCS, 0);
  for (cpu = 0; cpu < maxcpus; cpu++)
    if (rs_op(RS_TESTRESOURCE, rset, NULL, R_PROCS, cpu) == 1)
      hwloc_bitmap_set(hwloc_set, cpu);
  hwloc_bitmap_and(hwloc_set, hwloc_set, hwloc_topology_get_complete_cpuset(topology));

  hwloc_bitmap_zero(nodeset);
  for (i = 0; i < n; i++) {
    hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, i);
    if (hwloc_bitmap_isincluded(obj->cpuset, hwloc_set))
      hwloc_bitmap_set(nodeset, obj->os_index);
  }

  hwloc_bitmap_free(hwloc_set);

  *policy = HWLOC_MEMBIND_BIND;
  res = 0;

 out:
  rs_free(rset);
  return res;
}

static int
hwloc_aix_set_thisproc_membind(hwloc_topology_t topology, hwloc_const_bitmap_t hwloc_set, hwloc_membind_policy_t policy, int flags)
{
  rsid_t who;
  who.at_pid = getpid();
  return hwloc_aix_set_sth_membind(topology, R_PROCESS, who, who.at_pid, hwloc_set, policy, flags);
}

static int
hwloc_aix_get_thisproc_membind(hwloc_topology_t topology, hwloc_bitmap_t hwloc_set, hwloc_membind_policy_t *policy, int flags)
{
  rsid_t who;
  who.at_pid = getpid();
  return hwloc_aix_get_sth_membind(topology, R_PROCESS, who, hwloc_set, policy, flags);
}

#ifdef R_THREAD
static int
hwloc_aix_set_thisthread_membind(hwloc_topology_t topology, hwloc_const_bitmap_t hwloc_set, hwloc_membind_policy_t policy, int flags)
{
  rsid_t who;
  who.at_tid = thread_self();
  return hwloc_aix_set_sth_membind(topology, R_THREAD, who, getpid(), hwloc_set, policy, flags);
}

static int
hwloc_aix_get_thisthread_membind(hwloc_topology_t topology, hwloc_bitmap_t hwloc_set, hwloc_membind_policy_t *policy, int flags)
{
  rsid_t who;
  who.at_tid = thread_self();
  return hwloc_aix_get_sth_membind(topology, R_THREAD, who, hwloc_set, policy, flags);
}
#endif /* R_THREAD */

static int
hwloc_aix_set_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_bitmap_t hwloc_set, hwloc_membind_policy_t policy, int flags)
{
  rsid_t who;
  who.at_pid = pid;
  return hwloc_aix_set_sth_membind(topology, R_PROCESS, who, pid, hwloc_set, policy, flags);
}

static int
hwloc_aix_get_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t hwloc_set, hwloc_membind_policy_t *policy, int flags)
{
  rsid_t who;
  who.at_pid = pid;
  return hwloc_aix_get_sth_membind(topology, R_PROCESS, who, hwloc_set, policy, flags);
}

#ifdef R_THREAD
#if 0 /* def HWLOC_HAVE_PTHREAD_GETTHRDS_NP */
static int
hwloc_aix_set_thread_membind(hwloc_topology_t topology, hwloc_thread_t pthread, hwloc_const_bitmap_t hwloc_set, hwloc_membind_policy_t policy, int flags)
{
  struct __pthrdsinfo info;
  int size;
  if ((errno = pthread_getthrds_np(&pthread, PTHRDSINFO_QUERY_TID, &info, sizeof(info), NULL, &size)))
    return -1;
  {
    rsid_t who;
    who.at_tid = info.__pi_tid;
    return hwloc_aix_set_sth_membind(topology, R_THREAD, who, getpid(), hwloc_set, policy, flags);
  }
}

static int
hwloc_aix_get_thread_membind(hwloc_topology_t topology, hwloc_thread_t pthread, hwloc_bitmap_t hwloc_set, hwloc_membind_policy_t *policy, int flags)
{
  struct __pthrdsinfo info;
  int size;
  if (pthread_getthrds_np(&pthread, PTHRDSINFO_QUERY_TID, &info, sizeof(info), NULL, &size))
    return -1;
  {
    rsid_t who;
    who.at_tid = info.__pi_tid;
    return hwloc_aix_get_sth_membind(topology, R_THREAD, who, hwloc_set, policy, flags);
  }
}
#endif /* HWLOC_HAVE_PTHREAD_GETTHRDS_NP */
#endif /* R_THREAD */

#if 0
/* TODO: seems to be right, but doesn't seem to be working (EINVAL), even after
 * aligning the range on 64K... */
static int
hwloc_aix_set_area_membind(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_nodeset_t _nodeset, hwloc_membind_policy_t policy, int flags)
{
  hwloc_const_nodeset_t nodeset;
  subrange_t subrange;
  rsid_t rsid = { .at_subrange = &subrange };
  uint_t aix_policy;
  int res;

  if ((flags & (HWLOC_MEMBIND_MIGRATE|HWLOC_MEMBIND_STRICT))
      == (HWLOC_MEMBIND_MIGRATE|HWLOC_MEMBIND_STRICT)) {
    errno = ENOSYS;
    return -1;
  }

  if (policy == HWLOC_MEMBIND_DEFAULT)
    nodeset = hwloc_topology_get_complete_nodeset(topology);
  else
    nodeset = _nodeset;

  subrange.su_offset = (uintptr_t) addr;
  subrange.su_length = len;
  subrange.su_rstype = R_RSET;

  if (hwloc_aix_membind_policy_from_hwloc(&aix_policy, policy))
    return -1;

  if (hwloc_aix_prepare_membind(topology, &subrange.su_rsid.at_rset, nodeset, flags))
    return -1;

  subrange.su_policy = aix_policy;

  res = ra_attachrset(R_SUBRANGE, rsid, subrange.su_rsid.at_rset, 0);
  if (res < 0 && errno == EPERM) {
    /* EPERM may mean that one thread has been bound with bindprocessor().
     * Unbind the entire process (we can't unbind individual threads)
     * and try again.
     * FIXME: actually check that this EPERM can happen
     */
    bindprocessor(BINDPROCESS, getpid(), PROCESSOR_CLASS_ANY);
    res = ra_attachrset(R_SUBRANGE, rsid, subrange.su_rsid.at_rset, 0);
  }

  rs_free(subrange.su_rsid.at_rset);
  return res;
}
#endif

static void *
hwloc_aix_alloc_membind(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t _nodeset, hwloc_membind_policy_t policy, int flags)
{
  hwloc_const_nodeset_t nodeset;
  void *ret;
  rsid_t rsid;
  uint_t aix_policy;

  if (policy == HWLOC_MEMBIND_DEFAULT)
    nodeset = hwloc_topology_get_complete_nodeset(topology);
  else
    nodeset = _nodeset;

  if (hwloc_aix_membind_policy_from_hwloc(&aix_policy, policy))
    return hwloc_alloc_or_fail(topology, len, flags);

  if (hwloc_aix_prepare_membind(topology, &rsid.at_rset, nodeset, flags))
    return hwloc_alloc_or_fail(topology, len, flags);

  ret = ra_mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0, R_RSET, rsid, aix_policy);

  rs_free(rsid.at_rset);
  return ret == (void*)-1 ? NULL : ret;
}
#endif /* P_DEFAULT */

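/* Discover the rads (resource allocation domains) at system detail
 * level sdl and insert one object of the given type per rad, with a
 * cpuset built from the rad's processors. Caches and cores also get
 * their attributes filled from _system_configuration.
 */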
static void
look_rset(int sdl, hwloc_obj_type_t type, struct hwloc_topology *topology, int level)
{
  rsethandle_t rset, rad;
  int i, maxcpus, j;
  int nbnodes;
  struct hwloc_obj *obj;

  if ((topology->flags & HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED))
    rset = rs_alloc(RS_ALL);
  else
    rset = rs_alloc(RS_PARTITION);
  rad = rs_alloc(RS_EMPTY);
  nbnodes = rs_numrads(rset, sdl, 0);
  if (nbnodes == -1) {
    perror("rs_numrads");
    return;
  }

  for (i = 0; i < nbnodes; i++) {
    hwloc_bitmap_t cpuset;
    unsigned os_index = HWLOC_UNKNOWN_INDEX; /* no os_index except for PU and NUMANODE below */

    if (rs_getrad(rset, rad, sdl, i, 0)) {
      fprintf(stderr, "rs_getrad(%d) failed: %s\n", i, strerror(errno));
      continue;
    }
    if (!rs_getinfo(rad, R_NUMPROCS, 0))
      continue;

    maxcpus = rs_getinfo(rad, R_MAXPROCS, 0);
    cpuset = hwloc_bitmap_alloc();
    for (j = 0; j < maxcpus; j++) {
      if (rs_op(RS_TESTRESOURCE, rad, NULL, R_PROCS, j))
        hwloc_bitmap_set(cpuset, j);
    }

    if (type == HWLOC_OBJ_PU) {
      os_index = hwloc_bitmap_first(cpuset);
      hwloc_debug("Found PU #%u inside node %d for sdl %d\n", os_index, i, sdl);
      assert(hwloc_bitmap_weight(cpuset) == 1);
    } else if (type == HWLOC_OBJ_NUMANODE) {
      /* NUMA node os_index isn't used for binding, just use the rad number to get unique values.
       * Note that we'll use that fact in hwloc_aix_prepare_membind(). */
      os_index = i;
      hwloc_debug("Using os_index #%u for NUMA node inside node %d for sdl %d\n", os_index, i, sdl);
    }

    obj = hwloc_alloc_setup_object(topology, type, os_index);
    obj->cpuset = cpuset;

    switch (type) {
    case HWLOC_OBJ_NUMANODE:
      obj->nodeset = hwloc_bitmap_alloc();
      hwloc_bitmap_set(obj->nodeset, i);
      obj->attr->numanode.local_memory = 0; /* TODO: odd, rs_getinfo(rad, R_MEMSIZE, 0) << 10 returns the total memory ... */
      obj->attr->numanode.page_types_len = 2;
      obj->attr->numanode.page_types = malloc(2*sizeof(*obj->attr->numanode.page_types));
      memset(obj->attr->numanode.page_types, 0, 2*sizeof(*obj->attr->numanode.page_types));
      obj->attr->numanode.page_types[0].size = hwloc_getpagesize();
#if HAVE_DECL__SC_LARGE_PAGESIZE
      obj->attr->numanode.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif
      /* TODO: obj->attr->numanode.page_types[1].count = rs_getinfo(rset, R_LGPGFREE, 0) / hugepagesize */
      break;
    case HWLOC_OBJ_L2CACHE:
      obj->attr->cache.size = _system_configuration.L2_cache_size;
      obj->attr->cache.associativity = _system_configuration.L2_cache_asc;

      obj->attr->cache.linesize = 0; /* unknown by default */
      if (__power_pc())
        if (__power_4() || __power_5() || __power_6() || __power_7())
          obj->attr->cache.linesize = 128;

      obj->attr->cache.depth = 2;
      obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED; /* OK for power[4567], unknown for others */
      break;
    case HWLOC_OBJ_GROUP:
      obj->attr->group.kind = HWLOC_GROUP_KIND_AIX_SDL_UNKNOWN;
      obj->attr->group.subkind = level;
      break;
    case HWLOC_OBJ_CORE:
    {
      hwloc_obj_t obj2, obj3;
      obj2 = hwloc_alloc_setup_object(topology, HWLOC_OBJ_L1CACHE, HWLOC_UNKNOWN_INDEX);
      obj2->cpuset = hwloc_bitmap_dup(obj->cpuset);
      obj2->attr->cache.size = _system_configuration.dcache_size;
      obj2->attr->cache.associativity = _system_configuration.dcache_asc;
      obj2->attr->cache.linesize = _system_configuration.dcache_line;
      obj2->attr->cache.depth = 1;
      if (_system_configuration.cache_attrib & (1<<30)) {
        /* Unified cache */
        obj2->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
        hwloc_debug("Adding an L1u cache for core %d\n", i);
      } else {
        /* Separate Instruction and Data caches */
        obj2->attr->cache.type = HWLOC_OBJ_CACHE_DATA;
        hwloc_debug("Adding an L1d cache for core %d\n", i);

        if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_L1ICACHE)) {
          obj3 = hwloc_alloc_setup_object(topology, HWLOC_OBJ_L1ICACHE, HWLOC_UNKNOWN_INDEX);
          obj3->cpuset = hwloc_bitmap_dup(obj->cpuset);
          obj3->attr->cache.size = _system_configuration.icache_size;
          obj3->attr->cache.associativity = _system_configuration.icache_asc;
          obj3->attr->cache.linesize = _system_configuration.icache_line;
          obj3->attr->cache.depth = 1;
          obj3->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
          hwloc_debug("Adding an L1i cache for core %d\n", i);
          hwloc__insert_object_by_cpuset(topology, NULL, obj3, "aix:l1icache");
        }
      }
      if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_L1CACHE))
        hwloc__insert_object_by_cpuset(topology, NULL, obj2, "aix:l1cache");
      else
        hwloc_free_unlinked_object(obj2); /* FIXME: don't build it at all, just build the cpuset in case l1/l1i needs it */
      break;
    }
    default:
      break;
    }
    hwloc_debug_2args_bitmap("%s %d has cpuset %s\n",
                             hwloc_obj_type_string(type),
                             i, obj->cpuset);
    if (hwloc_filter_check_keep_object_type(topology, obj->type))
      hwloc__insert_object_by_cpuset(topology, NULL, obj, "aix:cache");
    else
      hwloc_free_unlinked_object(obj);
  }

  rs_free(rset);
  rs_free(rad);
}
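
/* Main discovery routine: walk all system detail levels (SDLs) reported
 * by the kernel and map the well-known ones (MCM, L2, core, and the
 * finest level) to hwloc object types; unrecognized levels become Groups.
 */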
static int
hwloc_look_aix(struct hwloc_backend *backend, struct hwloc_disc_status *dstatus)
{
  /*
   * This backend uses the underlying OS.
   * However we don't enforce topology->is_thissystem so that
   * we may still force the use of this backend when debugging with !thissystem.
   */

  struct hwloc_topology *topology = backend->topology;
  int i;

  assert(dstatus->phase == HWLOC_DISC_PHASE_CPU);

  if (topology->levels[0][0]->cpuset)
    /* somebody discovered things */
    return -1;

  hwloc_alloc_root_sets(topology->levels[0][0]);

  /* TODO: R_LGPGDEF/R_LGPGFREE for large pages */

  hwloc_debug("Note: SMPSDL is at %d\n", rs_getinfo(NULL, R_SMPSDL, 0));
#ifdef R_REF1SDL
  hwloc_debug("Note: REF1SDL is at %d\n", rs_getinfo(NULL, R_REF1SDL, 0));
#endif

  for (i = 0; i <= rs_getinfo(NULL, R_MAXSDL, 0); i++)
    {
      int known = 0;
#if 0
      if (i == rs_getinfo(NULL, R_SMPSDL, 0))
        /* Not enabled for now because I'm not sure what it corresponds to. On
         * decrypthon it contains all the cpus. Is it a "machine" or a "system"
         * level?
         */
        {
          hwloc_debug("looking AIX \"SMP\" sdl %d\n", i);
          look_rset(i, HWLOC_OBJ_MACHINE, topology, i);
          known = 1;
        }
#endif
      if (i == rs_getinfo(NULL, R_MCMSDL, 0))
        {
          hwloc_debug("looking AIX node sdl %d\n", i);
          look_rset(i, HWLOC_OBJ_NUMANODE, topology, i);
          known = 1;
          topology->support.discovery->numa = 1;
        }
# ifdef R_L2CSDL
      if (i == rs_getinfo(NULL, R_L2CSDL, 0))
        {
          hwloc_debug("looking AIX L2 sdl %d\n", i);
          look_rset(i, HWLOC_OBJ_L2CACHE, topology, i);
          known = 1;
        }
# endif
# ifdef R_PCORESDL
      if (i == rs_getinfo(NULL, R_PCORESDL, 0))
        {
          hwloc_debug("looking AIX core sdl %d\n", i);
          look_rset(i, HWLOC_OBJ_CORE, topology, i);
          known = 1;
        }
# endif
      if (i == rs_getinfo(NULL, R_MAXSDL, 0))
        {
          hwloc_debug("looking AIX max sdl %d\n", i);
          look_rset(i, HWLOC_OBJ_PU, topology, i);
          known = 1;
          topology->support.discovery->pu = 1;
        }

      /* We don't know what this level corresponds to, expose it as a Group object. */
      if (!known)
        {
          hwloc_debug("looking AIX unknown sdl %d\n", i);
          look_rset(i, HWLOC_OBJ_GROUP, topology, i);
        }
    }

  hwloc_obj_add_info(topology->levels[0][0], "Backend", "AIX");
  hwloc_add_uname_info(topology, NULL);
  return 0;
}

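/* Register the AIX binding hooks and advertise the corresponding
 * topology support bits. Memory binding is only available when the
 * memory-affinity API (P_DEFAULT and friends) was found at build time.
 */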
void
hwloc_set_aix_hooks(struct hwloc_binding_hooks *hooks,
                    struct hwloc_topology_support *support __hwloc_attribute_unused)
{
  hooks->set_proc_cpubind = hwloc_aix_set_proc_cpubind;
  hooks->get_proc_cpubind = hwloc_aix_get_proc_cpubind;
#ifdef R_THREAD
#ifdef HWLOC_HAVE_PTHREAD_GETTHRDS_NP
  hooks->set_thread_cpubind = hwloc_aix_set_thread_cpubind;
  hooks->get_thread_cpubind = hwloc_aix_get_thread_cpubind;
#endif /* HWLOC_HAVE_PTHREAD_GETTHRDS_NP */
#endif /* R_THREAD */
  hooks->set_thisproc_cpubind = hwloc_aix_set_thisproc_cpubind;
  hooks->get_thisproc_cpubind = hwloc_aix_get_thisproc_cpubind;
#ifdef R_THREAD
  hooks->set_thisthread_cpubind = hwloc_aix_set_thisthread_cpubind;
  hooks->get_thisthread_cpubind = hwloc_aix_get_thisthread_cpubind;
#endif /* R_THREAD */
  hooks->get_thisthread_last_cpu_location = hwloc_aix_get_thisthread_last_cpu_location;
  /* TODO: get_last_cpu_location: mycpu() only works for the current thread? */
#ifdef P_DEFAULT
  hooks->set_proc_membind = hwloc_aix_set_proc_membind;
  hooks->get_proc_membind = hwloc_aix_get_proc_membind;
#ifdef R_THREAD
#if 0 /* def HWLOC_HAVE_PTHREAD_GETTHRDS_NP */
  /* Does it really make sense to set the memory binding of another thread? */
  hooks->set_thread_membind = hwloc_aix_set_thread_membind;
  hooks->get_thread_membind = hwloc_aix_get_thread_membind;
#endif /* HWLOC_HAVE_PTHREAD_GETTHRDS_NP */
#endif /* R_THREAD */
  hooks->set_thisproc_membind = hwloc_aix_set_thisproc_membind;
  hooks->get_thisproc_membind = hwloc_aix_get_thisproc_membind;
#ifdef R_THREAD
  hooks->set_thisthread_membind = hwloc_aix_set_thisthread_membind;
  hooks->get_thisthread_membind = hwloc_aix_get_thisthread_membind;
#endif /* R_THREAD */
  /* hooks->set_area_membind = hwloc_aix_set_area_membind; */
  /* get_area_membind is not available */
  hooks->alloc_membind = hwloc_aix_alloc_membind;
  hooks->alloc = hwloc_alloc_mmap;
  hooks->free_membind = hwloc_free_mmap;
  support->membind->firsttouch_membind = 1;
  support->membind->bind_membind = 1;
  support->membind->interleave_membind = 1;
#endif /* P_DEFAULT */
}

static struct hwloc_backend *
hwloc_aix_component_instantiate(struct hwloc_topology *topology,
                                struct hwloc_disc_component *component,
                                unsigned excluded_phases __hwloc_attribute_unused,
                                const void *_data1 __hwloc_attribute_unused,
                                const void *_data2 __hwloc_attribute_unused,
                                const void *_data3 __hwloc_attribute_unused)
{
  struct hwloc_backend *backend;
  backend = hwloc_backend_alloc(topology, component);
  if (!backend)
    return NULL;
  backend->discover = hwloc_look_aix;
  return backend;
}

static struct hwloc_disc_component hwloc_aix_disc_component = {
  "aix",
  HWLOC_DISC_PHASE_CPU,
  HWLOC_DISC_PHASE_GLOBAL,
  hwloc_aix_component_instantiate,
  50,
  1,
  NULL
};

const struct hwloc_component hwloc_aix_component = {
  HWLOC_COMPONENT_ABI,
  NULL, NULL,
  HWLOC_COMPONENT_TYPE_DISC,
  0,
  &hwloc_aix_disc_component
};