/*
 * kmp_affinity.cpp -- affinity management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but is set to "none".
  // We need to initialize on first use of the hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(NULL, nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}

#if KMP_AFFINITY_SUPPORTED

bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }

void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity_type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}

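// Advance "scan" to the terminating NUL, so that the next KMP_SNPRINTF
// call appends at the end of what has been written so far.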
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }

// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
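// e.g., a mask with bits {0,1,2,5,9,10,11} set prints as "0-2,5,9-11";
// note that exactly two contiguous bits, e.g. {3,4}, print as "3,4",
// not "3-4" (see the one-or-two-bit branch below).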
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN

// Print the affinity mask to the string buffer object in a pretty format
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
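// Unlike __kmp_affinity_print_mask() above, the kmp_str_buf_t grows on
// demand, so no buffer-length or overflow checks are needed here.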
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}

void __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  KMP_CPU_ZERO(mask);

#if KMP_GROUP_AFFINITY

  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
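      // Flatten (group, proc) into a single global index: each Windows
      // processor group occupies CHAR_BIT * sizeof(DWORD_PTR) slots
      // (64 on a 64-bit OS).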
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
      }
    }
  } else

#endif /* KMP_GROUP_AFFINITY */

  {
    int proc;
    for (proc = 0; proc < __kmp_xproc; proc++) {
      KMP_CPU_SET(proc, mask);
    }
  }
}

// When sorting by labels, __kmp_affinity_assign_child_nums() must first be
// called to renumber the labels from [0..n] and place them into the child_num
// vector of the address object. This is done in case the labels used for
// the children at one node of the hierarchy differ from those used for
// another node at the same level. Example: suppose the machine has 2 nodes
// with 2 packages each. The first node contains packages 601 and 602, and
// the second node contains packages 603 and 604. If we try to sort the table
// for "scatter" affinity, the table will still be sorted 601, 602, 603, 604
// because we are paying attention to the labels themselves, not the ordinal
// child numbers. By using the child numbers in the sort, the result is
// {0,0}=601, {0,1}=603, {1,0}=602, {1,1}=604.
static void __kmp_affinity_assign_child_nums(AddrUnsPair *address2os,
                                             int numAddrs) {
  KMP_DEBUG_ASSERT(numAddrs > 0);
  int depth = address2os->first.depth;
  unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
  unsigned *lastLabel = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
  int labCt;
  for (labCt = 0; labCt < depth; labCt++) {
    address2os[0].first.childNums[labCt] = counts[labCt] = 0;
    lastLabel[labCt] = address2os[0].first.labels[labCt];
  }
  int i;
  for (i = 1; i < numAddrs; i++) {
    for (labCt = 0; labCt < depth; labCt++) {
      if (address2os[i].first.labels[labCt] != lastLabel[labCt]) {
        int labCt2;
        for (labCt2 = labCt + 1; labCt2 < depth; labCt2++) {
          counts[labCt2] = 0;
          lastLabel[labCt2] = address2os[i].first.labels[labCt2];
        }
        counts[labCt]++;
        lastLabel[labCt] = address2os[i].first.labels[labCt];
        break;
      }
    }
    for (labCt = 0; labCt < depth; labCt++) {
      address2os[i].first.childNums[labCt] = counts[labCt];
    }
    for (; labCt < (int)Address::maxDepth; labCt++) {
      address2os[i].first.childNums[labCt] = 0;
    }
  }
  __kmp_free(lastLabel);
  __kmp_free(counts);
}

// All of the __kmp_affinity_create_*_map() routines should set
// __kmp_affinity_masks to a vector of affinity mask objects of length
// __kmp_affinity_num_masks, if __kmp_affinity_type != affinity_none, and return
// the number of levels in the machine topology tree (zero if
// __kmp_affinity_type == affinity_none).
//
// All of the __kmp_affinity_create_*_map() routines should set
// *__kmp_affin_fullMask to the affinity mask for the initialization thread.
// They need to save and restore the mask, and it could be needed later, so
// saving it is just an optimization to avoid calling kmp_get_system_affinity()
// again.
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;

static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
static int *__kmp_pu_os_idx = NULL;

// __kmp_affinity_uniform_topology() doesn't work when called from
// places which support arbitrarily many levels in the machine topology
// map, i.e. the non-default cases in __kmp_affinity_create_cpuinfo_map()
// or __kmp_affinity_create_x2apicid_map().
inline static bool __kmp_affinity_uniform_topology() {
  return __kmp_avail_proc == (__kmp_nThreadsPerCore * nCoresPerPkg * nPackages);
}

// Print out the detailed machine topology map, i.e. the physical locations
// of each OS proc.
static void __kmp_affinity_print_topology(AddrUnsPair *address2os, int len,
                                          int depth, int pkgLevel,
                                          int coreLevel, int threadLevel) {
  int proc;

  KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
  for (proc = 0; proc < len; proc++) {
    int level;
    kmp_str_buf_t buf;
    __kmp_str_buf_init(&buf);
    for (level = 0; level < depth; level++) {
      if (level == threadLevel) {
        __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Thread));
      } else if (level == coreLevel) {
        __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Core));
      } else if (level == pkgLevel) {
        __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Package));
      } else if (level > pkgLevel) {
        __kmp_str_buf_print(&buf, "%s_%d ", KMP_I18N_STR(Node),
                            level - pkgLevel - 1);
      } else {
        __kmp_str_buf_print(&buf, "L%d ", level);
      }
      __kmp_str_buf_print(&buf, "%d ", address2os[proc].first.labels[level]);
    }
    KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", address2os[proc].second,
               buf.str);
    __kmp_str_buf_free(&buf);
  }
}

#if KMP_USE_HWLOC

static void __kmp_affinity_print_hwloc_tp(AddrUnsPair *addrP, int len,
                                          int depth, int *levels) {
  int proc;
  kmp_str_buf_t buf;
  __kmp_str_buf_init(&buf);
  KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
  for (proc = 0; proc < len; proc++) {
    __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Package),
                        addrP[proc].first.labels[0]);
    if (depth > 1) {
      int level = 1; // iterate over levels
      int label = 1; // iterate over labels
      if (__kmp_numa_detected)
        // node level follows package
        if (levels[level++] > 0)
          __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Node),
                              addrP[proc].first.labels[label++]);
      if (__kmp_tile_depth > 0)
        // tile level follows node if any, or package
        if (levels[level++] > 0)
          __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Tile),
                              addrP[proc].first.labels[label++]);
      if (levels[level++] > 0)
        // core level follows
        __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Core),
                            addrP[proc].first.labels[label++]);
      if (levels[level++] > 0)
        // thread level is the last
        __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Thread),
                            addrP[proc].first.labels[label++]);
      KMP_DEBUG_ASSERT(label == depth);
    }
    KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", addrP[proc].second, buf.str);
    __kmp_str_buf_clear(&buf);
  }
  __kmp_str_buf_free(&buf);
}

static int nNodePerPkg, nTilePerPkg, nTilePerNode, nCorePerNode, nCorePerTile;

// This function removes topology levels that are radix 1 and therefore add
// no information about the topology. The most common example is when there
// is one thread context per core: the extra thread-context level offers no
// unique labels, so it is removed.
// return value: the new depth of address2os
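// For example, a depth-3 map (package, core, thread) on a machine with one
// hardware thread per core is compressed here to depth 2 (package, core).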
static int __kmp_affinity_remove_radix_one_levels(AddrUnsPair *addrP, int nTh,
                                                  int depth, int *levels) {
  int level;
  int i;
  int radix1_detected;
  int new_depth = depth;
  for (level = depth - 1; level > 0; --level) {
    // Detect if this level is radix 1
    radix1_detected = 1;
    for (i = 1; i < nTh; ++i) {
      if (addrP[0].first.labels[level] != addrP[i].first.labels[level]) {
        // There are differing label values for this level so it stays
        radix1_detected = 0;
        break;
      }
    }
    if (!radix1_detected)
      continue;
    // Radix 1 was detected
    --new_depth;
    levels[level] = -1; // mark level as not present in address2os array
    if (level == new_depth) {
      // To "turn off" the deepest level, just decrement the depth, which
      // removes the level from the address2os array
      for (i = 0; i < nTh; ++i) {
        addrP[i].first.depth--;
      }
    } else {
      // For other levels, we move labels over and also reduce the depth
      int j;
      for (j = level; j < new_depth; ++j) {
        for (i = 0; i < nTh; ++i) {
          addrP[i].first.labels[j] = addrP[i].first.labels[j + 1];
          addrP[i].first.depth--;
        }
        levels[j + 1] -= 1;
      }
    }
  }
  return new_depth;
}

// Returns the number of objects of type 'type' below 'obj' within the topology
// tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is
// HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET
// object.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL &&
       hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, obj->type, first) ==
           obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}

static int __kmp_hwloc_count_children_by_depth(hwloc_topology_t t,
                                               hwloc_obj_t o,
                                               kmp_hwloc_depth_t depth,
                                               hwloc_obj_t *f) {
  if (o->depth == depth) {
    if (*f == NULL)
      *f = o; // output first descendant found
    return 1;
  }
  int sum = 0;
  for (unsigned i = 0; i < o->arity; i++)
    sum += __kmp_hwloc_count_children_by_depth(t, o->children[i], depth, f);
  return sum; // will be 0 if none were found (a PU's arity is 0)
}

static int __kmp_hwloc_count_children_by_type(hwloc_topology_t t, hwloc_obj_t o,
                                              hwloc_obj_type_t type,
                                              hwloc_obj_t *f) {
  if (!hwloc_compare_types(o->type, type)) {
    if (*f == NULL)
      *f = o; // output first descendant found
    return 1;
  }
  int sum = 0;
  for (unsigned i = 0; i < o->arity; i++)
    sum += __kmp_hwloc_count_children_by_type(t, o->children[i], type, f);
  return sum; // will be 0 if none were found (a PU's arity is 0)
}

static int __kmp_hwloc_process_obj_core_pu(AddrUnsPair *addrPair,
                                           int &nActiveThreads,
                                           int &num_active_cores,
                                           hwloc_obj_t obj, int depth,
                                           int *labels) {
  hwloc_obj_t core = NULL;
  hwloc_topology_t &tp = __kmp_hwloc_topology;
  int NC = __kmp_hwloc_count_children_by_type(tp, obj, HWLOC_OBJ_CORE, &core);
  for (int core_id = 0; core_id < NC; ++core_id, core = core->next_cousin) {
    hwloc_obj_t pu = NULL;
    KMP_DEBUG_ASSERT(core != NULL);
    int num_active_threads = 0;
    int NT = __kmp_hwloc_count_children_by_type(tp, core, HWLOC_OBJ_PU, &pu);
    // int NT = core->arity; pu = core->first_child; // faster?
    for (int pu_id = 0; pu_id < NT; ++pu_id, pu = pu->next_cousin) {
      KMP_DEBUG_ASSERT(pu != NULL);
      if (!KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask))
        continue; // skip inactive (inaccessible) unit
      Address addr(depth + 2);
      KA_TRACE(20, ("Hwloc inserting %d (%d) %d (%d) %d (%d) into address2os\n",
                    obj->os_index, obj->logical_index, core->os_index,
                    core->logical_index, pu->os_index, pu->logical_index));
      for (int i = 0; i < depth; ++i)
        addr.labels[i] = labels[i]; // package, etc.
      addr.labels[depth] = core_id; // core
      addr.labels[depth + 1] = pu_id; // pu
      addrPair[nActiveThreads] = AddrUnsPair(addr, pu->os_index);
      __kmp_pu_os_idx[nActiveThreads] = pu->os_index;
      nActiveThreads++;
      ++num_active_threads; // count active threads per core
    }
    if (num_active_threads) { // were there any active threads on the core?
      ++__kmp_ncores; // count total active cores
      ++num_active_cores; // count active cores per socket
      if (num_active_threads > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = num_active_threads; // calc maximum
    }
  }
  return 0;
}

// Check whether a NUMA node is detected below the package level, and whether
// a tile object is detected; if so, record the tile depth.
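// Here a "tile" is approximated by the unified L2 cache level when that level
// groups more than one core (e.g., on Intel(R) Xeon Phi(TM), where two cores
// share an L2).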
static int __kmp_hwloc_check_numa() {
  hwloc_topology_t &tp = __kmp_hwloc_topology;
  hwloc_obj_t hT, hC, hL, hN, hS; // hwloc objects (pointers to)
  int depth, l2cache_depth, package_depth;

  // Get some PU
  hT = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PU, 0);
  if (hT == NULL) // something has gone wrong
    return 1;

  // check NUMA node below PACKAGE
  hN = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hT);
  hS = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hT);
  KMP_DEBUG_ASSERT(hS != NULL);
  if (hN != NULL && hN->depth > hS->depth) {
    __kmp_numa_detected = TRUE; // socket includes node(s)
    if (__kmp_affinity_gran == affinity_gran_node) {
      __kmp_affinity_gran = affinity_gran_numa;
    }
  }

  package_depth = hwloc_get_type_depth(tp, HWLOC_OBJ_PACKAGE);
  l2cache_depth = hwloc_get_cache_type_depth(tp, 2, HWLOC_OBJ_CACHE_UNIFIED);
  // check tile, get object by depth because of multiple caches possible
  depth = (l2cache_depth < package_depth) ? package_depth : l2cache_depth;
  hL = hwloc_get_ancestor_obj_by_depth(tp, depth, hT);
  hC = NULL; // not used, but reset it here just in case
  if (hL != NULL &&
      __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC) > 1)
    __kmp_tile_depth = depth; // tile consists of multiple cores
  return 0;
}

static int __kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
                                           kmp_i18n_id_t *const msg_id) {
  hwloc_topology_t &tp = __kmp_hwloc_topology; // shortcut of a long name
  *address2os = NULL;
  *msg_id = kmp_i18n_null;

  // Save the affinity mask for the current thread.
  kmp_affin_mask_t *oldMask;
  KMP_CPU_ALLOC(oldMask);
  __kmp_get_system_affinity(oldMask, TRUE);
  __kmp_hwloc_check_numa();

  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    // hwloc only guarantees existence of the PU object, so check PACKAGE and
    // CORE explicitly
    hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
    if (o != NULL)
      nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
    else
      nCoresPerPkg = 1; // no PACKAGE found
    o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
    if (o != NULL)
      __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
    else
      __kmp_nThreadsPerCore = 1; // no CORE found
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    if (nCoresPerPkg == 0)
      nCoresPerPkg = 1; // to prevent possible division by 0
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      if (__kmp_affinity_uniform_topology()) {
        KMP_INFORM(Uniform, "KMP_AFFINITY");
      } else {
        KMP_INFORM(NonUniform, "KMP_AFFINITY");
      }
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }
    KMP_CPU_FREE(oldMask);
    return 0;
  }

  int depth = 3;
  int levels[5] = {0, 1, 2, 3, 4}; // package, [node,] [tile,] core, thread
  int labels[3] = {0}; // package [,node] [,tile] - head of labels array
  if (__kmp_numa_detected)
    ++depth;
  if (__kmp_tile_depth)
    ++depth;

  // Allocate the data structure to be returned.
  AddrUnsPair *retval =
      (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore,
  // nCoresPerPkg, & nPackages. Make sure all these vars are set
  // correctly, and return if affinity is not enabled.

  hwloc_obj_t socket, node, tile;
  int nActiveThreads = 0;
  int socket_id = 0;
  // re-calculate globals to count only accessible resources
  __kmp_ncores = nPackages = nCoresPerPkg = __kmp_nThreadsPerCore = 0;
  nNodePerPkg = nTilePerPkg = nTilePerNode = nCorePerNode = nCorePerTile = 0;
  for (socket = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0); socket != NULL;
       socket = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PACKAGE, socket),
      socket_id++) {
    labels[0] = socket_id;
    if (__kmp_numa_detected) {
      int NN;
      int n_active_nodes = 0;
      node = NULL;
      NN = __kmp_hwloc_count_children_by_type(tp, socket, HWLOC_OBJ_NUMANODE,
                                              &node);
      for (int node_id = 0; node_id < NN; ++node_id, node = node->next_cousin) {
        labels[1] = node_id;
        if (__kmp_tile_depth) {
          // NUMA + tiles
          int NT;
          int n_active_tiles = 0;
          tile = NULL;
          NT = __kmp_hwloc_count_children_by_depth(tp, node, __kmp_tile_depth,
                                                   &tile);
          for (int tl_id = 0; tl_id < NT; ++tl_id, tile = tile->next_cousin) {
            labels[2] = tl_id;
            int n_active_cores = 0;
            __kmp_hwloc_process_obj_core_pu(retval, nActiveThreads,
                                            n_active_cores, tile, 3, labels);
            if (n_active_cores) { // were there any active cores on the tile?
              ++n_active_tiles; // count active tiles per node
              if (n_active_cores > nCorePerTile)
                nCorePerTile = n_active_cores; // calc maximum
            }
          }
          if (n_active_tiles) { // were there any active tiles on the node?
            ++n_active_nodes; // count active nodes per package
            if (n_active_tiles > nTilePerNode)
              nTilePerNode = n_active_tiles; // calc maximum
          }
        } else {
          // NUMA, no tiles
          int n_active_cores = 0;
          __kmp_hwloc_process_obj_core_pu(retval, nActiveThreads,
                                          n_active_cores, node, 2, labels);
          if (n_active_cores) { // were there any active cores on the node?
            ++n_active_nodes; // count active nodes per package
            if (n_active_cores > nCorePerNode)
              nCorePerNode = n_active_cores; // calc maximum
          }
        }
      }
      if (n_active_nodes) { // were there any active nodes on the socket?
        ++nPackages; // count total active packages
        if (n_active_nodes > nNodePerPkg)
          nNodePerPkg = n_active_nodes; // calc maximum
      }
    } else {
      if (__kmp_tile_depth) {
        // no NUMA, tiles
        int NT;
        int n_active_tiles = 0;
        tile = NULL;
        NT = __kmp_hwloc_count_children_by_depth(tp, socket, __kmp_tile_depth,
                                                 &tile);
        for (int tl_id = 0; tl_id < NT; ++tl_id, tile = tile->next_cousin) {
          labels[1] = tl_id;
          int n_active_cores = 0;
          __kmp_hwloc_process_obj_core_pu(retval, nActiveThreads,
                                          n_active_cores, tile, 2, labels);
          if (n_active_cores) { // were there any active cores on the tile?
            ++n_active_tiles; // count active tiles per package
            if (n_active_cores > nCorePerTile)
              nCorePerTile = n_active_cores; // calc maximum
          }
        }
        if (n_active_tiles) { // were there any active tiles on the socket?
          ++nPackages; // count total active packages
          if (n_active_tiles > nTilePerPkg)
            nTilePerPkg = n_active_tiles; // calc maximum
        }
      } else {
        // no NUMA, no tiles
        int n_active_cores = 0;
        __kmp_hwloc_process_obj_core_pu(retval, nActiveThreads, n_active_cores,
                                        socket, 1, labels);
        if (n_active_cores) { // were there any active cores on the socket?
          ++nPackages; // count total active packages
          if (n_active_cores > nCoresPerPkg)
            nCoresPerPkg = n_active_cores; // calc maximum
        }
      }
    }
  }

  // If there's only one thread context to bind to, return now.
  KMP_DEBUG_ASSERT(nActiveThreads == __kmp_avail_proc);
  KMP_ASSERT(nActiveThreads > 0);
  if (nActiveThreads == 1) {
    __kmp_ncores = nPackages = 1;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      KMP_INFORM(Uniform, "KMP_AFFINITY");
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }

    if (__kmp_affinity_type == affinity_none) {
      __kmp_free(retval);
      KMP_CPU_FREE(oldMask);
      return 0;
    }

    // Form an Address object which only includes the package level.
    Address addr(1);
    addr.labels[0] = retval[0].first.labels[0];
    retval[0].first = addr;

    if (__kmp_affinity_gran_levels < 0) {
      __kmp_affinity_gran_levels = 0;
    }

    if (__kmp_affinity_verbose) {
      __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
    }

    *address2os = retval;
    KMP_CPU_FREE(oldMask);
    return 1;
  }

  // Sort the table by physical Id.
  qsort(retval, nActiveThreads, sizeof(*retval),
        __kmp_affinity_cmp_Address_labels);

  // Check to see if the machine topology is uniform
  int nPUs = nPackages * __kmp_nThreadsPerCore;
  if (__kmp_numa_detected) {
    if (__kmp_tile_depth) { // NUMA + tiles
      nPUs *= (nNodePerPkg * nTilePerNode * nCorePerTile);
    } else { // NUMA, no tiles
      nPUs *= (nNodePerPkg * nCorePerNode);
    }
  } else {
    if (__kmp_tile_depth) { // no NUMA, tiles
      nPUs *= (nTilePerPkg * nCorePerTile);
    } else { // no NUMA, no tiles
      nPUs *= nCoresPerPkg;
    }
  }
  unsigned uniform = (nPUs == nActiveThreads);

  // Print the machine topology summary.
  if (__kmp_affinity_verbose) {
    KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
    if (uniform) {
      KMP_INFORM(Uniform, "KMP_AFFINITY");
    } else {
      KMP_INFORM(NonUniform, "KMP_AFFINITY");
    }
    if (__kmp_numa_detected) {
      if (__kmp_tile_depth) { // NUMA + tiles
        KMP_INFORM(TopologyExtraNoTi, "KMP_AFFINITY", nPackages, nNodePerPkg,
                   nTilePerNode, nCorePerTile, __kmp_nThreadsPerCore,
                   __kmp_ncores);
      } else { // NUMA, no tiles
        KMP_INFORM(TopologyExtraNode, "KMP_AFFINITY", nPackages, nNodePerPkg,
                   nCorePerNode, __kmp_nThreadsPerCore, __kmp_ncores);
      }
    } else {
      if (__kmp_tile_depth) { // no NUMA, tiles
        KMP_INFORM(TopologyExtraTile, "KMP_AFFINITY", nPackages, nTilePerPkg,
                   nCorePerTile, __kmp_nThreadsPerCore, __kmp_ncores);
      } else { // no NUMA, no tiles
        kmp_str_buf_t buf;
        __kmp_str_buf_init(&buf);
        __kmp_str_buf_print(&buf, "%d", nPackages);
        KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
                   __kmp_nThreadsPerCore, __kmp_ncores);
        __kmp_str_buf_free(&buf);
      }
    }
  }

  if (__kmp_affinity_type == affinity_none) {
    __kmp_free(retval);
    KMP_CPU_FREE(oldMask);
    return 0;
  }

  int depth_full = depth; // number of levels before compressing
  // Find any levels with radix 1, and remove them from the map
  // (except for the package level).
  depth = __kmp_affinity_remove_radix_one_levels(retval, nActiveThreads, depth,
                                                 levels);
  KMP_DEBUG_ASSERT(__kmp_affinity_gran != affinity_gran_default);
  if (__kmp_affinity_gran_levels < 0) {
    // Set the granularity level based on what levels are modeled
    // in the machine topology map.
    __kmp_affinity_gran_levels = 0; // lowest level (e.g. fine)
    if (__kmp_affinity_gran > affinity_gran_thread) {
      for (int i = 1; i <= depth_full; ++i) {
        if (__kmp_affinity_gran <= i) // only count deeper levels
          break;
        if (levels[depth_full - i] > 0)
          __kmp_affinity_gran_levels++;
      }
    }
    if (__kmp_affinity_gran > affinity_gran_package)
      __kmp_affinity_gran_levels++; // e.g. granularity = group
  }

  if (__kmp_affinity_verbose)
    __kmp_affinity_print_hwloc_tp(retval, nActiveThreads, depth, levels);

  KMP_CPU_FREE(oldMask);
  *address2os = retval;
  return depth;
}
#endif // KMP_USE_HWLOC

// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread id's <-> processor id's.
static int __kmp_affinity_create_flat_map(AddrUnsPair **address2os,
                                          kmp_i18n_id_t *const msg_id) {
  *address2os = NULL;
  *msg_id = kmp_i18n_null;

  // Even if __kmp_affinity_type == affinity_none, this routine might still
  // be called to set __kmp_ncores, as well as
  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffFlatTopology, "KMP_AFFINITY");
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      KMP_INFORM(Uniform, "KMP_AFFINITY");
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }
    return 0;
  }

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly, and return now if affinity is
  // not enabled.
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffCapableUseFlat, "KMP_AFFINITY");
    KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
    KMP_INFORM(Uniform, "KMP_AFFINITY");
    KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
               __kmp_nThreadsPerCore, __kmp_ncores);
  }
  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
  if (__kmp_affinity_type == affinity_none) {
    int avail_ct = 0;
    int i;
    KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
      if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask))
        continue;
      __kmp_pu_os_idx[avail_ct++] = i; // suppose indices are flat
    }
    return 0;
  }

  // Construct the data structure to be returned.
  *address2os =
      (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    __kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
    Address addr(1);
    addr.labels[0] = i;
    (*address2os)[avail_ct++] = AddrUnsPair(addr, i);
  }
  if (__kmp_affinity_verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }

  if (__kmp_affinity_gran_levels < 0) {
    // Only the package level is modeled in the machine topology map,
    // so the #levels of granularity is either 0 or 1.
    if (__kmp_affinity_gran > affinity_gran_package) {
      __kmp_affinity_gran_levels = 1;
    } else {
      __kmp_affinity_gran_levels = 0;
    }
  }
  return 1;
}

#if KMP_GROUP_AFFINITY

// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at level 1.
// This facilitates letting the threads float among all procs in a group,
// if granularity=group (the default when there are multiple groups).
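// For example, with 64-proc groups, OS proc 70 gets the address
// {group 1, proc 6}, since 70 / 64 == 1 and 70 % 64 == 6.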
static int __kmp_affinity_create_proc_group_map(AddrUnsPair **address2os,
                                                kmp_i18n_id_t *const msg_id) {
  *address2os = NULL;
  *msg_id = kmp_i18n_null;

  // If we aren't affinity capable, then return now.
  // The flat mapping will be used.
  if (!KMP_AFFINITY_CAPABLE()) {
    // FIXME set *msg_id
    return -1;
  }

  // Construct the data structure to be returned.
  *address2os =
      (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    __kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
    Address addr(2);
    addr.labels[0] = i / (CHAR_BIT * sizeof(DWORD_PTR));
    addr.labels[1] = i % (CHAR_BIT * sizeof(DWORD_PTR));
    (*address2os)[avail_ct++] = AddrUnsPair(addr, i);

    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffOSProcToGroup, "KMP_AFFINITY", i, addr.labels[0],
                 addr.labels[1]);
    }
  }

  if (__kmp_affinity_gran_levels < 0) {
    if (__kmp_affinity_gran == affinity_gran_group) {
      __kmp_affinity_gran_levels = 1;
    } else if ((__kmp_affinity_gran == affinity_gran_fine) ||
               (__kmp_affinity_gran == affinity_gran_thread)) {
      __kmp_affinity_gran_levels = 0;
    } else {
      const char *gran_str = NULL;
      if (__kmp_affinity_gran == affinity_gran_core) {
        gran_str = "core";
      } else if (__kmp_affinity_gran == affinity_gran_package) {
        gran_str = "package";
      } else if (__kmp_affinity_gran == affinity_gran_node) {
        gran_str = "node";
      } else {
        KMP_ASSERT(0);
      }

      // Warning: can't use affinity granularity \"gran\" with group topology
      // method, using "thread"
      __kmp_affinity_gran_levels = 0;
    }
  }
  return 2;
}

#endif /* KMP_GROUP_AFFINITY */

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

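// Returns the smallest r such that (1 << r) >= count, i.e. the number of bits
// needed to encode "count" distinct values in an APIC-id field; e.g.,
// count = 6 gives 3, since 2^3 = 8 >= 6.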
static int __kmp_cpuid_mask_width(int count) {
  int r = 0;

  while ((1 << r) < count)
    ++r;
  return r;
}

class apicThreadInfo {
public:
  unsigned osId; // param to __kmp_affinity_bind_thread
  unsigned apicId; // from cpuid after binding
  unsigned maxCoresPerPkg; // ""
  unsigned maxThreadsPerPkg; // ""
  unsigned pkgId; // inferred from above values
  unsigned coreId; // ""
  unsigned threadId; // ""
};

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}

// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
// an algorithm which cycles through the available OS threads, setting
// the current thread's affinity mask to each of them in turn, and retrieving
// the Apic Id for each thread context using the cpuid instruction.
static int __kmp_affinity_create_apicid_map(AddrUnsPair **address2os,
                                            kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *address2os = NULL;
  *msg_id = kmp_i18n_null;

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return -1;
  }

  // The algorithm used starts by setting the affinity to each available thread
  // and retrieving info from the cpuid instruction, so if we are not capable
  // of calling __kmp_get_system_affinity() and __kmp_set_system_affinity(),
  // then we need to do something else - use the defaults that we calculated
  // from issuing cpuid without binding to each proc.
  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);

    // Get an upper bound on the number of threads per package using cpuid(1).
    // On some OS/chip combinations where HT is supported by the chip but is
    // disabled, this value will be 2 on a single core chip. Usually, it will
    // be 2 if HT is enabled and 1 if HT is disabled.
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // The num cores per pkg comes from cpuid(4). 1 must be added to the
    // encoded value.
    //
    // The author of cpu_count.cpp treated this as only an upper bound on the
    // number of cores, but I haven't seen any cases where it was greater than
    // the actual number of cores, so we will treat it as exact in this block
    // of code.
    //
    // First, we need to check if cpuid(4) is supported on this chip. To see if
    // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or
    // greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // There is no way to reliably tell if HT is enabled without issuing the
    // cpuid instruction from every thread and correlating the cpuid info, so
    // if the machine is not affinity capable, we assume that HT is off. We
    // have seen quite a few machines where maxThreadsPerPkg is 2, yet the
    // machine does not support HT.
    //
    // - Older OSes are usually found on machines with older chips, which do
    //   not support HT.
    // - The performance penalty for mistakenly identifying a machine as HT
    //   when it isn't (which results in blocktime being incorrectly set to 0)
    //   is greater than the penalty for mistakenly identifying a machine as
    //   being 1 thread/core when it is really HT enabled (which results in
    //   blocktime being incorrectly set to a positive value).
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffNotCapableUseLocCpuid, "KMP_AFFINITY");
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      if (__kmp_affinity_uniform_topology()) {
        KMP_INFORM(Uniform, "KMP_AFFINITY");
      } else {
        KMP_INFORM(NonUniform, "KMP_AFFINITY");
      }
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }
    return 0;
  }

  // From here on, we can assume that it is safe to call
  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
  // __kmp_affinity_type = affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affin_mask_t *oldMask;
  KMP_CPU_ALLOC(oldMask);
  KMP_ASSERT(oldMask != NULL);
  __kmp_get_system_affinity(oldMask, TRUE);

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  //
  // The relevant information is:
  // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
  //   has a unique Apic Id, which is of the form pkg# : core# : thread#.
  // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
  //   of this field determines the width of the core# + thread# fields in the
  //   Apic Id. It is also an upper bound on the number of threads per
  //   package, but it has been verified that situations happen where it is not
  //   exact. In particular, on certain OS/chip combinations where Intel(R)
  //   Hyper-Threading Technology is supported by the chip but has been
  //   disabled, the value of this field will be 2 (for a single core chip).
  //   On other OS/chip combinations supporting Intel(R) Hyper-Threading
  //   Technology, the value of this field will be 1 when Intel(R)
  //   Hyper-Threading Technology is disabled and 2 when it is enabled.
  // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value
  //   of this field (+1) determines the width of the core# field in the Apic
  //   Id. The comments in "cpucount.cpp" say that this value is an upper
  //   bound, but the IA-32 architecture manual says that it is exactly the
  //   number of cores per package, and I haven't seen any case where it
  //   wasn't.
  //
  // From this information, deduce the package Id, core Id, and thread Id,
  // and set the corresponding fields in the apicThreadInfo struct.
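  //
  // Worked example: maxThreadsPerPkg = 8 gives widthCT = 3; maxCoresPerPkg = 4
  // gives widthC = 2, so widthT = 1. For apicId = 0b10110 this decodes to
  // pkgId = 0b10, coreId = 0b11, threadId = 0b0.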
  unsigned i;
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_set_system_affinity(oldMask, TRUE);
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return -1;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
    // value.
    //
    // First, we need to check if cpuid(4) is supported on this chip. To see if
    // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
    // or greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only the info obtained locally.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // I've never seen this one happen, but I suppose it could, if the cpuid
      // instruction on a chip was really screwed up. Make sure to restore the
      // affinity mask before the tail call.
      __kmp_set_system_affinity(oldMask, TRUE);
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return -1;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

    nApics++;
  }

  // We've collected all the info we need.
  // Restore the old affinity mask for this thread.
  __kmp_set_system_affinity(oldMask, TRUE);

  // If there's only one thread context to bind to, form an Address object
  // with depth 1 and return immediately (or, if affinity is off, set
  // address2os to NULL and return).
  //
  // If it is configured to omit the package level when there is only a single
  // package, the logic at the end of this routine won't work if there is only
  // a single thread - it would try to form an Address object with depth 0.
  KMP_ASSERT(nApics > 0);
  if (nApics == 1) {
    __kmp_ncores = nPackages = 1;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      KMP_INFORM(Uniform, "KMP_AFFINITY");
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }

    if (__kmp_affinity_type == affinity_none) {
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      return 0;
    }

    *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
    Address addr(1);
    addr.labels[0] = threadInfo[0].pkgId;
    (*address2os)[0] = AddrUnsPair(addr, threadInfo[0].osId);

    if (__kmp_affinity_gran_levels < 0) {
      __kmp_affinity_gran_levels = 0;
    }

    if (__kmp_affinity_verbose) {
      __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
    }

    __kmp_free(threadInfo);
    KMP_CPU_FREE(oldMask);
    return 1;
  }

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields. pkgId's may be sparsely assigned among
  // the chips on a system. Although coreId's are usually assigned
  // [0 .. coresPerPkg-1] and threadId's are usually assigned
  // [0..threadsPerCore-1], we don't want to make any such assumptions.
  //
  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
  // total # packages) are at this point - we want to determine that now. We
  // only have an upper bound on the first two figures.
  //
  // We also perform a consistency check at this point: the values returned by
  // the cpuid instruction for any thread bound to a given package had better
  // return the same info for maxThreadsPerPkg and maxCoresPerPkg.
  nPackages = 1;
  nCoresPerPkg = 1;
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;

  unsigned pkgCt = 1; // to determine radii
  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // intra-pkg consistency checks
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nCores++;
      pkgCt++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package, so go on to the next iteration without
      // doing any consistency checks. Reset the consistency check vars, though.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return -1;
    }

    // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
    // fields agree between all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return -1;
    }
  }
  nPackages = pkgCt;
  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly, and return now if affinity is
  // not enabled.
  __kmp_ncores = nCores;
  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
    KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
    if (__kmp_affinity_uniform_topology()) {
      KMP_INFORM(Uniform, "KMP_AFFINITY");
    } else {
      KMP_INFORM(NonUniform, "KMP_AFFINITY");
    }
    KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
               __kmp_nThreadsPerCore, __kmp_ncores);
  }
  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);
  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
  for (i = 0; i < nApics; ++i) {
    __kmp_pu_os_idx[i] = threadInfo[i].osId;
  }
  if (__kmp_affinity_type == affinity_none) {
    __kmp_free(threadInfo);
    KMP_CPU_FREE(oldMask);
    return 0;
  }

  // Now that we've determined the number of packages, the number of cores per
  // package, and the number of threads per core, we can construct the data
  // structure that is to be returned.
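  // The core and thread levels are omitted when radix 1; e.g., on a non-HT
  // machine with several cores per package, depth is 2 (package, core).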
  int pkgLevel = 0;
  int coreLevel = (nCoresPerPkg <= 1) ? -1 : 1;
  int threadLevel =
      (__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
  unsigned depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);

  KMP_ASSERT(depth > 0);
  *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * nApics);

  for (i = 0; i < nApics; ++i) {
    Address addr(depth);
    unsigned os = threadInfo[i].osId;
    int d = 0;

    if (pkgLevel >= 0) {
      addr.labels[d++] = threadInfo[i].pkgId;
    }
    if (coreLevel >= 0) {
      addr.labels[d++] = threadInfo[i].coreId;
    }
    if (threadLevel >= 0) {
      addr.labels[d++] = threadInfo[i].threadId;
    }
    (*address2os)[i] = AddrUnsPair(addr, os);
  }

  if (__kmp_affinity_gran_levels < 0) {
    // Set the granularity level based on what levels are modeled in the
    // machine topology map.
    __kmp_affinity_gran_levels = 0;
    if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
      __kmp_affinity_gran_levels++;
    }
    if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
      __kmp_affinity_gran_levels++;
    }
    if ((pkgLevel >= 0) && (__kmp_affinity_gran > affinity_gran_package)) {
      __kmp_affinity_gran_levels++;
    }
  }

  if (__kmp_affinity_verbose) {
    __kmp_affinity_print_topology(*address2os, nApics, depth, pkgLevel,
                                  coreLevel, threadLevel);
  }

  __kmp_free(threadInfo);
  KMP_CPU_FREE(oldMask);
  return depth;
}

// Intel(R) microarchitecture code name Nehalem, Dunnington and later
// architectures support a newer interface for specifying the x2APIC Ids,
// based on cpuid leaf 11.
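// Each sub-leaf of cpuid leaf 11 describes one topology level: ECX[15:8]
// holds the level type (1 = SMT, 2 = core) and EBX[15:0] the number of
// logical processors at that level; an all-zero EBX marks the end of the
// enumeration.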
__kmp_affinity_create_x2apicid_map(AddrUnsPair ** address2os,kmp_i18n_id_t * const msg_id)1455 static int __kmp_affinity_create_x2apicid_map(AddrUnsPair **address2os,
1456 kmp_i18n_id_t *const msg_id) {
1457 kmp_cpuid buf;
1458 *address2os = NULL;
1459 *msg_id = kmp_i18n_null;
1460
1461 // Check to see if cpuid leaf 11 is supported.
1462 __kmp_x86_cpuid(0, 0, &buf);
1463 if (buf.eax < 11) {
1464 *msg_id = kmp_i18n_str_NoLeaf11Support;
1465 return -1;
1466 }
1467 __kmp_x86_cpuid(11, 0, &buf);
1468 if (buf.ebx == 0) {
1469 *msg_id = kmp_i18n_str_NoLeaf11Support;
1470 return -1;
1471 }
1472
1473 // Find the number of levels in the machine topology. While we're at it, get
1474 // the default values for __kmp_nThreadsPerCore & nCoresPerPkg. We will try to
1475 // get more accurate values later by explicitly counting them, but get
1476 // reasonable defaults now, in case we return early.
1477 int level;
1478 int threadLevel = -1;
1479 int coreLevel = -1;
1480 int pkgLevel = -1;
1481 __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
1482
1483 for (level = 0;; level++) {
1484 if (level > 31) {
1485 // FIXME: Hack for DPD200163180
1486 //
1487 // If level is big then something went wrong -> exiting
1488 //
1489 // There could actually be 32 valid levels in the machine topology, but so
1490 // far, the only machine we have seen which does not exit this loop before
1491 // iteration 32 has fubar x2APIC settings.
1492 //
1493 // For now, just reject this case based upon loop trip count.
1494 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1495 return -1;
1496 }
1497 __kmp_x86_cpuid(11, level, &buf);
1498 if (buf.ebx == 0) {
1499 if (pkgLevel < 0) {
1500 // Will infer nPackages from __kmp_xproc
1501 pkgLevel = level;
1502 level++;
1503 }
1504 break;
1505 }
1506 int kind = (buf.ecx >> 8) & 0xff;
1507 if (kind == 1) {
1508 // SMT level
1509 threadLevel = level;
1510 coreLevel = -1;
1511 pkgLevel = -1;
1512 __kmp_nThreadsPerCore = buf.ebx & 0xffff;
1513 if (__kmp_nThreadsPerCore == 0) {
1514 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1515 return -1;
1516 }
1517 } else if (kind == 2) {
1518 // core level
1519 coreLevel = level;
1520 pkgLevel = -1;
1521 nCoresPerPkg = buf.ebx & 0xffff;
1522 if (nCoresPerPkg == 0) {
1523 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1524 return -1;
1525 }
1526 } else {
1527 if (level <= 0) {
1528 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1529 return -1;
1530 }
1531 if (pkgLevel >= 0) {
1532 continue;
1533 }
1534 pkgLevel = level;
1535 nPackages = buf.ebx & 0xffff;
1536 if (nPackages == 0) {
1537 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1538 return -1;
1539 }
1540 }
1541 }
1542 int depth = level;
1543
1544 // In the above loop, "level" was counted from the finest level (usually
1545 // thread) to the coarsest. The caller expects that we will place the labels
1546 // in (*address2os)[].first.labels[] in the inverse order, so we need to
1547 // invert the vars saying which level means what.
1548 if (threadLevel >= 0) {
1549 threadLevel = depth - threadLevel - 1;
1550 }
1551 if (coreLevel >= 0) {
1552 coreLevel = depth - coreLevel - 1;
1553 }
1554 KMP_DEBUG_ASSERT(pkgLevel >= 0);
1555 pkgLevel = depth - pkgLevel - 1;
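// As a worked example: on a depth-3 machine the loop above assigns
// threadLevel = 0, coreLevel = 1, pkgLevel = 2 (finest to coarsest).
// After the inversion, pkgLevel == 0, coreLevel == 1, threadLevel == 2,
// matching the coarsest-first order used for labels[] below.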
1556
1557 // The algorithm used starts by setting the affinity to each available thread
1558 // and retrieving info from the cpuid instruction, so if we are not capable of
1559 // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we
1560 // need to do something else - use the defaults that we calculated from
1561 // issuing cpuid without binding to each proc.
1562 if (!KMP_AFFINITY_CAPABLE()) {
1563 // Hack to try to infer the machine topology using only the data
1564 // available from cpuid on the current thread, and __kmp_xproc.
1565 KMP_ASSERT(__kmp_affinity_type == affinity_none);
1566
1567 __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
1568 nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
1569 if (__kmp_affinity_verbose) {
1570 KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
1571 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1572 if (__kmp_affinity_uniform_topology()) {
1573 KMP_INFORM(Uniform, "KMP_AFFINITY");
1574 } else {
1575 KMP_INFORM(NonUniform, "KMP_AFFINITY");
1576 }
1577 KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1578 __kmp_nThreadsPerCore, __kmp_ncores);
1579 }
1580 return 0;
1581 }
1582
1583 // From here on, we can assume that it is safe to call
1584 // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
1585 // __kmp_affinity_type = affinity_none.
1586
1587 // Save the affinity mask for the current thread.
1588 kmp_affin_mask_t *oldMask;
1589 KMP_CPU_ALLOC(oldMask);
1590 __kmp_get_system_affinity(oldMask, TRUE);
1591
1592 // Allocate the data structure to be returned.
1593 AddrUnsPair *retval =
1594 (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
1595
1596 // Run through each of the available contexts, binding the current thread
1597 // to it, and obtaining the pertinent information using the cpuid instr.
1598 unsigned int proc;
1599 int nApics = 0;
1600 KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
1601 // Skip this proc if it is not included in the machine model.
1602 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
1603 continue;
1604 }
1605 KMP_DEBUG_ASSERT(nApics < __kmp_avail_proc);
1606
1607 __kmp_affinity_dispatch->bind_thread(proc);
1608
1609 // Extract labels for each level in the machine topology map from Apic ID.
1610 Address addr(depth);
1611 int prev_shift = 0;
1612
1613 for (level = 0; level < depth; level++) {
1614 __kmp_x86_cpuid(11, level, &buf);
1615 unsigned apicId = buf.edx;
1616 if (buf.ebx == 0) {
1617 if (level != depth - 1) {
1618 KMP_CPU_FREE(oldMask);
1619 *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1620 return -1;
1621 }
1622 addr.labels[depth - level - 1] = apicId >> prev_shift;
1623 level++;
1624 break;
1625 }
1626 int shift = buf.eax & 0x1f;
1627 int mask = (1 << shift) - 1;
1628 addr.labels[depth - level - 1] = (apicId & mask) >> prev_shift;
1629 prev_shift = shift;
1630 }
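// A worked example with hypothetical shift widths: if the SMT sub-leaf
// reports shift == 1 and the core sub-leaf reports shift == 4, then
// apicId 53 (0b0110101) decomposes into
//   thread = apicId & 0x1 = 1
//   core = (apicId & 0xf) >> 1 = 2
//   pkg = apicId >> 4 = 3
// and lands in labels[] as { 3, 2, 1 }, coarsest level first.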
1631 if (level != depth) {
1632 KMP_CPU_FREE(oldMask);
1633 *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1634 return -1;
1635 }
1636
1637 retval[nApics] = AddrUnsPair(addr, proc);
1638 nApics++;
1639 }
1640
1641 // We've collected all the info we need.
1642 // Restore the old affinity mask for this thread.
1643 __kmp_set_system_affinity(oldMask, TRUE);
1644
1645 // If there's only one thread context to bind to, return now.
1646 KMP_ASSERT(nApics > 0);
1647 if (nApics == 1) {
1648 __kmp_ncores = nPackages = 1;
1649 __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1650 if (__kmp_affinity_verbose) {
1651 KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
1652 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1653 KMP_INFORM(Uniform, "KMP_AFFINITY");
1654 KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1655 __kmp_nThreadsPerCore, __kmp_ncores);
1656 }
1657
1658 if (__kmp_affinity_type == affinity_none) {
1659 __kmp_free(retval);
1660 KMP_CPU_FREE(oldMask);
1661 return 0;
1662 }
1663
1664 // Form an Address object which only includes the package level.
1665 Address addr(1);
1666 addr.labels[0] = retval[0].first.labels[pkgLevel];
1667 retval[0].first = addr;
1668
1669 if (__kmp_affinity_gran_levels < 0) {
1670 __kmp_affinity_gran_levels = 0;
1671 }
1672
1673 if (__kmp_affinity_verbose) {
1674 __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
1675 }
1676
1677 *address2os = retval;
1678 KMP_CPU_FREE(oldMask);
1679 return 1;
1680 }
1681
1682 // Sort the table by physical Id.
1683 qsort(retval, nApics, sizeof(*retval), __kmp_affinity_cmp_Address_labels);
1684
1685 // Find the radix at each of the levels.
1686 unsigned *totals = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1687 unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1688 unsigned *maxCt = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1689 unsigned *last = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1690 for (level = 0; level < depth; level++) {
1691 totals[level] = 1;
1692 maxCt[level] = 1;
1693 counts[level] = 1;
1694 last[level] = retval[0].first.labels[level];
1695 }
1696
1697 // From here on, the iteration variable "level" indexes labels[] directly,
1698 // i.e. we iterate forward through (*address2os)[].first.labels[] from the
1699 // coarsest level (package, at index 0) to the finest - in the previous
1700 // loops, we iterated backwards.
1701 for (proc = 1; (int)proc < nApics; proc++) {
1702 int level;
1703 for (level = 0; level < depth; level++) {
1704 if (retval[proc].first.labels[level] != last[level]) {
1705 int j;
1706 for (j = level + 1; j < depth; j++) {
1707 totals[j]++;
1708 counts[j] = 1;
1709 // The commented-out line below would cause incorrect topology information
1710 // to be printed in case the max value for some level (maxCt[level]) is
1711 // encountered earlier than some smaller value while going through the
1712 // array. For example, let pkg0 have 4 cores and pkg1 have 2 cores. Then
1713 // maxCt[1] would end up as 2,
1714 // whereas it must be 4.
1715 // TODO!!! Check whether it can stay commented out safely
1716 // maxCt[j] = 1;
1717 last[j] = retval[proc].first.labels[j];
1718 }
1719 totals[level]++;
1720 counts[level]++;
1721 if (counts[level] > maxCt[level]) {
1722 maxCt[level] = counts[level];
1723 }
1724 last[level] = retval[proc].first.labels[level];
1725 break;
1726 } else if (level == depth - 1) {
1727 __kmp_free(last);
1728 __kmp_free(maxCt);
1729 __kmp_free(counts);
1730 __kmp_free(totals);
1731 __kmp_free(retval);
1732 KMP_CPU_FREE(oldMask);
1733 *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
1734 return -1;
1735 }
1736 }
1737 }
1738
1739 // When affinity is off, this routine will still be called to set
1740 // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1741 // Make sure all these vars are set correctly, and return if affinity is not
1742 // enabled.
1743 if (threadLevel >= 0) {
1744 __kmp_nThreadsPerCore = maxCt[threadLevel];
1745 } else {
1746 __kmp_nThreadsPerCore = 1;
1747 }
1748 nPackages = totals[pkgLevel];
1749
1750 if (coreLevel >= 0) {
1751 __kmp_ncores = totals[coreLevel];
1752 nCoresPerPkg = maxCt[coreLevel];
1753 } else {
1754 __kmp_ncores = nPackages;
1755 nCoresPerPkg = 1;
1756 }
1757
1758 // Check to see if the machine topology is uniform
1759 unsigned prod = maxCt[0];
1760 for (level = 1; level < depth; level++) {
1761 prod *= maxCt[level];
1762 }
1763 bool uniform = (prod == totals[level - 1]);
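// E.g., 2 packages where pkg0 has 4 cores and pkg1 has 2, with 1 thread
// per core: prod == 2 * 4 * 1 == 8, but totals[depth - 1] == 6 hardware
// threads, so the topology is reported as non-uniform.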
1764
1765 // Print the machine topology summary.
1766 if (__kmp_affinity_verbose) {
1767 KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
1768 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1769 if (uniform) {
1770 KMP_INFORM(Uniform, "KMP_AFFINITY");
1771 } else {
1772 KMP_INFORM(NonUniform, "KMP_AFFINITY");
1773 }
1774
1775 kmp_str_buf_t buf;
1776 __kmp_str_buf_init(&buf);
1777
1778 __kmp_str_buf_print(&buf, "%d", totals[0]);
1779 for (level = 1; level <= pkgLevel; level++) {
1780 __kmp_str_buf_print(&buf, " x %d", maxCt[level]);
1781 }
1782 KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
1783 __kmp_nThreadsPerCore, __kmp_ncores);
1784
1785 __kmp_str_buf_free(&buf);
1786 }
1787 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1788 KMP_DEBUG_ASSERT(nApics == __kmp_avail_proc);
1789 __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1790 for (proc = 0; (int)proc < nApics; ++proc) {
1791 __kmp_pu_os_idx[proc] = retval[proc].second;
1792 }
1793 if (__kmp_affinity_type == affinity_none) {
1794 __kmp_free(last);
1795 __kmp_free(maxCt);
1796 __kmp_free(counts);
1797 __kmp_free(totals);
1798 __kmp_free(retval);
1799 KMP_CPU_FREE(oldMask);
1800 return 0;
1801 }
1802
1803 // Find any levels with radix 1, and remove them from the map
1804 // (except for the package level).
1805 int new_depth = 0;
1806 for (level = 0; level < depth; level++) {
1807 if ((maxCt[level] == 1) && (level != pkgLevel)) {
1808 continue;
1809 }
1810 new_depth++;
1811 }
1812
1813 // If we are removing any levels, allocate a new vector to return,
1814 // and copy the relevant information to it.
1815 if (new_depth != depth) {
1816 AddrUnsPair *new_retval =
1817 (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * nApics);
1818 for (proc = 0; (int)proc < nApics; proc++) {
1819 Address addr(new_depth);
1820 new_retval[proc] = AddrUnsPair(addr, retval[proc].second);
1821 }
1822 int new_level = 0;
1823 int newPkgLevel = -1;
1824 int newCoreLevel = -1;
1825 int newThreadLevel = -1;
1826 for (level = 0; level < depth; level++) {
1827 if ((maxCt[level] == 1) && (level != pkgLevel)) {
1828 // Remove this level. Never remove the package level
1829 continue;
1830 }
1831 if (level == pkgLevel) {
1832 newPkgLevel = new_level;
1833 }
1834 if (level == coreLevel) {
1835 newCoreLevel = new_level;
1836 }
1837 if (level == threadLevel) {
1838 newThreadLevel = new_level;
1839 }
1840 for (proc = 0; (int)proc < nApics; proc++) {
1841 new_retval[proc].first.labels[new_level] =
1842 retval[proc].first.labels[level];
1843 }
1844 new_level++;
1845 }
1846
1847 __kmp_free(retval);
1848 retval = new_retval;
1849 depth = new_depth;
1850 pkgLevel = newPkgLevel;
1851 coreLevel = newCoreLevel;
1852 threadLevel = newThreadLevel;
1853 }
1854
1855 if (__kmp_affinity_gran_levels < 0) {
1856 // Set the granularity level based on what levels are modeled
1857 // in the machine topology map.
1858 __kmp_affinity_gran_levels = 0;
1859 if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
1860 __kmp_affinity_gran_levels++;
1861 }
1862 if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
1863 __kmp_affinity_gran_levels++;
1864 }
1865 if (__kmp_affinity_gran > affinity_gran_package) {
1866 __kmp_affinity_gran_levels++;
1867 }
1868 }
1869
1870 if (__kmp_affinity_verbose) {
1871 __kmp_affinity_print_topology(retval, nApics, depth, pkgLevel, coreLevel,
1872 threadLevel);
1873 }
1874
1875 __kmp_free(last);
1876 __kmp_free(maxCt);
1877 __kmp_free(counts);
1878 __kmp_free(totals);
1879 KMP_CPU_FREE(oldMask);
1880 *address2os = retval;
1881 return depth;
1882 }
1883
1884 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1885
1886 #define osIdIndex 0
1887 #define threadIdIndex 1
1888 #define coreIdIndex 2
1889 #define pkgIdIndex 3
1890 #define nodeIdIndex 4
1891
1892 typedef unsigned *ProcCpuInfo;
1893 static unsigned maxIndex = pkgIdIndex;
1894
1895 static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
1896 const void *b) {
1897 unsigned i;
1898 const unsigned *aa = *(unsigned *const *)a;
1899 const unsigned *bb = *(unsigned *const *)b;
1900 for (i = maxIndex;; i--) {
1901 if (aa[i] < bb[i])
1902 return -1;
1903 if (aa[i] > bb[i])
1904 return 1;
1905 if (i == osIdIndex)
1906 break;
1907 }
1908 return 0;
1909 }
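// With maxIndex == pkgIdIndex, this comparator orders records by pkgId
// first, then coreId, then threadId, and finally osId - the physical
// ordering that the radix counting further below relies on.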
1910
1911 #if KMP_USE_HIER_SCHED
1912 // Set the array sizes for the hierarchy layers
1913 static void __kmp_dispatch_set_hierarchy_values() {
1914 // Set the maximum number of L1's to number of cores
1915 // Set the maximum number of L2's to either number of cores / 2 for the
1916 // Intel(R) Xeon Phi(TM) coprocessor formerly codenamed Knights Landing,
1917 // or the number of cores for Intel(R) Xeon(R) processors
1918 // Set the maximum number of NUMA nodes and L3's to number of packages
1919 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
1920 nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
1921 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
1922 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
1923 KMP_MIC_SUPPORTED
1924 if (__kmp_mic_type >= mic3)
1925 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
1926 else
1927 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
1928 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
1929 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
1930 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
1931 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
1932 // Set the number of threads per unit
1933 // Number of hardware threads per L1/L2/L3/NUMA/LOOP
1934 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
1935 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
1936 __kmp_nThreadsPerCore;
1937 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
1938 KMP_MIC_SUPPORTED
1939 if (__kmp_mic_type >= mic3)
1940 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
1941 2 * __kmp_nThreadsPerCore;
1942 else
1943 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
1944 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
1945 __kmp_nThreadsPerCore;
1946 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
1947 nCoresPerPkg * __kmp_nThreadsPerCore;
1948 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
1949 nCoresPerPkg * __kmp_nThreadsPerCore;
1950 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
1951 nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
1952 }
1953
1954 // Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
1955 // i.e., this thread's L1 or this thread's L2, etc.
1956 int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
1957 int index = type + 1;
1958 int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
1959 KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
1960 if (type == kmp_hier_layer_e::LAYER_THREAD)
1961 return tid;
1962 else if (type == kmp_hier_layer_e::LAYER_LOOP)
1963 return 0;
1964 KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
1965 if (tid >= num_hw_threads)
1966 tid = tid % num_hw_threads;
1967 return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
1968 }
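// A worked example on a hypothetical machine with 2 threads per core and
// 8 cores: a thread with tid == 5 asking for its L1 (core) index gets
//   (5 / __kmp_hier_threads_per[LAYER_L1 + 1]) %
//       __kmp_hier_max_units[LAYER_L1 + 1] == (5 / 2) % 8 == 2,
// i.e. tids 4 and 5 map to the same L1 unit.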
1969
1970 // Return the number of t1's per t2
1971 int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
1972 int i1 = t1 + 1;
1973 int i2 = t2 + 1;
1974 KMP_DEBUG_ASSERT(i1 <= i2);
1975 KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
1976 KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
1977 KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
1978 // (nthreads/t2) / (nthreads/t1) = t1 / t2
1979 return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
1980 }
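// E.g., the number of L1's per L2 on a Knights Landing-like setup, where
// __kmp_hier_threads_per[LAYER_L2 + 1] == 2 * __kmp_nThreadsPerCore and
// __kmp_hier_threads_per[LAYER_L1 + 1] == __kmp_nThreadsPerCore (see
// __kmp_dispatch_set_hierarchy_values() above), comes out as 2.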
1981 #endif // KMP_USE_HIER_SCHED
1982
1983 // Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
1984 // affinity map.
1985 static int __kmp_affinity_create_cpuinfo_map(AddrUnsPair **address2os,
1986 int *line,
1987 kmp_i18n_id_t *const msg_id,
1988 FILE *f) {
1989 *address2os = NULL;
1990 *msg_id = kmp_i18n_null;
1991
1992 // Scan the file once, counting the number of "processor" (osId) fields,
1993 // and find the highest value of <n> for a node_<n> field.
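// For orientation, an abridged example of the records this parser expects
// (layout as in a typical Linux /proc/cpuinfo; the values are made up):
//
//   processor       : 0
//   physical id     : 0
//   core id         : 0
//
//   processor       : 1
//   ...
//
// Each record ends with a blank line, which is what the second pass keys on.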
1994 char buf[256];
1995 unsigned num_records = 0;
1996 while (!feof(f)) {
1997 buf[sizeof(buf) - 1] = 1;
1998 if (!fgets(buf, sizeof(buf), f)) {
1999 // Read errors presumably because of EOF
2000 break;
2001 }
2002
2003 char s1[] = "processor";
2004 if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2005 num_records++;
2006 continue;
2007 }
2008
2009 // FIXME - this will match "node_<n> <garbage>"
2010 unsigned level;
2011 if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2012 if (nodeIdIndex + level >= maxIndex) {
2013 maxIndex = nodeIdIndex + level;
2014 }
2015 continue;
2016 }
2017 }
2018
2019 // Check for empty file / no valid processor records, or too many. The number
2020 // of records can't exceed the number of valid bits in the affinity mask.
2021 if (num_records == 0) {
2022 *line = 0;
2023 *msg_id = kmp_i18n_str_NoProcRecords;
2024 return -1;
2025 }
2026 if (num_records > (unsigned)__kmp_xproc) {
2027 *line = 0;
2028 *msg_id = kmp_i18n_str_TooManyProcRecords;
2029 return -1;
2030 }
2031
2032 // Set the file pointer back to the beginning, so that we can scan the file
2033 // again, this time performing a full parse of the data. Allocate a vector of
2034 // ProcCpuInfo objects, where we will place the data. Adding an extra element
2035 // at the end allows us to remove a lot of extra checks for termination
2036 // conditions.
2037 if (fseek(f, 0, SEEK_SET) != 0) {
2038 *line = 0;
2039 *msg_id = kmp_i18n_str_CantRewindCpuinfo;
2040 return -1;
2041 }
2042
2043 // Allocate the array of records to store the proc info in. The dummy
2044 // element at the end makes the logic in filling them out easier to code.
2045 unsigned **threadInfo =
2046 (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
2047 unsigned i;
2048 for (i = 0; i <= num_records; i++) {
2049 threadInfo[i] =
2050 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2051 }
2052
2053 #define CLEANUP_THREAD_INFO \
2054 for (i = 0; i <= num_records; i++) { \
2055 __kmp_free(threadInfo[i]); \
2056 } \
2057 __kmp_free(threadInfo);
2058
2059 // A value of UINT_MAX means that we didn't find the field
2060 unsigned __index;
2061
2062 #define INIT_PROC_INFO(p) \
2063 for (__index = 0; __index <= maxIndex; __index++) { \
2064 (p)[__index] = UINT_MAX; \
2065 }
2066
2067 for (i = 0; i <= num_records; i++) {
2068 INIT_PROC_INFO(threadInfo[i]);
2069 }
2070
2071 unsigned num_avail = 0;
2072 *line = 0;
2073 while (!feof(f)) {
2074 // Create an inner scoping level, so that all the goto targets at the end of
2075 // the loop appear in an outer scoping level. This avoids warnings about
2076 // jumping past an initialization to a target in the same block.
2077 {
2078 buf[sizeof(buf) - 1] = 1;
2079 bool long_line = false;
2080 if (!fgets(buf, sizeof(buf), f)) {
2081 // Read errors presumably because of EOF
2082 // If there is valid data in threadInfo[num_avail], then fake
2083 // a blank line to ensure that the last address gets parsed.
2084 bool valid = false;
2085 for (i = 0; i <= maxIndex; i++) {
2086 if (threadInfo[num_avail][i] != UINT_MAX) {
2087 valid = true;
2088 }
2089 }
2090 if (!valid) {
2091 break;
2092 }
2093 buf[0] = 0;
2094 } else if (!buf[sizeof(buf) - 1]) {
2095 // The line is longer than the buffer. Set a flag and don't
2096 // emit an error if we were going to ignore the line, anyway.
2097 long_line = true;
2098
2099 #define CHECK_LINE \
2100 if (long_line) { \
2101 CLEANUP_THREAD_INFO; \
2102 *msg_id = kmp_i18n_str_LongLineCpuinfo; \
2103 return -1; \
2104 }
2105 }
2106 (*line)++;
2107
2108 char s1[] = "processor";
2109 if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2110 CHECK_LINE;
2111 char *p = strchr(buf + sizeof(s1) - 1, ':');
2112 unsigned val;
2113 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2114 goto no_val;
2115 if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
2116 #if KMP_ARCH_AARCH64
2117 // Handle the old AArch64 /proc/cpuinfo layout differently,
2118 // it contains all of the 'processor' entries listed in a
2119 // single 'Processor' section, therefore the normal check
2120 // for duplicates in that section will always fail.
2121 num_avail++;
2122 #else
2123 goto dup_field;
2124 #endif
2125 threadInfo[num_avail][osIdIndex] = val;
2126 #if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2127 char path[256];
2128 KMP_SNPRINTF(
2129 path, sizeof(path),
2130 "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
2131 threadInfo[num_avail][osIdIndex]);
2132 __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
2133
2134 KMP_SNPRINTF(path, sizeof(path),
2135 "/sys/devices/system/cpu/cpu%u/topology/core_id",
2136 threadInfo[num_avail][osIdIndex]);
2137 __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
2138 continue;
2139 #else
2140 }
2141 char s2[] = "physical id";
2142 if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
2143 CHECK_LINE;
2144 char *p = strchr(buf + sizeof(s2) - 1, ':');
2145 unsigned val;
2146 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2147 goto no_val;
2148 if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
2149 goto dup_field;
2150 threadInfo[num_avail][pkgIdIndex] = val;
2151 continue;
2152 }
2153 char s3[] = "core id";
2154 if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
2155 CHECK_LINE;
2156 char *p = strchr(buf + sizeof(s3) - 1, ':');
2157 unsigned val;
2158 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2159 goto no_val;
2160 if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
2161 goto dup_field;
2162 threadInfo[num_avail][coreIdIndex] = val;
2163 continue;
2164 #endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2165 }
2166 char s4[] = "thread id";
2167 if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
2168 CHECK_LINE;
2169 char *p = strchr(buf + sizeof(s4) - 1, ':');
2170 unsigned val;
2171 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2172 goto no_val;
2173 if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
2174 goto dup_field;
2175 threadInfo[num_avail][threadIdIndex] = val;
2176 continue;
2177 }
2178 unsigned level;
2179 if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2180 CHECK_LINE;
2181 char *p = strchr(buf + sizeof(s4) - 1, ':');
2182 unsigned val;
2183 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2184 goto no_val;
2185 KMP_ASSERT(nodeIdIndex + level <= maxIndex);
2186 if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
2187 goto dup_field;
2188 threadInfo[num_avail][nodeIdIndex + level] = val;
2189 continue;
2190 }
2191
2192 // We didn't recognize the leading token on the line. There are lots of
2193 // leading tokens that we don't recognize - if the line isn't empty, go on
2194 // to the next line.
2195 if ((*buf != 0) && (*buf != '\n')) {
2196 // If the line is longer than the buffer, read characters
2197 // until we find a newline.
2198 if (long_line) {
2199 int ch;
2200 while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
2201 ;
2202 }
2203 continue;
2204 }
2205
2206 // A newline has signalled the end of the processor record.
2207 // Check that there aren't too many procs specified.
2208 if ((int)num_avail == __kmp_xproc) {
2209 CLEANUP_THREAD_INFO;
2210 *msg_id = kmp_i18n_str_TooManyEntries;
2211 return -1;
2212 }
2213
2214 // Check for missing fields. The osId field must be there, and we
2215 // currently require that the physical id field is specified, also.
2216 if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
2217 CLEANUP_THREAD_INFO;
2218 *msg_id = kmp_i18n_str_MissingProcField;
2219 return -1;
2220 }
2221 if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
2222 CLEANUP_THREAD_INFO;
2223 *msg_id = kmp_i18n_str_MissingPhysicalIDField;
2224 return -1;
2225 }
2226
2227 // Skip this proc if it is not included in the machine model.
2228 if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
2229 __kmp_affin_fullMask)) {
2230 INIT_PROC_INFO(threadInfo[num_avail]);
2231 continue;
2232 }
2233
2234 // We have a successful parse of this proc's info.
2235 // Increment the counter, and prepare for the next proc.
2236 num_avail++;
2237 KMP_ASSERT(num_avail <= num_records);
2238 INIT_PROC_INFO(threadInfo[num_avail]);
2239 }
2240 continue;
2241
2242 no_val:
2243 CLEANUP_THREAD_INFO;
2244 *msg_id = kmp_i18n_str_MissingValCpuinfo;
2245 return -1;
2246
2247 dup_field:
2248 CLEANUP_THREAD_INFO;
2249 *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
2250 return -1;
2251 }
2252 *line = 0;
2253
2254 #if KMP_MIC && REDUCE_TEAM_SIZE
2255 unsigned teamSize = 0;
2256 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2257
2258 // check for num_records == __kmp_xproc ???
2259
2260 // If there's only one thread context to bind to, form an Address object with
2261 // depth 1 and return immediately (or, if affinity is off, set address2os to
2262 // NULL and return).
2263 //
2264 // If it is configured to omit the package level when there is only a single
2265 // package, the logic at the end of this routine won't work if there is only a
2266 // single thread - it would try to form an Address object with depth 0.
2267 KMP_ASSERT(num_avail > 0);
2268 KMP_ASSERT(num_avail <= num_records);
2269 if (num_avail == 1) {
2270 __kmp_ncores = 1;
2271 __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
2272 if (__kmp_affinity_verbose) {
2273 if (!KMP_AFFINITY_CAPABLE()) {
2274 KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2275 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2276 KMP_INFORM(Uniform, "KMP_AFFINITY");
2277 } else {
2278 KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2279 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2280 KMP_INFORM(Uniform, "KMP_AFFINITY");
2281 }
2282 int index;
2283 kmp_str_buf_t buf;
2284 __kmp_str_buf_init(&buf);
2285 __kmp_str_buf_print(&buf, "1");
2286 for (index = maxIndex - 1; index > pkgIdIndex; index--) {
2287 __kmp_str_buf_print(&buf, " x 1");
2288 }
2289 KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, 1, 1, 1);
2290 __kmp_str_buf_free(&buf);
2291 }
2292
2293 if (__kmp_affinity_type == affinity_none) {
2294 CLEANUP_THREAD_INFO;
2295 return 0;
2296 }
2297
2298 *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
2299 Address addr(1);
2300 addr.labels[0] = threadInfo[0][pkgIdIndex];
2301 (*address2os)[0] = AddrUnsPair(addr, threadInfo[0][osIdIndex]);
2302
2303 if (__kmp_affinity_gran_levels < 0) {
2304 __kmp_affinity_gran_levels = 0;
2305 }
2306
2307 if (__kmp_affinity_verbose) {
2308 __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
2309 }
2310
2311 CLEANUP_THREAD_INFO;
2312 return 1;
2313 }
2314
2315 // Sort the threadInfo table by physical Id.
2316 qsort(threadInfo, num_avail, sizeof(*threadInfo),
2317 __kmp_affinity_cmp_ProcCpuInfo_phys_id);
2318
2319 // The table is now sorted by pkgId / coreId / threadId, but we really don't
2320 // know the radix of any of the fields. pkgId's may be sparsely assigned among
2321 // the chips on a system. Although coreId's are usually assigned
2322 // [0 .. coresPerPkg-1] and threadId's are usually assigned
2323 // [0..threadsPerCore-1], we don't want to make any such assumptions.
2324 //
2325 // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
2326 // total # packages) are at this point - we want to determine that now. We
2327 // only have an upper bound on the first two figures.
2328 unsigned *counts =
2329 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2330 unsigned *maxCt =
2331 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2332 unsigned *totals =
2333 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2334 unsigned *lastId =
2335 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2336
2337 bool assign_thread_ids = false;
2338 unsigned threadIdCt;
2339 unsigned index;
2340
2341 restart_radix_check:
2342 threadIdCt = 0;
2343
2344 // Initialize the counter arrays with data from threadInfo[0].
2345 if (assign_thread_ids) {
2346 if (threadInfo[0][threadIdIndex] == UINT_MAX) {
2347 threadInfo[0][threadIdIndex] = threadIdCt++;
2348 } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
2349 threadIdCt = threadInfo[0][threadIdIndex] + 1;
2350 }
2351 }
2352 for (index = 0; index <= maxIndex; index++) {
2353 counts[index] = 1;
2354 maxCt[index] = 1;
2355 totals[index] = 1;
2356 lastId[index] = threadInfo[0][index];
2358 }
2359
2360 // Run through the rest of the OS procs.
2361 for (i = 1; i < num_avail; i++) {
2362 // Find the most significant index whose id differs from the id for the
2363 // previous OS proc.
2364 for (index = maxIndex; index >= threadIdIndex; index--) {
2365 if (assign_thread_ids && (index == threadIdIndex)) {
2366 // Auto-assign the thread id field if it wasn't specified.
2367 if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2368 threadInfo[i][threadIdIndex] = threadIdCt++;
2369 }
2370 // Apparently the thread id field was specified for some entries and not
2371 // others. Start the thread id counter off at the next higher thread id.
2372 else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2373 threadIdCt = threadInfo[i][threadIdIndex] + 1;
2374 }
2375 }
2376 if (threadInfo[i][index] != lastId[index]) {
2377 // Run through all indices which are less significant, and reset the
2378 // counts to 1. At all levels up to and including index, we need to
2379 // increment the totals and record the last id.
2380 unsigned index2;
2381 for (index2 = threadIdIndex; index2 < index; index2++) {
2382 totals[index2]++;
2383 if (counts[index2] > maxCt[index2]) {
2384 maxCt[index2] = counts[index2];
2385 }
2386 counts[index2] = 1;
2387 lastId[index2] = threadInfo[i][index2];
2388 }
2389 counts[index]++;
2390 totals[index]++;
2391 lastId[index] = threadInfo[i][index];
2392
2393 if (assign_thread_ids && (index > threadIdIndex)) {
2394
2395 #if KMP_MIC && REDUCE_TEAM_SIZE
2396 // The default team size is the total #threads in the machine
2397 // minus 1 thread for every core that has 3 or more threads.
2398 teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
2399 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2400
2401 // Restart the thread counter, as we are on a new core.
2402 threadIdCt = 0;
2403
2404 // Auto-assign the thread id field if it wasn't specified.
2405 if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2406 threadInfo[i][threadIdIndex] = threadIdCt++;
2407 }
2408
2409 // Apparently the thread id field was specified for some entries and
2410 // not others. Start the thread id counter off at the next higher
2411 // thread id.
2412 else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2413 threadIdCt = threadInfo[i][threadIdIndex] + 1;
2414 }
2415 }
2416 break;
2417 }
2418 }
2419 if (index < threadIdIndex) {
2420 // If thread ids were specified, it is an error if they are not unique.
2421 // Also, check that we haven't already restarted the loop (to be safe -
2422 // shouldn't need to).
2423 if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
2424 __kmp_free(lastId);
2425 __kmp_free(totals);
2426 __kmp_free(maxCt);
2427 __kmp_free(counts);
2428 CLEANUP_THREAD_INFO;
2429 *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
2430 return -1;
2431 }
2432
2433 // If the thread ids were not specified and we see entries that
2434 // are duplicates, start the loop over and assign the thread ids manually.
2435 assign_thread_ids = true;
2436 goto restart_radix_check;
2437 }
2438 }
2439
2440 #if KMP_MIC && REDUCE_TEAM_SIZE
2441 // The default team size is the total #threads in the machine
2442 // minus 1 thread for every core that has 3 or more threads.
2443 teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
2444 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2445
2446 for (index = threadIdIndex; index <= maxIndex; index++) {
2447 if (counts[index] > maxCt[index]) {
2448 maxCt[index] = counts[index];
2449 }
2450 }
2451
2452 __kmp_nThreadsPerCore = maxCt[threadIdIndex];
2453 nCoresPerPkg = maxCt[coreIdIndex];
2454 nPackages = totals[pkgIdIndex];
2455
2456 // Check to see if the machine topology is uniform
2457 unsigned prod = totals[maxIndex];
2458 for (index = threadIdIndex; index < maxIndex; index++) {
2459 prod *= maxCt[index];
2460 }
2461 bool uniform = (prod == totals[threadIdIndex]);
2462
2463 // When affinity is off, this routine will still be called to set
2464 // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
2465 // Make sure all these vars are set correctly, and return now if affinity is
2466 // not enabled.
2467 __kmp_ncores = totals[coreIdIndex];
2468
2469 if (__kmp_affinity_verbose) {
2470 if (!KMP_AFFINITY_CAPABLE()) {
2471 KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2472 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2473 if (uniform) {
2474 KMP_INFORM(Uniform, "KMP_AFFINITY");
2475 } else {
2476 KMP_INFORM(NonUniform, "KMP_AFFINITY");
2477 }
2478 } else {
2479 KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2480 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2481 if (uniform) {
2482 KMP_INFORM(Uniform, "KMP_AFFINITY");
2483 } else {
2484 KMP_INFORM(NonUniform, "KMP_AFFINITY");
2485 }
2486 }
2487 kmp_str_buf_t buf;
2488 __kmp_str_buf_init(&buf);
2489
2490 __kmp_str_buf_print(&buf, "%d", totals[maxIndex]);
2491 for (index = maxIndex - 1; index >= pkgIdIndex; index--) {
2492 __kmp_str_buf_print(&buf, " x %d", maxCt[index]);
2493 }
2494 KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, maxCt[coreIdIndex],
2495 maxCt[threadIdIndex], __kmp_ncores);
2496
2497 __kmp_str_buf_free(&buf);
2498 }
2499
2500 #if KMP_MIC && REDUCE_TEAM_SIZE
2501 // Set the default team size.
2502 if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
2503 __kmp_dflt_team_nth = teamSize;
2504 KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
2505 "__kmp_dflt_team_nth = %d\n",
2506 __kmp_dflt_team_nth));
2507 }
2508 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2509
2510 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
2511 KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);
2512 __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
2513 for (i = 0; i < num_avail; ++i) { // fill the os indices
2514 __kmp_pu_os_idx[i] = threadInfo[i][osIdIndex];
2515 }
2516
2517 if (__kmp_affinity_type == affinity_none) {
2518 __kmp_free(lastId);
2519 __kmp_free(totals);
2520 __kmp_free(maxCt);
2521 __kmp_free(counts);
2522 CLEANUP_THREAD_INFO;
2523 return 0;
2524 }
2525
2526 // Count the number of levels which have more nodes at that level than at the
2527 // parent's level (with there being an implicit root node of the top level).
2528 // This is equivalent to saying that there is at least one node at this level
2529 // which has a sibling. These levels are in the map, and the package level is
2530 // always in the map.
2531 bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
2532 for (index = threadIdIndex; index < maxIndex; index++) {
2533 KMP_ASSERT(totals[index] >= totals[index + 1]);
2534 inMap[index] = (totals[index] > totals[index + 1]);
2535 }
2536 inMap[maxIndex] = (totals[maxIndex] > 1);
2537 inMap[pkgIdIndex] = true;
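// E.g., a 2-package machine with 4 cores per package and 1 thread per core
// (and no node_<n> fields) gives totals[threadIdIndex] == 8,
// totals[coreIdIndex] == 8, and totals[pkgIdIndex] == 2, so
// inMap[threadIdIndex] == false (no core has a second thread) while
// inMap[coreIdIndex] and inMap[pkgIdIndex] are true, for a depth of 2.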
2538
2539 int depth = 0;
2540 for (index = threadIdIndex; index <= maxIndex; index++) {
2541 if (inMap[index]) {
2542 depth++;
2543 }
2544 }
2545 KMP_ASSERT(depth > 0);
2546
2547 // Construct the data structure that is to be returned.
2548 *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * num_avail);
2549 int pkgLevel = -1;
2550 int coreLevel = -1;
2551 int threadLevel = -1;
2552
2553 for (i = 0; i < num_avail; ++i) {
2554 Address addr(depth);
2555 unsigned os = threadInfo[i][osIdIndex];
2556 int src_index;
2557 int dst_index = 0;
2558
2559 for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
2560 if (!inMap[src_index]) {
2561 continue;
2562 }
2563 addr.labels[dst_index] = threadInfo[i][src_index];
2564 if (src_index == pkgIdIndex) {
2565 pkgLevel = dst_index;
2566 } else if (src_index == coreIdIndex) {
2567 coreLevel = dst_index;
2568 } else if (src_index == threadIdIndex) {
2569 threadLevel = dst_index;
2570 }
2571 dst_index++;
2572 }
2573 (*address2os)[i] = AddrUnsPair(addr, os);
2574 }
2575
2576 if (__kmp_affinity_gran_levels < 0) {
2577 // Set the granularity level based on what levels are modeled
2578 // in the machine topology map.
2579 unsigned src_index;
2580 __kmp_affinity_gran_levels = 0;
2581 for (src_index = threadIdIndex; src_index <= maxIndex; src_index++) {
2582 if (!inMap[src_index]) {
2583 continue;
2584 }
2585 switch (src_index) {
2586 case threadIdIndex:
2587 if (__kmp_affinity_gran > affinity_gran_thread) {
2588 __kmp_affinity_gran_levels++;
2589 }
2590
2591 break;
2592 case coreIdIndex:
2593 if (__kmp_affinity_gran > affinity_gran_core) {
2594 __kmp_affinity_gran_levels++;
2595 }
2596 break;
2597
2598 case pkgIdIndex:
2599 if (__kmp_affinity_gran > affinity_gran_package) {
2600 __kmp_affinity_gran_levels++;
2601 }
2602 break;
2603 }
2604 }
2605 }
2606
2607 if (__kmp_affinity_verbose) {
2608 __kmp_affinity_print_topology(*address2os, num_avail, depth, pkgLevel,
2609 coreLevel, threadLevel);
2610 }
2611
2612 __kmp_free(inMap);
2613 __kmp_free(lastId);
2614 __kmp_free(totals);
2615 __kmp_free(maxCt);
2616 __kmp_free(counts);
2617 CLEANUP_THREAD_INFO;
2618 return depth;
2619 }
2620
2621 // Create and return a table of affinity masks, indexed by OS thread ID.
2622 // This routine handles OR'ing together all the affinity masks of threads
2623 // that are sufficiently close, if granularity > fine.
2624 static kmp_affin_mask_t *__kmp_create_masks(unsigned *maxIndex,
2625 unsigned *numUnique,
2626 AddrUnsPair *address2os,
2627 unsigned numAddrs) {
2628 // First form a table of affinity masks in order of OS thread id.
2629 unsigned depth;
2630 unsigned maxOsId;
2631 unsigned i;
2632
2633 KMP_ASSERT(numAddrs > 0);
2634 depth = address2os[0].first.depth;
2635
2636 maxOsId = 0;
2637 for (i = numAddrs - 1;; --i) {
2638 unsigned osId = address2os[i].second;
2639 if (osId > maxOsId) {
2640 maxOsId = osId;
2641 }
2642 if (i == 0)
2643 break;
2644 }
2645 kmp_affin_mask_t *osId2Mask;
2646 KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId + 1));
2647
2648 // Sort the address2os table according to physical order. Doing so will put
2649 // all threads on the same core/package/node in consecutive locations.
2650 qsort(address2os, numAddrs, sizeof(*address2os),
2651 __kmp_affinity_cmp_Address_labels);
2652
2653 KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
2654 if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
2655 KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
2656 }
2657 if (__kmp_affinity_gran_levels >= (int)depth) {
2658 if (__kmp_affinity_verbose ||
2659 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
2660 KMP_WARNING(AffThreadsMayMigrate);
2661 }
2662 }
2663
2664 // Run through the table, forming the masks for all threads on each core.
2665 // Threads on the same core will have identical "Address" objects, not
2666 // considering the last level, which must be the thread id. All threads on a
2667 // core will appear consecutively.
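// E.g., with granularity=core on a machine with 2 threads per core,
// __kmp_affinity_gran_levels == 1, so the two hyperthread siblings of each
// core compare as "close" and both OS proc bits are set in each sibling's
// entry of the osId2Mask table.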
2668 unsigned unique = 0;
2669 unsigned j = 0; // index of 1st thread on core
2670 unsigned leader = 0;
2671 Address *leaderAddr = &(address2os[0].first);
2672 kmp_affin_mask_t *sum;
2673 KMP_CPU_ALLOC_ON_STACK(sum);
2674 KMP_CPU_ZERO(sum);
2675 KMP_CPU_SET(address2os[0].second, sum);
2676 for (i = 1; i < numAddrs; i++) {
2677 // If this thread is sufficiently close to the leader (within the
2678 // granularity setting), then set the bit for this os thread in the
2679 // affinity mask for this group, and go on to the next thread.
2680 if (leaderAddr->isClose(address2os[i].first, __kmp_affinity_gran_levels)) {
2681 KMP_CPU_SET(address2os[i].second, sum);
2682 continue;
2683 }
2684
2685 // For every thread in this group, copy the mask to the thread's entry in
2686 // the osId2Mask table. Mark the first address as a leader.
2687 for (; j < i; j++) {
2688 unsigned osId = address2os[j].second;
2689 KMP_DEBUG_ASSERT(osId <= maxOsId);
2690 kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2691 KMP_CPU_COPY(mask, sum);
2692 address2os[j].first.leader = (j == leader);
2693 }
2694 unique++;
2695
2696 // Start a new mask.
2697 leader = i;
2698 leaderAddr = &(address2os[i].first);
2699 KMP_CPU_ZERO(sum);
2700 KMP_CPU_SET(address2os[i].second, sum);
2701 }
2702
2703 // For every thread in last group, copy the mask to the thread's
2704 // entry in the osId2Mask table.
2705 for (; j < i; j++) {
2706 unsigned osId = address2os[j].second;
2707 KMP_DEBUG_ASSERT(osId <= maxOsId);
2708 kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2709 KMP_CPU_COPY(mask, sum);
2710 address2os[j].first.leader = (j == leader);
2711 }
2712 unique++;
2713 KMP_CPU_FREE_FROM_STACK(sum);
2714
2715 *maxIndex = maxOsId;
2716 *numUnique = unique;
2717 return osId2Mask;
2718 }
2719
2720 // Stuff for the affinity proclist parsers. It's easier to declare these vars
2721 // as file-static than to try to pass them through the calling sequence of
2722 // the recursive-descent OMP_PLACES parser.
2723 static kmp_affin_mask_t *newMasks;
2724 static int numNewMasks;
2725 static int nextNewMask;
2726
2727 #define ADD_MASK(_mask) \
2728 { \
2729 if (nextNewMask >= numNewMasks) { \
2730 int i; \
2731 numNewMasks *= 2; \
2732 kmp_affin_mask_t *temp; \
2733 KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks); \
2734 for (i = 0; i < numNewMasks / 2; i++) { \
2735 kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); \
2736 kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i); \
2737 KMP_CPU_COPY(dest, src); \
2738 } \
2739 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2); \
2740 newMasks = temp; \
2741 } \
2742 KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask)); \
2743 nextNewMask++; \
2744 }
2745
2746 #define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId) \
2747 { \
2748 if (((_osId) > _maxOsId) || \
2749 (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \
2750 if (__kmp_affinity_verbose || \
2751 (__kmp_affinity_warnings && \
2752 (__kmp_affinity_type != affinity_none))) { \
2753 KMP_WARNING(AffIgnoreInvalidProcID, _osId); \
2754 } \
2755 } else { \
2756 ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))); \
2757 } \
2758 }
2759
2760 // Re-parse the proclist (for the explicit affinity type), and form the list
2761 // of affinity newMasks indexed by gtid.
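// A few illustrative proclist strings this parser accepts (the OS proc
// ids are made up):
//   "0,5,15-17"   - procs 0, 5, 15, 16, 17; one mask per proc
//   "8-16:4"      - a range with stride: procs 8, 12, 16
//   "{0,1},{2,3}" - two masks, each the union of the procs in one set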
2762 static void __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
2763 unsigned int *out_numMasks,
2764 const char *proclist,
2765 kmp_affin_mask_t *osId2Mask,
2766 int maxOsId) {
2767 int i;
2768 const char *scan = proclist;
2769 const char *next = proclist;
2770
2771 // We use the internal mask allocator for the temporary mask vector, so
2772 // that we can grow it by doubling when needed (see ADD_MASK above).
2773 numNewMasks = 2;
2774 KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
2775 nextNewMask = 0;
2776 kmp_affin_mask_t *sumMask;
2777 KMP_CPU_ALLOC(sumMask);
2778 int setSize = 0;
2779
2780 for (;;) {
2781 int start, end, stride;
2782
2783 SKIP_WS(scan);
2784 next = scan;
2785 if (*next == '\0') {
2786 break;
2787 }
2788
2789 if (*next == '{') {
2790 int num;
2791 setSize = 0;
2792 next++; // skip '{'
2793 SKIP_WS(next);
2794 scan = next;
2795
2796 // Read the first integer in the set.
2797 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2798 SKIP_DIGITS(next);
2799 num = __kmp_str_to_int(scan, *next);
2800 KMP_ASSERT2(num >= 0, "bad explicit proc list");
2801
2802 // Copy the mask for that osId to the sum (union) mask.
2803 if ((num > maxOsId) ||
2804 (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2805 if (__kmp_affinity_verbose ||
2806 (__kmp_affinity_warnings &&
2807 (__kmp_affinity_type != affinity_none))) {
2808 KMP_WARNING(AffIgnoreInvalidProcID, num);
2809 }
2810 KMP_CPU_ZERO(sumMask);
2811 } else {
2812 KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2813 setSize = 1;
2814 }
2815
2816 for (;;) {
2817 // Check for end of set.
2818 SKIP_WS(next);
2819 if (*next == '}') {
2820 next++; // skip '}'
2821 break;
2822 }
2823
2824 // Skip optional comma.
2825 if (*next == ',') {
2826 next++;
2827 }
2828 SKIP_WS(next);
2829
2830 // Read the next integer in the set.
2831 scan = next;
2832 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2833
2834 SKIP_DIGITS(next);
2835 num = __kmp_str_to_int(scan, *next);
2836 KMP_ASSERT2(num >= 0, "bad explicit proc list");
2837
2838 // Add the mask for that osId to the sum mask.
2839 if ((num > maxOsId) ||
2840 (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2841 if (__kmp_affinity_verbose ||
2842 (__kmp_affinity_warnings &&
2843 (__kmp_affinity_type != affinity_none))) {
2844 KMP_WARNING(AffIgnoreInvalidProcID, num);
2845 }
2846 } else {
2847 KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2848 setSize++;
2849 }
2850 }
2851 if (setSize > 0) {
2852 ADD_MASK(sumMask);
2853 }
2854
2855 SKIP_WS(next);
2856 if (*next == ',') {
2857 next++;
2858 }
2859 scan = next;
2860 continue;
2861 }
2862
2863 // Read the first integer.
2864 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2865 SKIP_DIGITS(next);
2866 start = __kmp_str_to_int(scan, *next);
2867 KMP_ASSERT2(start >= 0, "bad explicit proc list");
2868 SKIP_WS(next);
2869
2870 // If this isn't a range, then add a mask to the list and go on.
2871 if (*next != '-') {
2872 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2873
2874 // Skip optional comma.
2875 if (*next == ',') {
2876 next++;
2877 }
2878 scan = next;
2879 continue;
2880 }
2881
2882 // This is a range. Skip over the '-' and read in the 2nd int.
2883 next++; // skip '-'
2884 SKIP_WS(next);
2885 scan = next;
2886 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2887 SKIP_DIGITS(next);
2888 end = __kmp_str_to_int(scan, *next);
2889 KMP_ASSERT2(end >= 0, "bad explicit proc list");
2890
2891 // Check for a stride parameter
2892 stride = 1;
2893 SKIP_WS(next);
2894 if (*next == ':') {
2895 // A stride is specified. Skip over the ':' and read the 3rd int.
2896 int sign = +1;
2897 next++; // skip ':'
2898 SKIP_WS(next);
2899 scan = next;
2900 if (*next == '-') {
2901 sign = -1;
2902 next++;
2903 SKIP_WS(next);
2904 scan = next;
2905 }
2906 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2907 SKIP_DIGITS(next);
2908 stride = __kmp_str_to_int(scan, *next);
2909 KMP_ASSERT2(stride >= 0, "bad explicit proc list");
2910 stride *= sign;
2911 }
2912
2913 // Do some range checks.
2914 KMP_ASSERT2(stride != 0, "bad explicit proc list");
2915 if (stride > 0) {
2916 KMP_ASSERT2(start <= end, "bad explicit proc list");
2917 } else {
2918 KMP_ASSERT2(start >= end, "bad explicit proc list");
2919 }
2920 KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
2921
2922 // Add the mask for each OS proc # to the list.
2923 if (stride > 0) {
2924 do {
2925 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2926 start += stride;
2927 } while (start <= end);
2928 } else {
2929 do {
2930 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2931 start += stride;
2932 } while (start >= end);
2933 }
2934
2935 // Skip optional comma.
2936 SKIP_WS(next);
2937 if (*next == ',') {
2938 next++;
2939 }
2940 scan = next;
2941 }
2942
2943 *out_numMasks = nextNewMask;
2944 if (nextNewMask == 0) {
2945 *out_masks = NULL;
2946 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
KMP_CPU_FREE(sumMask); // sumMask must be freed on this path too
2947 return;
2948 }
2949 KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
2950 for (i = 0; i < nextNewMask; i++) {
2951 kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
2952 kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
2953 KMP_CPU_COPY(dest, src);
2954 }
2955 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
2956 KMP_CPU_FREE(sumMask);
2957 }
2958
2959 /*-----------------------------------------------------------------------------
2960 Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
2961 places. Again, here is the grammar:
2962
2963 place_list := place
2964 place_list := place , place_list
2965 place := num
2966 place := place : num
2967 place := place : num : signed
2968 place := { subplace_list }
2969 place := ! place // (lowest priority)
2970 subplace_list := subplace
2971 subplace_list := subplace , subplace_list
2972 subplace := num
2973 subplace := num : num
2974 subplace := num : num : signed
2975 signed := num
2976 signed := + signed
2977 signed := - signed
2978 -----------------------------------------------------------------------------*/
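// A few illustrative OMP_PLACES-style lists matching the grammar above
// (proc ids are made up):
//   "{0,1},{2,3}" - two places of two procs each
//   "{0:4}"       - one place holding procs 0-3 (num : count)
//   "{0:4}:2:4"   - place {0-3} replicated twice with stride 4, giving
//                   places {0-3} and {4-7}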
2979 static void __kmp_process_subplace_list(const char **scan,
2980 kmp_affin_mask_t *osId2Mask,
2981 int maxOsId, kmp_affin_mask_t *tempMask,
2982 int *setSize) {
2983 const char *next;
2984
2985 for (;;) {
2986 int start, count, stride, i;
2987
2988 // Read in the starting proc id
2989 SKIP_WS(*scan);
2990 KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
2991 next = *scan;
2992 SKIP_DIGITS(next);
2993 start = __kmp_str_to_int(*scan, *next);
2994 KMP_ASSERT(start >= 0);
2995 *scan = next;
2996
2997 // valid follow sets are ',' ':' and '}'
2998 SKIP_WS(*scan);
2999 if (**scan == '}' || **scan == ',') {
3000 if ((start > maxOsId) ||
3001 (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3002 if (__kmp_affinity_verbose ||
3003 (__kmp_affinity_warnings &&
3004 (__kmp_affinity_type != affinity_none))) {
3005 KMP_WARNING(AffIgnoreInvalidProcID, start);
3006 }
3007 } else {
3008 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3009 (*setSize)++;
3010 }
3011 if (**scan == '}') {
3012 break;
3013 }
3014 (*scan)++; // skip ','
3015 continue;
3016 }
3017 KMP_ASSERT2(**scan == ':', "bad explicit places list");
3018 (*scan)++; // skip ':'
3019
3020 // Read count parameter
3021 SKIP_WS(*scan);
3022 KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3023 next = *scan;
3024 SKIP_DIGITS(next);
3025 count = __kmp_str_to_int(*scan, *next);
3026 KMP_ASSERT(count >= 0);
3027 *scan = next;
3028
3029 // valid follow sets are ',' ':' and '}'
3030 SKIP_WS(*scan);
3031 if (**scan == '}' || **scan == ',') {
3032 for (i = 0; i < count; i++) {
3033 if ((start > maxOsId) ||
3034 (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3035 if (__kmp_affinity_verbose ||
3036 (__kmp_affinity_warnings &&
3037 (__kmp_affinity_type != affinity_none))) {
3038 KMP_WARNING(AffIgnoreInvalidProcID, start);
3039 }
3040 break; // don't proliferate warnings for large count
3041 } else {
3042 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3043 start++;
3044 (*setSize)++;
3045 }
3046 }
3047 if (**scan == '}') {
3048 break;
3049 }
3050 (*scan)++; // skip ','
3051 continue;
3052 }
3053 KMP_ASSERT2(**scan == ':', "bad explicit places list");
3054 (*scan)++; // skip ':'
3055
3056 // Read stride parameter
3057 int sign = +1;
3058 for (;;) {
3059 SKIP_WS(*scan);
3060 if (**scan == '+') {
3061 (*scan)++; // skip '+'
3062 continue;
3063 }
3064 if (**scan == '-') {
3065 sign *= -1;
3066 (*scan)++; // skip '-'
3067 continue;
3068 }
3069 break;
3070 }
3071 SKIP_WS(*scan);
3072 KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3073 next = *scan;
3074 SKIP_DIGITS(next);
3075 stride = __kmp_str_to_int(*scan, *next);
3076 KMP_ASSERT(stride >= 0);
3077 *scan = next;
3078 stride *= sign;
3079
3080 // valid follow sets are ',' and '}'
3081 SKIP_WS(*scan);
3082 if (**scan == '}' || **scan == ',') {
3083 for (i = 0; i < count; i++) {
3084 if ((start > maxOsId) ||
3085 (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3086 if (__kmp_affinity_verbose ||
3087 (__kmp_affinity_warnings &&
3088 (__kmp_affinity_type != affinity_none))) {
3089 KMP_WARNING(AffIgnoreInvalidProcID, start);
3090 }
3091 break; // don't proliferate warnings for large count
3092 } else {
3093 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3094 start += stride;
3095 (*setSize)++;
3096 }
3097 }
3098 if (**scan == '}') {
3099 break;
3100 }
3101 (*scan)++; // skip ','
3102 continue;
3103 }
3104
3105 KMP_ASSERT2(0, "bad explicit places list");
3106 }
3107 }
3108
3109 static void __kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
3110 int maxOsId, kmp_affin_mask_t *tempMask,
3111 int *setSize) {
3112 const char *next;
3113
3114 // valid follow sets are '{' '!' and num
3115 SKIP_WS(*scan);
3116 if (**scan == '{') {
3117 (*scan)++; // skip '{'
3118 __kmp_process_subplace_list(scan, osId2Mask, maxOsId, tempMask, setSize);
3119 KMP_ASSERT2(**scan == '}', "bad explicit places list");
3120 (*scan)++; // skip '}'
3121 } else if (**scan == '!') {
3122 (*scan)++; // skip '!'
3123 __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
3124 KMP_CPU_COMPLEMENT(maxOsId, tempMask);
3125 } else if ((**scan >= '0') && (**scan <= '9')) {
3126 next = *scan;
3127 SKIP_DIGITS(next);
3128 int num = __kmp_str_to_int(*scan, *next);
3129 KMP_ASSERT(num >= 0);
3130 if ((num > maxOsId) ||
3131 (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3132 if (__kmp_affinity_verbose ||
3133 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
3134 KMP_WARNING(AffIgnoreInvalidProcID, num);
3135 }
3136 } else {
3137 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
3138 (*setSize)++;
3139 }
3140 *scan = next; // skip num
3141 } else {
3142 KMP_ASSERT2(0, "bad explicit places list");
3143 }
3144 }
3145
3147 void __kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
3148 unsigned int *out_numMasks,
3149 const char *placelist,
3150 kmp_affin_mask_t *osId2Mask,
3151 int maxOsId) {
3152 int i, j, count, stride, sign;
3153 const char *scan = placelist;
3154 const char *next = placelist;
3155
3156 numNewMasks = 2;
3157 KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3158 nextNewMask = 0;
3159
3160 // tempMask is modified based on the previous or initial
3161 // place to form the current place
3162 // previousMask contains the previous place
3163 kmp_affin_mask_t *tempMask;
3164 kmp_affin_mask_t *previousMask;
3165 KMP_CPU_ALLOC(tempMask);
3166 KMP_CPU_ZERO(tempMask);
3167 KMP_CPU_ALLOC(previousMask);
3168 KMP_CPU_ZERO(previousMask);
3169 int setSize = 0;
3170
3171 for (;;) {
3172 __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);
3173
3174 // valid follow sets are ',' ':' and EOL
3175 SKIP_WS(scan);
3176 if (*scan == '\0' || *scan == ',') {
3177 if (setSize > 0) {
3178 ADD_MASK(tempMask);
3179 }
3180 KMP_CPU_ZERO(tempMask);
3181 setSize = 0;
3182 if (*scan == '\0') {
3183 break;
3184 }
3185 scan++; // skip ','
3186 continue;
3187 }
3188
3189 KMP_ASSERT2(*scan == ':', "bad explicit places list");
3190 scan++; // skip ':'
3191
3192 // Read count parameter
3193 SKIP_WS(scan);
3194 KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3195 next = scan;
3196 SKIP_DIGITS(next);
3197 count = __kmp_str_to_int(scan, *next);
3198 KMP_ASSERT(count >= 0);
3199 scan = next;
3200
3201 // valid follow sets are ',' ':' and EOL
3202 SKIP_WS(scan);
3203 if (*scan == '\0' || *scan == ',') {
3204 stride = +1;
3205 } else {
3206 KMP_ASSERT2(*scan == ':', "bad explicit places list");
3207 scan++; // skip ':'
3208
3209 // Read stride parameter
3210 sign = +1;
3211 for (;;) {
3212 SKIP_WS(scan);
3213 if (*scan == '+') {
3214 scan++; // skip '+'
3215 continue;
3216 }
3217 if (*scan == '-') {
3218 sign *= -1;
3219 scan++; // skip '-'
3220 continue;
3221 }
3222 break;
3223 }
3224 SKIP_WS(scan);
3225 KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3226 next = scan;
3227 SKIP_DIGITS(next);
3228 stride = __kmp_str_to_int(scan, *next);
3229 KMP_DEBUG_ASSERT(stride >= 0);
3230 scan = next;
3231 stride *= sign;
3232 }
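// Worked example of the count/stride expansion below (hypothetical place
// list "{0,1}:3:2"): the base place is {0,1}, count == 3 and stride == 2, so
// the loop emits {0,1}, {2,3}, {4,5} -- each new place is the previous one
// with every set bit shifted by the stride; shifted procs that fall outside
// the topology are dropped with a warning.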
3233
3234 // Add places determined by initial_place : count : stride
3235 for (i = 0; i < count; i++) {
3236 if (setSize == 0) {
3237 break;
3238 }
3239 // Add the current place, then build the next place (tempMask) from that
3240 KMP_CPU_COPY(previousMask, tempMask);
3241 ADD_MASK(previousMask);
3242 KMP_CPU_ZERO(tempMask);
3243 setSize = 0;
3244 KMP_CPU_SET_ITERATE(j, previousMask) {
3245 if (!KMP_CPU_ISSET(j, previousMask)) {
3246 continue;
3247 }
3248 if ((j + stride > maxOsId) || (j + stride < 0) ||
3249 (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
3250 (!KMP_CPU_ISSET(j + stride,
3251 KMP_CPU_INDEX(osId2Mask, j + stride)))) {
3252 if ((__kmp_affinity_verbose ||
3253 (__kmp_affinity_warnings &&
3254 (__kmp_affinity_type != affinity_none))) &&
3255 i < count - 1) {
3256 KMP_WARNING(AffIgnoreInvalidProcID, j + stride);
3257 }
3258 continue;
3259 }
3260 KMP_CPU_SET(j + stride, tempMask);
3261 setSize++;
3262 }
3263 }
3264 KMP_CPU_ZERO(tempMask);
3265 setSize = 0;
3266
3267 // valid follow sets are ',' and EOL
3268 SKIP_WS(scan);
3269 if (*scan == '\0') {
3270 break;
3271 }
3272 if (*scan == ',') {
3273 scan++; // skip ','
3274 continue;
3275 }
3276
3277 KMP_ASSERT2(0, "bad explicit places list");
3278 }
3279
3280 *out_numMasks = nextNewMask;
3281 if (nextNewMask == 0) {
3282 *out_masks = NULL;
// free the scratch masks on this early-return path as well
KMP_CPU_FREE(tempMask);
KMP_CPU_FREE(previousMask);
3283 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3284 return;
3285 }
3286 KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3287 KMP_CPU_FREE(tempMask);
3288 KMP_CPU_FREE(previousMask);
3289 for (i = 0; i < nextNewMask; i++) {
3290 kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3291 kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3292 KMP_CPU_COPY(dest, src);
3293 }
3294 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3295 }
3296
3297 #undef ADD_MASK
3298 #undef ADD_MASK_OSID
3299
3300 #if KMP_USE_HWLOC
3301 static int __kmp_hwloc_skip_PUs_obj(hwloc_topology_t t, hwloc_obj_t o) {
3302 // skip PUs descendants of the object o
3303 int skipped = 0;
3304 hwloc_obj_t hT = NULL;
3305 int N = __kmp_hwloc_count_children_by_type(t, o, HWLOC_OBJ_PU, &hT);
3306 for (int i = 0; i < N; ++i) {
3307 KMP_DEBUG_ASSERT(hT);
3308 unsigned idx = hT->os_index;
3309 if (KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3310 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3311 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3312 ++skipped;
3313 }
3314 hT = hwloc_get_next_obj_by_type(t, HWLOC_OBJ_PU, hT);
3315 }
3316 return skipped; // count number of skipped units
3317 }
3318
3319 static int __kmp_hwloc_obj_has_PUs(hwloc_topology_t t, hwloc_obj_t o) {
3320 // check if obj has PUs present in fullMask
3321 hwloc_obj_t hT = NULL;
3322 int N = __kmp_hwloc_count_children_by_type(t, o, HWLOC_OBJ_PU, &hT);
3323 for (int i = 0; i < N; ++i) {
3324 KMP_DEBUG_ASSERT(hT);
3325 unsigned idx = hT->os_index;
3326 if (KMP_CPU_ISSET(idx, __kmp_affin_fullMask))
3327 return 1; // found PU
3328 hT = hwloc_get_next_obj_by_type(t, HWLOC_OBJ_PU, hT);
3329 }
3330 return 0; // no PUs found
3331 }
3332 #endif // KMP_USE_HWLOC
3333
3334 static void __kmp_apply_thread_places(AddrUnsPair **pAddr, int depth) {
3335 AddrUnsPair *newAddr;
3336 if (__kmp_hws_requested == 0)
3337 goto _exit; // no topology limiting actions requested, exit
3338 #if KMP_USE_HWLOC
3339 if (__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
3340 // The number of subobjects is calculated dynamically, so this works for
3341 // any non-uniform topology.
3342 // L2 cache objects are located by depth; other objects by type.
3343 hwloc_topology_t tp = __kmp_hwloc_topology;
3344 int nS = 0, nN = 0, nL = 0, nC = 0,
3345 nT = 0; // logical index including skipped
3346 int nCr = 0, nTr = 0; // number of requested units
3347 int nPkg = 0, nCo = 0, n_new = 0, n_old = 0, nCpP = 0, nTpC = 0; // counters
3348 hwloc_obj_t hT, hC, hL, hN, hS; // hwloc objects (pointers to)
3349 int L2depth, idx;
3350
3351 // check support of extensions ----------------------------------
3352 int numa_support = 0, tile_support = 0;
3353 if (__kmp_pu_os_idx)
3354 hT = hwloc_get_pu_obj_by_os_index(tp,
3355 __kmp_pu_os_idx[__kmp_avail_proc - 1]);
3356 else
3357 hT = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PU, __kmp_avail_proc - 1);
3358 if (hT == NULL) { // something's gone wrong
3359 KMP_WARNING(AffHWSubsetUnsupported);
3360 goto _exit;
3361 }
3362 // check NUMA node
3363 hN = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hT);
3364 hS = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hT);
3365 if (hN != NULL && hN->depth > hS->depth) {
3366 numa_support = 1; // 1 in case socket includes node(s)
3367 } else if (__kmp_hws_node.num > 0) {
3368 // don't support sockets inside NUMA node (no such HW found for testing)
3369 KMP_WARNING(AffHWSubsetUnsupported);
3370 goto _exit;
3371 }
3372 // check L2 cache, get object by depth because of multiple caches
3373 L2depth = hwloc_get_cache_type_depth(tp, 2, HWLOC_OBJ_CACHE_UNIFIED);
3374 hL = hwloc_get_ancestor_obj_by_depth(tp, L2depth, hT);
3375 if (hL != NULL &&
3376 __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC) > 1) {
3377 tile_support = 1; // no point counting L2 if it contains a single core
3378 } else if (__kmp_hws_tile.num > 0) {
3379 if (__kmp_hws_core.num == 0) {
3380 __kmp_hws_core = __kmp_hws_tile; // replace L2 with core
3381 __kmp_hws_tile.num = 0;
3382 } else {
3383 // L2 and core are both requested, but represent same object
3384 KMP_WARNING(AffHWSubsetInvalid);
3385 goto _exit;
3386 }
3387 }
3388 // end of check of extensions -----------------------------------
3389
3390 // fill in unset items, validate settings -----------------------
3391 if (__kmp_hws_socket.num == 0)
3392 __kmp_hws_socket.num = nPackages; // use all available sockets
3393 if (__kmp_hws_socket.offset >= nPackages) {
3394 KMP_WARNING(AffHWSubsetManySockets);
3395 goto _exit;
3396 }
3397 if (numa_support) {
3398 hN = NULL;
3399 int NN = __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_NUMANODE,
3400 &hN); // num nodes in socket
3401 if (__kmp_hws_node.num == 0)
3402 __kmp_hws_node.num = NN; // use all available nodes
3403 if (__kmp_hws_node.offset >= NN) {
3404 KMP_WARNING(AffHWSubsetManyNodes);
3405 goto _exit;
3406 }
3407 if (tile_support) {
3408 // get num tiles in node
3409 int NL = __kmp_hwloc_count_children_by_depth(tp, hN, L2depth, &hL);
3410 if (__kmp_hws_tile.num == 0) {
3411 __kmp_hws_tile.num = NL + 1;
3412 } // use all available tiles; another node may have more tiles, hence the +1
3413 if (__kmp_hws_tile.offset >= NL) {
3414 KMP_WARNING(AffHWSubsetManyTiles);
3415 goto _exit;
3416 }
3417 int NC = __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE,
3418 &hC); // num cores in tile
3419 if (__kmp_hws_core.num == 0)
3420 __kmp_hws_core.num = NC; // use all available cores
3421 if (__kmp_hws_core.offset >= NC) {
3422 KMP_WARNING(AffHWSubsetManyCores);
3423 goto _exit;
3424 }
3425 } else { // tile_support
3426 int NC = __kmp_hwloc_count_children_by_type(tp, hN, HWLOC_OBJ_CORE,
3427 &hC); // num cores in node
3428 if (__kmp_hws_core.num == 0)
3429 __kmp_hws_core.num = NC; // use all available cores
3430 if (__kmp_hws_core.offset >= NC) {
3431 KMP_WARNING(AffHWSubsetManyCores);
3432 goto _exit;
3433 }
3434 } // tile_support
3435 } else { // numa_support
3436 if (tile_support) {
3437 // get num tiles in socket
3438 int NL = __kmp_hwloc_count_children_by_depth(tp, hS, L2depth, &hL);
3439 if (__kmp_hws_tile.num == 0)
3440 __kmp_hws_tile.num = NL; // use all available tiles
3441 if (__kmp_hws_tile.offset >= NL) {
3442 KMP_WARNING(AffHWSubsetManyTiles);
3443 goto _exit;
3444 }
3445 int NC = __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE,
3446 &hC); // num cores in tile
3447 if (__kmp_hws_core.num == 0)
3448 __kmp_hws_core.num = NC; // use all available cores
3449 if (__kmp_hws_core.offset >= NC) {
3450 KMP_WARNING(AffHWSubsetManyCores);
3451 goto _exit;
3452 }
3453 } else { // tile_support
3454 int NC = __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_CORE,
3455 &hC); // num cores in socket
3456 if (__kmp_hws_core.num == 0)
3457 __kmp_hws_core.num = NC; // use all available cores
3458 if (__kmp_hws_core.offset >= NC) {
3459 KMP_WARNING(AffHWSubsetManyCores);
3460 goto _exit;
3461 }
3462 } // tile_support
3463 }
3464 if (__kmp_hws_proc.num == 0)
3465 __kmp_hws_proc.num = __kmp_nThreadsPerCore; // use all available procs
3466 if (__kmp_hws_proc.offset >= __kmp_nThreadsPerCore) {
3467 KMP_WARNING(AffHWSubsetManyProcs);
3468 goto _exit;
3469 }
3470 // end of validation --------------------------------------------
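// Sketch of the net effect (hypothetical 2-socket x 8-core x 2-thread box):
// KMP_HW_SUBSET=1s,4c,1t leaves __kmp_hws_socket.num == 1,
// __kmp_hws_core.num == 4 and __kmp_hws_proc.num == 1, so the traversal
// below keeps 1*4*1 == 4 PUs and clears the remaining 28 from
// __kmp_affin_fullMask.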
3471
3472 if (pAddr) // pAddr is NULL in case of affinity_none
3473 newAddr = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) *
3474 __kmp_avail_proc); // max size
3475 // main loop to form HW subset ----------------------------------
3476 hS = NULL;
3477 int NP = hwloc_get_nbobjs_by_type(tp, HWLOC_OBJ_PACKAGE);
3478 for (int s = 0; s < NP; ++s) {
3479 // Check Socket -----------------------------------------------
3480 hS = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hS);
3481 if (!__kmp_hwloc_obj_has_PUs(tp, hS))
3482 continue; // skip socket if all PUs are out of fullMask
3483 ++nS; // only count objects that have PUs in the affinity mask
3484 if (nS <= __kmp_hws_socket.offset ||
3485 nS > __kmp_hws_socket.num + __kmp_hws_socket.offset) {
3486 n_old += __kmp_hwloc_skip_PUs_obj(tp, hS); // skip socket
3487 continue; // move to next socket
3488 }
3489 nCr = 0; // count number of cores per socket
3490 // socket requested, go down the topology tree
3491 // check 4 cases: (+NUMA+Tile), (+NUMA-Tile), (-NUMA+Tile), (-NUMA-Tile)
3492 if (numa_support) {
3493 nN = 0;
3494 hN = NULL;
3495 // num nodes in current socket
3496 int NN =
3497 __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_NUMANODE, &hN);
3498 for (int n = 0; n < NN; ++n) {
3499 // Check NUMA Node ----------------------------------------
3500 if (!__kmp_hwloc_obj_has_PUs(tp, hN)) {
3501 hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3502 continue; // skip node if all PUs are out of fullMask
3503 }
3504 ++nN;
3505 if (nN <= __kmp_hws_node.offset ||
3506 nN > __kmp_hws_node.num + __kmp_hws_node.offset) {
3507 // skip node as not requested
3508 n_old += __kmp_hwloc_skip_PUs_obj(tp, hN); // skip node
3509 hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3510 continue; // move to next node
3511 }
3512 // node requested, go down the topology tree
3513 if (tile_support) {
3514 nL = 0;
3515 hL = NULL;
3516 int NL = __kmp_hwloc_count_children_by_depth(tp, hN, L2depth, &hL);
3517 for (int l = 0; l < NL; ++l) {
3518 // Check L2 (tile) ------------------------------------
3519 if (!__kmp_hwloc_obj_has_PUs(tp, hL)) {
3520 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3521 continue; // skip tile if all PUs are out of fullMask
3522 }
3523 ++nL;
3524 if (nL <= __kmp_hws_tile.offset ||
3525 nL > __kmp_hws_tile.num + __kmp_hws_tile.offset) {
3526 // skip tile as not requested
3527 n_old += __kmp_hwloc_skip_PUs_obj(tp, hL); // skip tile
3528 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3529 continue; // move to next tile
3530 }
3531 // tile requested, go down the topology tree
3532 nC = 0;
3533 hC = NULL;
3534 // num cores in current tile
3535 int NC = __kmp_hwloc_count_children_by_type(tp, hL,
3536 HWLOC_OBJ_CORE, &hC);
3537 for (int c = 0; c < NC; ++c) {
3538 // Check Core ---------------------------------------
3539 if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3540 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3541 continue; // skip core if all PUs are out of fullMask
3542 }
3543 ++nC;
3544 if (nC <= __kmp_hws_core.offset ||
3545 nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3546 // skip core as not requested
3547 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3548 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3549 continue; // move to next core
3550 }
3551 // core requested, go down to PUs
3552 nT = 0;
3553 nTr = 0;
3554 hT = NULL;
3555 // num procs in current core
3556 int NT = __kmp_hwloc_count_children_by_type(tp, hC,
3557 HWLOC_OBJ_PU, &hT);
3558 for (int t = 0; t < NT; ++t) {
3559 // Check PU ---------------------------------------
3560 idx = hT->os_index;
3561 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3562 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3563 continue; // skip PU if not in fullMask
3564 }
3565 ++nT;
3566 if (nT <= __kmp_hws_proc.offset ||
3567 nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3568 // skip PU
3569 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3570 ++n_old;
3571 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3572 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3573 continue; // move to next PU
3574 }
3575 ++nTr;
3576 if (pAddr) // collect requested thread's data
3577 newAddr[n_new] = (*pAddr)[n_old];
3578 ++n_new;
3579 ++n_old;
3580 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3581 } // threads loop
3582 if (nTr > 0) {
3583 ++nCr; // num cores per socket
3584 ++nCo; // total num cores
3585 if (nTr > nTpC)
3586 nTpC = nTr; // calc max threads per core
3587 }
3588 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3589 } // cores loop
3590 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3591 } // tiles loop
3592 } else { // tile_support
3593 // no tiles, check cores
3594 nC = 0;
3595 hC = NULL;
3596 // num cores in current node
3597 int NC =
3598 __kmp_hwloc_count_children_by_type(tp, hN, HWLOC_OBJ_CORE, &hC);
3599 for (int c = 0; c < NC; ++c) {
3600 // Check Core ---------------------------------------
3601 if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3602 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3603 continue; // skip core if all PUs are out of fullMask
3604 }
3605 ++nC;
3606 if (nC <= __kmp_hws_core.offset ||
3607 nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3608 // skip core as not requested
3609 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3610 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3611 continue; // move to next core
3612 }
3613 // core requested, go down to PUs
3614 nT = 0;
3615 nTr = 0;
3616 hT = NULL;
3617 int NT =
3618 __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3619 for (int t = 0; t < NT; ++t) {
3620 // Check PU ---------------------------------------
3621 idx = hT->os_index;
3622 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3623 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3624 continue; // skip PU if not in fullMask
3625 }
3626 ++nT;
3627 if (nT <= __kmp_hws_proc.offset ||
3628 nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3629 // skip PU
3630 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3631 ++n_old;
3632 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3633 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3634 continue; // move to next PU
3635 }
3636 ++nTr;
3637 if (pAddr) // collect requested thread's data
3638 newAddr[n_new] = (*pAddr)[n_old];
3639 ++n_new;
3640 ++n_old;
3641 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3642 } // threads loop
3643 if (nTr > 0) {
3644 ++nCr; // num cores per socket
3645 ++nCo; // total num cores
3646 if (nTr > nTpC)
3647 nTpC = nTr; // calc max threads per core
3648 }
3649 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3650 } // cores loop
3651 } // tiles support
3652 hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3653 } // nodes loop
3654 } else { // numa_support
3655 // no NUMA support
3656 if (tile_support) {
3657 nL = 0;
3658 hL = NULL;
3659 // num tiles in current socket
3660 int NL = __kmp_hwloc_count_children_by_depth(tp, hS, L2depth, &hL);
3661 for (int l = 0; l < NL; ++l) {
3662 // Check L2 (tile) ------------------------------------
3663 if (!__kmp_hwloc_obj_has_PUs(tp, hL)) {
3664 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3665 continue; // skip tile if all PUs are out of fullMask
3666 }
3667 ++nL;
3668 if (nL <= __kmp_hws_tile.offset ||
3669 nL > __kmp_hws_tile.num + __kmp_hws_tile.offset) {
3670 // skip tile as not requested
3671 n_old += __kmp_hwloc_skip_PUs_obj(tp, hL); // skip tile
3672 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3673 continue; // move to next tile
3674 }
3675 // tile requested, go down the topology tree
3676 nC = 0;
3677 hC = NULL;
3678 // num cores per tile
3679 int NC =
3680 __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC);
3681 for (int c = 0; c < NC; ++c) {
3682 // Check Core ---------------------------------------
3683 if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3684 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3685 continue; // skip core if all PUs are out of fullMask
3686 }
3687 ++nC;
3688 if (nC <= __kmp_hws_core.offset ||
3689 nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3690 // skip core as not requested
3691 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3692 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3693 continue; // move to next core
3694 }
3695 // core requested, go down to PUs
3696 nT = 0;
3697 nTr = 0;
3698 hT = NULL;
3699 // num procs per core
3700 int NT =
3701 __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3702 for (int t = 0; t < NT; ++t) {
3703 // Check PU ---------------------------------------
3704 idx = hT->os_index;
3705 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3706 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3707 continue; // skip PU if not in fullMask
3708 }
3709 ++nT;
3710 if (nT <= __kmp_hws_proc.offset ||
3711 nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3712 // skip PU
3713 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3714 ++n_old;
3715 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3716 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3717 continue; // move to next PU
3718 }
3719 ++nTr;
3720 if (pAddr) // collect requested thread's data
3721 newAddr[n_new] = (*pAddr)[n_old];
3722 ++n_new;
3723 ++n_old;
3724 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3725 } // threads loop
3726 if (nTr > 0) {
3727 ++nCr; // num cores per socket
3728 ++nCo; // total num cores
3729 if (nTr > nTpC)
3730 nTpC = nTr; // calc max threads per core
3731 }
3732 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3733 } // cores loop
3734 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3735 } // tiles loop
3736 } else { // tile_support
3737 // no tiles, check cores
3738 nC = 0;
3739 hC = NULL;
3740 // num cores in socket
3741 int NC =
3742 __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_CORE, &hC);
3743 for (int c = 0; c < NC; ++c) {
3744 // Check Core -------------------------------------------
3745 if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3746 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3747 continue; // skip core if all PUs are out of fullMask
3748 }
3749 ++nC;
3750 if (nC <= __kmp_hws_core.offset ||
3751 nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3752 // skip core as not requested
3753 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3754 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3755 continue; // move to next core
3756 }
3757 // core requested, go down to PUs
3758 nT = 0;
3759 nTr = 0;
3760 hT = NULL;
3761 // num procs per core
3762 int NT =
3763 __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3764 for (int t = 0; t < NT; ++t) {
3765 // Check PU ---------------------------------------
3766 idx = hT->os_index;
3767 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3768 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3769 continue; // skip PU if not in fullMask
3770 }
3771 ++nT;
3772 if (nT <= __kmp_hws_proc.offset ||
3773 nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3774 // skip PU
3775 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3776 ++n_old;
3777 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3778 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3779 continue; // move to next PU
3780 }
3781 ++nTr;
3782 if (pAddr) // collect requested thread's data
3783 newAddr[n_new] = (*pAddr)[n_old];
3784 ++n_new;
3785 ++n_old;
3786 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3787 } // threads loop
3788 if (nTr > 0) {
3789 ++nCr; // num cores per socket
3790 ++nCo; // total num cores
3791 if (nTr > nTpC)
3792 nTpC = nTr; // calc max threads per core
3793 }
3794 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3795 } // cores loop
3796 } // tiles support
3797 } // numa_support
3798 if (nCr > 0) { // found cores?
3799 ++nPkg; // num sockets
3800 if (nCr > nCpP)
3801 nCpP = nCr; // calc max cores per socket
3802 }
3803 } // sockets loop
3804
3805 // check the subset is valid
3806 KMP_DEBUG_ASSERT(n_old == __kmp_avail_proc);
3807 KMP_DEBUG_ASSERT(nPkg > 0);
3808 KMP_DEBUG_ASSERT(nCpP > 0);
3809 KMP_DEBUG_ASSERT(nTpC > 0);
3810 KMP_DEBUG_ASSERT(nCo > 0);
3811 KMP_DEBUG_ASSERT(nPkg <= nPackages);
3812 KMP_DEBUG_ASSERT(nCpP <= nCoresPerPkg);
3813 KMP_DEBUG_ASSERT(nTpC <= __kmp_nThreadsPerCore);
3814 KMP_DEBUG_ASSERT(nCo <= __kmp_ncores);
3815
3816 nPackages = nPkg; // correct num sockets
3817 nCoresPerPkg = nCpP; // correct num cores per socket
3818 __kmp_nThreadsPerCore = nTpC; // correct num threads per core
3819 __kmp_avail_proc = n_new; // correct num procs
3820 __kmp_ncores = nCo; // correct num cores
3821 // hwloc topology method end
3822 } else
3823 #endif // KMP_USE_HWLOC
3824 {
3825 int n_old = 0, n_new = 0, proc_num = 0;
3826 if (__kmp_hws_node.num > 0 || __kmp_hws_tile.num > 0) {
3827 KMP_WARNING(AffHWSubsetNoHWLOC);
3828 goto _exit;
3829 }
3830 if (__kmp_hws_socket.num == 0)
3831 __kmp_hws_socket.num = nPackages; // use all available sockets
3832 if (__kmp_hws_core.num == 0)
3833 __kmp_hws_core.num = nCoresPerPkg; // use all available cores
3834 if (__kmp_hws_proc.num == 0 || __kmp_hws_proc.num > __kmp_nThreadsPerCore)
3835 __kmp_hws_proc.num = __kmp_nThreadsPerCore; // use all HW contexts
3836 if (!__kmp_affinity_uniform_topology()) {
3837 KMP_WARNING(AffHWSubsetNonUniform);
3838 goto _exit; // don't support non-uniform topology
3839 }
3840 if (depth > 3) {
3841 KMP_WARNING(AffHWSubsetNonThreeLevel);
3842 goto _exit; // topologies with more than 3 levels are not supported
3843 }
3844 if (__kmp_hws_socket.offset + __kmp_hws_socket.num > nPackages) {
3845 KMP_WARNING(AffHWSubsetManySockets);
3846 goto _exit;
3847 }
3848 if (__kmp_hws_core.offset + __kmp_hws_core.num > nCoresPerPkg) {
3849 KMP_WARNING(AffHWSubsetManyCores);
3850 goto _exit;
3851 }
3852 // Form the requested subset
3853 if (pAddr) // pAddr is NULL in case of affinity_none
3854 newAddr = (AddrUnsPair *)__kmp_allocate(
3855 sizeof(AddrUnsPair) * __kmp_hws_socket.num * __kmp_hws_core.num *
3856 __kmp_hws_proc.num);
3857 for (int i = 0; i < nPackages; ++i) {
3858 if (i < __kmp_hws_socket.offset ||
3859 i >= __kmp_hws_socket.offset + __kmp_hws_socket.num) {
3860 // skip not-requested socket
3861 n_old += nCoresPerPkg * __kmp_nThreadsPerCore;
3862 if (__kmp_pu_os_idx != NULL) {
3863 // walk through skipped socket
3864 for (int j = 0; j < nCoresPerPkg; ++j) {
3865 for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
3866 KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
3867 ++proc_num;
3868 }
3869 }
3870 }
3871 } else {
3872 // walk through requested socket
3873 for (int j = 0; j < nCoresPerPkg; ++j) {
3874 if (j < __kmp_hws_core.offset ||
3875 j >= __kmp_hws_core.offset +
3876 __kmp_hws_core.num) { // skip not-requested core
3877 n_old += __kmp_nThreadsPerCore;
3878 if (__kmp_pu_os_idx != NULL) {
3879 for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
3880 KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
3881 ++proc_num;
3882 }
3883 }
3884 } else {
3885 // walk through requested core
3886 for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
3887 if (k < __kmp_hws_proc.num) {
3888 if (pAddr) // collect requested thread's data
3889 newAddr[n_new] = (*pAddr)[n_old];
3890 n_new++;
3891 } else {
3892 if (__kmp_pu_os_idx != NULL)
3893 KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
3894 }
3895 n_old++;
3896 ++proc_num;
3897 }
3898 }
3899 }
3900 }
3901 }
3902 KMP_DEBUG_ASSERT(n_old == nPackages * nCoresPerPkg * __kmp_nThreadsPerCore);
3903 KMP_DEBUG_ASSERT(n_new ==
3904 __kmp_hws_socket.num * __kmp_hws_core.num *
3905 __kmp_hws_proc.num);
3906 nPackages = __kmp_hws_socket.num; // correct nPackages
3907 nCoresPerPkg = __kmp_hws_core.num; // correct nCoresPerPkg
3908 __kmp_nThreadsPerCore = __kmp_hws_proc.num; // correct __kmp_nThreadsPerCore
3909 __kmp_avail_proc = n_new; // correct avail_proc
3910 __kmp_ncores = nPackages * __kmp_hws_core.num; // correct ncores
3911 } // non-hwloc topology method
3912 if (pAddr) {
3913 __kmp_free(*pAddr);
3914 *pAddr = newAddr; // replace old topology with new one
3915 }
3916 if (__kmp_affinity_verbose) {
3917 KMP_INFORM(AvailableOSProc, "KMP_HW_SUBSET", __kmp_avail_proc);
3918 kmp_str_buf_t buf;
3919 __kmp_str_buf_init(&buf);
3920 __kmp_str_buf_print(&buf, "%d", nPackages);
3921 KMP_INFORM(TopologyExtra, "KMP_HW_SUBSET", buf.str, nCoresPerPkg,
3922 __kmp_nThreadsPerCore, __kmp_ncores);
3923 __kmp_str_buf_free(&buf);
3924 }
3925 _exit:
3926 if (__kmp_pu_os_idx != NULL) {
3927 __kmp_free(__kmp_pu_os_idx);
3928 __kmp_pu_os_idx = NULL;
3929 }
3930 }
3931
3932 // This function figures out the deepest level at which there is at least one
3933 // cluster/core with more than one processing unit bound to it.
3934 static int __kmp_affinity_find_core_level(const AddrUnsPair *address2os,
3935 int nprocs, int bottom_level) {
3936 int core_level = 0;
3937
3938 for (int i = 0; i < nprocs; i++) {
3939 for (int j = bottom_level; j > 0; j--) {
3940 if (address2os[i].first.labels[j] > 0) {
3941 if (core_level < (j - 1)) {
3942 core_level = j - 1;
3943 }
3944 }
3945 }
3946 }
3947 return core_level;
3948 }
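// Example for __kmp_affinity_find_core_level() above (hypothetical depth-3
// topology, bottom_level == 2): if any core runs a second HW thread, some
// address has labels[2] > 0 and core_level becomes 1 (the core layer); with
// one thread per core, only labels[1] > 0 fires, so core_level stays 0 and
// the package layer plays the "core" role.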
3949
3950 // This function counts the number of clusters/cores at the given level.
3951 static int __kmp_affinity_compute_ncores(const AddrUnsPair *address2os,
3952 int nprocs, int bottom_level,
3953 int core_level) {
3954 int ncores = 0;
3955 int i, j;
3956
3957 j = bottom_level;
3958 for (i = 0; i < nprocs; i++) {
3959 for (j = bottom_level; j > core_level; j--) {
3960 if ((i + 1) < nprocs) {
3961 if (address2os[i + 1].first.labels[j] > 0) {
3962 break;
3963 }
3964 }
3965 }
3966 if (j == core_level) {
3967 ncores++;
3968 }
3969 }
3970 if (j > core_level) {
3971 // In case of ( nprocs < __kmp_avail_proc ) we may end up too deep and
3972 // miss one core. This may occur when called from __kmp_affinity_find_core().
3973 ncores++;
3974 }
3975 return ncores;
3976 }
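// Continuing the example above (core_level == 1, bottom_level == 2, two
// cores with two threads each): an entry whose successor has labels[2] > 0
// is followed by another thread of the same core and is not counted; an
// entry whose successor starts a new core (labels[2] == 0), or that has no
// successor, closes a core, so ncores ends up as 2.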
3977
3978 // This function finds the cluster/core to which a given processing unit is bound.
3979 static int __kmp_affinity_find_core(const AddrUnsPair *address2os, int proc,
3980 int bottom_level, int core_level) {
3981 return __kmp_affinity_compute_ncores(address2os, proc + 1, bottom_level,
3982 core_level) -
3983 1;
3984 }
3985
3986 // This function finds the maximal number of processing units bound to a
3987 // single cluster/core at the given level.
3988 static int __kmp_affinity_max_proc_per_core(const AddrUnsPair *address2os,
3989 int nprocs, int bottom_level,
3990 int core_level) {
3991 int maxprocpercore = 0;
3992
3993 if (core_level < bottom_level) {
3994 for (int i = 0; i < nprocs; i++) {
3995 int percore = address2os[i].first.labels[core_level + 1] + 1;
3996
3997 if (percore > maxprocpercore) {
3998 maxprocpercore = percore;
3999 }
4000 }
4001 } else {
4002 maxprocpercore = 1;
4003 }
4004 return maxprocpercore;
4005 }
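// Example: if the busiest core carries threads labeled 0..3 at level
// core_level + 1, maxprocpercore == 3 + 1 == 4; when core_level is already
// the bottom of the tree there is one PU per "core" by construction, so the
// function returns 1.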
4006
4007 static AddrUnsPair *address2os = NULL;
4008 static int *procarr = NULL;
4009 static int __kmp_aff_depth = 0;
4010
4011 #if KMP_USE_HIER_SCHED
4012 #define KMP_EXIT_AFF_NONE \
4013 KMP_ASSERT(__kmp_affinity_type == affinity_none); \
4014 KMP_ASSERT(address2os == NULL); \
4015 __kmp_apply_thread_places(NULL, 0); \
4016 __kmp_create_affinity_none_places(); \
4017 __kmp_dispatch_set_hierarchy_values(); \
4018 return;
4019 #else
4020 #define KMP_EXIT_AFF_NONE \
4021 KMP_ASSERT(__kmp_affinity_type == affinity_none); \
4022 KMP_ASSERT(address2os == NULL); \
4023 __kmp_apply_thread_places(NULL, 0); \
4024 __kmp_create_affinity_none_places(); \
4025 return;
4026 #endif
4027
4028 // Create a one element mask array (set of places) which only contains the
4029 // initial process's affinity mask
4030 static void __kmp_create_affinity_none_places() {
4031 KMP_ASSERT(__kmp_affin_fullMask != NULL);
4032 KMP_ASSERT(__kmp_affinity_type == affinity_none);
4033 __kmp_affinity_num_masks = 1;
4034 KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4035 kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, 0);
4036 KMP_CPU_COPY(dest, __kmp_affin_fullMask);
4037 }
4038
4039 static int __kmp_affinity_cmp_Address_child_num(const void *a, const void *b) {
4040 const Address *aa = &(((const AddrUnsPair *)a)->first);
4041 const Address *bb = &(((const AddrUnsPair *)b)->first);
4042 unsigned depth = aa->depth;
4043 unsigned i;
4044 KMP_DEBUG_ASSERT(depth == bb->depth);
4045 KMP_DEBUG_ASSERT((unsigned)__kmp_affinity_compact <= depth);
4046 KMP_DEBUG_ASSERT(__kmp_affinity_compact >= 0);
4047 for (i = 0; i < (unsigned)__kmp_affinity_compact; i++) {
4048 int j = depth - i - 1;
4049 if (aa->childNums[j] < bb->childNums[j])
4050 return -1;
4051 if (aa->childNums[j] > bb->childNums[j])
4052 return 1;
4053 }
4054 for (; i < depth; i++) {
4055 int j = i - __kmp_affinity_compact;
4056 if (aa->childNums[j] < bb->childNums[j])
4057 return -1;
4058 if (aa->childNums[j] > bb->childNums[j])
4059 return 1;
4060 }
4061 return 0;
4062 }
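// Ordering sketch for the comparator above (hypothetical depth == 3,
// __kmp_affinity_compact == 1): the first loop compares the innermost level
// (thread, j == 2) and the second loop then compares package (j == 0) and
// core (j == 1), so the sort interleaves threads across packages -- this
// permutation is what the scatter/compact settings ultimately select.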
4063
4064 static void __kmp_aux_affinity_initialize(void) {
4065 if (__kmp_affinity_masks != NULL) {
4066 KMP_ASSERT(__kmp_affin_fullMask != NULL);
4067 return;
4068 }
4069
4070 // Create the "full" mask - this defines all of the processors that we
4071 // consider to be in the machine model. If respect is set, then it is the
4072 // initialization thread's affinity mask. Otherwise, it is all processors that
4073 // we know about on the machine.
4074 if (__kmp_affin_fullMask == NULL) {
4075 KMP_CPU_ALLOC(__kmp_affin_fullMask);
4076 }
4077 if (KMP_AFFINITY_CAPABLE()) {
4078 __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
4079 if (__kmp_affinity_respect_mask) {
4080 // Count the number of available processors.
4081 unsigned i;
4082 __kmp_avail_proc = 0;
4083 KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
4084 if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
4085 continue;
4086 }
4087 __kmp_avail_proc++;
4088 }
4089 if (__kmp_avail_proc > __kmp_xproc) {
4090 if (__kmp_affinity_verbose ||
4091 (__kmp_affinity_warnings &&
4092 (__kmp_affinity_type != affinity_none))) {
4093 KMP_WARNING(ErrorInitializeAffinity);
4094 }
4095 __kmp_affinity_type = affinity_none;
4096 KMP_AFFINITY_DISABLE();
4097 return;
4098 }
4099
4100 if (__kmp_affinity_verbose) {
4101 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4102 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4103 __kmp_affin_fullMask);
4104 KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
4105 }
4106 } else {
4107 if (__kmp_affinity_verbose) {
4108 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4109 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4110 __kmp_affin_fullMask);
4111 KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
4112 }
4113 __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
4114 __kmp_avail_proc = __kmp_xproc;
4115 #if KMP_OS_WINDOWS
4116 // Set the process affinity mask since threads' affinity
4117 // masks must be subset of process mask in Windows* OS
4118 __kmp_affin_fullMask->set_process_affinity(true);
4119 #endif
4120 }
4121 }
4122
4123 if (__kmp_affinity_gran == affinity_gran_tile &&
4124 // check if user's request is valid
4125 __kmp_affinity_dispatch->get_api_type() == KMPAffinity::NATIVE_OS) {
4126 KMP_WARNING(AffTilesNoHWLOC, "KMP_AFFINITY");
4127 __kmp_affinity_gran = affinity_gran_package;
4128 }
4129
4130 int depth = -1;
4131 kmp_i18n_id_t msg_id = kmp_i18n_null;
4132
4133 // For backward compatibility, setting KMP_CPUINFO_FILE =>
4134 // KMP_TOPOLOGY_METHOD=cpuinfo
4135 if ((__kmp_cpuinfo_file != NULL) &&
4136 (__kmp_affinity_top_method == affinity_top_method_all)) {
4137 __kmp_affinity_top_method = affinity_top_method_cpuinfo;
4138 }
4139
4140 if (__kmp_affinity_top_method == affinity_top_method_all) {
4141 // In the default code path, errors are not fatal - we just try using
4142 // another method. We only emit a warning message if affinity is on, or the
4143 // verbose flag is set, and the nowarnings flag was not set.
4144 const char *file_name = NULL;
4145 int line = 0;
4146 #if KMP_USE_HWLOC
4147 if (depth < 0 &&
4148 __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
4149 if (__kmp_affinity_verbose) {
4150 KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
4151 }
4152 if (!__kmp_hwloc_error) {
4153 depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
4154 if (depth == 0) {
4155 KMP_EXIT_AFF_NONE;
4156 } else if (depth < 0 && __kmp_affinity_verbose) {
4157 KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4158 }
4159 } else if (__kmp_affinity_verbose) {
4160 KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4161 }
4162 }
4163 #endif
4164
4165 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4166
4167 if (depth < 0) {
4168 if (__kmp_affinity_verbose) {
4169 KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
4170 }
4171
4172 file_name = NULL;
4173 depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
4174 if (depth == 0) {
4175 KMP_EXIT_AFF_NONE;
4176 }
4177
4178 if (depth < 0) {
4179 if (__kmp_affinity_verbose) {
4180 if (msg_id != kmp_i18n_null) {
4181 KMP_INFORM(AffInfoStrStr, "KMP_AFFINITY",
4182 __kmp_i18n_catgets(msg_id),
4183 KMP_I18N_STR(DecodingLegacyAPIC));
4184 } else {
4185 KMP_INFORM(AffInfoStr, "KMP_AFFINITY",
4186 KMP_I18N_STR(DecodingLegacyAPIC));
4187 }
4188 }
4189
4190 file_name = NULL;
4191 depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
4192 if (depth == 0) {
4193 KMP_EXIT_AFF_NONE;
4194 }
4195 }
4196 }
4197
4198 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4199
4200 #if KMP_OS_LINUX
4201
4202 if (depth < 0) {
4203 if (__kmp_affinity_verbose) {
4204 if (msg_id != kmp_i18n_null) {
4205 KMP_INFORM(AffStrParseFilename, "KMP_AFFINITY",
4206 __kmp_i18n_catgets(msg_id), "/proc/cpuinfo");
4207 } else {
4208 KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "/proc/cpuinfo");
4209 }
4210 }
4211
4212 kmp_safe_raii_file_t f("/proc/cpuinfo", "r");
4213 depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
4214 if (depth == 0) {
4215 KMP_EXIT_AFF_NONE;
4216 }
4217 }
4218
4219 #endif /* KMP_OS_LINUX */
4220
4221 #if KMP_GROUP_AFFINITY
4222
4223 if ((depth < 0) && (__kmp_num_proc_groups > 1)) {
4224 if (__kmp_affinity_verbose) {
4225 KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
4226 }
4227
4228 depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
4229 KMP_ASSERT(depth != 0);
4230 }
4231
4232 #endif /* KMP_GROUP_AFFINITY */
4233
4234 if (depth < 0) {
4235 if (__kmp_affinity_verbose && (msg_id != kmp_i18n_null)) {
4236 if (file_name == NULL) {
4237 KMP_INFORM(UsingFlatOS, __kmp_i18n_catgets(msg_id));
4238 } else if (line == 0) {
4239 KMP_INFORM(UsingFlatOSFile, file_name, __kmp_i18n_catgets(msg_id));
4240 } else {
4241 KMP_INFORM(UsingFlatOSFileLine, file_name, line,
4242 __kmp_i18n_catgets(msg_id));
4243 }
4244 }
4245 // FIXME - print msg if msg_id = kmp_i18n_null ???
4246
4247 file_name = "";
4248 depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
4249 if (depth == 0) {
4250 KMP_EXIT_AFF_NONE;
4251 }
4252 KMP_ASSERT(depth > 0);
4253 KMP_ASSERT(address2os != NULL);
4254 }
4255 }
4256
4257 #if KMP_USE_HWLOC
4258 else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
4259 KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
4260 if (__kmp_affinity_verbose) {
4261 KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
4262 }
4263 depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
4264 if (depth == 0) {
4265 KMP_EXIT_AFF_NONE;
4266 }
4267 }
4268 #endif // KMP_USE_HWLOC
4269
4270 // If the user has specified that a particular topology discovery method is to be
4271 // used, then we abort if that method fails. The exception is group affinity,
4272 // which might have been implicitly set.
4273
4274 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4275
4276 else if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
4277 if (__kmp_affinity_verbose) {
4278 KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
4279 }
4280
4281 depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
4282 if (depth == 0) {
4283 KMP_EXIT_AFF_NONE;
4284 }
4285 if (depth < 0) {
4286 KMP_ASSERT(msg_id != kmp_i18n_null);
4287 KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4288 }
4289 } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
4290 if (__kmp_affinity_verbose) {
4291 KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
4292 }
4293
4294 depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
4295 if (depth == 0) {
4296 KMP_EXIT_AFF_NONE;
4297 }
4298 if (depth < 0) {
4299 KMP_ASSERT(msg_id != kmp_i18n_null);
4300 KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4301 }
4302 }
4303
4304 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4305
4306 else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
4307 const char *filename;
4308 const char *env_var = nullptr;
4309 if (__kmp_cpuinfo_file != NULL) {
4310 filename = __kmp_cpuinfo_file;
4311 env_var = "KMP_CPUINFO_FILE";
4312 } else {
4313 filename = "/proc/cpuinfo";
4314 }
4315
4316 if (__kmp_affinity_verbose) {
4317 KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
4318 }
4319
4320 kmp_safe_raii_file_t f(filename, "r", env_var);
4321 int line = 0;
4322 depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
4323 if (depth < 0) {
4324 KMP_ASSERT(msg_id != kmp_i18n_null);
4325 if (line > 0) {
4326 KMP_FATAL(FileLineMsgExiting, filename, line,
4327 __kmp_i18n_catgets(msg_id));
4328 } else {
4329 KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
4330 }
4331 }
4332 if (__kmp_affinity_type == affinity_none) {
4333 KMP_ASSERT(depth == 0);
4334 KMP_EXIT_AFF_NONE;
4335 }
4336 }
4337
4338 #if KMP_GROUP_AFFINITY
4339
4340 else if (__kmp_affinity_top_method == affinity_top_method_group) {
4341 if (__kmp_affinity_verbose) {
4342 KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
4343 }
4344
4345 depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
4346 KMP_ASSERT(depth != 0);
4347 if (depth < 0) {
4348 KMP_ASSERT(msg_id != kmp_i18n_null);
4349 KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4350 }
4351 }
4352
4353 #endif /* KMP_GROUP_AFFINITY */
4354
4355 else if (__kmp_affinity_top_method == affinity_top_method_flat) {
4356 if (__kmp_affinity_verbose) {
4357 KMP_INFORM(AffUsingFlatOS, "KMP_AFFINITY");
4358 }
4359
4360 depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
4361 if (depth == 0) {
4362 KMP_EXIT_AFF_NONE;
4363 }
4364 // should not fail
4365 KMP_ASSERT(depth > 0);
4366 KMP_ASSERT(address2os != NULL);
4367 }
4368
4369 #if KMP_USE_HIER_SCHED
4370 __kmp_dispatch_set_hierarchy_values();
4371 #endif
4372
4373 if (address2os == NULL) {
4374 if (KMP_AFFINITY_CAPABLE() &&
4375 (__kmp_affinity_verbose ||
4376 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none)))) {
4377 KMP_WARNING(ErrorInitializeAffinity);
4378 }
4379 __kmp_affinity_type = affinity_none;
4380 __kmp_create_affinity_none_places();
4381 KMP_AFFINITY_DISABLE();
4382 return;
4383 }
4384
4385 if (__kmp_affinity_gran == affinity_gran_tile
4386 #if KMP_USE_HWLOC
4387 && __kmp_tile_depth == 0
4388 #endif
4389 ) {
4390 // tiles were requested but none detected; warn the user
4391 KMP_WARNING(AffTilesNoTiles, "KMP_AFFINITY");
4392 }
4393
4394 __kmp_apply_thread_places(&address2os, depth);
4395
4396 // Create the table of masks, indexed by thread Id.
4397 unsigned maxIndex;
4398 unsigned numUnique;
4399 kmp_affin_mask_t *osId2Mask =
4400 __kmp_create_masks(&maxIndex, &numUnique, address2os, __kmp_avail_proc);
4401 if (__kmp_affinity_gran_levels == 0) {
4402 KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
4403 }
4404
4405 // Set the childNums vector in all Address objects. This must be done before
4406 // we can sort using __kmp_affinity_cmp_Address_child_num(), which takes into
4407 // account the setting of __kmp_affinity_compact.
4408 __kmp_affinity_assign_child_nums(address2os, __kmp_avail_proc);
4409
4410 switch (__kmp_affinity_type) {
4411
4412 case affinity_explicit:
4413 KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
4414 if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
4415 __kmp_affinity_process_proclist(
4416 &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4417 __kmp_affinity_proclist, osId2Mask, maxIndex);
4418 } else {
4419 __kmp_affinity_process_placelist(
4420 &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4421 __kmp_affinity_proclist, osId2Mask, maxIndex);
4422 }
4423 if (__kmp_affinity_num_masks == 0) {
4424 if (__kmp_affinity_verbose ||
4425 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
4426 KMP_WARNING(AffNoValidProcID);
4427 }
4428 __kmp_affinity_type = affinity_none;
4429 __kmp_create_affinity_none_places();
4430 return;
4431 }
4432 break;
4433
4434 // The other affinity types rely on sorting the Addresses according to some
4435 // permutation of the machine topology tree. Set __kmp_affinity_compact and
4436 // __kmp_affinity_offset appropriately, then jump to a common code fragment
4437 // to do the sort and create the array of affinity masks.
4438
4439 case affinity_logical:
4440 __kmp_affinity_compact = 0;
4441 if (__kmp_affinity_offset) {
4442 __kmp_affinity_offset =
4443 __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4444 }
4445 goto sortAddresses;
4446
4447 case affinity_physical:
4448 if (__kmp_nThreadsPerCore > 1) {
4449 __kmp_affinity_compact = 1;
4450 if (__kmp_affinity_compact >= depth) {
4451 __kmp_affinity_compact = 0;
4452 }
4453 } else {
4454 __kmp_affinity_compact = 0;
4455 }
4456 if (__kmp_affinity_offset) {
4457 __kmp_affinity_offset =
4458 __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4459 }
4460 goto sortAddresses;
4461
4462 case affinity_scatter:
4463 if (__kmp_affinity_compact >= depth) {
4464 __kmp_affinity_compact = 0;
4465 } else {
4466 __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
4467 }
4468 goto sortAddresses;
4469
4470 case affinity_compact:
4471 if (__kmp_affinity_compact >= depth) {
4472 __kmp_affinity_compact = depth - 1;
4473 }
4474 goto sortAddresses;
4475
4476 case affinity_balanced:
4477 if (depth <= 1) {
4478 if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4479 KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4480 }
4481 __kmp_affinity_type = affinity_none;
4482 __kmp_create_affinity_none_places();
4483 return;
4484 } else if (!__kmp_affinity_uniform_topology()) {
4485 // Save the depth for further usage
4486 __kmp_aff_depth = depth;
4487
4488 int core_level = __kmp_affinity_find_core_level(
4489 address2os, __kmp_avail_proc, depth - 1);
4490 int ncores = __kmp_affinity_compute_ncores(address2os, __kmp_avail_proc,
4491 depth - 1, core_level);
4492 int maxprocpercore = __kmp_affinity_max_proc_per_core(
4493 address2os, __kmp_avail_proc, depth - 1, core_level);
4494
4495 int nproc = ncores * maxprocpercore;
4496 if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
4497 if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4498 KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4499 }
4500 __kmp_affinity_type = affinity_none;
4501 return;
4502 }
4503
4504 procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
4505 for (int i = 0; i < nproc; i++) {
4506 procarr[i] = -1;
4507 }
4508
4509 int lastcore = -1;
4510 int inlastcore = 0;
4511 for (int i = 0; i < __kmp_avail_proc; i++) {
4512 int proc = address2os[i].second;
4513 int core =
4514 __kmp_affinity_find_core(address2os, i, depth - 1, core_level);
4515
4516 if (core == lastcore) {
4517 inlastcore++;
4518 } else {
4519 inlastcore = 0;
4520 }
4521 lastcore = core;
4522
4523 procarr[core * maxprocpercore + inlastcore] = proc;
4524 }
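// procarr layout sketch (hypothetical ncores == 3, maxprocpercore == 4):
// procarr is a 3x4 row-major rectangle, procarr[core * 4 + slot] holding an
// OS proc id or -1 where a core has fewer than 4 PUs; the balanced-affinity
// code later walks these rows to spread threads across cores.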
4525 }
4526 if (__kmp_affinity_compact >= depth) {
4527 __kmp_affinity_compact = depth - 1;
4528 }
4529
4530 sortAddresses:
4531 // Allocate the gtid->affinity mask table.
4532 if (__kmp_affinity_dups) {
4533 __kmp_affinity_num_masks = __kmp_avail_proc;
4534 } else {
4535 __kmp_affinity_num_masks = numUnique;
4536 }
4537
4538 if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
4539 (__kmp_affinity_num_places > 0) &&
4540 ((unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks)) {
4541 __kmp_affinity_num_masks = __kmp_affinity_num_places;
4542 }
4543
4544 KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4545
4546 // Sort the address2os table according to the current setting of
4547 // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
4548 qsort(address2os, __kmp_avail_proc, sizeof(*address2os),
4549 __kmp_affinity_cmp_Address_child_num);
4550 {
4551 int i;
4552 unsigned j;
4553 for (i = 0, j = 0; i < __kmp_avail_proc; i++) {
4554 if ((!__kmp_affinity_dups) && (!address2os[i].first.leader)) {
4555 continue;
4556 }
4557 unsigned osId = address2os[i].second;
4558 kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
4559 kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, j);
4560 KMP_ASSERT(KMP_CPU_ISSET(osId, src));
4561 KMP_CPU_COPY(dest, src);
4562 if (++j >= __kmp_affinity_num_masks) {
4563 break;
4564 }
4565 }
4566 KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
4567 }
4568 break;
4569
4570 default:
4571 KMP_ASSERT2(0, "Unexpected affinity setting");
4572 }
4573
4574 KMP_CPU_FREE_ARRAY(osId2Mask, maxIndex + 1);
4575 machine_hierarchy.init(address2os, __kmp_avail_proc);
4576 }
4577 #undef KMP_EXIT_AFF_NONE
4578
4579 void __kmp_affinity_initialize(void) {
4580 // Much of the code above was written assuming that if a machine was not
4581 // affinity capable, then __kmp_affinity_type == affinity_none. We now
4582 // explicitly represent this as __kmp_affinity_type == affinity_disabled.
4583 // There are too many checks for __kmp_affinity_type == affinity_none
4584 // in this code. Instead of trying to change them all, check if
4585 // __kmp_affinity_type == affinity_disabled, and if so, slam it with
4586 // affinity_none, call the real initialization routine, then restore
4587 // __kmp_affinity_type to affinity_disabled.
4588 int disabled = (__kmp_affinity_type == affinity_disabled);
4589 if (!KMP_AFFINITY_CAPABLE()) {
4590 KMP_ASSERT(disabled);
4591 }
4592 if (disabled) {
4593 __kmp_affinity_type = affinity_none;
4594 }
4595 __kmp_aux_affinity_initialize();
4596 if (disabled) {
4597 __kmp_affinity_type = affinity_disabled;
4598 }
4599 }
4600
4601 void __kmp_affinity_uninitialize(void) {
4602 if (__kmp_affinity_masks != NULL) {
4603 KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4604 __kmp_affinity_masks = NULL;
4605 }
4606 if (__kmp_affin_fullMask != NULL) {
4607 KMP_CPU_FREE(__kmp_affin_fullMask);
4608 __kmp_affin_fullMask = NULL;
4609 }
4610 __kmp_affinity_num_masks = 0;
4611 __kmp_affinity_type = affinity_default;
4612 __kmp_affinity_num_places = 0;
4613 if (__kmp_affinity_proclist != NULL) {
4614 __kmp_free(__kmp_affinity_proclist);
4615 __kmp_affinity_proclist = NULL;
4616 }
4617 if (address2os != NULL) {
4618 __kmp_free(address2os);
4619 address2os = NULL;
4620 }
4621 if (procarr != NULL) {
4622 __kmp_free(procarr);
4623 procarr = NULL;
4624 }
4625 #if KMP_USE_HWLOC
4626 if (__kmp_hwloc_topology != NULL) {
4627 hwloc_topology_destroy(__kmp_hwloc_topology);
4628 __kmp_hwloc_topology = NULL;
4629 }
4630 #endif
4631 KMPAffinity::destroy_api();
4632 }
4633
4634 void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
4635 if (!KMP_AFFINITY_CAPABLE()) {
4636 return;
4637 }
4638
4639 kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4640 if (th->th.th_affin_mask == NULL) {
4641 KMP_CPU_ALLOC(th->th.th_affin_mask);
4642 } else {
4643 KMP_CPU_ZERO(th->th.th_affin_mask);
4644 }
4645
4646 // Copy the thread mask to the kmp_info_t structure. If
4647 // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one that
4648 // has all of the OS proc ids set; when __kmp_affinity_respect_mask is set,
4649 // that full mask is simply the mask of the initialization thread.
4650 kmp_affin_mask_t *mask;
4651 int i;
4652
4653 if (KMP_AFFINITY_NON_PROC_BIND) {
4654 if ((__kmp_affinity_type == affinity_none) ||
4655 (__kmp_affinity_type == affinity_balanced)) {
4656 #if KMP_GROUP_AFFINITY
4657 if (__kmp_num_proc_groups > 1) {
4658 return;
4659 }
4660 #endif
4661 KMP_ASSERT(__kmp_affin_fullMask != NULL);
4662 i = 0;
4663 mask = __kmp_affin_fullMask;
4664 } else {
4665 KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4666 i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4667 mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4668 }
4669 } else {
4670 if ((!isa_root) ||
4671 (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
4672 #if KMP_GROUP_AFFINITY
4673 if (__kmp_num_proc_groups > 1) {
4674 return;
4675 }
4676 #endif
4677 KMP_ASSERT(__kmp_affin_fullMask != NULL);
4678 i = KMP_PLACE_ALL;
4679 mask = __kmp_affin_fullMask;
4680 } else {
4681 // int i = some hash function or just a counter that doesn't
4682 // always start at 0. Use gtid for now.
4683 KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4684 i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4685 mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4686 }
4687 }
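// Example of the place selection above (hypothetical
// __kmp_affinity_num_masks == 4, __kmp_affinity_offset == 1): gtids 0,1,2,3
// are assigned places 1,2,3,0 -- a plain modular rotation, not a load-aware
// placement.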
4688
4689 th->th.th_current_place = i;
4690 if (isa_root) {
4691 th->th.th_new_place = i;
4692 th->th.th_first_place = 0;
4693 th->th.th_last_place = __kmp_affinity_num_masks - 1;
4694 } else if (KMP_AFFINITY_NON_PROC_BIND) {
4695 // When using a Non-OMP_PROC_BIND affinity method,
4696 // set all threads' place-partition-var to the entire place list
4697 th->th.th_first_place = 0;
4698 th->th.th_last_place = __kmp_affinity_num_masks - 1;
4699 }
4700
4701 if (i == KMP_PLACE_ALL) {
4702 KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
4703 gtid));
4704 } else {
4705 KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
4706 gtid, i));
4707 }
4708
4709 KMP_CPU_COPY(th->th.th_affin_mask, mask);
4710
4711 if (__kmp_affinity_verbose
4712 /* to avoid duplicate printing (will be correctly printed on barrier) */
4713 && (__kmp_affinity_type == affinity_none ||
4714 (i != KMP_PLACE_ALL && __kmp_affinity_type != affinity_balanced))) {
4715 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4716 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4717 th->th.th_affin_mask);
4718 KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
4719 __kmp_gettid(), gtid, buf);
4720 }
4721
4722 #if KMP_OS_WINDOWS
4723 // On Windows* OS, the process affinity mask might have changed. If the user
4724 // didn't request affinity and this call fails, just continue silently.
4725 // See CQ171393.
4726 if (__kmp_affinity_type == affinity_none) {
4727 __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
4728 } else
4729 #endif
4730 __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4731 }
4732
4733 void __kmp_affinity_set_place(int gtid) {
4734 if (!KMP_AFFINITY_CAPABLE()) {
4735 return;
4736 }
4737
4738 kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4739
4740 KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
4741 "place = %d)\n",
4742 gtid, th->th.th_new_place, th->th.th_current_place));
4743
4744 // Check that the new place is within this thread's partition.
4745 KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4746 KMP_ASSERT(th->th.th_new_place >= 0);
4747 KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
4748 if (th->th.th_first_place <= th->th.th_last_place) {
4749 KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
4750 (th->th.th_new_place <= th->th.th_last_place));
4751 } else {
4752 KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
4753 (th->th.th_new_place >= th->th.th_last_place));
4754 }
4755
4756 // Copy the thread mask to the kmp_info_t structure,
4757 // and set this thread's affinity.
4758 kmp_affin_mask_t *mask =
4759 KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
4760 KMP_CPU_COPY(th->th.th_affin_mask, mask);
4761 th->th.th_current_place = th->th.th_new_place;
4762
4763 if (__kmp_affinity_verbose) {
4764 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4765 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4766 th->th.th_affin_mask);
4767 KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
4768 __kmp_gettid(), gtid, buf);
4769 }
4770 __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4771 }
4772
4773 int __kmp_aux_set_affinity(void **mask) {
4774 int gtid;
4775 kmp_info_t *th;
4776 int retval;
4777
4778 if (!KMP_AFFINITY_CAPABLE()) {
4779 return -1;
4780 }
4781
4782 gtid = __kmp_entry_gtid();
4783 KA_TRACE(1000, (""); {
4784 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4785 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4786 (kmp_affin_mask_t *)(*mask));
4787 __kmp_debug_printf(
4788 "kmp_set_affinity: setting affinity mask for thread %d = %s\n", gtid,
4789 buf);
4790 });
4791
4792 if (__kmp_env_consistency_check) {
4793 if ((mask == NULL) || (*mask == NULL)) {
4794 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4795 } else {
4796 unsigned proc;
4797 int num_procs = 0;
4798
4799 KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
4800 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4801 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4802 }
4803 if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
4804 continue;
4805 }
4806 num_procs++;
4807 }
4808 if (num_procs == 0) {
4809 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4810 }
4811
4812 #if KMP_GROUP_AFFINITY
4813 if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
4814 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4815 }
4816 #endif /* KMP_GROUP_AFFINITY */
4817 }
4818 }
4819
4820 th = __kmp_threads[gtid];
4821 KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4822 retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4823 if (retval == 0) {
4824 KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
4825 }
4826
4827 th->th.th_current_place = KMP_PLACE_UNDEFINED;
4828 th->th.th_new_place = KMP_PLACE_UNDEFINED;
4829 th->th.th_first_place = 0;
4830 th->th.th_last_place = __kmp_affinity_num_masks - 1;
4831
4832 // Turn off 4.0 affinity for the current thread at this parallel level.
4833 th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
4834
4835 return retval;
4836 }
4837
4838 int __kmp_aux_get_affinity(void **mask) {
4839 int gtid;
4840 int retval;
4841 kmp_info_t *th;
4842
4843 if (!KMP_AFFINITY_CAPABLE()) {
4844 return -1;
4845 }
4846
4847 gtid = __kmp_entry_gtid();
4848 th = __kmp_threads[gtid];
4849 KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4850
4851 KA_TRACE(1000, (""); {
4852 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4853 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4854 th->th.th_affin_mask);
4855 __kmp_printf("kmp_get_affinity: stored affinity mask for thread %d = %s\n",
4856 gtid, buf);
4857 });
4858
4859 if (__kmp_env_consistency_check) {
4860 if ((mask == NULL) || (*mask == NULL)) {
4861 KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
4862 }
4863 }
4864
4865 #if !KMP_OS_WINDOWS
4866
4867 retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4868 KA_TRACE(1000, (""); {
4869 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4870 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4871 (kmp_affin_mask_t *)(*mask));
4872 __kmp_printf("kmp_get_affinity: system affinity mask for thread %d = %s\n",
4873 gtid, buf);
4874 });
4875 return retval;
4876
4877 #else
4878
4879 KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
4880 return 0;
4881
4882 #endif /* KMP_OS_WINDOWS */
4883 }
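// Usage sketch (illustrative): reading the mask back via the
// kmp_get_affinity() wrapper from omp.h. On non-Windows systems this queries
// the live system affinity; on Windows it returns the copy stored in
// th_affin_mask.
//
//   kmp_affinity_mask_t mask;
//   kmp_create_affinity_mask(&mask);
//   if (kmp_get_affinity(&mask) == 0) {
//     // inspect the mask with kmp_get_affinity_mask_proc()
//   }
//   kmp_destroy_affinity_mask(&mask);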
4884
4885 int __kmp_aux_get_affinity_max_proc() {
4886 if (!KMP_AFFINITY_CAPABLE()) {
4887 return 0;
4888 }
4889 #if KMP_GROUP_AFFINITY
4890 if (__kmp_num_proc_groups > 1) {
4891 return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
4892 }
4893 #endif
4894 return __kmp_xproc;
4895 }
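// Worked example (hypothetical values): on Windows with two processor groups
// and a 64-bit DWORD_PTR, the limit is 2 * 8 * 8 = 128, i.e. proc ids 0..127
// are addressable; everywhere else the limit is simply __kmp_xproc.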
4896
4897 int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
4898 if (!KMP_AFFINITY_CAPABLE()) {
4899 return -1;
4900 }
4901
4902 KA_TRACE(1000, (""); {
4903 int gtid = __kmp_entry_gtid();
4904 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4905 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4906 (kmp_affin_mask_t *)(*mask));
4907 __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
4908 "affinity mask for thread %d = %s\n",
4909 proc, gtid, buf);
4910 });
4911
4912 if (__kmp_env_consistency_check) {
4913 if ((mask == NULL) || (*mask == NULL)) {
4914 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
4915 }
4916 }
4917
4918 if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4919 return -1;
4920 }
4921 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4922 return -2;
4923 }
4924
4925 KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
4926 return 0;
4927 }
4928
4929 int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
4930 if (!KMP_AFFINITY_CAPABLE()) {
4931 return -1;
4932 }
4933
4934 KA_TRACE(1000, (""); {
4935 int gtid = __kmp_entry_gtid();
4936 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4937 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4938 (kmp_affin_mask_t *)(*mask));
4939 __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
4940 "affinity mask for thread %d = %s\n",
4941 proc, gtid, buf);
4942 });
4943
4944 if (__kmp_env_consistency_check) {
4945 if ((mask == NULL) || (*mask == NULL)) {
4946 KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
4947 }
4948 }
4949
4950 if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4951 return -1;
4952 }
4953 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4954 return -2;
4955 }
4956
4957 KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
4958 return 0;
4959 }
4960
4961 int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
4962 if (!KMP_AFFINITY_CAPABLE()) {
4963 return -1;
4964 }
4965
4966 KA_TRACE(1000, (""); {
4967 int gtid = __kmp_entry_gtid();
4968 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4969 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4970 (kmp_affin_mask_t *)(*mask));
4971 __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
4972 "affinity mask for thread %d = %s\n",
4973 proc, gtid, buf);
4974 });
4975
4976 if (__kmp_env_consistency_check) {
4977 if ((mask == NULL) || (*mask == NULL)) {
4978 KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
4979 }
4980 }
4981
4982 if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4983 return -1;
4984 }
4985 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4986 return 0;
4987 }
4988
4989 return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
4990 }
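// Usage sketch (illustrative): the three *_mask_proc entry points are
// normally driven together through their omp.h wrappers, e.g. to build a
// mask covering every available processor one id at a time. Assumes <assert.h>.
//
//   kmp_affinity_mask_t mask;
//   kmp_create_affinity_mask(&mask);
//   int max_proc = kmp_get_affinity_max_proc();
//   for (int p = 0; p < max_proc; ++p) {
//     // set returns -2 and get returns 0 for procs outside the full mask
//     if (kmp_set_affinity_mask_proc(p, &mask) == 0)
//       assert(kmp_get_affinity_mask_proc(p, &mask) == 1);
//   }
//   kmp_destroy_affinity_mask(&mask);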
4991
4992 // Dynamic affinity settings - Affinity balanced
4993 void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
4994 KMP_DEBUG_ASSERT(th);
4995 bool fine_gran = true;
4996 int tid = th->th.th_info.ds.ds_tid;
4997
4998 switch (__kmp_affinity_gran) {
4999 case affinity_gran_fine:
5000 case affinity_gran_thread:
5001 break;
5002 case affinity_gran_core:
5003 if (__kmp_nThreadsPerCore > 1) {
5004 fine_gran = false;
5005 }
5006 break;
5007 case affinity_gran_package:
5008 if (nCoresPerPkg > 1) {
5009 fine_gran = false;
5010 }
5011 break;
5012 default:
5013 fine_gran = false;
5014 }
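// At this point fine_gran == true means each thread will be pinned to a
// single hardware context; false widens each thread's mask to the whole
// core or package implied by __kmp_affinity_gran.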
5015
5016 if (__kmp_affinity_uniform_topology()) {
5017 int coreID;
5018 int threadID;
5019 // Number of hyper-threads per core on an HT machine
5020 int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
5021 // Number of cores
5022 int ncores = __kmp_ncores;
5023 if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
5024 __kmp_nth_per_core = __kmp_avail_proc / nPackages;
5025 ncores = nPackages;
5026 }
5027 // How many threads will be bound to each core
5028 int chunk = nthreads / ncores;
5029 // How many cores will have an additional thread bound to them - the "big" cores
5030 int big_cores = nthreads % ncores;
5031 // Number of threads on the big cores
5032 int big_nth = (chunk + 1) * big_cores;
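// Worked example: nthreads = 10 on ncores = 4 gives chunk = 2, big_cores = 2,
// big_nth = 6, so tids 0-5 land on "big" cores 0-1 (3 threads each) and
// tids 6-9 on cores 2-3 (2 threads each).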
5033 if (tid < big_nth) {
5034 coreID = tid / (chunk + 1);
5035 threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
5036 } else { // tid >= big_nth
5037 coreID = (tid - big_cores) / chunk;
5038 threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
5039 }
5040
5041 KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
5042 "Illegal set affinity operation when not capable");
5043
5044 kmp_affin_mask_t *mask = th->th.th_affin_mask;
5045 KMP_CPU_ZERO(mask);
5046
5047 if (fine_gran) {
5048 int osID = address2os[coreID * __kmp_nth_per_core + threadID].second;
5049 KMP_CPU_SET(osID, mask);
5050 } else {
5051 for (int i = 0; i < __kmp_nth_per_core; i++) {
5052 int osID;
5053 osID = address2os[coreID * __kmp_nth_per_core + i].second;
5054 KMP_CPU_SET(osID, mask);
5055 }
5056 }
5057 if (__kmp_affinity_verbose) {
5058 char buf[KMP_AFFIN_MASK_PRINT_LEN];
5059 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5060 KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
5061 __kmp_gettid(), tid, buf);
5062 }
5063 __kmp_set_system_affinity(mask, TRUE);
5064 } else { // Non-uniform topology
5065
5066 kmp_affin_mask_t *mask = th->th.th_affin_mask;
5067 KMP_CPU_ZERO(mask);
5068
5069 int core_level = __kmp_affinity_find_core_level(
5070 address2os, __kmp_avail_proc, __kmp_aff_depth - 1);
5071 int ncores = __kmp_affinity_compute_ncores(address2os, __kmp_avail_proc,
5072 __kmp_aff_depth - 1, core_level);
5073 int nth_per_core = __kmp_affinity_max_proc_per_core(
5074 address2os, __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
5075
5076 // As a performance optimization, handle the special case nthreads ==
5077 // __kmp_avail_proc separately
5078 if (nthreads == __kmp_avail_proc) {
5079 if (fine_gran) {
5080 int osID = address2os[tid].second;
5081 KMP_CPU_SET(osID, mask);
5082 } else {
5083 int core = __kmp_affinity_find_core(address2os, tid,
5084 __kmp_aff_depth - 1, core_level);
5085 for (int i = 0; i < __kmp_avail_proc; i++) {
5086 int osID = address2os[i].second;
5087 if (__kmp_affinity_find_core(address2os, i, __kmp_aff_depth - 1,
5088 core_level) == core) {
5089 KMP_CPU_SET(osID, mask);
5090 }
5091 }
5092 }
5093 } else if (nthreads <= ncores) {
5094
5095 int core = 0;
5096 for (int i = 0; i < ncores; i++) {
5097 // Check whether this core from procarr[] has any available processors
5098 int in_mask = 0;
5099 for (int j = 0; j < nth_per_core; j++) {
5100 if (procarr[i * nth_per_core + j] != -1) {
5101 in_mask = 1;
5102 break;
5103 }
5104 }
5105 if (in_mask) {
5106 if (tid == core) {
5107 for (int j = 0; j < nth_per_core; j++) {
5108 int osID = procarr[i * nth_per_core + j];
5109 if (osID != -1) {
5110 KMP_CPU_SET(osID, mask);
5111 // For fine granularity it is enough to set the first available
5112 // osID for this core
5113 if (fine_gran) {
5114 break;
5115 }
5116 }
5117 }
5118 break;
5119 } else {
5120 core++;
5121 }
5122 }
5123 }
5124 } else { // nthreads > ncores
5125 // Array to save the number of processors at each core
5126 int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
5127 // Array to save the number of cores with "x" available processors
5128 int *ncores_with_x_procs =
5129 (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5130 // Array to save the number of cores with at least "x" (up to nth_per_core) procs
5131 int *ncores_with_x_to_max_procs =
5132 (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5133
5134 for (int i = 0; i <= nth_per_core; i++) {
5135 ncores_with_x_procs[i] = 0;
5136 ncores_with_x_to_max_procs[i] = 0;
5137 }
5138
5139 for (int i = 0; i < ncores; i++) {
5140 int cnt = 0;
5141 for (int j = 0; j < nth_per_core; j++) {
5142 if (procarr[i * nth_per_core + j] != -1) {
5143 cnt++;
5144 }
5145 }
5146 nproc_at_core[i] = cnt;
5147 ncores_with_x_procs[cnt]++;
5148 }
5149
5150 for (int i = 0; i <= nth_per_core; i++) {
5151 for (int j = i; j <= nth_per_core; j++) {
5152 ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
5153 }
5154 }
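// e.g., with nth_per_core = 2 and ncores_with_x_procs = {1, 2, 3} (one core
// with no available contexts, two with one, three with two), the suffix sums
// give ncores_with_x_to_max_procs = {6, 5, 3}.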
5155
5156 // Max number of processor contexts (nth_per_core slots on each of ncores cores)
5157 int nproc = nth_per_core * ncores;
5158 // An array to keep the number of threads per context
5159 int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
5160 for (int i = 0; i < nproc; i++) {
5161 newarr[i] = 0;
5162 }
5163
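// Distribution sketch: the first sweep (flag == 0) fills still-empty contexts
// breadth-first across the cores, one context per core per pass over j;
// later sweeps (flag != 0) stack additional threads onto already-used
// contexts. E.g., 2 cores x 2 available contexts with nthreads == 5 ends
// with newarr = {2, 1, 1, 1}.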
5164 int nth = nthreads;
5165 int flag = 0;
5166 while (nth > 0) {
5167 for (int j = 1; j <= nth_per_core; j++) {
5168 int cnt = ncores_with_x_to_max_procs[j];
5169 for (int i = 0; i < ncores; i++) {
5170 // Skip cores that have no available processors
5171 if (nproc_at_core[i] == 0) {
5172 continue;
5173 }
5174 for (int k = 0; k < nth_per_core; k++) {
5175 if (procarr[i * nth_per_core + k] != -1) {
5176 if (newarr[i * nth_per_core + k] == 0) {
5177 newarr[i * nth_per_core + k] = 1;
5178 cnt--;
5179 nth--;
5180 break;
5181 } else {
5182 if (flag != 0) {
5183 newarr[i * nth_per_core + k]++;
5184 cnt--;
5185 nth--;
5186 break;
5187 }
5188 }
5189 }
5190 }
5191 if (cnt == 0 || nth == 0) {
5192 break;
5193 }
5194 }
5195 if (nth == 0) {
5196 break;
5197 }
5198 }
5199 flag = 1;
5200 }
5201 int sum = 0;
5202 for (int i = 0; i < nproc; i++) {
5203 sum += newarr[i];
5204 if (sum > tid) {
5205 if (fine_gran) {
5206 int osID = procarr[i];
5207 KMP_CPU_SET(osID, mask);
5208 } else {
5209 int coreID = i / nth_per_core;
5210 for (int ii = 0; ii < nth_per_core; ii++) {
5211 int osID = procarr[coreID * nth_per_core + ii];
5212 if (osID != -1) {
5213 KMP_CPU_SET(osID, mask);
5214 }
5215 }
5216 }
5217 break;
5218 }
5219 }
5220 __kmp_free(newarr);
5221 }
5222
5223 if (__kmp_affinity_verbose) {
5224 char buf[KMP_AFFIN_MASK_PRINT_LEN];
5225 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5226 KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
5227 __kmp_gettid(), tid, buf);
5228 }
5229 __kmp_set_system_affinity(mask, TRUE);
5230 }
5231 }
5232
5233 #if KMP_OS_LINUX || KMP_OS_FREEBSD
5234 // We do not need this entry point on Windows because the
5235 // GetProcessAffinityMask() API is available there
5236 //
5237 // The intended usage is indicated by these steps:
5238 // 1) The user gets the current affinity mask
5239 // 2) Then sets the affinity by calling this function
5240 // 3) Error check the return value
5241 // 4) Use non-OpenMP parallelization
5242 // 5) Reset the affinity to what was stored in step 1)
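// A minimal sketch of those steps on Linux (illustrative; assumes the
// standard sched_getaffinity()/sched_setaffinity() wrappers, and
// run_non_openmp_parallel_work() stands in for arbitrary user code):
//
//   cpu_set_t saved;
//   sched_getaffinity(0, sizeof(saved), &saved);        // step 1
//   if (kmp_set_thread_affinity_mask_initial() == 0) {  // steps 2-3
//     run_non_openmp_parallel_work();                   // step 4
//   }
//   sched_setaffinity(0, sizeof(saved), &saved);        // step 5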
5243 #ifdef __cplusplus
5244 extern "C"
5245 #endif
5246 int
5247 kmp_set_thread_affinity_mask_initial()
5248 // The function returns 0 on success,
5249 // -1 if we cannot bind the thread,
5250 // >0 (errno) if an error happened during binding
5251 {
5252 int gtid = __kmp_get_gtid();
5253 if (gtid < 0) {
5254 // Do not touch non-omp threads
5255 KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5256 "non-omp thread, returning\n"));
5257 return -1;
5258 }
5259 if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
5260 KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5261 "affinity not initialized, returning\n"));
5262 return -1;
5263 }
5264 KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5265 "set full mask for thread %d\n",
5266 gtid));
5267 KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
5268 return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
5269 }
5270 #endif
5271
5272 #endif // KMP_AFFINITY_SUPPORTED
5273