1 /*
2  * kmp_affinity.cpp -- affinity management
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #include "kmp_i18n.h"
16 #include "kmp_io.h"
17 #include "kmp_str.h"
18 #include "kmp_wrapper_getpid.h"
19 #if KMP_USE_HIER_SCHED
20 #include "kmp_dispatch_hier.h"
21 #endif
22 #if KMP_USE_HWLOC
23 // Copied from hwloc
24 #define HWLOC_GROUP_KIND_INTEL_MODULE 102
25 #define HWLOC_GROUP_KIND_INTEL_TILE 103
26 #define HWLOC_GROUP_KIND_INTEL_DIE 104
27 #define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
28 #endif
29 #include <ctype.h>
30 
31 // The machine topology
32 kmp_topology_t *__kmp_topology = nullptr;
33 // KMP_HW_SUBSET environment variable
34 kmp_hw_subset_t *__kmp_hw_subset = nullptr;
35 
36 // Store the real or imagined machine hierarchy here
37 static hierarchy_info machine_hierarchy;
38 
39 void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
40 
41 void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
42   kmp_uint32 depth;
43   // The test below is true if affinity is available, but set to "none". Need to
44   // init on first use of hierarchical barrier.
45   if (TCR_1(machine_hierarchy.uninitialized))
46     machine_hierarchy.init(nproc);
47 
48   // Adjust the hierarchy in case num threads exceeds original
49   if (nproc > machine_hierarchy.base_num_threads)
50     machine_hierarchy.resize(nproc);
51 
52   depth = machine_hierarchy.depth;
53   KMP_DEBUG_ASSERT(depth > 0);
54 
55   thr_bar->depth = depth;
56   __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
57                      &(thr_bar->base_leaf_kids));
58   thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
59 }
60 
61 static int nCoresPerPkg, nPackages;
62 static int __kmp_nThreadsPerCore;
63 #ifndef KMP_DFLT_NTH_CORES
64 static int __kmp_ncores;
65 #endif
66 
67 const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
68   switch (type) {
69   case KMP_HW_SOCKET:
70     return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
71   case KMP_HW_DIE:
72     return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
73   case KMP_HW_MODULE:
74     return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
75   case KMP_HW_TILE:
76     return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
77   case KMP_HW_NUMA:
78     return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
79   case KMP_HW_L3:
80     return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
81   case KMP_HW_L2:
82     return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
83   case KMP_HW_L1:
84     return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
85   case KMP_HW_LLC:
86     return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
87   case KMP_HW_CORE:
88     return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
89   case KMP_HW_THREAD:
90     return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
91   case KMP_HW_PROC_GROUP:
92     return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
93   }
94   return KMP_I18N_STR(Unknown);
95 }
96 
97 const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
98   switch (type) {
99   case KMP_HW_SOCKET:
100     return ((plural) ? "sockets" : "socket");
101   case KMP_HW_DIE:
102     return ((plural) ? "dice" : "die");
103   case KMP_HW_MODULE:
104     return ((plural) ? "modules" : "module");
105   case KMP_HW_TILE:
106     return ((plural) ? "tiles" : "tile");
107   case KMP_HW_NUMA:
108     return ((plural) ? "numa_domains" : "numa_domain");
109   case KMP_HW_L3:
110     return ((plural) ? "l3_caches" : "l3_cache");
111   case KMP_HW_L2:
112     return ((plural) ? "l2_caches" : "l2_cache");
113   case KMP_HW_L1:
114     return ((plural) ? "l1_caches" : "l1_cache");
115   case KMP_HW_LLC:
116     return ((plural) ? "ll_caches" : "ll_cache");
117   case KMP_HW_CORE:
118     return ((plural) ? "cores" : "core");
119   case KMP_HW_THREAD:
120     return ((plural) ? "threads" : "thread");
121   case KMP_HW_PROC_GROUP:
122     return ((plural) ? "proc_groups" : "proc_group");
123   }
124   return ((plural) ? "unknowns" : "unknown");
125 }
126 
127 const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
128   switch (type) {
129   case KMP_HW_CORE_TYPE_UNKNOWN:
130     return "unknown";
131 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
132   case KMP_HW_CORE_TYPE_ATOM:
133     return "Intel Atom(R) processor";
134   case KMP_HW_CORE_TYPE_CORE:
135     return "Intel(R) Core(TM) processor";
136 #endif
137   }
138   return "unknown";
139 }
140 
141 #if KMP_AFFINITY_SUPPORTED
142 // If affinity is supported, check the affinity
143 // verbose and warning flags before printing warning
144 #define KMP_AFF_WARNING(...)                                                   \
145   if (__kmp_affinity_verbose ||                                                \
146       (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {   \
147     KMP_WARNING(__VA_ARGS__);                                                  \
148   }
149 #else
150 #define KMP_AFF_WARNING KMP_WARNING
151 #endif
152 
153 ////////////////////////////////////////////////////////////////////////////////
154 // kmp_hw_thread_t methods
155 int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
156   const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
157   const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
158   int depth = __kmp_topology->get_depth();
159   for (int level = 0; level < depth; ++level) {
160     if (ahwthread->ids[level] < bhwthread->ids[level])
161       return -1;
162     else if (ahwthread->ids[level] > bhwthread->ids[level])
163       return 1;
164   }
165   if (ahwthread->os_id < bhwthread->os_id)
166     return -1;
167   else if (ahwthread->os_id > bhwthread->os_id)
168     return 1;
169   return 0;
170 }
171 
172 #if KMP_AFFINITY_SUPPORTED
173 int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
174   int i;
175   const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
176   const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
177   int depth = __kmp_topology->get_depth();
178   KMP_DEBUG_ASSERT(__kmp_affinity_compact >= 0);
179   KMP_DEBUG_ASSERT(__kmp_affinity_compact <= depth);
180   for (i = 0; i < __kmp_affinity_compact; i++) {
181     int j = depth - i - 1;
182     if (aa->sub_ids[j] < bb->sub_ids[j])
183       return -1;
184     if (aa->sub_ids[j] > bb->sub_ids[j])
185       return 1;
186   }
187   for (; i < depth; i++) {
188     int j = i - __kmp_affinity_compact;
189     if (aa->sub_ids[j] < bb->sub_ids[j])
190       return -1;
191     if (aa->sub_ids[j] > bb->sub_ids[j])
192       return 1;
193   }
194   return 0;
195 }
196 #endif
197 
198 void kmp_hw_thread_t::print() const {
199   int depth = __kmp_topology->get_depth();
200   printf("%4d ", os_id);
201   for (int i = 0; i < depth; ++i) {
202     printf("%4d ", ids[i]);
203   }
204   if (attrs) {
205     if (attrs.is_core_type_valid())
206       printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
207     if (attrs.is_core_eff_valid())
208       printf(" (eff=%d)", attrs.get_core_eff());
209   }
210   printf("\n");
211 }
212 
213 ////////////////////////////////////////////////////////////////////////////////
214 // kmp_topology_t methods
215 
216 // Add a layer to the topology based on the ids. Assume the topology
217 // is perfectly nested (i.e., so no object has more than one parent)
218 void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
219   // Figure out where the layer should go by comparing the ids of the current
220   // layers with the new ids
221   int target_layer;
222   int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
223   int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;
224 
225   // Start from the highest layer and work down to find target layer
226   // If new layer is equal to another layer then put the new layer above
227   for (target_layer = 0; target_layer < depth; ++target_layer) {
228     bool layers_equal = true;
229     bool strictly_above_target_layer = false;
230     for (int i = 0; i < num_hw_threads; ++i) {
231       int id = hw_threads[i].ids[target_layer];
232       int new_id = ids[i];
233       if (id != previous_id && new_id == previous_new_id) {
234         // Found the layer we are strictly above
235         strictly_above_target_layer = true;
236         layers_equal = false;
237         break;
238       } else if (id == previous_id && new_id != previous_new_id) {
239         // Found a layer we are below. Move to next layer and check.
240         layers_equal = false;
241         break;
242       }
243       previous_id = id;
244       previous_new_id = new_id;
245     }
246     if (strictly_above_target_layer || layers_equal)
247       break;
248   }
249 
250   // Found the layer we are above. Now move everything to accommodate the new
251   // layer. And put the new ids and type into the topology.
252   for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
253     types[j] = types[i];
254   types[target_layer] = type;
255   for (int k = 0; k < num_hw_threads; ++k) {
256     for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
257       hw_threads[k].ids[j] = hw_threads[k].ids[i];
258     hw_threads[k].ids[target_layer] = ids[k];
259   }
260   equivalent[type] = type;
261   depth++;
262 }
263 
264 #if KMP_GROUP_AFFINITY
265 // Insert the Windows Processor Group structure into the topology
266 void kmp_topology_t::_insert_windows_proc_groups() {
267   // Do not insert the processor group structure for a single group
268   if (__kmp_num_proc_groups == 1)
269     return;
270   kmp_affin_mask_t *mask;
271   int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
272   KMP_CPU_ALLOC(mask);
273   for (int i = 0; i < num_hw_threads; ++i) {
274     KMP_CPU_ZERO(mask);
275     KMP_CPU_SET(hw_threads[i].os_id, mask);
276     ids[i] = __kmp_get_proc_group(mask);
277   }
278   KMP_CPU_FREE(mask);
279   _insert_layer(KMP_HW_PROC_GROUP, ids);
280   __kmp_free(ids);
281 }
282 #endif
283 
284 // Remove layers that don't add information to the topology.
285 // This is done by having the layer take on the id = UNKNOWN_ID (-1)
286 void kmp_topology_t::_remove_radix1_layers() {
287   int preference[KMP_HW_LAST];
288   int top_index1, top_index2;
289   // Set up preference associative array
290   preference[KMP_HW_SOCKET] = 110;
291   preference[KMP_HW_PROC_GROUP] = 100;
292   preference[KMP_HW_CORE] = 95;
293   preference[KMP_HW_THREAD] = 90;
294   preference[KMP_HW_NUMA] = 85;
295   preference[KMP_HW_DIE] = 80;
296   preference[KMP_HW_TILE] = 75;
297   preference[KMP_HW_MODULE] = 73;
298   preference[KMP_HW_L3] = 70;
299   preference[KMP_HW_L2] = 65;
300   preference[KMP_HW_L1] = 60;
301   preference[KMP_HW_LLC] = 5;
302   top_index1 = 0;
303   top_index2 = 1;
304   while (top_index1 < depth - 1 && top_index2 < depth) {
305     kmp_hw_t type1 = types[top_index1];
306     kmp_hw_t type2 = types[top_index2];
307     KMP_ASSERT_VALID_HW_TYPE(type1);
308     KMP_ASSERT_VALID_HW_TYPE(type2);
309     // Do not allow the three main topology levels (sockets, cores, threads) to
310     // be compacted down
311     if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
312          type1 == KMP_HW_SOCKET) &&
313         (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
314          type2 == KMP_HW_SOCKET)) {
315       top_index1 = top_index2++;
316       continue;
317     }
318     bool radix1 = true;
319     bool all_same = true;
320     int id1 = hw_threads[0].ids[top_index1];
321     int id2 = hw_threads[0].ids[top_index2];
322     int pref1 = preference[type1];
323     int pref2 = preference[type2];
324     for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
325       if (hw_threads[hwidx].ids[top_index1] == id1 &&
326           hw_threads[hwidx].ids[top_index2] != id2) {
327         radix1 = false;
328         break;
329       }
330       if (hw_threads[hwidx].ids[top_index2] != id2)
331         all_same = false;
332       id1 = hw_threads[hwidx].ids[top_index1];
333       id2 = hw_threads[hwidx].ids[top_index2];
334     }
335     if (radix1) {
336       // Select the layer to remove based on preference
337       kmp_hw_t remove_type, keep_type;
338       int remove_layer, remove_layer_ids;
339       if (pref1 > pref2) {
340         remove_type = type2;
341         remove_layer = remove_layer_ids = top_index2;
342         keep_type = type1;
343       } else {
344         remove_type = type1;
345         remove_layer = remove_layer_ids = top_index1;
346         keep_type = type2;
347       }
348       // If all the indexes for the second (deeper) layer are the same.
349       // e.g., all are zero, then make sure to keep the first layer's ids
350       if (all_same)
351         remove_layer_ids = top_index2;
352       // Remove radix one type by setting the equivalence, removing the id from
353       // the hw threads and removing the layer from types and depth
354       set_equivalent_type(remove_type, keep_type);
355       for (int idx = 0; idx < num_hw_threads; ++idx) {
356         kmp_hw_thread_t &hw_thread = hw_threads[idx];
357         for (int d = remove_layer_ids; d < depth - 1; ++d)
358           hw_thread.ids[d] = hw_thread.ids[d + 1];
359       }
360       for (int idx = remove_layer; idx < depth - 1; ++idx)
361         types[idx] = types[idx + 1];
362       depth--;
363     } else {
364       top_index1 = top_index2++;
365     }
366   }
367   KMP_ASSERT(depth > 0);
368 }
369 
370 void kmp_topology_t::_set_last_level_cache() {
371   if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
372     set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
373   else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
374     set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
375 #if KMP_MIC_SUPPORTED
376   else if (__kmp_mic_type == mic3) {
377     if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
378       set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
379     else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
380       set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
381     // L2/Tile wasn't detected so just say L1
382     else
383       set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
384   }
385 #endif
386   else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
387     set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
388   // Fallback is to set last level cache to socket or core
389   if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
390     if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
391       set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
392     else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
393       set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
394   }
395   KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
396 }
397 
398 // Gather the count of each topology layer and the ratio
399 void kmp_topology_t::_gather_enumeration_information() {
400   int previous_id[KMP_HW_LAST];
401   int max[KMP_HW_LAST];
402 
403   for (int i = 0; i < depth; ++i) {
404     previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
405     max[i] = 0;
406     count[i] = 0;
407     ratio[i] = 0;
408   }
409   int core_level = get_level(KMP_HW_CORE);
410   for (int i = 0; i < num_hw_threads; ++i) {
411     kmp_hw_thread_t &hw_thread = hw_threads[i];
412     for (int layer = 0; layer < depth; ++layer) {
413       int id = hw_thread.ids[layer];
414       if (id != previous_id[layer]) {
415         // Add an additional increment to each count
416         for (int l = layer; l < depth; ++l)
417           count[l]++;
418         // Keep track of topology layer ratio statistics
419         max[layer]++;
420         for (int l = layer + 1; l < depth; ++l) {
421           if (max[l] > ratio[l])
422             ratio[l] = max[l];
423           max[l] = 1;
424         }
425         // Figure out the number of different core types
426         // and efficiencies for hybrid CPUs
427         if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
428           if (hw_thread.attrs.is_core_eff_valid() &&
429               hw_thread.attrs.core_eff >= num_core_efficiencies) {
430             // Because efficiencies can range from 0 to max efficiency - 1,
431             // the number of efficiencies is max efficiency + 1
432             num_core_efficiencies = hw_thread.attrs.core_eff + 1;
433           }
434           if (hw_thread.attrs.is_core_type_valid()) {
435             bool found = false;
436             for (int j = 0; j < num_core_types; ++j) {
437               if (hw_thread.attrs.get_core_type() == core_types[j]) {
438                 found = true;
439                 break;
440               }
441             }
442             if (!found) {
443               KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
444               core_types[num_core_types++] = hw_thread.attrs.get_core_type();
445             }
446           }
447         }
448         break;
449       }
450     }
451     for (int layer = 0; layer < depth; ++layer) {
452       previous_id[layer] = hw_thread.ids[layer];
453     }
454   }
455   for (int layer = 0; layer < depth; ++layer) {
456     if (max[layer] > ratio[layer])
457       ratio[layer] = max[layer];
458   }
459 }
460 
461 int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
462                                           int above_level,
463                                           bool find_all) const {
464   int current, current_max;
465   int previous_id[KMP_HW_LAST];
466   for (int i = 0; i < depth; ++i)
467     previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
468   int core_level = get_level(KMP_HW_CORE);
469   if (find_all)
470     above_level = -1;
471   KMP_ASSERT(above_level < core_level);
472   current_max = 0;
473   current = 0;
474   for (int i = 0; i < num_hw_threads; ++i) {
475     kmp_hw_thread_t &hw_thread = hw_threads[i];
476     if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
477       if (current > current_max)
478         current_max = current;
479       current = hw_thread.attrs.contains(attr);
480     } else {
481       for (int level = above_level + 1; level <= core_level; ++level) {
482         if (hw_thread.ids[level] != previous_id[level]) {
483           if (hw_thread.attrs.contains(attr))
484             current++;
485           break;
486         }
487       }
488     }
489     for (int level = 0; level < depth; ++level)
490       previous_id[level] = hw_thread.ids[level];
491   }
492   if (current > current_max)
493     current_max = current;
494   return current_max;
495 }
496 
497 // Find out if the topology is uniform
498 void kmp_topology_t::_discover_uniformity() {
499   int num = 1;
500   for (int level = 0; level < depth; ++level)
501     num *= ratio[level];
502   flags.uniform = (num == count[depth - 1]);
503 }
504 
505 // Set all the sub_ids for each hardware thread
506 void kmp_topology_t::_set_sub_ids() {
507   int previous_id[KMP_HW_LAST];
508   int sub_id[KMP_HW_LAST];
509 
510   for (int i = 0; i < depth; ++i) {
511     previous_id[i] = -1;
512     sub_id[i] = -1;
513   }
514   for (int i = 0; i < num_hw_threads; ++i) {
515     kmp_hw_thread_t &hw_thread = hw_threads[i];
516     // Setup the sub_id
517     for (int j = 0; j < depth; ++j) {
518       if (hw_thread.ids[j] != previous_id[j]) {
519         sub_id[j]++;
520         for (int k = j + 1; k < depth; ++k) {
521           sub_id[k] = 0;
522         }
523         break;
524       }
525     }
526     // Set previous_id
527     for (int j = 0; j < depth; ++j) {
528       previous_id[j] = hw_thread.ids[j];
529     }
530     // Set the sub_ids field
531     for (int j = 0; j < depth; ++j) {
532       hw_thread.sub_ids[j] = sub_id[j];
533     }
534   }
535 }
536 
537 void kmp_topology_t::_set_globals() {
538   // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
539   int core_level, thread_level, package_level;
540   package_level = get_level(KMP_HW_SOCKET);
541 #if KMP_GROUP_AFFINITY
542   if (package_level == -1)
543     package_level = get_level(KMP_HW_PROC_GROUP);
544 #endif
545   core_level = get_level(KMP_HW_CORE);
546   thread_level = get_level(KMP_HW_THREAD);
547 
548   KMP_ASSERT(core_level != -1);
549   KMP_ASSERT(thread_level != -1);
550 
551   __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
552   if (package_level != -1) {
553     nCoresPerPkg = calculate_ratio(core_level, package_level);
554     nPackages = get_count(package_level);
555   } else {
556     // assume one socket
557     nCoresPerPkg = get_count(core_level);
558     nPackages = 1;
559   }
560 #ifndef KMP_DFLT_NTH_CORES
561   __kmp_ncores = get_count(core_level);
562 #endif
563 }
564 
565 kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
566                                          const kmp_hw_t *types) {
567   kmp_topology_t *retval;
568   // Allocate all data in one large allocation
569   size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
570                 sizeof(int) * (size_t)KMP_HW_LAST * 3;
571   char *bytes = (char *)__kmp_allocate(size);
572   retval = (kmp_topology_t *)bytes;
573   if (nproc > 0) {
574     retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
575   } else {
576     retval->hw_threads = nullptr;
577   }
578   retval->num_hw_threads = nproc;
579   retval->depth = ndepth;
580   int *arr =
581       (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
582   retval->types = (kmp_hw_t *)arr;
583   retval->ratio = arr + (size_t)KMP_HW_LAST;
584   retval->count = arr + 2 * (size_t)KMP_HW_LAST;
585   retval->num_core_efficiencies = 0;
586   retval->num_core_types = 0;
587   for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
588     retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
589   KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
590   for (int i = 0; i < ndepth; ++i) {
591     retval->types[i] = types[i];
592     retval->equivalent[types[i]] = types[i];
593   }
594   return retval;
595 }
596 
597 void kmp_topology_t::deallocate(kmp_topology_t *topology) {
598   if (topology)
599     __kmp_free(topology);
600 }
601 
602 bool kmp_topology_t::check_ids() const {
603   // Assume ids have been sorted
604   if (num_hw_threads == 0)
605     return true;
606   for (int i = 1; i < num_hw_threads; ++i) {
607     kmp_hw_thread_t &current_thread = hw_threads[i];
608     kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
609     bool unique = false;
610     for (int j = 0; j < depth; ++j) {
611       if (previous_thread.ids[j] != current_thread.ids[j]) {
612         unique = true;
613         break;
614       }
615     }
616     if (unique)
617       continue;
618     return false;
619   }
620   return true;
621 }
622 
623 void kmp_topology_t::dump() const {
624   printf("***********************\n");
625   printf("*** __kmp_topology: ***\n");
626   printf("***********************\n");
627   printf("* depth: %d\n", depth);
628 
629   printf("* types: ");
630   for (int i = 0; i < depth; ++i)
631     printf("%15s ", __kmp_hw_get_keyword(types[i]));
632   printf("\n");
633 
634   printf("* ratio: ");
635   for (int i = 0; i < depth; ++i) {
636     printf("%15d ", ratio[i]);
637   }
638   printf("\n");
639 
640   printf("* count: ");
641   for (int i = 0; i < depth; ++i) {
642     printf("%15d ", count[i]);
643   }
644   printf("\n");
645 
646   printf("* num_core_eff: %d\n", num_core_efficiencies);
647   printf("* num_core_types: %d\n", num_core_types);
648   printf("* core_types: ");
649   for (int i = 0; i < num_core_types; ++i)
650     printf("%3d ", core_types[i]);
651   printf("\n");
652 
653   printf("* equivalent map:\n");
654   KMP_FOREACH_HW_TYPE(i) {
655     const char *key = __kmp_hw_get_keyword(i);
656     const char *value = __kmp_hw_get_keyword(equivalent[i]);
657     printf("%-15s -> %-15s\n", key, value);
658   }
659 
660   printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));
661 
662   printf("* num_hw_threads: %d\n", num_hw_threads);
663   printf("* hw_threads:\n");
664   for (int i = 0; i < num_hw_threads; ++i) {
665     hw_threads[i].print();
666   }
667   printf("***********************\n");
668 }
669 
670 void kmp_topology_t::print(const char *env_var) const {
671   kmp_str_buf_t buf;
672   int print_types_depth;
673   __kmp_str_buf_init(&buf);
674   kmp_hw_t print_types[KMP_HW_LAST + 2];
675 
676   // Num Available Threads
677   KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
678 
679   // Uniform or not
680   if (is_uniform()) {
681     KMP_INFORM(Uniform, env_var);
682   } else {
683     KMP_INFORM(NonUniform, env_var);
684   }
685 
686   // Equivalent types
687   KMP_FOREACH_HW_TYPE(type) {
688     kmp_hw_t eq_type = equivalent[type];
689     if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
690       KMP_INFORM(AffEqualTopologyTypes, env_var,
691                  __kmp_hw_get_catalog_string(type),
692                  __kmp_hw_get_catalog_string(eq_type));
693     }
694   }
695 
696   // Quick topology
697   KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
698   // Create a print types array that always guarantees printing
699   // the core and thread level
700   print_types_depth = 0;
701   for (int level = 0; level < depth; ++level)
702     print_types[print_types_depth++] = types[level];
703   if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
704     // Force in the core level for quick topology
705     if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
706       // Force core before thread e.g., 1 socket X 2 threads/socket
707       // becomes 1 socket X 1 core/socket X 2 threads/socket
708       print_types[print_types_depth - 1] = KMP_HW_CORE;
709       print_types[print_types_depth++] = KMP_HW_THREAD;
710     } else {
711       print_types[print_types_depth++] = KMP_HW_CORE;
712     }
713   }
714   // Always put threads at very end of quick topology
715   if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
716     print_types[print_types_depth++] = KMP_HW_THREAD;
717 
718   __kmp_str_buf_clear(&buf);
719   kmp_hw_t numerator_type;
720   kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
721   int core_level = get_level(KMP_HW_CORE);
722   int ncores = get_count(core_level);
723 
724   for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
725     int c;
726     bool plural;
727     numerator_type = print_types[plevel];
728     KMP_ASSERT_VALID_HW_TYPE(numerator_type);
729     if (equivalent[numerator_type] != numerator_type)
730       c = 1;
731     else
732       c = get_ratio(level++);
733     plural = (c > 1);
734     if (plevel == 0) {
735       __kmp_str_buf_print(&buf, "%d %s", c,
736                           __kmp_hw_get_catalog_string(numerator_type, plural));
737     } else {
738       __kmp_str_buf_print(&buf, " x %d %s/%s", c,
739                           __kmp_hw_get_catalog_string(numerator_type, plural),
740                           __kmp_hw_get_catalog_string(denominator_type));
741     }
742     denominator_type = numerator_type;
743   }
744   KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);
745 
746   // Hybrid topology information
747   if (__kmp_is_hybrid_cpu()) {
748     for (int i = 0; i < num_core_types; ++i) {
749       kmp_hw_core_type_t core_type = core_types[i];
750       kmp_hw_attr_t attr;
751       attr.clear();
752       attr.set_core_type(core_type);
753       int ncores = get_ncores_with_attr(attr);
754       if (ncores > 0) {
755         KMP_INFORM(TopologyHybrid, env_var, ncores,
756                    __kmp_hw_get_core_type_string(core_type));
757         KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS)
758         for (int eff = 0; eff < num_core_efficiencies; ++eff) {
759           attr.set_core_eff(eff);
760           int ncores_with_eff = get_ncores_with_attr(attr);
761           if (ncores_with_eff > 0) {
762             KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
763           }
764         }
765       }
766     }
767   }
768 
769   if (num_hw_threads <= 0) {
770     __kmp_str_buf_free(&buf);
771     return;
772   }
773 
774   // Full OS proc to hardware thread map
775   KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
776   for (int i = 0; i < num_hw_threads; i++) {
777     __kmp_str_buf_clear(&buf);
778     for (int level = 0; level < depth; ++level) {
779       kmp_hw_t type = types[level];
780       __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
781       __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
782     }
783     if (__kmp_is_hybrid_cpu())
784       __kmp_str_buf_print(
785           &buf, "(%s)",
786           __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
787     KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
788   }
789 
790   __kmp_str_buf_free(&buf);
791 }
792 
793 void kmp_topology_t::canonicalize() {
794 #if KMP_GROUP_AFFINITY
795   _insert_windows_proc_groups();
796 #endif
797   _remove_radix1_layers();
798   _gather_enumeration_information();
799   _discover_uniformity();
800   _set_sub_ids();
801   _set_globals();
802   _set_last_level_cache();
803 
804 #if KMP_MIC_SUPPORTED
805   // Manually Add L2 = Tile equivalence
806   if (__kmp_mic_type == mic3) {
807     if (get_level(KMP_HW_L2) != -1)
808       set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
809     else if (get_level(KMP_HW_TILE) != -1)
810       set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
811   }
812 #endif
813 
814   // Perform post canonicalization checking
815   KMP_ASSERT(depth > 0);
816   for (int level = 0; level < depth; ++level) {
817     // All counts, ratios, and types must be valid
818     KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
819     KMP_ASSERT_VALID_HW_TYPE(types[level]);
820     // Detected types must point to themselves
821     KMP_ASSERT(equivalent[types[level]] == types[level]);
822   }
823 
824 #if KMP_AFFINITY_SUPPORTED
825   // Set the number of affinity granularity levels
826   if (__kmp_affinity_gran_levels < 0) {
827     kmp_hw_t gran_type = get_equivalent_type(__kmp_affinity_gran);
828     // Check if user's granularity request is valid
829     if (gran_type == KMP_HW_UNKNOWN) {
830       // First try core, then thread, then package
831       kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
832       for (auto g : gran_types) {
833         if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
834           gran_type = g;
835           break;
836         }
837       }
838       KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
839       // Warn user what granularity setting will be used instead
840       KMP_AFF_WARNING(AffGranularityBad, "KMP_AFFINITY",
841                       __kmp_hw_get_catalog_string(__kmp_affinity_gran),
842                       __kmp_hw_get_catalog_string(gran_type));
843       __kmp_affinity_gran = gran_type;
844     }
845 #if KMP_GROUP_AFFINITY
846     // If more than one processor group exists, and the level of
847     // granularity specified by the user is too coarse, then the
848     // granularity must be adjusted "down" to processor group affinity
849     // because threads can only exist within one processor group.
850     // For example, if a user sets granularity=socket and there are two
851     // processor groups that cover a socket, then the runtime must
852     // restrict the granularity down to the processor group level.
853     if (__kmp_num_proc_groups > 1) {
854       int gran_depth = get_level(gran_type);
855       int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
856       if (gran_depth >= 0 && proc_group_depth >= 0 &&
857           gran_depth < proc_group_depth) {
858         KMP_AFF_WARNING(AffGranTooCoarseProcGroup, "KMP_AFFINITY",
859                         __kmp_hw_get_catalog_string(__kmp_affinity_gran));
860         __kmp_affinity_gran = gran_type = KMP_HW_PROC_GROUP;
861       }
862     }
863 #endif
864     __kmp_affinity_gran_levels = 0;
865     for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
866       __kmp_affinity_gran_levels++;
867   }
868 #endif // KMP_AFFINITY_SUPPORTED
869 }
870 
871 // Canonicalize an explicit packages X cores/pkg X threads/core topology
872 void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
873                                   int nthreads_per_core, int ncores) {
874   int ndepth = 3;
875   depth = ndepth;
876   KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
877   for (int level = 0; level < depth; ++level) {
878     count[level] = 0;
879     ratio[level] = 0;
880   }
881   count[0] = npackages;
882   count[1] = ncores;
883   count[2] = __kmp_xproc;
884   ratio[0] = npackages;
885   ratio[1] = ncores_per_pkg;
886   ratio[2] = nthreads_per_core;
887   equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
888   equivalent[KMP_HW_CORE] = KMP_HW_CORE;
889   equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
890   types[0] = KMP_HW_SOCKET;
891   types[1] = KMP_HW_CORE;
892   types[2] = KMP_HW_THREAD;
893   //__kmp_avail_proc = __kmp_xproc;
894   _discover_uniformity();
895 }
896 
897 // Represents running sub IDs for a single core attribute where
898 // attribute values have SIZE possibilities.
899 template <size_t SIZE, typename IndexFunc> struct kmp_sub_ids_t {
900   int last_level; // last level in topology to consider for sub_ids
901   int sub_id[SIZE]; // The sub ID for a given attribute value
902   int prev_sub_id[KMP_HW_LAST];
903   IndexFunc indexer;
904 
905 public:
906   kmp_sub_ids_t(int last_level) : last_level(last_level) {
907     KMP_ASSERT(last_level < KMP_HW_LAST);
908     for (size_t i = 0; i < SIZE; ++i)
909       sub_id[i] = -1;
910     for (size_t i = 0; i < KMP_HW_LAST; ++i)
911       prev_sub_id[i] = -1;
912   }
913   void update(const kmp_hw_thread_t &hw_thread) {
914     int idx = indexer(hw_thread);
915     KMP_ASSERT(idx < (int)SIZE);
916     for (int level = 0; level <= last_level; ++level) {
917       if (hw_thread.sub_ids[level] != prev_sub_id[level]) {
918         if (level < last_level)
919           sub_id[idx] = -1;
920         sub_id[idx]++;
921         break;
922       }
923     }
924     for (int level = 0; level <= last_level; ++level)
925       prev_sub_id[level] = hw_thread.sub_ids[level];
926   }
927   int get_sub_id(const kmp_hw_thread_t &hw_thread) const {
928     return sub_id[indexer(hw_thread)];
929   }
930 };
931 
932 static kmp_str_buf_t *
933 __kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
934                                  bool plural) {
935   __kmp_str_buf_init(buf);
936   if (attr.is_core_type_valid())
937     __kmp_str_buf_print(buf, "%s %s",
938                         __kmp_hw_get_core_type_string(attr.get_core_type()),
939                         __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
940   else
941     __kmp_str_buf_print(buf, "%s eff=%d",
942                         __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
943                         attr.get_core_eff());
944   return buf;
945 }
946 
947 // Apply the KMP_HW_SUBSET envirable to the topology
948 // Returns true if KMP_HW_SUBSET filtered any processors
949 // otherwise, returns false
950 bool kmp_topology_t::filter_hw_subset() {
951   // If KMP_HW_SUBSET wasn't requested, then do nothing.
952   if (!__kmp_hw_subset)
953     return false;
954 
955   // First, sort the KMP_HW_SUBSET items by the machine topology
956   __kmp_hw_subset->sort();
957 
958   // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
959   bool using_core_types = false;
960   bool using_core_effs = false;
961   int hw_subset_depth = __kmp_hw_subset->get_depth();
962   kmp_hw_t specified[KMP_HW_LAST];
963   int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
964   KMP_ASSERT(hw_subset_depth > 0);
965   KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
966   int core_level = get_level(KMP_HW_CORE);
967   for (int i = 0; i < hw_subset_depth; ++i) {
968     int max_count;
969     const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
970     int num = item.num[0];
971     int offset = item.offset[0];
972     kmp_hw_t type = item.type;
973     kmp_hw_t equivalent_type = equivalent[type];
974     int level = get_level(type);
975     topology_levels[i] = level;
976 
977     // Check to see if current layer is in detected machine topology
978     if (equivalent_type != KMP_HW_UNKNOWN) {
979       __kmp_hw_subset->at(i).type = equivalent_type;
980     } else {
981       KMP_AFF_WARNING(AffHWSubsetNotExistGeneric,
982                       __kmp_hw_get_catalog_string(type));
983       return false;
984     }
985 
986     // Check to see if current layer has already been
987     // specified either directly or through an equivalent type
988     if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
989       KMP_AFF_WARNING(AffHWSubsetEqvLayers, __kmp_hw_get_catalog_string(type),
990                       __kmp_hw_get_catalog_string(specified[equivalent_type]));
991       return false;
992     }
993     specified[equivalent_type] = type;
994 
995     // Check to see if each layer's num & offset parameters are valid
996     max_count = get_ratio(level);
997     if (max_count < 0 ||
998         (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
999       bool plural = (num > 1);
1000       KMP_AFF_WARNING(AffHWSubsetManyGeneric,
1001                       __kmp_hw_get_catalog_string(type, plural));
1002       return false;
1003     }
1004 
1005     // Check to see if core attributes are consistent
1006     if (core_level == level) {
1007       // Determine which core attributes are specified
1008       for (int j = 0; j < item.num_attrs; ++j) {
1009         if (item.attr[j].is_core_type_valid())
1010           using_core_types = true;
1011         if (item.attr[j].is_core_eff_valid())
1012           using_core_effs = true;
1013       }
1014 
1015       // Check if using a single core attribute on non-hybrid arch.
1016       // Do not ignore all of KMP_HW_SUBSET, just ignore the attribute.
1017       //
1018       // Check if using multiple core attributes on non-hyrbid arch.
1019       // Ignore all of KMP_HW_SUBSET if this is the case.
1020       if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
1021         if (item.num_attrs == 1) {
1022           if (using_core_effs) {
1023             KMP_AFF_WARNING(AffHWSubsetIgnoringAttr, "efficiency");
1024           } else {
1025             KMP_AFF_WARNING(AffHWSubsetIgnoringAttr, "core_type");
1026           }
1027           using_core_effs = false;
1028           using_core_types = false;
1029         } else {
1030           KMP_AFF_WARNING(AffHWSubsetAttrsNonHybrid);
1031           return false;
1032         }
1033       }
1034 
1035       // Check if using both core types and core efficiencies together
1036       if (using_core_types && using_core_effs) {
1037         KMP_AFF_WARNING(AffHWSubsetIncompat, "core_type", "efficiency");
1038         return false;
1039       }
1040 
1041       // Check that core efficiency values are valid
1042       if (using_core_effs) {
1043         for (int j = 0; j < item.num_attrs; ++j) {
1044           if (item.attr[j].is_core_eff_valid()) {
1045             int core_eff = item.attr[j].get_core_eff();
1046             if (core_eff < 0 || core_eff >= num_core_efficiencies) {
1047               kmp_str_buf_t buf;
1048               __kmp_str_buf_init(&buf);
1049               __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
1050               __kmp_msg(kmp_ms_warning,
1051                         KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
1052                         KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
1053                         __kmp_msg_null);
1054               __kmp_str_buf_free(&buf);
1055               return false;
1056             }
1057           }
1058         }
1059       }
1060 
1061       // Check that the number of requested cores with attributes is valid
1062       if (using_core_types || using_core_effs) {
1063         for (int j = 0; j < item.num_attrs; ++j) {
1064           int num = item.num[j];
1065           int offset = item.offset[j];
1066           int level_above = core_level - 1;
1067           if (level_above >= 0) {
1068             max_count = get_ncores_with_attr_per(item.attr[j], level_above);
1069             if (max_count <= 0 ||
1070                 (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
1071               kmp_str_buf_t buf;
1072               __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
1073               KMP_AFF_WARNING(AffHWSubsetManyGeneric, buf.str);
1074               __kmp_str_buf_free(&buf);
1075               return false;
1076             }
1077           }
1078         }
1079       }
1080 
1081       if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
1082         for (int j = 0; j < item.num_attrs; ++j) {
1083           // Ambiguous use of specific core attribute + generic core
1084           // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
1085           if (!item.attr[j]) {
1086             kmp_hw_attr_t other_attr;
1087             for (int k = 0; k < item.num_attrs; ++k) {
1088               if (item.attr[k] != item.attr[j]) {
1089                 other_attr = item.attr[k];
1090                 break;
1091               }
1092             }
1093             kmp_str_buf_t buf;
1094             __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
1095             KMP_AFF_WARNING(AffHWSubsetIncompat,
1096                             __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
1097             __kmp_str_buf_free(&buf);
1098             return false;
1099           }
1100           // Allow specifying a specific core type or core eff exactly once
1101           for (int k = 0; k < j; ++k) {
1102             if (!item.attr[j] || !item.attr[k])
1103               continue;
1104             if (item.attr[k] == item.attr[j]) {
1105               kmp_str_buf_t buf;
1106               __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
1107                                                item.num[j] > 0);
1108               KMP_AFF_WARNING(AffHWSubsetAttrRepeat, buf.str);
1109               __kmp_str_buf_free(&buf);
1110               return false;
1111             }
1112           }
1113         }
1114       }
1115     }
1116   }
1117 
1118   struct core_type_indexer {
1119     int operator()(const kmp_hw_thread_t &t) const {
1120       switch (t.attrs.get_core_type()) {
1121 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1122       case KMP_HW_CORE_TYPE_ATOM:
1123         return 1;
1124       case KMP_HW_CORE_TYPE_CORE:
1125         return 2;
1126 #endif
1127       case KMP_HW_CORE_TYPE_UNKNOWN:
1128         return 0;
1129       }
1130       KMP_ASSERT(0);
1131       return 0;
1132     }
1133   };
1134   struct core_eff_indexer {
1135     int operator()(const kmp_hw_thread_t &t) const {
1136       return t.attrs.get_core_eff();
1137     }
1138   };
1139 
1140   kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer> core_type_sub_ids(
1141       core_level);
1142   kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_EFFS, core_eff_indexer> core_eff_sub_ids(
1143       core_level);
1144 
1145   // Determine which hardware threads should be filtered.
1146   int num_filtered = 0;
1147   bool *filtered = (bool *)__kmp_allocate(sizeof(bool) * num_hw_threads);
1148   for (int i = 0; i < num_hw_threads; ++i) {
1149     kmp_hw_thread_t &hw_thread = hw_threads[i];
1150     // Update type_sub_id
1151     if (using_core_types)
1152       core_type_sub_ids.update(hw_thread);
1153     if (using_core_effs)
1154       core_eff_sub_ids.update(hw_thread);
1155 
1156     // Check to see if this hardware thread should be filtered
1157     bool should_be_filtered = false;
1158     for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
1159          ++hw_subset_index) {
1160       const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
1161       int level = topology_levels[hw_subset_index];
1162       if (level == -1)
1163         continue;
1164       if ((using_core_effs || using_core_types) && level == core_level) {
1165         // Look for the core attribute in KMP_HW_SUBSET which corresponds
1166         // to this hardware thread's core attribute. Use this num,offset plus
1167         // the running sub_id for the particular core attribute of this hardware
1168         // thread to determine if the hardware thread should be filtered or not.
1169         int attr_idx;
1170         kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
1171         int core_eff = hw_thread.attrs.get_core_eff();
1172         for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
1173           if (using_core_types &&
1174               hw_subset_item.attr[attr_idx].get_core_type() == core_type)
1175             break;
1176           if (using_core_effs &&
1177               hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
1178             break;
1179         }
1180         // This core attribute isn't in the KMP_HW_SUBSET so always filter it.
1181         if (attr_idx == hw_subset_item.num_attrs) {
1182           should_be_filtered = true;
1183           break;
1184         }
1185         int sub_id;
1186         int num = hw_subset_item.num[attr_idx];
1187         int offset = hw_subset_item.offset[attr_idx];
1188         if (using_core_types)
1189           sub_id = core_type_sub_ids.get_sub_id(hw_thread);
1190         else
1191           sub_id = core_eff_sub_ids.get_sub_id(hw_thread);
1192         if (sub_id < offset ||
1193             (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
1194           should_be_filtered = true;
1195           break;
1196         }
1197       } else {
1198         int num = hw_subset_item.num[0];
1199         int offset = hw_subset_item.offset[0];
1200         if (hw_thread.sub_ids[level] < offset ||
1201             (num != kmp_hw_subset_t::USE_ALL &&
1202              hw_thread.sub_ids[level] >= offset + num)) {
1203           should_be_filtered = true;
1204           break;
1205         }
1206       }
1207     }
1208     // Collect filtering information
1209     filtered[i] = should_be_filtered;
1210     if (should_be_filtered)
1211       num_filtered++;
1212   }
1213 
1214   // One last check that we shouldn't allow filtering entire machine
1215   if (num_filtered == num_hw_threads) {
1216     KMP_AFF_WARNING(AffHWSubsetAllFiltered);
1217     __kmp_free(filtered);
1218     return false;
1219   }
1220 
1221   // Apply the filter
1222   int new_index = 0;
1223   for (int i = 0; i < num_hw_threads; ++i) {
1224     if (!filtered[i]) {
1225       if (i != new_index)
1226         hw_threads[new_index] = hw_threads[i];
1227       new_index++;
1228     } else {
1229 #if KMP_AFFINITY_SUPPORTED
1230       KMP_CPU_CLR(hw_threads[i].os_id, __kmp_affin_fullMask);
1231 #endif
1232       __kmp_avail_proc--;
1233     }
1234   }
1235 
1236   KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
1237   num_hw_threads = new_index;
1238 
1239   // Post hardware subset canonicalization
1240   _gather_enumeration_information();
1241   _discover_uniformity();
1242   _set_globals();
1243   _set_last_level_cache();
1244   __kmp_free(filtered);
1245   return true;
1246 }
1247 
1248 bool kmp_topology_t::is_close(int hwt1, int hwt2, int hw_level) const {
1249   if (hw_level >= depth)
1250     return true;
1251   bool retval = true;
1252   const kmp_hw_thread_t &t1 = hw_threads[hwt1];
1253   const kmp_hw_thread_t &t2 = hw_threads[hwt2];
1254   for (int i = 0; i < (depth - hw_level); ++i) {
1255     if (t1.ids[i] != t2.ids[i])
1256       return false;
1257   }
1258   return retval;
1259 }
1260 
1261 ////////////////////////////////////////////////////////////////////////////////
1262 
1263 #if KMP_AFFINITY_SUPPORTED
1264 class kmp_affinity_raii_t {
1265   kmp_affin_mask_t *mask;
1266   bool restored;
1267 
1268 public:
1269   kmp_affinity_raii_t() : restored(false) {
1270     KMP_CPU_ALLOC(mask);
1271     KMP_ASSERT(mask != NULL);
1272     __kmp_get_system_affinity(mask, TRUE);
1273   }
1274   void restore() {
1275     __kmp_set_system_affinity(mask, TRUE);
1276     KMP_CPU_FREE(mask);
1277     restored = true;
1278   }
1279   ~kmp_affinity_raii_t() {
1280     if (!restored) {
1281       __kmp_set_system_affinity(mask, TRUE);
1282       KMP_CPU_FREE(mask);
1283     }
1284   }
1285 };
1286 
1287 bool KMPAffinity::picked_api = false;
1288 
1289 void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
1290 void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
1291 void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
1292 void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
1293 void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
1294 void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
1295 
1296 void KMPAffinity::pick_api() {
1297   KMPAffinity *affinity_dispatch;
1298   if (picked_api)
1299     return;
1300 #if KMP_USE_HWLOC
1301   // Only use Hwloc if affinity isn't explicitly disabled and
1302   // user requests Hwloc topology method
1303   if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
1304       __kmp_affinity_type != affinity_disabled) {
1305     affinity_dispatch = new KMPHwlocAffinity();
1306   } else
1307 #endif
1308   {
1309     affinity_dispatch = new KMPNativeAffinity();
1310   }
1311   __kmp_affinity_dispatch = affinity_dispatch;
1312   picked_api = true;
1313 }
1314 
1315 void KMPAffinity::destroy_api() {
1316   if (__kmp_affinity_dispatch != NULL) {
1317     delete __kmp_affinity_dispatch;
1318     __kmp_affinity_dispatch = NULL;
1319     picked_api = false;
1320   }
1321 }
1322 
1323 #define KMP_ADVANCE_SCAN(scan)                                                 \
1324   while (*scan != '\0') {                                                      \
1325     scan++;                                                                    \
1326   }
1327 
1328 // Print the affinity mask to the character array in a pretty format.
1329 // The format is a comma separated list of non-negative integers or integer
1330 // ranges: e.g., 1,2,3-5,7,9-15
1331 // The format can also be the string "{<empty>}" if no bits are set in mask
1332 char *__kmp_affinity_print_mask(char *buf, int buf_len,
1333                                 kmp_affin_mask_t *mask) {
1334   int start = 0, finish = 0, previous = 0;
1335   bool first_range;
1336   KMP_ASSERT(buf);
1337   KMP_ASSERT(buf_len >= 40);
1338   KMP_ASSERT(mask);
1339   char *scan = buf;
1340   char *end = buf + buf_len - 1;
1341 
1342   // Check for empty set.
1343   if (mask->begin() == mask->end()) {
1344     KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
1345     KMP_ADVANCE_SCAN(scan);
1346     KMP_ASSERT(scan <= end);
1347     return buf;
1348   }
1349 
1350   first_range = true;
1351   start = mask->begin();
1352   while (1) {
1353     // Find next range
1354     // [start, previous] is inclusive range of contiguous bits in mask
1355     for (finish = mask->next(start), previous = start;
1356          finish == previous + 1 && finish != mask->end();
1357          finish = mask->next(finish)) {
1358       previous = finish;
1359     }
1360 
1361     // The first range does not need a comma printed before it, but the rest
1362     // of the ranges do need a comma beforehand
1363     if (!first_range) {
1364       KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
1365       KMP_ADVANCE_SCAN(scan);
1366     } else {
1367       first_range = false;
1368     }
1369     // Range with three or more contiguous bits in the affinity mask
1370     if (previous - start > 1) {
1371       KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
1372     } else {
1373       // Range with one or two contiguous bits in the affinity mask
1374       KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
1375       KMP_ADVANCE_SCAN(scan);
1376       if (previous - start > 0) {
1377         KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
1378       }
1379     }
1380     KMP_ADVANCE_SCAN(scan);
1381     // Start over with new start point
1382     start = finish;
1383     if (start == mask->end())
1384       break;
1385     // Check for overflow
1386     if (end - scan < 2)
1387       break;
1388   }
1389 
1390   // Check for overflow
1391   KMP_ASSERT(scan <= end);
1392   return buf;
1393 }
1394 #undef KMP_ADVANCE_SCAN
1395 
1396 // Print the affinity mask to the string buffer object in a pretty format
1397 // The format is a comma separated list of non-negative integers or integer
1398 // ranges: e.g., 1,2,3-5,7,9-15
1399 // The format can also be the string "{<empty>}" if no bits are set in mask
1400 kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
1401                                            kmp_affin_mask_t *mask) {
1402   int start = 0, finish = 0, previous = 0;
1403   bool first_range;
1404   KMP_ASSERT(buf);
1405   KMP_ASSERT(mask);
1406 
1407   __kmp_str_buf_clear(buf);
1408 
1409   // Check for empty set.
1410   if (mask->begin() == mask->end()) {
1411     __kmp_str_buf_print(buf, "%s", "{<empty>}");
1412     return buf;
1413   }
1414 
1415   first_range = true;
1416   start = mask->begin();
1417   while (1) {
1418     // Find next range
1419     // [start, previous] is inclusive range of contiguous bits in mask
1420     for (finish = mask->next(start), previous = start;
1421          finish == previous + 1 && finish != mask->end();
1422          finish = mask->next(finish)) {
1423       previous = finish;
1424     }
1425 
1426     // The first range does not need a comma printed before it, but the rest
1427     // of the ranges do need a comma beforehand
1428     if (!first_range) {
1429       __kmp_str_buf_print(buf, "%s", ",");
1430     } else {
1431       first_range = false;
1432     }
1433     // Range with three or more contiguous bits in the affinity mask
1434     if (previous - start > 1) {
1435       __kmp_str_buf_print(buf, "%u-%u", start, previous);
1436     } else {
1437       // Range with one or two contiguous bits in the affinity mask
1438       __kmp_str_buf_print(buf, "%u", start);
1439       if (previous - start > 0) {
1440         __kmp_str_buf_print(buf, ",%u", previous);
1441       }
1442     }
1443     // Start over with new start point
1444     start = finish;
1445     if (start == mask->end())
1446       break;
1447   }
1448   return buf;
1449 }
1450 
1451 // Return (possibly empty) affinity mask representing the offline CPUs
1452 // Caller must free the mask
1453 kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
1454   kmp_affin_mask_t *offline;
1455   KMP_CPU_ALLOC(offline);
1456   KMP_CPU_ZERO(offline);
1457 #if KMP_OS_LINUX
1458   int n, begin_cpu, end_cpu;
1459   kmp_safe_raii_file_t offline_file;
1460   auto skip_ws = [](FILE *f) {
1461     int c;
1462     do {
1463       c = fgetc(f);
1464     } while (isspace(c));
1465     if (c != EOF)
1466       ungetc(c, f);
1467   };
1468   // File contains CSV of integer ranges representing the offline CPUs
1469   // e.g., 1,2,4-7,9,11-15
1470   int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r");
1471   if (status != 0)
1472     return offline;
1473   while (!feof(offline_file)) {
1474     skip_ws(offline_file);
1475     n = fscanf(offline_file, "%d", &begin_cpu);
1476     if (n != 1)
1477       break;
1478     skip_ws(offline_file);
1479     int c = fgetc(offline_file);
1480     if (c == EOF || c == ',') {
1481       // Just single CPU
1482       end_cpu = begin_cpu;
1483     } else if (c == '-') {
1484       // Range of CPUs
1485       skip_ws(offline_file);
1486       n = fscanf(offline_file, "%d", &end_cpu);
1487       if (n != 1)
1488         break;
1489       skip_ws(offline_file);
1490       c = fgetc(offline_file); // skip ','
1491     } else {
1492       // Syntax problem
1493       break;
1494     }
1495     // Ensure a valid range of CPUs
1496     if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
1497         end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
1498       continue;
1499     }
1500     // Insert [begin_cpu, end_cpu] into offline mask
1501     for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
1502       KMP_CPU_SET(cpu, offline);
1503     }
1504   }
1505 #endif
1506   return offline;
1507 }
1508 
1509 // Return the number of available procs
1510 int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
1511   int avail_proc = 0;
1512   KMP_CPU_ZERO(mask);
1513 
1514 #if KMP_GROUP_AFFINITY
1515 
1516   if (__kmp_num_proc_groups > 1) {
1517     int group;
1518     KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
1519     for (group = 0; group < __kmp_num_proc_groups; group++) {
1520       int i;
1521       int num = __kmp_GetActiveProcessorCount(group);
1522       for (i = 0; i < num; i++) {
1523         KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
1524         avail_proc++;
1525       }
1526     }
1527   } else
1528 
1529 #endif /* KMP_GROUP_AFFINITY */
1530 
1531   {
1532     int proc;
1533     kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
1534     for (proc = 0; proc < __kmp_xproc; proc++) {
1535       // Skip offline CPUs
1536       if (KMP_CPU_ISSET(proc, offline_cpus))
1537         continue;
1538       KMP_CPU_SET(proc, mask);
1539       avail_proc++;
1540     }
1541     KMP_CPU_FREE(offline_cpus);
1542   }
1543 
1544   return avail_proc;
1545 }
1546 
1547 // All of the __kmp_affinity_create_*_map() routines should allocate the
1548 // internal topology object and set the layer ids for it.  Each routine
1549 // returns a boolean on whether it was successful at doing so.
1550 kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
1551 // Original mask is a subset of full mask in multiple processor groups topology
1552 kmp_affin_mask_t *__kmp_affin_origMask = NULL;
1553 
1554 #if KMP_USE_HWLOC
1555 static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
1556 #if HWLOC_API_VERSION >= 0x00020000
1557   return hwloc_obj_type_is_cache(obj->type);
1558 #else
1559   return obj->type == HWLOC_OBJ_CACHE;
1560 #endif
1561 }
1562 
1563 // Returns KMP_HW_* type derived from HWLOC_* type
1564 static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {
1565 
1566   if (__kmp_hwloc_is_cache_type(obj)) {
1567     if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
1568       return KMP_HW_UNKNOWN;
1569     switch (obj->attr->cache.depth) {
1570     case 1:
1571       return KMP_HW_L1;
1572     case 2:
1573 #if KMP_MIC_SUPPORTED
1574       if (__kmp_mic_type == mic3) {
1575         return KMP_HW_TILE;
1576       }
1577 #endif
1578       return KMP_HW_L2;
1579     case 3:
1580       return KMP_HW_L3;
1581     }
1582     return KMP_HW_UNKNOWN;
1583   }
1584 
1585   switch (obj->type) {
1586   case HWLOC_OBJ_PACKAGE:
1587     return KMP_HW_SOCKET;
1588   case HWLOC_OBJ_NUMANODE:
1589     return KMP_HW_NUMA;
1590   case HWLOC_OBJ_CORE:
1591     return KMP_HW_CORE;
1592   case HWLOC_OBJ_PU:
1593     return KMP_HW_THREAD;
1594   case HWLOC_OBJ_GROUP:
1595     if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
1596       return KMP_HW_DIE;
1597     else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
1598       return KMP_HW_TILE;
1599     else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
1600       return KMP_HW_MODULE;
1601     else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
1602       return KMP_HW_PROC_GROUP;
1603     return KMP_HW_UNKNOWN;
1604 #if HWLOC_API_VERSION >= 0x00020100
1605   case HWLOC_OBJ_DIE:
1606     return KMP_HW_DIE;
1607 #endif
1608   }
1609   return KMP_HW_UNKNOWN;
1610 }
1611 
1612 // Returns the number of objects of type 'type' below 'obj' within the topology
1613 // tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is
1614 // HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET
1615 // object.
1616 static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
1617                                            hwloc_obj_type_t type) {
1618   int retval = 0;
1619   hwloc_obj_t first;
1620   for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
1621                                            obj->logical_index, type, 0);
1622        first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
1623                                                        obj->type, first) == obj;
1624        first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
1625                                           first)) {
1626     ++retval;
1627   }
1628   return retval;
1629 }
1630 
1631 // This gets the sub_id for a lower object under a higher object in the
1632 // topology tree
1633 static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
1634                                   hwloc_obj_t lower) {
1635   hwloc_obj_t obj;
1636   hwloc_obj_type_t ltype = lower->type;
1637   int lindex = lower->logical_index - 1;
1638   int sub_id = 0;
1639   // Get the previous lower object
1640   obj = hwloc_get_obj_by_type(t, ltype, lindex);
1641   while (obj && lindex >= 0 &&
1642          hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
1643     if (obj->userdata) {
1644       sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
1645       break;
1646     }
1647     sub_id++;
1648     lindex--;
1649     obj = hwloc_get_obj_by_type(t, ltype, lindex);
1650   }
1651   // store sub_id + 1 so that 0 is differed from NULL
1652   lower->userdata = RCAST(void *, sub_id + 1);
1653   return sub_id;
1654 }
1655 
1656 static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
1657   kmp_hw_t type;
1658   int hw_thread_index, sub_id;
1659   int depth;
1660   hwloc_obj_t pu, obj, root, prev;
1661   kmp_hw_t types[KMP_HW_LAST];
1662   hwloc_obj_type_t hwloc_types[KMP_HW_LAST];
1663 
1664   hwloc_topology_t tp = __kmp_hwloc_topology;
1665   *msg_id = kmp_i18n_null;
1666   if (__kmp_affinity_verbose) {
1667     KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
1668   }
1669 
1670   if (!KMP_AFFINITY_CAPABLE()) {
1671     // Hack to try and infer the machine topology using only the data
1672     // available from hwloc on the current thread, and __kmp_xproc.
1673     KMP_ASSERT(__kmp_affinity_type == affinity_none);
1674     // hwloc only guarantees existance of PU object, so check PACKAGE and CORE
1675     hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
1676     if (o != NULL)
1677       nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
1678     else
1679       nCoresPerPkg = 1; // no PACKAGE found
1680     o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
1681     if (o != NULL)
1682       __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
1683     else
1684       __kmp_nThreadsPerCore = 1; // no CORE found
1685     __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
1686     if (nCoresPerPkg == 0)
1687       nCoresPerPkg = 1; // to prevent possible division by 0
1688     nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
1689     return true;
1690   }
1691 
1692   // Handle multiple types of cores if they exist on the system
1693   int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);
1694 
1695   typedef struct kmp_hwloc_cpukinds_info_t {
1696     int efficiency;
1697     kmp_hw_core_type_t core_type;
1698     hwloc_bitmap_t mask;
1699   } kmp_hwloc_cpukinds_info_t;
1700   kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;
1701 
1702   if (nr_cpu_kinds > 0) {
1703     unsigned nr_infos;
1704     struct hwloc_info_s *infos;
1705     cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(
1706         sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
1707     for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
1708       cpukinds[idx].efficiency = -1;
1709       cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN;
1710       cpukinds[idx].mask = hwloc_bitmap_alloc();
1711       if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
1712                                   &cpukinds[idx].efficiency, &nr_infos, &infos,
1713                                   0) == 0) {
1714         for (unsigned i = 0; i < nr_infos; ++i) {
1715           if (__kmp_str_match("CoreType", 8, infos[i].name)) {
1716 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1717             if (__kmp_str_match("IntelAtom", 9, infos[i].value)) {
1718               cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
1719               break;
1720             } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) {
1721               cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
1722               break;
1723             }
1724 #endif
1725           }
1726         }
1727       }
1728     }
1729   }
1730 
1731   root = hwloc_get_root_obj(tp);
1732 
1733   // Figure out the depth and types in the topology
1734   depth = 0;
1735   pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
1736   KMP_ASSERT(pu);
1737   obj = pu;
1738   types[depth] = KMP_HW_THREAD;
1739   hwloc_types[depth] = obj->type;
1740   depth++;
1741   while (obj != root && obj != NULL) {
1742     obj = obj->parent;
1743 #if HWLOC_API_VERSION >= 0x00020000
1744     if (obj->memory_arity) {
1745       hwloc_obj_t memory;
1746       for (memory = obj->memory_first_child; memory;
1747            memory = hwloc_get_next_child(tp, obj, memory)) {
1748         if (memory->type == HWLOC_OBJ_NUMANODE)
1749           break;
1750       }
1751       if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
1752         types[depth] = KMP_HW_NUMA;
1753         hwloc_types[depth] = memory->type;
1754         depth++;
1755       }
1756     }
1757 #endif
1758     type = __kmp_hwloc_type_2_topology_type(obj);
1759     if (type != KMP_HW_UNKNOWN) {
1760       types[depth] = type;
1761       hwloc_types[depth] = obj->type;
1762       depth++;
1763     }
1764   }
1765   KMP_ASSERT(depth > 0);
1766 
1767   // Get the order for the types correct
1768   for (int i = 0, j = depth - 1; i < j; ++i, --j) {
1769     hwloc_obj_type_t hwloc_temp = hwloc_types[i];
1770     kmp_hw_t temp = types[i];
1771     types[i] = types[j];
1772     types[j] = temp;
1773     hwloc_types[i] = hwloc_types[j];
1774     hwloc_types[j] = hwloc_temp;
1775   }
1776 
1777   // Allocate the data structure to be returned.
1778   __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1779 
1780   hw_thread_index = 0;
1781   pu = NULL;
1782   while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
1783     int index = depth - 1;
1784     bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
1785     kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
1786     if (included) {
1787       hw_thread.clear();
1788       hw_thread.ids[index] = pu->logical_index;
1789       hw_thread.os_id = pu->os_index;
1790       // If multiple core types, then set that attribute for the hardware thread
1791       if (cpukinds) {
1792         int cpukind_index = -1;
1793         for (int i = 0; i < nr_cpu_kinds; ++i) {
1794           if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
1795             cpukind_index = i;
1796             break;
1797           }
1798         }
1799         if (cpukind_index >= 0) {
1800           hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
1801           hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
1802         }
1803       }
1804       index--;
1805     }
1806     obj = pu;
1807     prev = obj;
1808     while (obj != root && obj != NULL) {
1809       obj = obj->parent;
1810 #if HWLOC_API_VERSION >= 0x00020000
1811       // NUMA Nodes are handled differently since they are not within the
1812       // parent/child structure anymore.  They are separate children
1813       // of obj (memory_first_child points to first memory child)
1814       if (obj->memory_arity) {
1815         hwloc_obj_t memory;
1816         for (memory = obj->memory_first_child; memory;
1817              memory = hwloc_get_next_child(tp, obj, memory)) {
1818           if (memory->type == HWLOC_OBJ_NUMANODE)
1819             break;
1820         }
1821         if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
1822           sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
1823           if (included) {
1824             hw_thread.ids[index] = memory->logical_index;
1825             hw_thread.ids[index + 1] = sub_id;
1826             index--;
1827           }
1828           prev = memory;
1829         }
1830         prev = obj;
1831       }
1832 #endif
1833       type = __kmp_hwloc_type_2_topology_type(obj);
1834       if (type != KMP_HW_UNKNOWN) {
1835         sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
1836         if (included) {
1837           hw_thread.ids[index] = obj->logical_index;
1838           hw_thread.ids[index + 1] = sub_id;
1839           index--;
1840         }
1841         prev = obj;
1842       }
1843     }
1844     if (included)
1845       hw_thread_index++;
1846   }
1847 
1848   // Free the core types information
1849   if (cpukinds) {
1850     for (int idx = 0; idx < nr_cpu_kinds; ++idx)
1851       hwloc_bitmap_free(cpukinds[idx].mask);
1852     __kmp_free(cpukinds);
1853   }
1854   __kmp_topology->sort_ids();
1855   return true;
1856 }
1857 #endif // KMP_USE_HWLOC
1858 
1859 // If we don't know how to retrieve the machine's processor topology, or
1860 // encounter an error in doing so, this routine is called to form a "flat"
1861 // mapping of os thread id's <-> processor id's.
1862 static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
1863   *msg_id = kmp_i18n_null;
1864   int depth = 3;
1865   kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};
1866 
1867   if (__kmp_affinity_verbose) {
1868     KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
1869   }
1870 
1871   // Even if __kmp_affinity_type == affinity_none, this routine might still
1872   // called to set __kmp_ncores, as well as
1873   // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1874   if (!KMP_AFFINITY_CAPABLE()) {
1875     KMP_ASSERT(__kmp_affinity_type == affinity_none);
1876     __kmp_ncores = nPackages = __kmp_xproc;
1877     __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1878     return true;
1879   }
1880 
1881   // When affinity is off, this routine will still be called to set
1882   // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1883   // Make sure all these vars are set correctly, and return now if affinity is
1884   // not enabled.
1885   __kmp_ncores = nPackages = __kmp_avail_proc;
1886   __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1887 
1888   // Construct the data structure to be returned.
1889   __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1890   int avail_ct = 0;
1891   int i;
1892   KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1893     // Skip this proc if it is not included in the machine model.
1894     if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1895       continue;
1896     }
1897     kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
1898     hw_thread.clear();
1899     hw_thread.os_id = i;
1900     hw_thread.ids[0] = i;
1901     hw_thread.ids[1] = 0;
1902     hw_thread.ids[2] = 0;
1903     avail_ct++;
1904   }
1905   if (__kmp_affinity_verbose) {
1906     KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
1907   }
1908   return true;
1909 }
1910 
1911 #if KMP_GROUP_AFFINITY
1912 // If multiple Windows* OS processor groups exist, we can create a 2-level
1913 // topology map with the groups at level 0 and the individual procs at level 1.
1914 // This facilitates letting the threads float among all procs in a group,
1915 // if granularity=group (the default when there are multiple groups).
1916 static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
1917   *msg_id = kmp_i18n_null;
1918   int depth = 3;
1919   kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
1920   const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);
1921 
1922   if (__kmp_affinity_verbose) {
1923     KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
1924   }
1925 
1926   // If we aren't affinity capable, then use flat topology
1927   if (!KMP_AFFINITY_CAPABLE()) {
1928     KMP_ASSERT(__kmp_affinity_type == affinity_none);
1929     nPackages = __kmp_num_proc_groups;
1930     __kmp_nThreadsPerCore = 1;
1931     __kmp_ncores = __kmp_xproc;
1932     nCoresPerPkg = nPackages / __kmp_ncores;
1933     return true;
1934   }
1935 
1936   // Construct the data structure to be returned.
1937   __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1938   int avail_ct = 0;
1939   int i;
1940   KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1941     // Skip this proc if it is not included in the machine model.
1942     if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1943       continue;
1944     }
1945     kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++);
1946     hw_thread.clear();
1947     hw_thread.os_id = i;
1948     hw_thread.ids[0] = i / BITS_PER_GROUP;
1949     hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
1950   }
1951   return true;
1952 }
1953 #endif /* KMP_GROUP_AFFINITY */
1954 
1955 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1956 
1957 template <kmp_uint32 LSB, kmp_uint32 MSB>
1958 static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
1959   const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
1960   const kmp_uint32 SHIFT_RIGHT = LSB;
1961   kmp_uint32 retval = v;
1962   retval <<= SHIFT_LEFT;
1963   retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
1964   return retval;
1965 }
1966 
1967 static int __kmp_cpuid_mask_width(int count) {
1968   int r = 0;
1969 
1970   while ((1 << r) < count)
1971     ++r;
1972   return r;
1973 }
1974 
1975 class apicThreadInfo {
1976 public:
1977   unsigned osId; // param to __kmp_affinity_bind_thread
1978   unsigned apicId; // from cpuid after binding
1979   unsigned maxCoresPerPkg; //      ""
1980   unsigned maxThreadsPerPkg; //      ""
1981   unsigned pkgId; // inferred from above values
1982   unsigned coreId; //      ""
1983   unsigned threadId; //      ""
1984 };
1985 
1986 static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
1987                                                      const void *b) {
1988   const apicThreadInfo *aa = (const apicThreadInfo *)a;
1989   const apicThreadInfo *bb = (const apicThreadInfo *)b;
1990   if (aa->pkgId < bb->pkgId)
1991     return -1;
1992   if (aa->pkgId > bb->pkgId)
1993     return 1;
1994   if (aa->coreId < bb->coreId)
1995     return -1;
1996   if (aa->coreId > bb->coreId)
1997     return 1;
1998   if (aa->threadId < bb->threadId)
1999     return -1;
2000   if (aa->threadId > bb->threadId)
2001     return 1;
2002   return 0;
2003 }
2004 
2005 class kmp_cache_info_t {
2006 public:
2007   struct info_t {
2008     unsigned level, mask;
2009   };
2010   kmp_cache_info_t() : depth(0) { get_leaf4_levels(); }
2011   size_t get_depth() const { return depth; }
2012   info_t &operator[](size_t index) { return table[index]; }
2013   const info_t &operator[](size_t index) const { return table[index]; }
2014 
2015   static kmp_hw_t get_topology_type(unsigned level) {
2016     KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
2017     switch (level) {
2018     case 1:
2019       return KMP_HW_L1;
2020     case 2:
2021       return KMP_HW_L2;
2022     case 3:
2023       return KMP_HW_L3;
2024     }
2025     return KMP_HW_UNKNOWN;
2026   }
2027 
2028 private:
2029   static const int MAX_CACHE_LEVEL = 3;
2030 
2031   size_t depth;
2032   info_t table[MAX_CACHE_LEVEL];
2033 
2034   void get_leaf4_levels() {
2035     unsigned level = 0;
2036     while (depth < MAX_CACHE_LEVEL) {
2037       unsigned cache_type, max_threads_sharing;
2038       unsigned cache_level, cache_mask_width;
2039       kmp_cpuid buf2;
2040       __kmp_x86_cpuid(4, level, &buf2);
2041       cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
2042       if (!cache_type)
2043         break;
2044       // Skip instruction caches
2045       if (cache_type == 2) {
2046         level++;
2047         continue;
2048       }
2049       max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
2050       cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
2051       cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
2052       table[depth].level = cache_level;
2053       table[depth].mask = ((-1) << cache_mask_width);
2054       depth++;
2055       level++;
2056     }
2057   }
2058 };
2059 
2060 // On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
2061 // an algorithm which cycles through the available os threads, setting
2062 // the current thread's affinity mask to that thread, and then retrieves
2063 // the Apic Id for each thread context using the cpuid instruction.
2064 static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
2065   kmp_cpuid buf;
2066   *msg_id = kmp_i18n_null;
2067 
2068   if (__kmp_affinity_verbose) {
2069     KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
2070   }
2071 
2072   // Check if cpuid leaf 4 is supported.
2073   __kmp_x86_cpuid(0, 0, &buf);
2074   if (buf.eax < 4) {
2075     *msg_id = kmp_i18n_str_NoLeaf4Support;
2076     return false;
2077   }
2078 
2079   // The algorithm used starts by setting the affinity to each available thread
2080   // and retrieving info from the cpuid instruction, so if we are not capable of
2081   // calling __kmp_get_system_affinity() and _kmp_get_system_affinity(), then we
2082   // need to do something else - use the defaults that we calculated from
2083   // issuing cpuid without binding to each proc.
2084   if (!KMP_AFFINITY_CAPABLE()) {
2085     // Hack to try and infer the machine topology using only the data
2086     // available from cpuid on the current thread, and __kmp_xproc.
2087     KMP_ASSERT(__kmp_affinity_type == affinity_none);
2088 
2089     // Get an upper bound on the number of threads per package using cpuid(1).
2090     // On some OS/chps combinations where HT is supported by the chip but is
2091     // disabled, this value will be 2 on a single core chip. Usually, it will be
2092     // 2 if HT is enabled and 1 if HT is disabled.
2093     __kmp_x86_cpuid(1, 0, &buf);
2094     int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
2095     if (maxThreadsPerPkg == 0) {
2096       maxThreadsPerPkg = 1;
2097     }
2098 
2099     // The num cores per pkg comes from cpuid(4). 1 must be added to the encoded
2100     // value.
2101     //
2102     // The author of cpu_count.cpp treated this only an upper bound on the
2103     // number of cores, but I haven't seen any cases where it was greater than
2104     // the actual number of cores, so we will treat it as exact in this block of
2105     // code.
2106     //
2107     // First, we need to check if cpuid(4) is supported on this chip. To see if
2108     // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or
2109     // greater.
2110     __kmp_x86_cpuid(0, 0, &buf);
2111     if (buf.eax >= 4) {
2112       __kmp_x86_cpuid(4, 0, &buf);
2113       nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
2114     } else {
2115       nCoresPerPkg = 1;
2116     }
2117 
2118     // There is no way to reliably tell if HT is enabled without issuing the
2119     // cpuid instruction from every thread, can correlating the cpuid info, so
2120     // if the machine is not affinity capable, we assume that HT is off. We have
2121     // seen quite a few machines where maxThreadsPerPkg is 2, yet the machine
2122     // does not support HT.
2123     //
2124     // - Older OSes are usually found on machines with older chips, which do not
2125     //   support HT.
2126     // - The performance penalty for mistakenly identifying a machine as HT when
2127     //   it isn't (which results in blocktime being incorrectly set to 0) is
2128     //   greater than the penalty when for mistakenly identifying a machine as
2129     //   being 1 thread/core when it is really HT enabled (which results in
2130     //   blocktime being incorrectly set to a positive value).
2131     __kmp_ncores = __kmp_xproc;
2132     nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
2133     __kmp_nThreadsPerCore = 1;
2134     return true;
2135   }
2136 
2137   // From here on, we can assume that it is safe to call
2138   // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
2139   // __kmp_affinity_type = affinity_none.
2140 
2141   // Save the affinity mask for the current thread.
2142   kmp_affinity_raii_t previous_affinity;
2143 
2144   // Run through each of the available contexts, binding the current thread
2145   // to it, and obtaining the pertinent information using the cpuid instr.
2146   //
2147   // The relevant information is:
2148   // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
2149   //     has a uniqie Apic Id, which is of the form pkg# : core# : thread#.
2150   // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
2151   //     of this field determines the width of the core# + thread# fields in the
2152   //     Apic Id. It is also an upper bound on the number of threads per
2153   //     package, but it has been verified that situations happen were it is not
2154   //     exact. In particular, on certain OS/chip combinations where Intel(R)
2155   //     Hyper-Threading Technology is supported by the chip but has been
2156   //     disabled, the value of this field will be 2 (for a single core chip).
2157   //     On other OS/chip combinations supporting Intel(R) Hyper-Threading
2158   //     Technology, the value of this field will be 1 when Intel(R)
2159   //     Hyper-Threading Technology is disabled and 2 when it is enabled.
2160   // - Max Cores Per Pkg:  Bits 26:31 of eax after issuing cpuid(4). The value
2161   //     of this field (+1) determines the width of the core# field in the Apic
2162   //     Id. The comments in "cpucount.cpp" say that this value is an upper
2163   //     bound, but the IA-32 architecture manual says that it is exactly the
2164   //     number of cores per package, and I haven't seen any case where it
2165   //     wasn't.
2166   //
2167   // From this information, deduce the package Id, core Id, and thread Id,
2168   // and set the corresponding fields in the apicThreadInfo struct.
2169   unsigned i;
2170   apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
2171       __kmp_avail_proc * sizeof(apicThreadInfo));
2172   unsigned nApics = 0;
2173   KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
2174     // Skip this proc if it is not included in the machine model.
2175     if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
2176       continue;
2177     }
2178     KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);
2179 
2180     __kmp_affinity_dispatch->bind_thread(i);
2181     threadInfo[nApics].osId = i;
2182 
2183     // The apic id and max threads per pkg come from cpuid(1).
2184     __kmp_x86_cpuid(1, 0, &buf);
2185     if (((buf.edx >> 9) & 1) == 0) {
2186       __kmp_free(threadInfo);
2187       *msg_id = kmp_i18n_str_ApicNotPresent;
2188       return false;
2189     }
2190     threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
2191     threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
2192     if (threadInfo[nApics].maxThreadsPerPkg == 0) {
2193       threadInfo[nApics].maxThreadsPerPkg = 1;
2194     }
2195 
2196     // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
2197     // value.
2198     //
2199     // First, we need to check if cpuid(4) is supported on this chip. To see if
2200     // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
2201     // or greater.
2202     __kmp_x86_cpuid(0, 0, &buf);
2203     if (buf.eax >= 4) {
2204       __kmp_x86_cpuid(4, 0, &buf);
2205       threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
2206     } else {
2207       threadInfo[nApics].maxCoresPerPkg = 1;
2208     }
2209 
2210     // Infer the pkgId / coreId / threadId using only the info obtained locally.
2211     int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
2212     threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;
2213 
2214     int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
2215     int widthT = widthCT - widthC;
2216     if (widthT < 0) {
2217       // I've never seen this one happen, but I suppose it could, if the cpuid
2218       // instruction on a chip was really screwed up. Make sure to restore the
2219       // affinity mask before the tail call.
2220       __kmp_free(threadInfo);
2221       *msg_id = kmp_i18n_str_InvalidCpuidInfo;
2222       return false;
2223     }
2224 
2225     int maskC = (1 << widthC) - 1;
2226     threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;
2227 
2228     int maskT = (1 << widthT) - 1;
2229     threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;
2230 
2231     nApics++;
2232   }
2233 
2234   // We've collected all the info we need.
2235   // Restore the old affinity mask for this thread.
2236   previous_affinity.restore();
2237 
2238   // Sort the threadInfo table by physical Id.
2239   qsort(threadInfo, nApics, sizeof(*threadInfo),
2240         __kmp_affinity_cmp_apicThreadInfo_phys_id);
2241 
2242   // The table is now sorted by pkgId / coreId / threadId, but we really don't
2243   // know the radix of any of the fields. pkgId's may be sparsely assigned among
2244   // the chips on a system. Although coreId's are usually assigned
2245   // [0 .. coresPerPkg-1] and threadId's are usually assigned
2246   // [0..threadsPerCore-1], we don't want to make any such assumptions.
2247   //
2248   // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
2249   // total # packages) are at this point - we want to determine that now. We
2250   // only have an upper bound on the first two figures.
2251   //
2252   // We also perform a consistency check at this point: the values returned by
2253   // the cpuid instruction for any thread bound to a given package had better
2254   // return the same info for maxThreadsPerPkg and maxCoresPerPkg.
2255   nPackages = 1;
2256   nCoresPerPkg = 1;
2257   __kmp_nThreadsPerCore = 1;
2258   unsigned nCores = 1;
2259 
2260   unsigned pkgCt = 1; // to determine radii
2261   unsigned lastPkgId = threadInfo[0].pkgId;
2262   unsigned coreCt = 1;
2263   unsigned lastCoreId = threadInfo[0].coreId;
2264   unsigned threadCt = 1;
2265   unsigned lastThreadId = threadInfo[0].threadId;
2266 
2267   // intra-pkg consist checks
2268   unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
2269   unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;
2270 
2271   for (i = 1; i < nApics; i++) {
2272     if (threadInfo[i].pkgId != lastPkgId) {
2273       nCores++;
2274       pkgCt++;
2275       lastPkgId = threadInfo[i].pkgId;
2276       if ((int)coreCt > nCoresPerPkg)
2277         nCoresPerPkg = coreCt;
2278       coreCt = 1;
2279       lastCoreId = threadInfo[i].coreId;
2280       if ((int)threadCt > __kmp_nThreadsPerCore)
2281         __kmp_nThreadsPerCore = threadCt;
2282       threadCt = 1;
2283       lastThreadId = threadInfo[i].threadId;
2284 
2285       // This is a different package, so go on to the next iteration without
2286       // doing any consistency checks. Reset the consistency check vars, though.
2287       prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
2288       prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
2289       continue;
2290     }
2291 
2292     if (threadInfo[i].coreId != lastCoreId) {
2293       nCores++;
2294       coreCt++;
2295       lastCoreId = threadInfo[i].coreId;
2296       if ((int)threadCt > __kmp_nThreadsPerCore)
2297         __kmp_nThreadsPerCore = threadCt;
2298       threadCt = 1;
2299       lastThreadId = threadInfo[i].threadId;
2300     } else if (threadInfo[i].threadId != lastThreadId) {
2301       threadCt++;
2302       lastThreadId = threadInfo[i].threadId;
2303     } else {
2304       __kmp_free(threadInfo);
2305       *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
2306       return false;
2307     }
2308 
2309     // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
2310     // fields agree between all the threads bounds to a given package.
2311     if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
2312         (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
2313       __kmp_free(threadInfo);
2314       *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
2315       return false;
2316     }
2317   }
2318   // When affinity is off, this routine will still be called to set
2319   // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
2320   // Make sure all these vars are set correctly
2321   nPackages = pkgCt;
2322   if ((int)coreCt > nCoresPerPkg)
2323     nCoresPerPkg = coreCt;
2324   if ((int)threadCt > __kmp_nThreadsPerCore)
2325     __kmp_nThreadsPerCore = threadCt;
2326   __kmp_ncores = nCores;
2327   KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);
2328 
2329   // Now that we've determined the number of packages, the number of cores per
2330   // package, and the number of threads per core, we can construct the data
2331   // structure that is to be returned.
2332   int idx = 0;
2333   int pkgLevel = 0;
2334   int coreLevel = 1;
2335   int threadLevel = 2;
2336   //(__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
2337   int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
2338   kmp_hw_t types[3];
2339   if (pkgLevel >= 0)
2340     types[idx++] = KMP_HW_SOCKET;
2341   if (coreLevel >= 0)
2342     types[idx++] = KMP_HW_CORE;
2343   if (threadLevel >= 0)
2344     types[idx++] = KMP_HW_THREAD;
2345 
2346   KMP_ASSERT(depth > 0);
2347   __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);
2348 
2349   for (i = 0; i < nApics; ++i) {
2350     idx = 0;
2351     unsigned os = threadInfo[i].osId;
2352     kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
2353     hw_thread.clear();
2354 
2355     if (pkgLevel >= 0) {
2356       hw_thread.ids[idx++] = threadInfo[i].pkgId;
2357     }
2358     if (coreLevel >= 0) {
2359       hw_thread.ids[idx++] = threadInfo[i].coreId;
2360     }
2361     if (threadLevel >= 0) {
2362       hw_thread.ids[idx++] = threadInfo[i].threadId;
2363     }
2364     hw_thread.os_id = os;
2365   }
2366 
2367   __kmp_free(threadInfo);
2368   __kmp_topology->sort_ids();
2369   if (!__kmp_topology->check_ids()) {
2370     kmp_topology_t::deallocate(__kmp_topology);
2371     __kmp_topology = nullptr;
2372     *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
2373     return false;
2374   }
2375   return true;
2376 }
2377 
2378 // Hybrid cpu detection using CPUID.1A
2379 // Thread should be pinned to processor already
2380 static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
2381                                   unsigned *native_model_id) {
2382   kmp_cpuid buf;
2383   __kmp_x86_cpuid(0x1a, 0, &buf);
2384   *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
2385   switch (*type) {
2386   case KMP_HW_CORE_TYPE_ATOM:
2387     *efficiency = 0;
2388     break;
2389   case KMP_HW_CORE_TYPE_CORE:
2390     *efficiency = 1;
2391     break;
2392   default:
2393     *efficiency = 0;
2394   }
2395   *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
2396 }
2397 
2398 // Intel(R) microarchitecture code name Nehalem, Dunnington and later
2399 // architectures support a newer interface for specifying the x2APIC Ids,
2400 // based on CPUID.B or CPUID.1F
2401 /*
2402  * CPUID.B or 1F, Input ECX (sub leaf # aka level number)
2403     Bits            Bits            Bits           Bits
2404     31-16           15-8            7-4            4-0
2405 ---+-----------+--------------+-------------+-----------------+
2406 EAX| reserved  |   reserved   |   reserved  |  Bits to Shift  |
2407 ---+-----------|--------------+-------------+-----------------|
2408 EBX| reserved  | Num logical processors at level (16 bits)    |
2409 ---+-----------|--------------+-------------------------------|
2410 ECX| reserved  |   Level Type |      Level Number (8 bits)    |
2411 ---+-----------+--------------+-------------------------------|
2412 EDX|                    X2APIC ID (32 bits)                   |
2413 ---+----------------------------------------------------------+
2414 */
2415 
2416 enum {
2417   INTEL_LEVEL_TYPE_INVALID = 0, // Package level
2418   INTEL_LEVEL_TYPE_SMT = 1,
2419   INTEL_LEVEL_TYPE_CORE = 2,
2420   INTEL_LEVEL_TYPE_TILE = 3,
2421   INTEL_LEVEL_TYPE_MODULE = 4,
2422   INTEL_LEVEL_TYPE_DIE = 5,
2423   INTEL_LEVEL_TYPE_LAST = 6,
2424 };
2425 
2426 struct cpuid_level_info_t {
2427   unsigned level_type, mask, mask_width, nitems, cache_mask;
2428 };
2429 
2430 static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
2431   switch (intel_type) {
2432   case INTEL_LEVEL_TYPE_INVALID:
2433     return KMP_HW_SOCKET;
2434   case INTEL_LEVEL_TYPE_SMT:
2435     return KMP_HW_THREAD;
2436   case INTEL_LEVEL_TYPE_CORE:
2437     return KMP_HW_CORE;
2438   case INTEL_LEVEL_TYPE_TILE:
2439     return KMP_HW_TILE;
2440   case INTEL_LEVEL_TYPE_MODULE:
2441     return KMP_HW_MODULE;
2442   case INTEL_LEVEL_TYPE_DIE:
2443     return KMP_HW_DIE;
2444   }
2445   return KMP_HW_UNKNOWN;
2446 }
2447 
2448 // This function takes the topology leaf, a levels array to store the levels
2449 // detected and a bitmap of the known levels.
2450 // Returns the number of levels in the topology
2451 static unsigned
2452 __kmp_x2apicid_get_levels(int leaf,
2453                           cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
2454                           kmp_uint64 known_levels) {
2455   unsigned level, levels_index;
2456   unsigned level_type, mask_width, nitems;
2457   kmp_cpuid buf;
2458 
2459   // New algorithm has known topology layers act as highest unknown topology
2460   // layers when unknown topology layers exist.
2461   // e.g., Suppose layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y> <Z>
2462   // are unknown topology layers, Then SMT will take the characteristics of
2463   // (SMT x <X>) and CORE will take the characteristics of (CORE x <Y> x <Z>).
2464   // This eliminates unknown portions of the topology while still keeping the
2465   // correct structure.
2466   level = levels_index = 0;
2467   do {
2468     __kmp_x86_cpuid(leaf, level, &buf);
2469     level_type = __kmp_extract_bits<8, 15>(buf.ecx);
2470     mask_width = __kmp_extract_bits<0, 4>(buf.eax);
2471     nitems = __kmp_extract_bits<0, 15>(buf.ebx);
2472     if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
2473       return 0;
2474 
2475     if (known_levels & (1ull << level_type)) {
2476       // Add a new level to the topology
2477       KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
2478       levels[levels_index].level_type = level_type;
2479       levels[levels_index].mask_width = mask_width;
2480       levels[levels_index].nitems = nitems;
2481       levels_index++;
2482     } else {
2483       // If it is an unknown level, then logically move the previous layer up
2484       if (levels_index > 0) {
2485         levels[levels_index - 1].mask_width = mask_width;
2486         levels[levels_index - 1].nitems = nitems;
2487       }
2488     }
2489     level++;
2490   } while (level_type != INTEL_LEVEL_TYPE_INVALID);
2491 
2492   // Set the masks to & with apicid
2493   for (unsigned i = 0; i < levels_index; ++i) {
2494     if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
2495       levels[i].mask = ~((-1) << levels[i].mask_width);
2496       levels[i].cache_mask = (-1) << levels[i].mask_width;
2497       for (unsigned j = 0; j < i; ++j)
2498         levels[i].mask ^= levels[j].mask;
2499     } else {
2500       KMP_DEBUG_ASSERT(levels_index > 0);
2501       levels[i].mask = (-1) << levels[i - 1].mask_width;
2502       levels[i].cache_mask = 0;
2503     }
2504   }
2505   return levels_index;
2506 }
2507 
2508 static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
2509 
2510   cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
2511   kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
2512   unsigned levels_index;
2513   kmp_cpuid buf;
2514   kmp_uint64 known_levels;
2515   int topology_leaf, highest_leaf, apic_id;
2516   int num_leaves;
2517   static int leaves[] = {0, 0};
2518 
2519   kmp_i18n_id_t leaf_message_id;
2520 
2521   KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);
2522 
2523   *msg_id = kmp_i18n_null;
2524   if (__kmp_affinity_verbose) {
2525     KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
2526   }
2527 
2528   // Figure out the known topology levels
2529   known_levels = 0ull;
2530   for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
2531     if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
2532       known_levels |= (1ull << i);
2533     }
2534   }
2535 
2536   // Get the highest cpuid leaf supported
2537   __kmp_x86_cpuid(0, 0, &buf);
2538   highest_leaf = buf.eax;
2539 
2540   // If a specific topology method was requested, only allow that specific leaf
2541   // otherwise, try both leaves 31 and 11 in that order
2542   num_leaves = 0;
2543   if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
2544     num_leaves = 1;
2545     leaves[0] = 11;
2546     leaf_message_id = kmp_i18n_str_NoLeaf11Support;
2547   } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
2548     num_leaves = 1;
2549     leaves[0] = 31;
2550     leaf_message_id = kmp_i18n_str_NoLeaf31Support;
2551   } else {
2552     num_leaves = 2;
2553     leaves[0] = 31;
2554     leaves[1] = 11;
2555     leaf_message_id = kmp_i18n_str_NoLeaf11Support;
2556   }
2557 
2558   // Check to see if cpuid leaf 31 or 11 is supported.
2559   __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
2560   topology_leaf = -1;
2561   for (int i = 0; i < num_leaves; ++i) {
2562     int leaf = leaves[i];
2563     if (highest_leaf < leaf)
2564       continue;
2565     __kmp_x86_cpuid(leaf, 0, &buf);
2566     if (buf.ebx == 0)
2567       continue;
2568     topology_leaf = leaf;
2569     levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
2570     if (levels_index == 0)
2571       continue;
2572     break;
2573   }
2574   if (topology_leaf == -1 || levels_index == 0) {
2575     *msg_id = leaf_message_id;
2576     return false;
2577   }
2578   KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);
2579 
2580   // The algorithm used starts by setting the affinity to each available thread
2581   // and retrieving info from the cpuid instruction, so if we are not capable of
2582   // calling __kmp_get_system_affinity() and __kmp_get_system_affinity(), then
2583   // we need to do something else - use the defaults that we calculated from
2584   // issuing cpuid without binding to each proc.
2585   if (!KMP_AFFINITY_CAPABLE()) {
2586     // Hack to try and infer the machine topology using only the data
2587     // available from cpuid on the current thread, and __kmp_xproc.
2588     KMP_ASSERT(__kmp_affinity_type == affinity_none);
2589     for (unsigned i = 0; i < levels_index; ++i) {
2590       if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
2591         __kmp_nThreadsPerCore = levels[i].nitems;
2592       } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
2593         nCoresPerPkg = levels[i].nitems;
2594       }
2595     }
2596     __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
2597     nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
2598     return true;
2599   }
2600 
2601   // Allocate the data structure to be returned.
2602   int depth = levels_index;
2603   for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
2604     types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
2605   __kmp_topology =
2606       kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);
2607 
2608   // Insert equivalent cache types if they exist
2609   kmp_cache_info_t cache_info;
2610   for (size_t i = 0; i < cache_info.get_depth(); ++i) {
2611     const kmp_cache_info_t::info_t &info = cache_info[i];
2612     unsigned cache_mask = info.mask;
2613     unsigned cache_level = info.level;
2614     for (unsigned j = 0; j < levels_index; ++j) {
2615       unsigned hw_cache_mask = levels[j].cache_mask;
2616       kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
2617       if (hw_cache_mask == cache_mask && j < levels_index - 1) {
2618         kmp_hw_t type =
2619             __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
2620         __kmp_topology->set_equivalent_type(cache_type, type);
2621       }
2622     }
2623   }
2624 
2625   // From here on, we can assume that it is safe to call
2626   // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
2627   // __kmp_affinity_type = affinity_none.
2628 
2629   // Save the affinity mask for the current thread.
2630   kmp_affinity_raii_t previous_affinity;
2631 
2632   // Run through each of the available contexts, binding the current thread
2633   // to it, and obtaining the pertinent information using the cpuid instr.
2634   unsigned int proc;
2635   int hw_thread_index = 0;
2636   KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
2637     cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
2638     unsigned my_levels_index;
2639 
2640     // Skip this proc if it is not included in the machine model.
2641     if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
2642       continue;
2643     }
2644     KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);
2645 
2646     __kmp_affinity_dispatch->bind_thread(proc);
2647 
2648     // New algorithm
2649     __kmp_x86_cpuid(topology_leaf, 0, &buf);
2650     apic_id = buf.edx;
2651     kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
2652     my_levels_index =
2653         __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
2654     if (my_levels_index == 0 || my_levels_index != levels_index) {
2655       *msg_id = kmp_i18n_str_InvalidCpuidInfo;
2656       return false;
2657     }
2658     hw_thread.clear();
2659     hw_thread.os_id = proc;
2660     // Put in topology information
2661     for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
2662       hw_thread.ids[idx] = apic_id & my_levels[j].mask;
2663       if (j > 0) {
2664         hw_thread.ids[idx] >>= my_levels[j - 1].mask_width;
2665       }
2666     }
2667     // Hybrid information
2668     if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
2669       kmp_hw_core_type_t type;
2670       unsigned native_model_id;
2671       int efficiency;
2672       __kmp_get_hybrid_info(&type, &efficiency, &native_model_id);
2673       hw_thread.attrs.set_core_type(type);
2674       hw_thread.attrs.set_core_eff(efficiency);
2675     }
2676     hw_thread_index++;
2677   }
2678   KMP_ASSERT(hw_thread_index > 0);
2679   __kmp_topology->sort_ids();
2680   if (!__kmp_topology->check_ids()) {
2681     kmp_topology_t::deallocate(__kmp_topology);
2682     __kmp_topology = nullptr;
2683     *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
2684     return false;
2685   }
2686   return true;
2687 }
2688 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2689 
2690 #define osIdIndex 0
2691 #define threadIdIndex 1
2692 #define coreIdIndex 2
2693 #define pkgIdIndex 3
2694 #define nodeIdIndex 4
2695 
2696 typedef unsigned *ProcCpuInfo;
2697 static unsigned maxIndex = pkgIdIndex;
2698 
2699 static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
2700                                                   const void *b) {
2701   unsigned i;
2702   const unsigned *aa = *(unsigned *const *)a;
2703   const unsigned *bb = *(unsigned *const *)b;
2704   for (i = maxIndex;; i--) {
2705     if (aa[i] < bb[i])
2706       return -1;
2707     if (aa[i] > bb[i])
2708       return 1;
2709     if (i == osIdIndex)
2710       break;
2711   }
2712   return 0;
2713 }
2714 
2715 #if KMP_USE_HIER_SCHED
2716 // Set the array sizes for the hierarchy layers
2717 static void __kmp_dispatch_set_hierarchy_values() {
2718   // Set the maximum number of L1's to number of cores
2719   // Set the maximum number of L2's to to either number of cores / 2 for
2720   // Intel(R) Xeon Phi(TM) coprocessor formally codenamed Knights Landing
2721   // Or the number of cores for Intel(R) Xeon(R) processors
2722   // Set the maximum number of NUMA nodes and L3's to number of packages
2723   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
2724       nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2725   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
2726 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&   \
2727     KMP_MIC_SUPPORTED
2728   if (__kmp_mic_type >= mic3)
2729     __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
2730   else
2731 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
2732     __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
2733   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
2734   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
2735   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
2736   // Set the number of threads per unit
2737   // Number of hardware threads per L1/L2/L3/NUMA/LOOP
2738   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
2739   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
2740       __kmp_nThreadsPerCore;
2741 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&   \
2742     KMP_MIC_SUPPORTED
2743   if (__kmp_mic_type >= mic3)
2744     __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2745         2 * __kmp_nThreadsPerCore;
2746   else
2747 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
2748     __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2749         __kmp_nThreadsPerCore;
2750   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
2751       nCoresPerPkg * __kmp_nThreadsPerCore;
2752   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
2753       nCoresPerPkg * __kmp_nThreadsPerCore;
2754   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
2755       nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2756 }
2757 
2758 // Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
2759 // i.e., this thread's L1 or this thread's L2, etc.
2760 int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
2761   int index = type + 1;
2762   int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
2763   KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
2764   if (type == kmp_hier_layer_e::LAYER_THREAD)
2765     return tid;
2766   else if (type == kmp_hier_layer_e::LAYER_LOOP)
2767     return 0;
2768   KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
2769   if (tid >= num_hw_threads)
2770     tid = tid % num_hw_threads;
2771   return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
2772 }
2773 
2774 // Return the number of t1's per t2
2775 int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
2776   int i1 = t1 + 1;
2777   int i2 = t2 + 1;
2778   KMP_DEBUG_ASSERT(i1 <= i2);
2779   KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
2780   KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
2781   KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
2782   // (nthreads/t2) / (nthreads/t1) = t1 / t2
2783   return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
2784 }
2785 #endif // KMP_USE_HIER_SCHED
2786 
2787 static inline const char *__kmp_cpuinfo_get_filename() {
2788   const char *filename;
2789   if (__kmp_cpuinfo_file != nullptr)
2790     filename = __kmp_cpuinfo_file;
2791   else
2792     filename = "/proc/cpuinfo";
2793   return filename;
2794 }
2795 
2796 static inline const char *__kmp_cpuinfo_get_envvar() {
2797   const char *envvar = nullptr;
2798   if (__kmp_cpuinfo_file != nullptr)
2799     envvar = "KMP_CPUINFO_FILE";
2800   return envvar;
2801 }
2802 
2803 // Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
2804 // affinity map.
2805 static bool __kmp_affinity_create_cpuinfo_map(int *line,
2806                                               kmp_i18n_id_t *const msg_id) {
2807   const char *filename = __kmp_cpuinfo_get_filename();
2808   const char *envvar = __kmp_cpuinfo_get_envvar();
2809   *msg_id = kmp_i18n_null;
2810 
2811   if (__kmp_affinity_verbose) {
2812     KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
2813   }
2814 
2815   kmp_safe_raii_file_t f(filename, "r", envvar);
2816 
2817   // Scan of the file, and count the number of "processor" (osId) fields,
2818   // and find the highest value of <n> for a node_<n> field.
2819   char buf[256];
2820   unsigned num_records = 0;
2821   while (!feof(f)) {
2822     buf[sizeof(buf) - 1] = 1;
2823     if (!fgets(buf, sizeof(buf), f)) {
2824       // Read errors presumably because of EOF
2825       break;
2826     }
2827 
2828     char s1[] = "processor";
2829     if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2830       num_records++;
2831       continue;
2832     }
2833 
2834     // FIXME - this will match "node_<n> <garbage>"
2835     unsigned level;
2836     if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2837       // validate the input fisrt:
2838       if (level > (unsigned)__kmp_xproc) { // level is too big
2839         level = __kmp_xproc;
2840       }
2841       if (nodeIdIndex + level >= maxIndex) {
2842         maxIndex = nodeIdIndex + level;
2843       }
2844       continue;
2845     }
2846   }
2847 
2848   // Check for empty file / no valid processor records, or too many. The number
2849   // of records can't exceed the number of valid bits in the affinity mask.
2850   if (num_records == 0) {
2851     *msg_id = kmp_i18n_str_NoProcRecords;
2852     return false;
2853   }
2854   if (num_records > (unsigned)__kmp_xproc) {
2855     *msg_id = kmp_i18n_str_TooManyProcRecords;
2856     return false;
2857   }
2858 
2859   // Set the file pointer back to the beginning, so that we can scan the file
2860   // again, this time performing a full parse of the data. Allocate a vector of
2861   // ProcCpuInfo object, where we will place the data. Adding an extra element
2862   // at the end allows us to remove a lot of extra checks for termination
2863   // conditions.
2864   if (fseek(f, 0, SEEK_SET) != 0) {
2865     *msg_id = kmp_i18n_str_CantRewindCpuinfo;
2866     return false;
2867   }
2868 
2869   // Allocate the array of records to store the proc info in.  The dummy
2870   // element at the end makes the logic in filling them out easier to code.
2871   unsigned **threadInfo =
2872       (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
2873   unsigned i;
2874   for (i = 0; i <= num_records; i++) {
2875     threadInfo[i] =
2876         (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2877   }
2878 
2879 #define CLEANUP_THREAD_INFO                                                    \
2880   for (i = 0; i <= num_records; i++) {                                         \
2881     __kmp_free(threadInfo[i]);                                                 \
2882   }                                                                            \
2883   __kmp_free(threadInfo);
2884 
2885   // A value of UINT_MAX means that we didn't find the field
2886   unsigned __index;
2887 
2888 #define INIT_PROC_INFO(p)                                                      \
2889   for (__index = 0; __index <= maxIndex; __index++) {                          \
2890     (p)[__index] = UINT_MAX;                                                   \
2891   }
2892 
2893   for (i = 0; i <= num_records; i++) {
2894     INIT_PROC_INFO(threadInfo[i]);
2895   }
2896 
2897   unsigned num_avail = 0;
2898   *line = 0;
2899   while (!feof(f)) {
2900     // Create an inner scoping level, so that all the goto targets at the end of
2901     // the loop appear in an outer scoping level. This avoids warnings about
2902     // jumping past an initialization to a target in the same block.
2903     {
2904       buf[sizeof(buf) - 1] = 1;
2905       bool long_line = false;
2906       if (!fgets(buf, sizeof(buf), f)) {
2907         // Read errors presumably because of EOF
2908         // If there is valid data in threadInfo[num_avail], then fake
2909         // a blank line in ensure that the last address gets parsed.
2910         bool valid = false;
2911         for (i = 0; i <= maxIndex; i++) {
2912           if (threadInfo[num_avail][i] != UINT_MAX) {
2913             valid = true;
2914           }
2915         }
2916         if (!valid) {
2917           break;
2918         }
2919         buf[0] = 0;
2920       } else if (!buf[sizeof(buf) - 1]) {
2921         // The line is longer than the buffer.  Set a flag and don't
2922         // emit an error if we were going to ignore the line, anyway.
2923         long_line = true;
2924 
2925 #define CHECK_LINE                                                             \
2926   if (long_line) {                                                             \
2927     CLEANUP_THREAD_INFO;                                                       \
2928     *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
2929     return false;                                                              \
2930   }
2931       }
2932       (*line)++;
2933 
2934       char s1[] = "processor";
2935       if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2936         CHECK_LINE;
2937         char *p = strchr(buf + sizeof(s1) - 1, ':');
2938         unsigned val;
2939         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2940           goto no_val;
2941         if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
2942 #if KMP_ARCH_AARCH64
2943           // Handle the old AArch64 /proc/cpuinfo layout differently,
2944           // it contains all of the 'processor' entries listed in a
2945           // single 'Processor' section, therefore the normal looking
2946           // for duplicates in that section will always fail.
2947           num_avail++;
2948 #else
2949           goto dup_field;
2950 #endif
2951         threadInfo[num_avail][osIdIndex] = val;
2952 #if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2953         char path[256];
2954         KMP_SNPRINTF(
2955             path, sizeof(path),
2956             "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
2957             threadInfo[num_avail][osIdIndex]);
2958         __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
2959 
2960         KMP_SNPRINTF(path, sizeof(path),
2961                      "/sys/devices/system/cpu/cpu%u/topology/core_id",
2962                      threadInfo[num_avail][osIdIndex]);
2963         __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
2964         continue;
2965 #else
2966       }
2967       char s2[] = "physical id";
2968       if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
2969         CHECK_LINE;
2970         char *p = strchr(buf + sizeof(s2) - 1, ':');
2971         unsigned val;
2972         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2973           goto no_val;
2974         if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
2975           goto dup_field;
2976         threadInfo[num_avail][pkgIdIndex] = val;
2977         continue;
2978       }
2979       char s3[] = "core id";
2980       if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
2981         CHECK_LINE;
2982         char *p = strchr(buf + sizeof(s3) - 1, ':');
2983         unsigned val;
2984         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2985           goto no_val;
2986         if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
2987           goto dup_field;
2988         threadInfo[num_avail][coreIdIndex] = val;
2989         continue;
2990 #endif // KMP_OS_LINUX && USE_SYSFS_INFO
2991       }
2992       char s4[] = "thread id";
2993       if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
2994         CHECK_LINE;
2995         char *p = strchr(buf + sizeof(s4) - 1, ':');
2996         unsigned val;
2997         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2998           goto no_val;
2999         if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
3000           goto dup_field;
3001         threadInfo[num_avail][threadIdIndex] = val;
3002         continue;
3003       }
3004       unsigned level;
3005       if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
3006         CHECK_LINE;
3007         char *p = strchr(buf + sizeof(s4) - 1, ':');
3008         unsigned val;
3009         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
3010           goto no_val;
3011         // validate the input before using level:
3012         if (level > (unsigned)__kmp_xproc) { // level is too big
3013           level = __kmp_xproc;
3014         }
3015         if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
3016           goto dup_field;
3017         threadInfo[num_avail][nodeIdIndex + level] = val;
3018         continue;
3019       }
3020 
3021       // We didn't recognize the leading token on the line. There are lots of
3022       // leading tokens that we don't recognize - if the line isn't empty, go on
3023       // to the next line.
3024       if ((*buf != 0) && (*buf != '\n')) {
3025         // If the line is longer than the buffer, read characters
3026         // until we find a newline.
3027         if (long_line) {
3028           int ch;
3029           while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
3030             ;
3031         }
3032         continue;
3033       }
3034 
3035       // A newline has signalled the end of the processor record.
3036       // Check that there aren't too many procs specified.
3037       if ((int)num_avail == __kmp_xproc) {
3038         CLEANUP_THREAD_INFO;
3039         *msg_id = kmp_i18n_str_TooManyEntries;
3040         return false;
3041       }
3042 
3043       // Check for missing fields.  The osId field must be there, and we
3044       // currently require that the physical id field is specified, also.
3045       if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
3046         CLEANUP_THREAD_INFO;
3047         *msg_id = kmp_i18n_str_MissingProcField;
3048         return false;
3049       }
3050       if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
3051         CLEANUP_THREAD_INFO;
3052         *msg_id = kmp_i18n_str_MissingPhysicalIDField;
3053         return false;
3054       }
3055 
3056       // Skip this proc if it is not included in the machine model.
3057       if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
3058                          __kmp_affin_fullMask)) {
3059         INIT_PROC_INFO(threadInfo[num_avail]);
3060         continue;
3061       }
3062 
3063       // We have a successful parse of this proc's info.
3064       // Increment the counter, and prepare for the next proc.
3065       num_avail++;
3066       KMP_ASSERT(num_avail <= num_records);
3067       INIT_PROC_INFO(threadInfo[num_avail]);
3068     }
3069     continue;
3070 
3071   no_val:
3072     CLEANUP_THREAD_INFO;
3073     *msg_id = kmp_i18n_str_MissingValCpuinfo;
3074     return false;
3075 
3076   dup_field:
3077     CLEANUP_THREAD_INFO;
3078     *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
3079     return false;
3080   }
3081   *line = 0;
3082 
3083 #if KMP_MIC && REDUCE_TEAM_SIZE
3084   unsigned teamSize = 0;
3085 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3086 
3087   // check for num_records == __kmp_xproc ???
3088 
3089   // If it is configured to omit the package level when there is only a single
3090   // package, the logic at the end of this routine won't work if there is only a
3091   // single thread
3092   KMP_ASSERT(num_avail > 0);
3093   KMP_ASSERT(num_avail <= num_records);
3094 
3095   // Sort the threadInfo table by physical Id.
3096   qsort(threadInfo, num_avail, sizeof(*threadInfo),
3097         __kmp_affinity_cmp_ProcCpuInfo_phys_id);
3098 
3099   // The table is now sorted by pkgId / coreId / threadId, but we really don't
3100   // know the radix of any of the fields. pkgId's may be sparsely assigned among
3101   // the chips on a system. Although coreId's are usually assigned
3102   // [0 .. coresPerPkg-1] and threadId's are usually assigned
3103   // [0..threadsPerCore-1], we don't want to make any such assumptions.
3104   //
3105   // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
3106   // total # packages) are at this point - we want to determine that now. We
3107   // only have an upper bound on the first two figures.
3108   unsigned *counts =
3109       (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3110   unsigned *maxCt =
3111       (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3112   unsigned *totals =
3113       (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3114   unsigned *lastId =
3115       (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3116 
3117   bool assign_thread_ids = false;
3118   unsigned threadIdCt;
3119   unsigned index;
3120 
3121 restart_radix_check:
3122   threadIdCt = 0;
3123 
3124   // Initialize the counter arrays with data from threadInfo[0].
3125   if (assign_thread_ids) {
3126     if (threadInfo[0][threadIdIndex] == UINT_MAX) {
3127       threadInfo[0][threadIdIndex] = threadIdCt++;
3128     } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
3129       threadIdCt = threadInfo[0][threadIdIndex] + 1;
3130     }
3131   }
3132   for (index = 0; index <= maxIndex; index++) {
3133     counts[index] = 1;
3134     maxCt[index] = 1;
3135     totals[index] = 1;
3136     lastId[index] = threadInfo[0][index];
3137     ;
3138   }
3139 
3140   // Run through the rest of the OS procs.
3141   for (i = 1; i < num_avail; i++) {
3142     // Find the most significant index whose id differs from the id for the
3143     // previous OS proc.
3144     for (index = maxIndex; index >= threadIdIndex; index--) {
3145       if (assign_thread_ids && (index == threadIdIndex)) {
3146         // Auto-assign the thread id field if it wasn't specified.
3147         if (threadInfo[i][threadIdIndex] == UINT_MAX) {
3148           threadInfo[i][threadIdIndex] = threadIdCt++;
3149         }
3150         // Apparently the thread id field was specified for some entries and not
3151         // others. Start the thread id counter off at the next higher thread id.
3152         else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
3153           threadIdCt = threadInfo[i][threadIdIndex] + 1;
3154         }
3155       }
3156       if (threadInfo[i][index] != lastId[index]) {
3157         // Run through all indices which are less significant, and reset the
3158         // counts to 1. At all levels up to and including index, we need to
3159         // increment the totals and record the last id.
3160         unsigned index2;
3161         for (index2 = threadIdIndex; index2 < index; index2++) {
3162           totals[index2]++;
3163           if (counts[index2] > maxCt[index2]) {
3164             maxCt[index2] = counts[index2];
3165           }
3166           counts[index2] = 1;
3167           lastId[index2] = threadInfo[i][index2];
3168         }
3169         counts[index]++;
3170         totals[index]++;
3171         lastId[index] = threadInfo[i][index];
3172 
3173         if (assign_thread_ids && (index > threadIdIndex)) {
3174 
3175 #if KMP_MIC && REDUCE_TEAM_SIZE
3176           // The default team size is the total #threads in the machine
3177           // minus 1 thread for every core that has 3 or more threads.
3178           teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
3179 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3180 
3181           // Restart the thread counter, as we are on a new core.
3182           threadIdCt = 0;
3183 
3184           // Auto-assign the thread id field if it wasn't specified.
3185           if (threadInfo[i][threadIdIndex] == UINT_MAX) {
3186             threadInfo[i][threadIdIndex] = threadIdCt++;
3187           }
3188 
3189           // Apparently the thread id field was specified for some entries and
3190           // not others. Start the thread id counter off at the next higher
3191           // thread id.
3192           else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
3193             threadIdCt = threadInfo[i][threadIdIndex] + 1;
3194           }
3195         }
3196         break;
3197       }
3198     }
3199     if (index < threadIdIndex) {
3200       // If thread ids were specified, it is an error if they are not unique.
3201       // Also, check that we waven't already restarted the loop (to be safe -
3202       // shouldn't need to).
3203       if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
3204         __kmp_free(lastId);
3205         __kmp_free(totals);
3206         __kmp_free(maxCt);
3207         __kmp_free(counts);
3208         CLEANUP_THREAD_INFO;
3209         *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
3210         return false;
3211       }
3212 
3213       // If the thread ids were not specified and we see entries entries that
3214       // are duplicates, start the loop over and assign the thread ids manually.
3215       assign_thread_ids = true;
3216       goto restart_radix_check;
3217     }
3218   }
3219 
3220 #if KMP_MIC && REDUCE_TEAM_SIZE
3221   // The default team size is the total #threads in the machine
3222   // minus 1 thread for every core that has 3 or more threads.
3223   teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
3224 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3225 
3226   for (index = threadIdIndex; index <= maxIndex; index++) {
3227     if (counts[index] > maxCt[index]) {
3228       maxCt[index] = counts[index];
3229     }
3230   }
3231 
3232   __kmp_nThreadsPerCore = maxCt[threadIdIndex];
3233   nCoresPerPkg = maxCt[coreIdIndex];
3234   nPackages = totals[pkgIdIndex];
3235 
3236   // When affinity is off, this routine will still be called to set
3237   // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
3238   // Make sure all these vars are set correctly, and return now if affinity is
3239   // not enabled.
3240   __kmp_ncores = totals[coreIdIndex];
3241   if (!KMP_AFFINITY_CAPABLE()) {
3242     KMP_ASSERT(__kmp_affinity_type == affinity_none);
3243     return true;
3244   }
3245 
3246 #if KMP_MIC && REDUCE_TEAM_SIZE
3247   // Set the default team size.
3248   if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
3249     __kmp_dflt_team_nth = teamSize;
3250     KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
3251                   "__kmp_dflt_team_nth = %d\n",
3252                   __kmp_dflt_team_nth));
3253   }
3254 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3255 
3256   KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);
3257 
3258   // Count the number of levels which have more nodes at that level than at the
3259   // parent's level (with there being an implicit root node of the top level).
3260   // This is equivalent to saying that there is at least one node at this level
3261   // which has a sibling. These levels are in the map, and the package level is
3262   // always in the map.
3263   bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
3264   for (index = threadIdIndex; index < maxIndex; index++) {
3265     KMP_ASSERT(totals[index] >= totals[index + 1]);
3266     inMap[index] = (totals[index] > totals[index + 1]);
3267   }
3268   inMap[maxIndex] = (totals[maxIndex] > 1);
3269   inMap[pkgIdIndex] = true;
3270   inMap[coreIdIndex] = true;
3271   inMap[threadIdIndex] = true;
3272 
3273   int depth = 0;
3274   int idx = 0;
3275   kmp_hw_t types[KMP_HW_LAST];
3276   int pkgLevel = -1;
3277   int coreLevel = -1;
3278   int threadLevel = -1;
3279   for (index = threadIdIndex; index <= maxIndex; index++) {
3280     if (inMap[index]) {
3281       depth++;
3282     }
3283   }
3284   if (inMap[pkgIdIndex]) {
3285     pkgLevel = idx;
3286     types[idx++] = KMP_HW_SOCKET;
3287   }
3288   if (inMap[coreIdIndex]) {
3289     coreLevel = idx;
3290     types[idx++] = KMP_HW_CORE;
3291   }
3292   if (inMap[threadIdIndex]) {
3293     threadLevel = idx;
3294     types[idx++] = KMP_HW_THREAD;
3295   }
3296   KMP_ASSERT(depth > 0);
3297 
3298   // Construct the data structure that is to be returned.
3299   __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);
3300 
3301   for (i = 0; i < num_avail; ++i) {
3302     unsigned os = threadInfo[i][osIdIndex];
3303     int src_index;
3304     int dst_index = 0;
3305     kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
3306     hw_thread.clear();
3307     hw_thread.os_id = os;
3308 
3309     idx = 0;
3310     for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
3311       if (!inMap[src_index]) {
3312         continue;
3313       }
3314       if (src_index == pkgIdIndex) {
3315         hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
3316       } else if (src_index == coreIdIndex) {
3317         hw_thread.ids[coreLevel] = threadInfo[i][src_index];
3318       } else if (src_index == threadIdIndex) {
3319         hw_thread.ids[threadLevel] = threadInfo[i][src_index];
3320       }
3321       dst_index++;
3322     }
3323   }
3324 
3325   __kmp_free(inMap);
3326   __kmp_free(lastId);
3327   __kmp_free(totals);
3328   __kmp_free(maxCt);
3329   __kmp_free(counts);
3330   CLEANUP_THREAD_INFO;
3331   __kmp_topology->sort_ids();
3332   if (!__kmp_topology->check_ids()) {
3333     kmp_topology_t::deallocate(__kmp_topology);
3334     __kmp_topology = nullptr;
3335     *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
3336     return false;
3337   }
3338   return true;
3339 }
3340 
3341 // Create and return a table of affinity masks, indexed by OS thread ID.
3342 // This routine handles OR'ing together all the affinity masks of threads
3343 // that are sufficiently close, if granularity > fine.
3344 static kmp_affin_mask_t *__kmp_create_masks(unsigned *maxIndex,
3345                                             unsigned *numUnique) {
3346   // First form a table of affinity masks in order of OS thread id.
3347   int maxOsId;
3348   int i;
3349   int numAddrs = __kmp_topology->get_num_hw_threads();
3350   int depth = __kmp_topology->get_depth();
3351   KMP_ASSERT(numAddrs);
3352   KMP_ASSERT(depth);
3353 
3354   maxOsId = 0;
3355   for (i = numAddrs - 1;; --i) {
3356     int osId = __kmp_topology->at(i).os_id;
3357     if (osId > maxOsId) {
3358       maxOsId = osId;
3359     }
3360     if (i == 0)
3361       break;
3362   }
3363   kmp_affin_mask_t *osId2Mask;
3364   KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId + 1));
3365   KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
3366   if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
3367     KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
3368   }
3369   if (__kmp_affinity_gran_levels >= (int)depth) {
3370     KMP_AFF_WARNING(AffThreadsMayMigrate);
3371   }
3372 
3373   // Run through the table, forming the masks for all threads on each core.
3374   // Threads on the same core will have identical kmp_hw_thread_t objects, not
3375   // considering the last level, which must be the thread id. All threads on a
3376   // core will appear consecutively.
3377   int unique = 0;
3378   int j = 0; // index of 1st thread on core
3379   int leader = 0;
3380   kmp_affin_mask_t *sum;
3381   KMP_CPU_ALLOC_ON_STACK(sum);
3382   KMP_CPU_ZERO(sum);
3383   KMP_CPU_SET(__kmp_topology->at(0).os_id, sum);
3384   for (i = 1; i < numAddrs; i++) {
3385     // If this thread is sufficiently close to the leader (within the
3386     // granularity setting), then set the bit for this os thread in the
3387     // affinity mask for this group, and go on to the next thread.
3388     if (__kmp_topology->is_close(leader, i, __kmp_affinity_gran_levels)) {
3389       KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
3390       continue;
3391     }
3392 
3393     // For every thread in this group, copy the mask to the thread's entry in
3394     // the osId2Mask table.  Mark the first address as a leader.
3395     for (; j < i; j++) {
3396       int osId = __kmp_topology->at(j).os_id;
3397       KMP_DEBUG_ASSERT(osId <= maxOsId);
3398       kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
3399       KMP_CPU_COPY(mask, sum);
3400       __kmp_topology->at(j).leader = (j == leader);
3401     }
3402     unique++;
3403 
3404     // Start a new mask.
3405     leader = i;
3406     KMP_CPU_ZERO(sum);
3407     KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
3408   }
3409 
3410   // For every thread in last group, copy the mask to the thread's
3411   // entry in the osId2Mask table.
3412   for (; j < i; j++) {
3413     int osId = __kmp_topology->at(j).os_id;
3414     KMP_DEBUG_ASSERT(osId <= maxOsId);
3415     kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
3416     KMP_CPU_COPY(mask, sum);
3417     __kmp_topology->at(j).leader = (j == leader);
3418   }
3419   unique++;
3420   KMP_CPU_FREE_FROM_STACK(sum);
3421 
3422   *maxIndex = maxOsId;
3423   *numUnique = unique;
3424   return osId2Mask;
3425 }
3426 
3427 // Stuff for the affinity proclist parsers.  It's easier to declare these vars
3428 // as file-static than to try and pass them through the calling sequence of
3429 // the recursive-descent OMP_PLACES parser.
3430 static kmp_affin_mask_t *newMasks;
3431 static int numNewMasks;
3432 static int nextNewMask;
3433 
3434 #define ADD_MASK(_mask)                                                        \
3435   {                                                                            \
3436     if (nextNewMask >= numNewMasks) {                                          \
3437       int i;                                                                   \
3438       numNewMasks *= 2;                                                        \
3439       kmp_affin_mask_t *temp;                                                  \
3440       KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
3441       for (i = 0; i < numNewMasks / 2; i++) {                                  \
3442         kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
3443         kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
3444         KMP_CPU_COPY(dest, src);                                               \
3445       }                                                                        \
3446       KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
3447       newMasks = temp;                                                         \
3448     }                                                                          \
3449     KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
3450     nextNewMask++;                                                             \
3451   }
3452 
3453 #define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
3454   {                                                                            \
3455     if (((_osId) > _maxOsId) ||                                                \
3456         (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
3457       KMP_AFF_WARNING(AffIgnoreInvalidProcID, _osId);                          \
3458     } else {                                                                   \
3459       ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
3460     }                                                                          \
3461   }
3462 
3463 // Re-parse the proclist (for the explicit affinity type), and form the list
3464 // of affinity newMasks indexed by gtid.
3465 static void __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
3466                                             unsigned int *out_numMasks,
3467                                             const char *proclist,
3468                                             kmp_affin_mask_t *osId2Mask,
3469                                             int maxOsId) {
3470   int i;
3471   const char *scan = proclist;
3472   const char *next = proclist;
3473 
3474   // We use malloc() for the temporary mask vector, so that we can use
3475   // realloc() to extend it.
3476   numNewMasks = 2;
3477   KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3478   nextNewMask = 0;
3479   kmp_affin_mask_t *sumMask;
3480   KMP_CPU_ALLOC(sumMask);
3481   int setSize = 0;
3482 
3483   for (;;) {
3484     int start, end, stride;
3485 
3486     SKIP_WS(scan);
3487     next = scan;
3488     if (*next == '\0') {
3489       break;
3490     }
3491 
3492     if (*next == '{') {
3493       int num;
3494       setSize = 0;
3495       next++; // skip '{'
3496       SKIP_WS(next);
3497       scan = next;
3498 
3499       // Read the first integer in the set.
3500       KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
3501       SKIP_DIGITS(next);
3502       num = __kmp_str_to_int(scan, *next);
3503       KMP_ASSERT2(num >= 0, "bad explicit proc list");
3504 
3505       // Copy the mask for that osId to the sum (union) mask.
3506       if ((num > maxOsId) ||
3507           (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3508         KMP_AFF_WARNING(AffIgnoreInvalidProcID, num);
3509         KMP_CPU_ZERO(sumMask);
3510       } else {
3511         KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
3512         setSize = 1;
3513       }
3514 
3515       for (;;) {
3516         // Check for end of set.
3517         SKIP_WS(next);
3518         if (*next == '}') {
3519           next++; // skip '}'
3520           break;
3521         }
3522 
3523         // Skip optional comma.
3524         if (*next == ',') {
3525           next++;
3526         }
3527         SKIP_WS(next);
3528 
3529         // Read the next integer in the set.
3530         scan = next;
3531         KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3532 
3533         SKIP_DIGITS(next);
3534         num = __kmp_str_to_int(scan, *next);
3535         KMP_ASSERT2(num >= 0, "bad explicit proc list");
3536 
3537         // Add the mask for that osId to the sum mask.
3538         if ((num > maxOsId) ||
3539             (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3540           KMP_AFF_WARNING(AffIgnoreInvalidProcID, num);
3541         } else {
3542           KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
3543           setSize++;
3544         }
3545       }
3546       if (setSize > 0) {
3547         ADD_MASK(sumMask);
3548       }
3549 
3550       SKIP_WS(next);
3551       if (*next == ',') {
3552         next++;
3553       }
3554       scan = next;
3555       continue;
3556     }
3557 
3558     // Read the first integer.
3559     KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3560     SKIP_DIGITS(next);
3561     start = __kmp_str_to_int(scan, *next);
3562     KMP_ASSERT2(start >= 0, "bad explicit proc list");
3563     SKIP_WS(next);
3564 
3565     // If this isn't a range, then add a mask to the list and go on.
3566     if (*next != '-') {
3567       ADD_MASK_OSID(start, osId2Mask, maxOsId);
3568 
3569       // Skip optional comma.
3570       if (*next == ',') {
3571         next++;
3572       }
3573       scan = next;
3574       continue;
3575     }
3576 
3577     // This is a range.  Skip over the '-' and read in the 2nd int.
3578     next++; // skip '-'
3579     SKIP_WS(next);
3580     scan = next;
3581     KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3582     SKIP_DIGITS(next);
3583     end = __kmp_str_to_int(scan, *next);
3584     KMP_ASSERT2(end >= 0, "bad explicit proc list");
3585 
3586     // Check for a stride parameter
3587     stride = 1;
3588     SKIP_WS(next);
3589     if (*next == ':') {
3590       // A stride is specified.  Skip over the ':" and read the 3rd int.
3591       int sign = +1;
3592       next++; // skip ':'
3593       SKIP_WS(next);
3594       scan = next;
3595       if (*next == '-') {
3596         sign = -1;
3597         next++;
3598         SKIP_WS(next);
3599         scan = next;
3600       }
3601       KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3602       SKIP_DIGITS(next);
3603       stride = __kmp_str_to_int(scan, *next);
3604       KMP_ASSERT2(stride >= 0, "bad explicit proc list");
3605       stride *= sign;
3606     }
3607 
3608     // Do some range checks.
3609     KMP_ASSERT2(stride != 0, "bad explicit proc list");
3610     if (stride > 0) {
3611       KMP_ASSERT2(start <= end, "bad explicit proc list");
3612     } else {
3613       KMP_ASSERT2(start >= end, "bad explicit proc list");
3614     }
3615     KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
3616 
3617     // Add the mask for each OS proc # to the list.
3618     if (stride > 0) {
3619       do {
3620         ADD_MASK_OSID(start, osId2Mask, maxOsId);
3621         start += stride;
3622       } while (start <= end);
3623     } else {
3624       do {
3625         ADD_MASK_OSID(start, osId2Mask, maxOsId);
3626         start += stride;
3627       } while (start >= end);
3628     }
3629 
3630     // Skip optional comma.
3631     SKIP_WS(next);
3632     if (*next == ',') {
3633       next++;
3634     }
3635     scan = next;
3636   }
3637 
3638   *out_numMasks = nextNewMask;
3639   if (nextNewMask == 0) {
3640     *out_masks = NULL;
3641     KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3642     return;
3643   }
3644   KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3645   for (i = 0; i < nextNewMask; i++) {
3646     kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3647     kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3648     KMP_CPU_COPY(dest, src);
3649   }
3650   KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3651   KMP_CPU_FREE(sumMask);
3652 }
3653 
3654 /*-----------------------------------------------------------------------------
3655 Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
3656 places.  Again, Here is the grammar:
3657 
3658 place_list := place
3659 place_list := place , place_list
3660 place := num
3661 place := place : num
3662 place := place : num : signed
3663 place := { subplacelist }
3664 place := ! place                  // (lowest priority)
3665 subplace_list := subplace
3666 subplace_list := subplace , subplace_list
3667 subplace := num
3668 subplace := num : num
3669 subplace := num : num : signed
3670 signed := num
3671 signed := + signed
3672 signed := - signed
3673 -----------------------------------------------------------------------------*/
3674 static void __kmp_process_subplace_list(const char **scan,
3675                                         kmp_affin_mask_t *osId2Mask,
3676                                         int maxOsId, kmp_affin_mask_t *tempMask,
3677                                         int *setSize) {
3678   const char *next;
3679 
3680   for (;;) {
3681     int start, count, stride, i;
3682 
3683     // Read in the starting proc id
3684     SKIP_WS(*scan);
3685     KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3686     next = *scan;
3687     SKIP_DIGITS(next);
3688     start = __kmp_str_to_int(*scan, *next);
3689     KMP_ASSERT(start >= 0);
3690     *scan = next;
3691 
3692     // valid follow sets are ',' ':' and '}'
3693     SKIP_WS(*scan);
3694     if (**scan == '}' || **scan == ',') {
3695       if ((start > maxOsId) ||
3696           (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3697         KMP_AFF_WARNING(AffIgnoreInvalidProcID, start);
3698       } else {
3699         KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3700         (*setSize)++;
3701       }
3702       if (**scan == '}') {
3703         break;
3704       }
3705       (*scan)++; // skip ','
3706       continue;
3707     }
3708     KMP_ASSERT2(**scan == ':', "bad explicit places list");
3709     (*scan)++; // skip ':'
3710 
3711     // Read count parameter
3712     SKIP_WS(*scan);
3713     KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3714     next = *scan;
3715     SKIP_DIGITS(next);
3716     count = __kmp_str_to_int(*scan, *next);
3717     KMP_ASSERT(count >= 0);
3718     *scan = next;
3719 
3720     // valid follow sets are ',' ':' and '}'
3721     SKIP_WS(*scan);
3722     if (**scan == '}' || **scan == ',') {
3723       for (i = 0; i < count; i++) {
3724         if ((start > maxOsId) ||
3725             (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3726           KMP_AFF_WARNING(AffIgnoreInvalidProcID, start);
3727           break; // don't proliferate warnings for large count
3728         } else {
3729           KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3730           start++;
3731           (*setSize)++;
3732         }
3733       }
3734       if (**scan == '}') {
3735         break;
3736       }
3737       (*scan)++; // skip ','
3738       continue;
3739     }
3740     KMP_ASSERT2(**scan == ':', "bad explicit places list");
3741     (*scan)++; // skip ':'
3742 
3743     // Read stride parameter
3744     int sign = +1;
3745     for (;;) {
3746       SKIP_WS(*scan);
3747       if (**scan == '+') {
3748         (*scan)++; // skip '+'
3749         continue;
3750       }
3751       if (**scan == '-') {
3752         sign *= -1;
3753         (*scan)++; // skip '-'
3754         continue;
3755       }
3756       break;
3757     }
3758     SKIP_WS(*scan);
3759     KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3760     next = *scan;
3761     SKIP_DIGITS(next);
3762     stride = __kmp_str_to_int(*scan, *next);
3763     KMP_ASSERT(stride >= 0);
3764     *scan = next;
3765     stride *= sign;
3766 
3767     // valid follow sets are ',' and '}'
3768     SKIP_WS(*scan);
3769     if (**scan == '}' || **scan == ',') {
3770       for (i = 0; i < count; i++) {
3771         if ((start > maxOsId) ||
3772             (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3773           KMP_AFF_WARNING(AffIgnoreInvalidProcID, start);
3774           break; // don't proliferate warnings for large count
3775         } else {
3776           KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3777           start += stride;
3778           (*setSize)++;
3779         }
3780       }
3781       if (**scan == '}') {
3782         break;
3783       }
3784       (*scan)++; // skip ','
3785       continue;
3786     }
3787 
3788     KMP_ASSERT2(0, "bad explicit places list");
3789   }
3790 }
3791 
3792 static void __kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
3793                                 int maxOsId, kmp_affin_mask_t *tempMask,
3794                                 int *setSize) {
3795   const char *next;
3796 
3797   // valid follow sets are '{' '!' and num
3798   SKIP_WS(*scan);
3799   if (**scan == '{') {
3800     (*scan)++; // skip '{'
3801     __kmp_process_subplace_list(scan, osId2Mask, maxOsId, tempMask, setSize);
3802     KMP_ASSERT2(**scan == '}', "bad explicit places list");
3803     (*scan)++; // skip '}'
3804   } else if (**scan == '!') {
3805     (*scan)++; // skip '!'
3806     __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
3807     KMP_CPU_COMPLEMENT(maxOsId, tempMask);
3808   } else if ((**scan >= '0') && (**scan <= '9')) {
3809     next = *scan;
3810     SKIP_DIGITS(next);
3811     int num = __kmp_str_to_int(*scan, *next);
3812     KMP_ASSERT(num >= 0);
3813     if ((num > maxOsId) ||
3814         (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3815       KMP_AFF_WARNING(AffIgnoreInvalidProcID, num);
3816     } else {
3817       KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
3818       (*setSize)++;
3819     }
3820     *scan = next; // skip num
3821   } else {
3822     KMP_ASSERT2(0, "bad explicit places list");
3823   }
3824 }
3825 
3826 // static void
3827 void __kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
3828                                       unsigned int *out_numMasks,
3829                                       const char *placelist,
3830                                       kmp_affin_mask_t *osId2Mask,
3831                                       int maxOsId) {
3832   int i, j, count, stride, sign;
3833   const char *scan = placelist;
3834   const char *next = placelist;
3835 
3836   numNewMasks = 2;
3837   KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3838   nextNewMask = 0;
3839 
3840   // tempMask is modified based on the previous or initial
3841   //   place to form the current place
3842   // previousMask contains the previous place
3843   kmp_affin_mask_t *tempMask;
3844   kmp_affin_mask_t *previousMask;
3845   KMP_CPU_ALLOC(tempMask);
3846   KMP_CPU_ZERO(tempMask);
3847   KMP_CPU_ALLOC(previousMask);
3848   KMP_CPU_ZERO(previousMask);
3849   int setSize = 0;
3850 
3851   for (;;) {
3852     __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);
3853 
3854     // valid follow sets are ',' ':' and EOL
3855     SKIP_WS(scan);
3856     if (*scan == '\0' || *scan == ',') {
3857       if (setSize > 0) {
3858         ADD_MASK(tempMask);
3859       }
3860       KMP_CPU_ZERO(tempMask);
3861       setSize = 0;
3862       if (*scan == '\0') {
3863         break;
3864       }
3865       scan++; // skip ','
3866       continue;
3867     }
3868 
3869     KMP_ASSERT2(*scan == ':', "bad explicit places list");
3870     scan++; // skip ':'
3871 
3872     // Read count parameter
3873     SKIP_WS(scan);
3874     KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3875     next = scan;
3876     SKIP_DIGITS(next);
3877     count = __kmp_str_to_int(scan, *next);
3878     KMP_ASSERT(count >= 0);
3879     scan = next;
3880 
3881     // valid follow sets are ',' ':' and EOL
3882     SKIP_WS(scan);
3883     if (*scan == '\0' || *scan == ',') {
3884       stride = +1;
3885     } else {
3886       KMP_ASSERT2(*scan == ':', "bad explicit places list");
3887       scan++; // skip ':'
3888 
3889       // Read stride parameter
3890       sign = +1;
3891       for (;;) {
3892         SKIP_WS(scan);
3893         if (*scan == '+') {
3894           scan++; // skip '+'
3895           continue;
3896         }
3897         if (*scan == '-') {
3898           sign *= -1;
3899           scan++; // skip '-'
3900           continue;
3901         }
3902         break;
3903       }
3904       SKIP_WS(scan);
3905       KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3906       next = scan;
3907       SKIP_DIGITS(next);
3908       stride = __kmp_str_to_int(scan, *next);
3909       KMP_DEBUG_ASSERT(stride >= 0);
3910       scan = next;
3911       stride *= sign;
3912     }
3913 
3914     // Add places determined by initial_place : count : stride
3915     for (i = 0; i < count; i++) {
3916       if (setSize == 0) {
3917         break;
3918       }
3919       // Add the current place, then build the next place (tempMask) from that
3920       KMP_CPU_COPY(previousMask, tempMask);
3921       ADD_MASK(previousMask);
3922       KMP_CPU_ZERO(tempMask);
3923       setSize = 0;
3924       KMP_CPU_SET_ITERATE(j, previousMask) {
3925         if (!KMP_CPU_ISSET(j, previousMask)) {
3926           continue;
3927         }
3928         if ((j + stride > maxOsId) || (j + stride < 0) ||
3929             (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
3930             (!KMP_CPU_ISSET(j + stride,
3931                             KMP_CPU_INDEX(osId2Mask, j + stride)))) {
3932           if (i < count - 1) {
3933             KMP_AFF_WARNING(AffIgnoreInvalidProcID, j + stride);
3934           }
3935           continue;
3936         }
3937         KMP_CPU_SET(j + stride, tempMask);
3938         setSize++;
3939       }
3940     }
3941     KMP_CPU_ZERO(tempMask);
3942     setSize = 0;
3943 
3944     // valid follow sets are ',' and EOL
3945     SKIP_WS(scan);
3946     if (*scan == '\0') {
3947       break;
3948     }
3949     if (*scan == ',') {
3950       scan++; // skip ','
3951       continue;
3952     }
3953 
3954     KMP_ASSERT2(0, "bad explicit places list");
3955   }
3956 
3957   *out_numMasks = nextNewMask;
3958   if (nextNewMask == 0) {
3959     *out_masks = NULL;
3960     KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3961     return;
3962   }
3963   KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3964   KMP_CPU_FREE(tempMask);
3965   KMP_CPU_FREE(previousMask);
3966   for (i = 0; i < nextNewMask; i++) {
3967     kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3968     kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3969     KMP_CPU_COPY(dest, src);
3970   }
3971   KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3972 }
3973 
3974 #undef ADD_MASK
3975 #undef ADD_MASK_OSID
3976 
3977 // This function figures out the deepest level at which there is at least one
3978 // cluster/core with more than one processing unit bound to it.
3979 static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
3980   int core_level = 0;
3981 
3982   for (int i = 0; i < nprocs; i++) {
3983     const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
3984     for (int j = bottom_level; j > 0; j--) {
3985       if (hw_thread.ids[j] > 0) {
3986         if (core_level < (j - 1)) {
3987           core_level = j - 1;
3988         }
3989       }
3990     }
3991   }
3992   return core_level;
3993 }
3994 
3995 // This function counts number of clusters/cores at given level.
3996 static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
3997                                          int core_level) {
3998   return __kmp_topology->get_count(core_level);
3999 }
4000 // This function finds to which cluster/core given processing unit is bound.
4001 static int __kmp_affinity_find_core(int proc, int bottom_level,
4002                                     int core_level) {
4003   int core = 0;
4004   KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
4005   for (int i = 0; i <= proc; ++i) {
4006     if (i + 1 <= proc) {
4007       for (int j = 0; j <= core_level; ++j) {
4008         if (__kmp_topology->at(i + 1).sub_ids[j] !=
4009             __kmp_topology->at(i).sub_ids[j]) {
4010           core++;
4011           break;
4012         }
4013       }
4014     }
4015   }
4016   return core;
4017 }
4018 
4019 // This function finds maximal number of processing units bound to a
4020 // cluster/core at given level.
4021 static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
4022                                             int core_level) {
4023   if (core_level >= bottom_level)
4024     return 1;
4025   int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
4026   return __kmp_topology->calculate_ratio(thread_level, core_level);
4027 }
4028 
4029 static int *procarr = NULL;
4030 static int __kmp_aff_depth = 0;
4031 
4032 // Create a one element mask array (set of places) which only contains the
4033 // initial process's affinity mask
4034 static void __kmp_create_affinity_none_places() {
4035   KMP_ASSERT(__kmp_affin_fullMask != NULL);
4036   KMP_ASSERT(__kmp_affinity_type == affinity_none);
4037   __kmp_affinity_num_masks = 1;
4038   KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4039   kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, 0);
4040   KMP_CPU_COPY(dest, __kmp_affin_fullMask);
4041 }
4042 
4043 static void __kmp_aux_affinity_initialize(void) {
4044   if (__kmp_affinity_masks != NULL) {
4045     KMP_ASSERT(__kmp_affin_fullMask != NULL);
4046     return;
4047   }
4048 
4049   // Create the "full" mask - this defines all of the processors that we
4050   // consider to be in the machine model. If respect is set, then it is the
4051   // initialization thread's affinity mask. Otherwise, it is all processors that
4052   // we know about on the machine.
4053   if (__kmp_affin_fullMask == NULL) {
4054     KMP_CPU_ALLOC(__kmp_affin_fullMask);
4055   }
4056   if (__kmp_affin_origMask == NULL) {
4057     KMP_CPU_ALLOC(__kmp_affin_origMask);
4058   }
4059   if (KMP_AFFINITY_CAPABLE()) {
4060     __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
4061     // Make a copy before possible expanding to the entire machine mask
4062     __kmp_affin_origMask->copy(__kmp_affin_fullMask);
4063     if (__kmp_affinity_respect_mask) {
4064       // Count the number of available processors.
4065       unsigned i;
4066       __kmp_avail_proc = 0;
4067       KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
4068         if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
4069           continue;
4070         }
4071         __kmp_avail_proc++;
4072       }
4073       if (__kmp_avail_proc > __kmp_xproc) {
4074         KMP_AFF_WARNING(ErrorInitializeAffinity);
4075         __kmp_affinity_type = affinity_none;
4076         KMP_AFFINITY_DISABLE();
4077         return;
4078       }
4079 
4080       if (__kmp_affinity_verbose) {
4081         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4082         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4083                                   __kmp_affin_fullMask);
4084         KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
4085       }
4086     } else {
4087       if (__kmp_affinity_verbose) {
4088         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4089         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4090                                   __kmp_affin_fullMask);
4091         KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
4092       }
4093       __kmp_avail_proc =
4094           __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
4095 #if KMP_OS_WINDOWS
4096       if (__kmp_num_proc_groups <= 1) {
4097         // Copy expanded full mask if topology has single processor group
4098         __kmp_affin_origMask->copy(__kmp_affin_fullMask);
4099       }
4100       // Set the process affinity mask since threads' affinity
4101       // masks must be subset of process mask in Windows* OS
4102       __kmp_affin_fullMask->set_process_affinity(true);
4103 #endif
4104     }
4105   }
4106 
4107   kmp_i18n_id_t msg_id = kmp_i18n_null;
4108 
4109   // For backward compatibility, setting KMP_CPUINFO_FILE =>
4110   // KMP_TOPOLOGY_METHOD=cpuinfo
4111   if ((__kmp_cpuinfo_file != NULL) &&
4112       (__kmp_affinity_top_method == affinity_top_method_all)) {
4113     __kmp_affinity_top_method = affinity_top_method_cpuinfo;
4114   }
4115 
4116   bool success = false;
4117   if (__kmp_affinity_top_method == affinity_top_method_all) {
4118 // In the default code path, errors are not fatal - we just try using
4119 // another method. We only emit a warning message if affinity is on, or the
4120 // verbose flag is set, an the nowarnings flag was not set.
4121 #if KMP_USE_HWLOC
4122     if (!success &&
4123         __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
4124       if (!__kmp_hwloc_error) {
4125         success = __kmp_affinity_create_hwloc_map(&msg_id);
4126         if (!success && __kmp_affinity_verbose) {
4127           KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4128         }
4129       } else if (__kmp_affinity_verbose) {
4130         KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4131       }
4132     }
4133 #endif
4134 
4135 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4136     if (!success) {
4137       success = __kmp_affinity_create_x2apicid_map(&msg_id);
4138       if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4139         KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4140       }
4141     }
4142     if (!success) {
4143       success = __kmp_affinity_create_apicid_map(&msg_id);
4144       if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4145         KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4146       }
4147     }
4148 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4149 
4150 #if KMP_OS_LINUX
4151     if (!success) {
4152       int line = 0;
4153       success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
4154       if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4155         KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4156       }
4157     }
4158 #endif /* KMP_OS_LINUX */
4159 
4160 #if KMP_GROUP_AFFINITY
4161     if (!success && (__kmp_num_proc_groups > 1)) {
4162       success = __kmp_affinity_create_proc_group_map(&msg_id);
4163       if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4164         KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4165       }
4166     }
4167 #endif /* KMP_GROUP_AFFINITY */
4168 
4169     if (!success) {
4170       success = __kmp_affinity_create_flat_map(&msg_id);
4171       if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4172         KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4173       }
4174       KMP_ASSERT(success);
4175     }
4176   }
4177 
4178 // If the user has specified that a paricular topology discovery method is to be
4179 // used, then we abort if that method fails. The exception is group affinity,
4180 // which might have been implicitly set.
4181 #if KMP_USE_HWLOC
4182   else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
4183     KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
4184     success = __kmp_affinity_create_hwloc_map(&msg_id);
4185     if (!success) {
4186       KMP_ASSERT(msg_id != kmp_i18n_null);
4187       KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4188     }
4189   }
4190 #endif // KMP_USE_HWLOC
4191 
4192 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4193   else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
4194            __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
4195     success = __kmp_affinity_create_x2apicid_map(&msg_id);
4196     if (!success) {
4197       KMP_ASSERT(msg_id != kmp_i18n_null);
4198       KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4199     }
4200   } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
4201     success = __kmp_affinity_create_apicid_map(&msg_id);
4202     if (!success) {
4203       KMP_ASSERT(msg_id != kmp_i18n_null);
4204       KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4205     }
4206   }
4207 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4208 
4209   else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
4210     int line = 0;
4211     success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
4212     if (!success) {
4213       KMP_ASSERT(msg_id != kmp_i18n_null);
4214       const char *filename = __kmp_cpuinfo_get_filename();
4215       if (line > 0) {
4216         KMP_FATAL(FileLineMsgExiting, filename, line,
4217                   __kmp_i18n_catgets(msg_id));
4218       } else {
4219         KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
4220       }
4221     }
4222   }
4223 
4224 #if KMP_GROUP_AFFINITY
4225   else if (__kmp_affinity_top_method == affinity_top_method_group) {
4226     success = __kmp_affinity_create_proc_group_map(&msg_id);
4227     KMP_ASSERT(success);
4228     if (!success) {
4229       KMP_ASSERT(msg_id != kmp_i18n_null);
4230       KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4231     }
4232   }
4233 #endif /* KMP_GROUP_AFFINITY */
4234 
4235   else if (__kmp_affinity_top_method == affinity_top_method_flat) {
4236     success = __kmp_affinity_create_flat_map(&msg_id);
4237     // should not fail
4238     KMP_ASSERT(success);
4239   }
4240 
4241   // Early exit if topology could not be created
4242   if (!__kmp_topology) {
4243     if (KMP_AFFINITY_CAPABLE()) {
4244       KMP_AFF_WARNING(ErrorInitializeAffinity);
4245     }
4246     if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
4247         __kmp_ncores > 0) {
4248       __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
4249       __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
4250                                    __kmp_nThreadsPerCore, __kmp_ncores);
4251       if (__kmp_affinity_verbose) {
4252         __kmp_topology->print("KMP_AFFINITY");
4253       }
4254     }
4255     __kmp_affinity_type = affinity_none;
4256     __kmp_create_affinity_none_places();
4257 #if KMP_USE_HIER_SCHED
4258     __kmp_dispatch_set_hierarchy_values();
4259 #endif
4260     KMP_AFFINITY_DISABLE();
4261     return;
4262   }
4263 
4264   // Canonicalize, print (if requested), apply KMP_HW_SUBSET, and
4265   // initialize other data structures which depend on the topology
4266   __kmp_topology->canonicalize();
4267   if (__kmp_affinity_verbose)
4268     __kmp_topology->print("KMP_AFFINITY");
4269   bool filtered = __kmp_topology->filter_hw_subset();
4270   if (filtered) {
4271 #if KMP_OS_WINDOWS
4272     // Copy filtered full mask if topology has single processor group
4273     if (__kmp_num_proc_groups <= 1)
4274 #endif
4275       __kmp_affin_origMask->copy(__kmp_affin_fullMask);
4276   }
4277   if (filtered && __kmp_affinity_verbose)
4278     __kmp_topology->print("KMP_HW_SUBSET");
4279   machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
4280   KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
4281   // If KMP_AFFINITY=none, then only create the single "none" place
4282   // which is the process's initial affinity mask or the number of
4283   // hardware threads depending on respect,norespect
4284   if (__kmp_affinity_type == affinity_none) {
4285     __kmp_create_affinity_none_places();
4286 #if KMP_USE_HIER_SCHED
4287     __kmp_dispatch_set_hierarchy_values();
4288 #endif
4289     return;
4290   }
4291   int depth = __kmp_topology->get_depth();
4292 
4293   // Create the table of masks, indexed by thread Id.
4294   unsigned maxIndex;
4295   unsigned numUnique;
4296   kmp_affin_mask_t *osId2Mask = __kmp_create_masks(&maxIndex, &numUnique);
4297   if (__kmp_affinity_gran_levels == 0) {
4298     KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
4299   }
4300 
4301   switch (__kmp_affinity_type) {
4302 
4303   case affinity_explicit:
4304     KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
4305     if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
4306       __kmp_affinity_process_proclist(
4307           &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4308           __kmp_affinity_proclist, osId2Mask, maxIndex);
4309     } else {
4310       __kmp_affinity_process_placelist(
4311           &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4312           __kmp_affinity_proclist, osId2Mask, maxIndex);
4313     }
4314     if (__kmp_affinity_num_masks == 0) {
4315       KMP_AFF_WARNING(AffNoValidProcID);
4316       __kmp_affinity_type = affinity_none;
4317       __kmp_create_affinity_none_places();
4318       return;
4319     }
4320     break;
4321 
4322   // The other affinity types rely on sorting the hardware threads according to
4323   // some permutation of the machine topology tree. Set __kmp_affinity_compact
4324   // and __kmp_affinity_offset appropriately, then jump to a common code
4325   // fragment to do the sort and create the array of affinity masks.
4326   case affinity_logical:
4327     __kmp_affinity_compact = 0;
4328     if (__kmp_affinity_offset) {
4329       __kmp_affinity_offset =
4330           __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4331     }
4332     goto sortTopology;
4333 
4334   case affinity_physical:
4335     if (__kmp_nThreadsPerCore > 1) {
4336       __kmp_affinity_compact = 1;
4337       if (__kmp_affinity_compact >= depth) {
4338         __kmp_affinity_compact = 0;
4339       }
4340     } else {
4341       __kmp_affinity_compact = 0;
4342     }
4343     if (__kmp_affinity_offset) {
4344       __kmp_affinity_offset =
4345           __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4346     }
4347     goto sortTopology;
4348 
4349   case affinity_scatter:
4350     if (__kmp_affinity_compact >= depth) {
4351       __kmp_affinity_compact = 0;
4352     } else {
4353       __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
4354     }
4355     goto sortTopology;
4356 
4357   case affinity_compact:
4358     if (__kmp_affinity_compact >= depth) {
4359       __kmp_affinity_compact = depth - 1;
4360     }
4361     goto sortTopology;
4362 
4363   case affinity_balanced:
4364     if (depth <= 1) {
4365       KMP_AFF_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4366       __kmp_affinity_type = affinity_none;
4367       __kmp_create_affinity_none_places();
4368       return;
4369     } else if (!__kmp_topology->is_uniform()) {
4370       // Save the depth for further usage
4371       __kmp_aff_depth = depth;
4372 
4373       int core_level =
4374           __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
4375       int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
4376                                                  core_level);
4377       int maxprocpercore = __kmp_affinity_max_proc_per_core(
4378           __kmp_avail_proc, depth - 1, core_level);
4379 
4380       int nproc = ncores * maxprocpercore;
4381       if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
4382         KMP_AFF_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4383         __kmp_affinity_type = affinity_none;
4384         return;
4385       }
4386 
4387       procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
4388       for (int i = 0; i < nproc; i++) {
4389         procarr[i] = -1;
4390       }
4391 
4392       int lastcore = -1;
4393       int inlastcore = 0;
4394       for (int i = 0; i < __kmp_avail_proc; i++) {
4395         int proc = __kmp_topology->at(i).os_id;
4396         int core = __kmp_affinity_find_core(i, depth - 1, core_level);
4397 
4398         if (core == lastcore) {
4399           inlastcore++;
4400         } else {
4401           inlastcore = 0;
4402         }
4403         lastcore = core;
4404 
4405         procarr[core * maxprocpercore + inlastcore] = proc;
4406       }
4407     }
4408     if (__kmp_affinity_compact >= depth) {
4409       __kmp_affinity_compact = depth - 1;
4410     }
4411 
4412   sortTopology:
4413     // Allocate the gtid->affinity mask table.
4414     if (__kmp_affinity_dups) {
4415       __kmp_affinity_num_masks = __kmp_avail_proc;
4416     } else {
4417       __kmp_affinity_num_masks = numUnique;
4418     }
4419 
4420     if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
4421         (__kmp_affinity_num_places > 0) &&
4422         ((unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks)) {
4423       __kmp_affinity_num_masks = __kmp_affinity_num_places;
4424     }
4425 
4426     KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4427 
4428     // Sort the topology table according to the current setting of
4429     // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
4430     __kmp_topology->sort_compact();
4431     {
4432       int i;
4433       unsigned j;
4434       int num_hw_threads = __kmp_topology->get_num_hw_threads();
4435       for (i = 0, j = 0; i < num_hw_threads; i++) {
4436         if ((!__kmp_affinity_dups) && (!__kmp_topology->at(i).leader)) {
4437           continue;
4438         }
4439         int osId = __kmp_topology->at(i).os_id;
4440 
4441         kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
4442         kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, j);
4443         KMP_ASSERT(KMP_CPU_ISSET(osId, src));
4444         KMP_CPU_COPY(dest, src);
4445         if (++j >= __kmp_affinity_num_masks) {
4446           break;
4447         }
4448       }
4449       KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
4450     }
4451     // Sort the topology back using ids
4452     __kmp_topology->sort_ids();
4453     break;
4454 
4455   default:
4456     KMP_ASSERT2(0, "Unexpected affinity setting");
4457   }
4458 
4459   KMP_CPU_FREE_ARRAY(osId2Mask, maxIndex + 1);
4460 }
4461 
4462 void __kmp_affinity_initialize(void) {
4463   // Much of the code above was written assuming that if a machine was not
4464   // affinity capable, then __kmp_affinity_type == affinity_none.  We now
4465   // explicitly represent this as __kmp_affinity_type == affinity_disabled.
4466   // There are too many checks for __kmp_affinity_type == affinity_none
4467   // in this code.  Instead of trying to change them all, check if
4468   // __kmp_affinity_type == affinity_disabled, and if so, slam it with
4469   // affinity_none, call the real initialization routine, then restore
4470   // __kmp_affinity_type to affinity_disabled.
4471   int disabled = (__kmp_affinity_type == affinity_disabled);
4472   if (!KMP_AFFINITY_CAPABLE()) {
4473     KMP_ASSERT(disabled);
4474   }
4475   if (disabled) {
4476     __kmp_affinity_type = affinity_none;
4477   }
4478   __kmp_aux_affinity_initialize();
4479   if (disabled) {
4480     __kmp_affinity_type = affinity_disabled;
4481   }
4482 }
4483 
4484 void __kmp_affinity_uninitialize(void) {
4485   if (__kmp_affinity_masks != NULL) {
4486     KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4487     __kmp_affinity_masks = NULL;
4488   }
4489   if (__kmp_affin_fullMask != NULL) {
4490     KMP_CPU_FREE(__kmp_affin_fullMask);
4491     __kmp_affin_fullMask = NULL;
4492   }
4493   if (__kmp_affin_origMask != NULL) {
4494     KMP_CPU_FREE(__kmp_affin_origMask);
4495     __kmp_affin_origMask = NULL;
4496   }
4497   __kmp_affinity_num_masks = 0;
4498   __kmp_affinity_type = affinity_default;
4499   __kmp_affinity_num_places = 0;
4500   if (__kmp_affinity_proclist != NULL) {
4501     __kmp_free(__kmp_affinity_proclist);
4502     __kmp_affinity_proclist = NULL;
4503   }
4504   if (procarr != NULL) {
4505     __kmp_free(procarr);
4506     procarr = NULL;
4507   }
4508 #if KMP_USE_HWLOC
4509   if (__kmp_hwloc_topology != NULL) {
4510     hwloc_topology_destroy(__kmp_hwloc_topology);
4511     __kmp_hwloc_topology = NULL;
4512   }
4513 #endif
4514   if (__kmp_hw_subset) {
4515     kmp_hw_subset_t::deallocate(__kmp_hw_subset);
4516     __kmp_hw_subset = nullptr;
4517   }
4518   if (__kmp_topology) {
4519     kmp_topology_t::deallocate(__kmp_topology);
4520     __kmp_topology = nullptr;
4521   }
4522   KMPAffinity::destroy_api();
4523 }
4524 
4525 void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
4526   if (!KMP_AFFINITY_CAPABLE()) {
4527     return;
4528   }
4529 
4530   kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4531   if (th->th.th_affin_mask == NULL) {
4532     KMP_CPU_ALLOC(th->th.th_affin_mask);
4533   } else {
4534     KMP_CPU_ZERO(th->th.th_affin_mask);
4535   }
4536 
4537   // Copy the thread mask to the kmp_info_t structure. If
4538   // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one that
4539   // has all of the OS proc ids set, or if __kmp_affinity_respect_mask is set,
4540   // then the full mask is the same as the mask of the initialization thread.
4541   kmp_affin_mask_t *mask;
4542   int i;
4543 
4544   if (KMP_AFFINITY_NON_PROC_BIND) {
4545     if ((__kmp_affinity_type == affinity_none) ||
4546         (__kmp_affinity_type == affinity_balanced) ||
4547         KMP_HIDDEN_HELPER_THREAD(gtid)) {
4548 #if KMP_GROUP_AFFINITY
4549       if (__kmp_num_proc_groups > 1) {
4550         return;
4551       }
4552 #endif
4553       KMP_ASSERT(__kmp_affin_fullMask != NULL);
4554       i = 0;
4555       mask = __kmp_affin_fullMask;
4556     } else {
4557       int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
4558       KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4559       i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4560       mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4561     }
4562   } else {
4563     if ((!isa_root) || KMP_HIDDEN_HELPER_THREAD(gtid) ||
4564         (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
4565 #if KMP_GROUP_AFFINITY
4566       if (__kmp_num_proc_groups > 1) {
4567         return;
4568       }
4569 #endif
4570       KMP_ASSERT(__kmp_affin_fullMask != NULL);
4571       i = KMP_PLACE_ALL;
4572       mask = __kmp_affin_fullMask;
4573     } else {
4574       // int i = some hash function or just a counter that doesn't
4575       // always start at 0.  Use adjusted gtid for now.
4576       int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
4577       KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4578       i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4579       mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4580     }
4581   }
4582 
4583   th->th.th_current_place = i;
4584   if (isa_root || KMP_HIDDEN_HELPER_THREAD(gtid)) {
4585     th->th.th_new_place = i;
4586     th->th.th_first_place = 0;
4587     th->th.th_last_place = __kmp_affinity_num_masks - 1;
4588   } else if (KMP_AFFINITY_NON_PROC_BIND) {
4589     // When using a Non-OMP_PROC_BIND affinity method,
4590     // set all threads' place-partition-var to the entire place list
4591     th->th.th_first_place = 0;
4592     th->th.th_last_place = __kmp_affinity_num_masks - 1;
4593   }
4594 
4595   if (i == KMP_PLACE_ALL) {
4596     KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
4597                    gtid));
4598   } else {
4599     KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
4600                    gtid, i));
4601   }
4602 
4603   KMP_CPU_COPY(th->th.th_affin_mask, mask);
4604 
4605   if (__kmp_affinity_verbose && !KMP_HIDDEN_HELPER_THREAD(gtid)
4606       /* to avoid duplicate printing (will be correctly printed on barrier) */
4607       && (__kmp_affinity_type == affinity_none ||
4608           (i != KMP_PLACE_ALL && __kmp_affinity_type != affinity_balanced))) {
4609     char buf[KMP_AFFIN_MASK_PRINT_LEN];
4610     __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4611                               th->th.th_affin_mask);
4612     KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
4613                __kmp_gettid(), gtid, buf);
4614   }
4615 
4616 #if KMP_DEBUG
4617   // Hidden helper thread affinity only printed for debug builds
4618   if (__kmp_affinity_verbose && KMP_HIDDEN_HELPER_THREAD(gtid)) {
4619     char buf[KMP_AFFIN_MASK_PRINT_LEN];
4620     __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4621                               th->th.th_affin_mask);
4622     KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY (hidden helper thread)",
4623                (kmp_int32)getpid(), __kmp_gettid(), gtid, buf);
4624   }
4625 #endif
4626 
4627 #if KMP_OS_WINDOWS
4628   // On Windows* OS, the process affinity mask might have changed. If the user
4629   // didn't request affinity and this call fails, just continue silently.
4630   // See CQ171393.
4631   if (__kmp_affinity_type == affinity_none) {
4632     __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
4633   } else
4634 #endif
4635     __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4636 }
4637 
4638 void __kmp_affinity_set_place(int gtid) {
4639   if (!KMP_AFFINITY_CAPABLE()) {
4640     return;
4641   }
4642 
4643   kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4644 
4645   KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
4646                  "place = %d)\n",
4647                  gtid, th->th.th_new_place, th->th.th_current_place));
4648 
4649   // Check that the new place is within this thread's partition.
4650   KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4651   KMP_ASSERT(th->th.th_new_place >= 0);
4652   KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
4653   if (th->th.th_first_place <= th->th.th_last_place) {
4654     KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
4655                (th->th.th_new_place <= th->th.th_last_place));
4656   } else {
4657     KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
4658                (th->th.th_new_place >= th->th.th_last_place));
4659   }
4660 
4661   // Copy the thread mask to the kmp_info_t structure,
4662   // and set this thread's affinity.
4663   kmp_affin_mask_t *mask =
4664       KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
4665   KMP_CPU_COPY(th->th.th_affin_mask, mask);
4666   th->th.th_current_place = th->th.th_new_place;
4667 
4668   if (__kmp_affinity_verbose) {
4669     char buf[KMP_AFFIN_MASK_PRINT_LEN];
4670     __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4671                               th->th.th_affin_mask);
4672     KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
4673                __kmp_gettid(), gtid, buf);
4674   }
4675   __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4676 }
4677 
4678 int __kmp_aux_set_affinity(void **mask) {
4679   int gtid;
4680   kmp_info_t *th;
4681   int retval;
4682 
4683   if (!KMP_AFFINITY_CAPABLE()) {
4684     return -1;
4685   }
4686 
4687   gtid = __kmp_entry_gtid();
4688   KA_TRACE(
4689       1000, (""); {
4690         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4691         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4692                                   (kmp_affin_mask_t *)(*mask));
4693         __kmp_debug_printf(
4694             "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
4695             gtid, buf);
4696       });
4697 
4698   if (__kmp_env_consistency_check) {
4699     if ((mask == NULL) || (*mask == NULL)) {
4700       KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4701     } else {
4702       unsigned proc;
4703       int num_procs = 0;
4704 
4705       KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
4706         if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4707           KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4708         }
4709         if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
4710           continue;
4711         }
4712         num_procs++;
4713       }
4714       if (num_procs == 0) {
4715         KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4716       }
4717 
4718 #if KMP_GROUP_AFFINITY
4719       if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
4720         KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4721       }
4722 #endif /* KMP_GROUP_AFFINITY */
4723     }
4724   }
4725 
4726   th = __kmp_threads[gtid];
4727   KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4728   retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4729   if (retval == 0) {
4730     KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
4731   }
4732 
4733   th->th.th_current_place = KMP_PLACE_UNDEFINED;
4734   th->th.th_new_place = KMP_PLACE_UNDEFINED;
4735   th->th.th_first_place = 0;
4736   th->th.th_last_place = __kmp_affinity_num_masks - 1;
4737 
4738   // Turn off 4.0 affinity for the current tread at this parallel level.
4739   th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
4740 
4741   return retval;
4742 }
4743 
4744 int __kmp_aux_get_affinity(void **mask) {
4745   int gtid;
4746   int retval;
4747 #if KMP_OS_WINDOWS || KMP_DEBUG
4748   kmp_info_t *th;
4749 #endif
4750   if (!KMP_AFFINITY_CAPABLE()) {
4751     return -1;
4752   }
4753 
4754   gtid = __kmp_entry_gtid();
4755 #if KMP_OS_WINDOWS || KMP_DEBUG
4756   th = __kmp_threads[gtid];
4757 #else
4758   (void)gtid; // unused variable
4759 #endif
4760   KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4761 
4762   KA_TRACE(
4763       1000, (""); {
4764         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4765         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4766                                   th->th.th_affin_mask);
4767         __kmp_printf(
4768             "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
4769             buf);
4770       });
4771 
4772   if (__kmp_env_consistency_check) {
4773     if ((mask == NULL) || (*mask == NULL)) {
4774       KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
4775     }
4776   }
4777 
4778 #if !KMP_OS_WINDOWS
4779 
4780   retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4781   KA_TRACE(
4782       1000, (""); {
4783         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4784         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4785                                   (kmp_affin_mask_t *)(*mask));
4786         __kmp_printf(
4787             "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
4788             buf);
4789       });
4790   return retval;
4791 
4792 #else
4793   (void)retval;
4794 
4795   KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
4796   return 0;
4797 
4798 #endif /* KMP_OS_WINDOWS */
4799 }
4800 
4801 int __kmp_aux_get_affinity_max_proc() {
4802   if (!KMP_AFFINITY_CAPABLE()) {
4803     return 0;
4804   }
4805 #if KMP_GROUP_AFFINITY
4806   if (__kmp_num_proc_groups > 1) {
4807     return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
4808   }
4809 #endif
4810   return __kmp_xproc;
4811 }
4812 
4813 int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
4814   if (!KMP_AFFINITY_CAPABLE()) {
4815     return -1;
4816   }
4817 
4818   KA_TRACE(
4819       1000, (""); {
4820         int gtid = __kmp_entry_gtid();
4821         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4822         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4823                                   (kmp_affin_mask_t *)(*mask));
4824         __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
4825                            "affinity mask for thread %d = %s\n",
4826                            proc, gtid, buf);
4827       });
4828 
4829   if (__kmp_env_consistency_check) {
4830     if ((mask == NULL) || (*mask == NULL)) {
4831       KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
4832     }
4833   }
4834 
4835   if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4836     return -1;
4837   }
4838   if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4839     return -2;
4840   }
4841 
4842   KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
4843   return 0;
4844 }
4845 
4846 int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
4847   if (!KMP_AFFINITY_CAPABLE()) {
4848     return -1;
4849   }
4850 
4851   KA_TRACE(
4852       1000, (""); {
4853         int gtid = __kmp_entry_gtid();
4854         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4855         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4856                                   (kmp_affin_mask_t *)(*mask));
4857         __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
4858                            "affinity mask for thread %d = %s\n",
4859                            proc, gtid, buf);
4860       });
4861 
4862   if (__kmp_env_consistency_check) {
4863     if ((mask == NULL) || (*mask == NULL)) {
4864       KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
4865     }
4866   }
4867 
4868   if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4869     return -1;
4870   }
4871   if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4872     return -2;
4873   }
4874 
4875   KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
4876   return 0;
4877 }
4878 
4879 int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
4880   if (!KMP_AFFINITY_CAPABLE()) {
4881     return -1;
4882   }
4883 
4884   KA_TRACE(
4885       1000, (""); {
4886         int gtid = __kmp_entry_gtid();
4887         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4888         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4889                                   (kmp_affin_mask_t *)(*mask));
4890         __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
4891                            "affinity mask for thread %d = %s\n",
4892                            proc, gtid, buf);
4893       });
4894 
4895   if (__kmp_env_consistency_check) {
4896     if ((mask == NULL) || (*mask == NULL)) {
4897       KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
4898     }
4899   }
4900 
4901   if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4902     return -1;
4903   }
4904   if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4905     return 0;
4906   }
4907 
4908   return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
4909 }
4910 
4911 // Dynamic affinity settings - Affinity balanced
4912 void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
4913   KMP_DEBUG_ASSERT(th);
4914   bool fine_gran = true;
4915   int tid = th->th.th_info.ds.ds_tid;
4916 
4917   // Do not perform balanced affinity for the hidden helper threads
4918   if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
4919     return;
4920 
4921   switch (__kmp_affinity_gran) {
4922   case KMP_HW_THREAD:
4923     break;
4924   case KMP_HW_CORE:
4925     if (__kmp_nThreadsPerCore > 1) {
4926       fine_gran = false;
4927     }
4928     break;
4929   case KMP_HW_SOCKET:
4930     if (nCoresPerPkg > 1) {
4931       fine_gran = false;
4932     }
4933     break;
4934   default:
4935     fine_gran = false;
4936   }
4937 
4938   if (__kmp_topology->is_uniform()) {
4939     int coreID;
4940     int threadID;
4941     // Number of hyper threads per core in HT machine
4942     int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
4943     // Number of cores
4944     int ncores = __kmp_ncores;
4945     if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
4946       __kmp_nth_per_core = __kmp_avail_proc / nPackages;
4947       ncores = nPackages;
4948     }
4949     // How many threads will be bound to each core
4950     int chunk = nthreads / ncores;
4951     // How many cores will have an additional thread bound to it - "big cores"
4952     int big_cores = nthreads % ncores;
4953     // Number of threads on the big cores
4954     int big_nth = (chunk + 1) * big_cores;
4955     if (tid < big_nth) {
4956       coreID = tid / (chunk + 1);
4957       threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
4958     } else { // tid >= big_nth
4959       coreID = (tid - big_cores) / chunk;
4960       threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
4961     }
4962     KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
4963                       "Illegal set affinity operation when not capable");
4964 
4965     kmp_affin_mask_t *mask = th->th.th_affin_mask;
4966     KMP_CPU_ZERO(mask);
4967 
4968     if (fine_gran) {
4969       int osID =
4970           __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
4971       KMP_CPU_SET(osID, mask);
4972     } else {
4973       for (int i = 0; i < __kmp_nth_per_core; i++) {
4974         int osID;
4975         osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
4976         KMP_CPU_SET(osID, mask);
4977       }
4978     }
4979     if (__kmp_affinity_verbose) {
4980       char buf[KMP_AFFIN_MASK_PRINT_LEN];
4981       __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
4982       KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
4983                  __kmp_gettid(), tid, buf);
4984     }
4985     __kmp_set_system_affinity(mask, TRUE);
4986   } else { // Non-uniform topology
4987 
4988     kmp_affin_mask_t *mask = th->th.th_affin_mask;
4989     KMP_CPU_ZERO(mask);
4990 
4991     int core_level =
4992         __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
4993     int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
4994                                                __kmp_aff_depth - 1, core_level);
4995     int nth_per_core = __kmp_affinity_max_proc_per_core(
4996         __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
4997 
4998     // For performance gain consider the special case nthreads ==
4999     // __kmp_avail_proc
5000     if (nthreads == __kmp_avail_proc) {
5001       if (fine_gran) {
5002         int osID = __kmp_topology->at(tid).os_id;
5003         KMP_CPU_SET(osID, mask);
5004       } else {
5005         int core =
5006             __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
5007         for (int i = 0; i < __kmp_avail_proc; i++) {
5008           int osID = __kmp_topology->at(i).os_id;
5009           if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
5010               core) {
5011             KMP_CPU_SET(osID, mask);
5012           }
5013         }
5014       }
5015     } else if (nthreads <= ncores) {
5016 
5017       int core = 0;
5018       for (int i = 0; i < ncores; i++) {
5019         // Check if this core from procarr[] is in the mask
5020         int in_mask = 0;
5021         for (int j = 0; j < nth_per_core; j++) {
5022           if (procarr[i * nth_per_core + j] != -1) {
5023             in_mask = 1;
5024             break;
5025           }
5026         }
5027         if (in_mask) {
5028           if (tid == core) {
5029             for (int j = 0; j < nth_per_core; j++) {
5030               int osID = procarr[i * nth_per_core + j];
5031               if (osID != -1) {
5032                 KMP_CPU_SET(osID, mask);
5033                 // For fine granularity it is enough to set the first available
5034                 // osID for this core
5035                 if (fine_gran) {
5036                   break;
5037                 }
5038               }
5039             }
5040             break;
5041           } else {
5042             core++;
5043           }
5044         }
5045       }
5046     } else { // nthreads > ncores
5047       // Array to save the number of processors at each core
5048       int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
5049       // Array to save the number of cores with "x" available processors;
5050       int *ncores_with_x_procs =
5051           (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5052       // Array to save the number of cores with # procs from x to nth_per_core
5053       int *ncores_with_x_to_max_procs =
5054           (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5055 
5056       for (int i = 0; i <= nth_per_core; i++) {
5057         ncores_with_x_procs[i] = 0;
5058         ncores_with_x_to_max_procs[i] = 0;
5059       }
5060 
5061       for (int i = 0; i < ncores; i++) {
5062         int cnt = 0;
5063         for (int j = 0; j < nth_per_core; j++) {
5064           if (procarr[i * nth_per_core + j] != -1) {
5065             cnt++;
5066           }
5067         }
5068         nproc_at_core[i] = cnt;
5069         ncores_with_x_procs[cnt]++;
5070       }
5071 
5072       for (int i = 0; i <= nth_per_core; i++) {
5073         for (int j = i; j <= nth_per_core; j++) {
5074           ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
5075         }
5076       }
5077 
5078       // Max number of processors
5079       int nproc = nth_per_core * ncores;
5080       // An array to keep number of threads per each context
5081       int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
5082       for (int i = 0; i < nproc; i++) {
5083         newarr[i] = 0;
5084       }
5085 
5086       int nth = nthreads;
5087       int flag = 0;
5088       while (nth > 0) {
5089         for (int j = 1; j <= nth_per_core; j++) {
5090           int cnt = ncores_with_x_to_max_procs[j];
5091           for (int i = 0; i < ncores; i++) {
5092             // Skip the core with 0 processors
5093             if (nproc_at_core[i] == 0) {
5094               continue;
5095             }
5096             for (int k = 0; k < nth_per_core; k++) {
5097               if (procarr[i * nth_per_core + k] != -1) {
5098                 if (newarr[i * nth_per_core + k] == 0) {
5099                   newarr[i * nth_per_core + k] = 1;
5100                   cnt--;
5101                   nth--;
5102                   break;
5103                 } else {
5104                   if (flag != 0) {
5105                     newarr[i * nth_per_core + k]++;
5106                     cnt--;
5107                     nth--;
5108                     break;
5109                   }
5110                 }
5111               }
5112             }
5113             if (cnt == 0 || nth == 0) {
5114               break;
5115             }
5116           }
5117           if (nth == 0) {
5118             break;
5119           }
5120         }
5121         flag = 1;
5122       }
5123       int sum = 0;
5124       for (int i = 0; i < nproc; i++) {
5125         sum += newarr[i];
5126         if (sum > tid) {
5127           if (fine_gran) {
5128             int osID = procarr[i];
5129             KMP_CPU_SET(osID, mask);
5130           } else {
5131             int coreID = i / nth_per_core;
5132             for (int ii = 0; ii < nth_per_core; ii++) {
5133               int osID = procarr[coreID * nth_per_core + ii];
5134               if (osID != -1) {
5135                 KMP_CPU_SET(osID, mask);
5136               }
5137             }
5138           }
5139           break;
5140         }
5141       }
5142       __kmp_free(newarr);
5143     }
5144 
5145     if (__kmp_affinity_verbose) {
5146       char buf[KMP_AFFIN_MASK_PRINT_LEN];
5147       __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5148       KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
5149                  __kmp_gettid(), tid, buf);
5150     }
5151     __kmp_set_system_affinity(mask, TRUE);
5152   }
5153 }
5154 
5155 #if KMP_OS_LINUX || KMP_OS_FREEBSD
5156 // We don't need this entry for Windows because
5157 // there is GetProcessAffinityMask() api
5158 //
5159 // The intended usage is indicated by these steps:
5160 // 1) The user gets the current affinity mask
5161 // 2) Then sets the affinity by calling this function
5162 // 3) Error check the return value
5163 // 4) Use non-OpenMP parallelization
5164 // 5) Reset the affinity to what was stored in step 1)
5165 #ifdef __cplusplus
5166 extern "C"
5167 #endif
5168     int
5169     kmp_set_thread_affinity_mask_initial()
5170 // the function returns 0 on success,
5171 //   -1 if we cannot bind thread
5172 //   >0 (errno) if an error happened during binding
5173 {
5174   int gtid = __kmp_get_gtid();
5175   if (gtid < 0) {
5176     // Do not touch non-omp threads
5177     KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5178                   "non-omp thread, returning\n"));
5179     return -1;
5180   }
5181   if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
5182     KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5183                   "affinity not initialized, returning\n"));
5184     return -1;
5185   }
5186   KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5187                 "set full mask for thread %d\n",
5188                 gtid));
5189   KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
5190   return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
5191 }
5192 #endif
5193 
5194 #endif // KMP_AFFINITY_SUPPORTED
5195