#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
#if KMP_USE_HWLOC
// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
#endif
#include <ctype.h>
// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need
  // to init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  }
  return KMP_I18N_STR(Unknown);
}
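
// Illustrative mapping: __kmp_hw_get_catalog_string(KMP_HW_L2, /*plural=*/true)
// yields the message-catalog string for the "L2 Caches" layer, while the
// companion __kmp_hw_get_keyword() below returns the fixed, machine-parsable
// spellings (e.g., "l2_caches") used when matching topology layer names in
// environment settings.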
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  }
  return ((plural) ? "unknowns" : "unknown");
}
const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  return "unknown";
}
#if KMP_AFFINITY_SUPPORTED
// If affinity is supported, check the affinity verbose and warning flags
// before printing a warning
#define KMP_AFF_WARNING(s, ...)                                                \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) {   \
    KMP_WARNING(__VA_ARGS__);                                                  \
  }
#else
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
#endif
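
// Example: KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered) emits the
// warning only when the user asked for verbose output, or when warnings are
// enabled and affinity is actually active (type != affinity_none); when
// affinity is unsupported it degenerates to a plain KMP_WARNING.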
// kmp_hw_thread_t methods
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
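
// Usage sketch: compare_ids matches the qsort() comparator signature, so the
// topology's hardware threads can be put into id order with, e.g.:
//   qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
//         kmp_hw_thread_t::compare_ids);
// (this is what kmp_topology_t::sort_ids() does).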
#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  int compact = __kmp_topology->compact;
  KMP_DEBUG_ASSERT(compact >= 0);
  KMP_DEBUG_ASSERT(compact <= depth);
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif
void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  if (attrs) {
    if (attrs.is_core_type_valid())
      printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
    if (attrs.is_core_eff_valid())
      printf(" (eff=%d)", attrs.get_core_eff());
  }
  printf("\n");
}
// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assumes the topology is
// perfectly nested (i.e., no object has more than one parent).
void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find the target layer.
  // If the new layer is equal to an existing layer, put the new layer above.
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to the next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Shift everything to accommodate the new
  // layer, then put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
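
// Worked example (hypothetical 4-thread machine): with types {socket, thread}
// and per-thread socket ids 0,0,1,1, inserting KMP_HW_NUMA with ids 0,0,1,1
// leaves layers_equal true at layer 0, so the NUMA layer lands at the top and
// the result is {numa, socket, thread}. Had the new ids been 0,1,2,3 (changing
// while socket ids repeat), the scan would descend one level and insert the
// new layer between socket and thread instead.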
#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  _insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);
}
#endif
// Remove layers that don't add information to the topology (radix-1 layers)
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up the preference associated with each layer; when a radix-1 pair is
  // found, the layer with the lower preference is removed.
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads)
    // to be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        remove_layer = remove_layer_ids = top_index2;
        keep_type = type1;
      } else {
        remove_type = type1;
        remove_layer = remove_layer_ids = top_index1;
        keep_type = type2;
      }
      // If all the ids of the deeper layer are the same, e.g.,
      // 1 socket X 2 cores/socket X 1 thread/core, then remove the deeper
      // layer's ids instead, keeping the shallower layer's ids intact.
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove the radix-1 type by setting the equivalence, removing the id
      // from the hw threads, and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
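
// Worked example: if every socket has exactly one L3 (socket ids 0,0,1,1 and
// L3 ids 0,0,1,1), the (socket, L3) pair is radix-1; since the socket layer
// is preferred (110 vs. 70), the L3 layer is removed and equivalent[KMP_HW_L3]
// becomes KMP_HW_SOCKET. Note the guard above never compacts the
// socket/core/thread layers into each other, even when radix-1.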
void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}
// Gather the count for each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types and efficiencies for
        // hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      // Same above_level unit, so just check for a new core
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}
// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
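
// Worked example: 2 sockets x 4 cores/socket x 2 threads/core gives
// ratio = {2, 4, 2} and count[depth - 1] == 16; since 2 * 4 * 2 == 16, the
// topology is uniform. If one core were missing from the mask, the ratios
// would still multiply to 16 but count[depth - 1] would be 14: non-uniform.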
// Set all the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
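
// Worked example (2 sockets x 2 cores/socket): raw ids
//   socket: 0 0 3 3    core: 4 5 4 5
// produce sub_ids
//   socket: 0 0 1 1    core: 0 1 0 1
// i.e., sub_ids are dense, zero-based indices at each level (restarting below
// a level whenever that level's id changes), independent of raw id values.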
void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // Assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}
kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
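
// Usage sketch (hypothetical sizes): build a 3-level topology for 8 hardware
// threads, fill in ids/os_id for each kmp_hw_thread_t, then canonicalize:
//   kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};
//   kmp_topology_t *topo = kmp_topology_t::allocate(8, 3, types);
//   // ... set topo->at(i).os_id and topo->at(i).ids[0..2] for i in [0, 8) ...
//   topo->canonicalize();
//   kmp_topology_t::deallocate(topo);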
void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}
bool kmp_topology_t::check_ids() const {
  // Assume the ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}
void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}
void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num available threads
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  } else {
    KMP_INFORM(AvailableOSProc, env_var, __kmp_xproc);
  }

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing the core and
  // thread levels
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread, e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/core
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at the very end of the quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS);
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s) ",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}
#if KMP_AFFINITY_SUPPORTED
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = affinity.env_var;
  // Set the number of affinity granularity levels
  if (affinity.gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(affinity.gran);
    // Check if the user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn the user what granularity setting will be used instead
      KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                      __kmp_hw_get_catalog_string(affinity.gran),
                      __kmp_hw_get_catalog_string(gran_type));
      affinity.gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists and the requested granularity
    // is coarser than a processor group, restrict the granularity down to the
    // processor group level, since threads can only exist within one group.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(affinity, AffGranTooCoarseProcGroup, env_var,
                        __kmp_hw_get_catalog_string(affinity.gran));
        affinity.gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
  }
}
#endif
void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually add the L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post-canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }
}
// Canonicalize an explicit packages X cores/pkg X threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  _discover_uniformity();
}
// Represents running sub IDs for a single core attribute where attribute
// values have SIZE possibilities.
template <size_t SIZE, typename IndexFunc> struct kmp_sub_ids_t {
  int sub_id[SIZE];
  int prev_sub_id[KMP_HW_LAST];
  IndexFunc indexer;
  int last_level;

public:
  kmp_sub_ids_t(int last_level) : last_level(last_level) {
    KMP_ASSERT(last_level < KMP_HW_LAST);
    for (size_t i = 0; i < SIZE; ++i)
      sub_id[i] = -1;
    for (size_t i = 0; i < KMP_HW_LAST; ++i)
      prev_sub_id[i] = -1;
  }
  void update(const kmp_hw_thread_t &hw_thread) {
    int idx = indexer(hw_thread);
    KMP_ASSERT(idx < (int)SIZE);
    for (int level = 0; level <= last_level; ++level) {
      if (hw_thread.sub_ids[level] != prev_sub_id[level]) {
        if (level < last_level)
          sub_id[idx] = -1;
        sub_id[idx]++;
        break;
      }
    }
    for (int level = 0; level <= last_level; ++level)
      prev_sub_id[level] = hw_thread.sub_ids[level];
  }
  int get_sub_id(const kmp_hw_thread_t &hw_thread) const {
    return sub_id[indexer(hw_thread)];
  }
};
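
// Worked example (hypothetical hybrid CPU; indexer = core type, last_level =
// core level): visiting cores in topology order with types Core, Atom, Core,
// update()/get_sub_id() number them Core#0, Atom#0, Core#1 -- one running
// counter per attribute value, which is what lets KMP_HW_SUBSET items such as
// 2c:intel_core,1c:intel_atom apply their num/offset independently per type.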
static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}
// Apply the KMP_HW_SUBSET envirable to the topology.
// Returns true if KMP_HW_SUBSET filtered any processors; otherwise, false.
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if the current layer is in the detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric,
                      __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if the current layer has already been specified, either
    // directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetEqvLayers,
                      __kmp_hw_get_catalog_string(type),
                      __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 ||
        (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
      bool plural = (num > 1);
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric,
                      __kmp_hw_get_catalog_string(type, plural));
      return false;
    }

    // Check core attributes
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // A single core attribute on a non-hybrid arch: ignore the attribute
      // but keep the rest of KMP_HW_SUBSET. Multiple core attributes on a
      // non-hybrid arch: ignore all of KMP_HW_SUBSET.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "efficiency");
          } else {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, "core_type",
                        "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if (using_core_types || using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of a specific core attribute + generic core,
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat,
                            __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }
  struct core_type_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      switch (t.attrs.get_core_type()) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
      case KMP_HW_CORE_TYPE_ATOM:
        return 1;
      case KMP_HW_CORE_TYPE_CORE:
        return 2;
#endif
      case KMP_HW_CORE_TYPE_UNKNOWN:
        return 0;
      }
      KMP_ASSERT(0);
      return 0;
    }
  };
  struct core_eff_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      return t.attrs.get_core_eff();
    }
  };

  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer> core_type_sub_ids(
      core_level);
  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_EFFS, core_eff_indexer> core_eff_sub_ids(
      core_level);

  // Determine which hardware threads should be filtered.
  int num_filtered = 0;
  bool *filtered = (bool *)__kmp_allocate(sizeof(bool) * num_hw_threads);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Update the running attribute sub ids
    if (using_core_types)
      core_type_sub_ids.update(hw_thread);
    if (using_core_effs)
      core_eff_sub_ids.update(hw_thread);

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds to
        // this hardware thread's core attribute, and use that item's
        // num/offset plus the running sub id for this attribute value.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET, so always filter it
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids.get_sub_id(hw_thread);
        else
          sub_id = core_eff_sub_ids.get_sub_id(hw_thread);
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (hw_thread.sub_ids[level] < offset ||
            (num != kmp_hw_subset_t::USE_ALL &&
             hw_thread.sub_ids[level] >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    filtered[i] = should_be_filtered;
    if (should_be_filtered)
      num_filtered++;
  }

  // One last check: don't allow filtering the entire machine
  if (num_filtered == num_hw_threads) {
    KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);
    __kmp_free(filtered);
    return false;
  }

  // Apply the filter
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    if (!filtered[i]) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
#if KMP_AFFINITY_SUPPORTED
      KMP_CPU_CLR(hw_threads[i].os_id, __kmp_affin_fullMask);
#endif
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  _gather_enumeration_information();
  _discover_uniformity();
  _set_globals();
  _set_last_level_cache();
  __kmp_free(filtered);
  return true;
}
bool kmp_topology_t::is_close(int hwt1, int hwt2, int hw_level) const {
  if (hw_level >= depth)
    return true;
  bool retval = true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return retval;
}
#if KMP_AFFINITY_SUPPORTED
class kmp_affinity_raii_t {
  kmp_affin_mask_t *mask;
  bool restored;

public:
  kmp_affinity_raii_t() : restored(false) {
    KMP_CPU_ALLOC(mask);
    KMP_ASSERT(mask != NULL);
    __kmp_get_system_affinity(mask, TRUE);
  }
  void restore() {
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE(mask);
    restored = true;
  }
  ~kmp_affinity_raii_t() {
    if (!restored) {
      __kmp_set_system_affinity(mask, TRUE);
      KMP_CPU_FREE(mask);
    }
  }
};
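
// Usage sketch: snapshot-and-restore around code that rebinds the thread,
// e.g., the cpuid-based topology probes below:
//   {
//     kmp_affinity_raii_t previous_affinity;
//     // ... __kmp_affinity_dispatch->bind_thread(proc); probe with cpuid ...
//     previous_affinity.restore(); // or rely on the destructor at scope exit
//   }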
bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and the user
  // requested the Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity.type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }
// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges, e.g., 1,2,3-5,7,9-15, or the string "{<empty>}" if no bits are set.
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range: [start, previous] is an inclusive range of contiguous
    // bits in the mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with a new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
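
// Example results (illustrative): a mask with bits {0,1,2,3,5} set prints as
// "0-3,5", bits {4,5} print as "4,5" (no dash for two-element runs), and an
// empty mask prints as "{<empty>}".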
// Print the affinity mask to the string buffer object in the same pretty
// format as __kmp_affinity_print_mask()
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range: [start, previous] is an inclusive range of contiguous
    // bits in the mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with a new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}
// Return a (possibly empty) affinity mask representing the offline CPUs.
// Caller must free the mask.
kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  kmp_affin_mask_t *offline;
  KMP_CPU_ALLOC(offline);
  KMP_CPU_ZERO(offline);
#if KMP_OS_LINUX
  int n, begin_cpu, end_cpu;
  kmp_safe_raii_file_t offline_file;
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
  // File contains CSV of integer ranges representing the offline CPUs,
  // e.g., 1,2,4-7,9,11-15
  int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r");
  if (status != 0)
    return offline;
  while (!feof(offline_file)) {
    skip_ws(offline_file);
    n = fscanf(offline_file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(offline_file);
    int c = fgetc(offline_file);
    if (c == EOF || c == ',') {
      // Just a single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // Range of CPUs
      skip_ws(offline_file);
      n = fscanf(offline_file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(offline_file);
      c = fgetc(offline_file); // skip ','
    } else {
      break;
    }
    // Ensure a valid range of CPUs
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
        end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
      continue;
    }
    // Insert [begin_cpu, end_cpu] into the offline mask
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, offline);
    }
  }
#endif
  return offline;
}
// Return the number of available procs
int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  int avail_proc = 0;
  KMP_CPU_ZERO(mask);

#if KMP_GROUP_AFFINITY

  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
        avail_proc++;
      }
    }
  } else

#endif /* KMP_GROUP_AFFINITY */

  {
    int proc;
    kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
    for (proc = 0; proc < __kmp_xproc; proc++) {
      // Skip offline CPUs
      if (KMP_CPU_ISSET(proc, offline_cpus))
        continue;
      KMP_CPU_SET(proc, mask);
      avail_proc++;
    }
    KMP_CPU_FREE(offline_cpus);
  }

  return avail_proc;
}
// The full mask of processors the process may run on
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
// The process's affinity mask as it was at startup, before any runtime changes
kmp_affin_mask_t *__kmp_affin_origMask = NULL;
#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}
// Returns the KMP_HW_* type derived from the HWLOC_* type
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN;
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
#endif
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}
// Returns the number of objects of type 'type' below 'obj' within the
// topology tree structure. E.g., if obj is a HWLOC_OBJ_PACKAGE object and
// type is HWLOC_OBJ_PU, this returns the number of PUs under the package.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type, first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}
// This gets the sub_id for a lower object under a higher object in the
// topology tree
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
  // Store sub_id + 1 so that 0 is distinct from NULL
  lower->userdata = RCAST(void *, sub_id + 1);
  return sub_id;
}
static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t type;
  int hw_thread_index, sub_id;
  int depth;
  hwloc_obj_t pu, obj, root, prev;
  kmp_hw_t types[KMP_HW_LAST];
  hwloc_obj_type_t hwloc_types[KMP_HW_LAST];

  hwloc_topology_t tp = __kmp_hwloc_topology;
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
  }

  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from hwloc on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    // hwloc only guarantees the existence of PU objects, so check PACKAGE
    // and CORE explicitly
    hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
    if (o != NULL)
      nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
    else
      nCoresPerPkg = 1; // no PACKAGE found
    o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
    if (o != NULL)
      __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
    else
      __kmp_nThreadsPerCore = 1; // no CORE found
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    if (nCoresPerPkg == 0)
      nCoresPerPkg = 1; // to prevent possible division by 0
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Handle multiple types of cores if they exist on the system
  int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);

  typedef struct kmp_hwloc_cpukinds_info_t {
    int efficiency;
    kmp_hw_core_type_t core_type;
    hwloc_bitmap_t mask;
  } kmp_hwloc_cpukinds_info_t;
  kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;

  if (nr_cpu_kinds > 0) {
    unsigned nr_infos;
    struct hwloc_info_s *infos;
    cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(
        sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
    for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
      cpukinds[idx].efficiency = -1;
      cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      cpukinds[idx].mask = hwloc_bitmap_alloc();
      if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
                                  &cpukinds[idx].efficiency, &nr_infos, &infos,
                                  0) == 0) {
        for (unsigned i = 0; i < nr_infos; ++i) {
          if (__kmp_str_match("CoreType", 8, infos[i].name)) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
            if (__kmp_str_match("IntelAtom", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
              break;
            } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
              break;
            }
#endif
          }
        }
      }
    }
  }
#endif

  root = hwloc_get_root_obj(tp);

  // Figure out the depth and types in the topology
  depth = 0;
  pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
  KMP_ASSERT(pu);
  obj = pu;
  types[depth] = KMP_HW_THREAD;
  hwloc_types[depth] = obj->type;
  depth++;
  while (obj != root && obj != NULL) {
    obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->memory_arity) {
      hwloc_obj_t memory;
      for (memory = obj->memory_first_child; memory;
           memory = hwloc_get_next_child(tp, obj, memory)) {
        if (memory->type == HWLOC_OBJ_NUMANODE)
          break;
      }
      if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
        types[depth] = KMP_HW_NUMA;
        hwloc_types[depth] = memory->type;
        depth++;
      }
    }
#endif
    type = __kmp_hwloc_type_2_topology_type(obj);
    if (type != KMP_HW_UNKNOWN) {
      types[depth] = type;
      hwloc_types[depth] = obj->type;
      depth++;
    }
  }
  KMP_ASSERT(depth > 0);

  // Get the order of the types correct (root first)
  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
    hwloc_obj_type_t hwloc_temp = hwloc_types[i];
    kmp_hw_t temp = types[i];
    types[i] = types[j];
    types[j] = temp;
    hwloc_types[i] = hwloc_types[j];
    hwloc_types[j] = hwloc_temp;
  }

  // Allocate the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);

  hw_thread_index = 0;
  pu = NULL;
  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
    int index = depth - 1;
    bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    if (included) {
      hw_thread.clear();
      hw_thread.ids[index] = pu->logical_index;
      hw_thread.os_id = pu->os_index;
      // If there are multiple core types, set the attribute for the hw thread
#if HWLOC_API_VERSION >= 0x00020400
      if (cpukinds) {
        int cpukind_index = -1;
        for (int i = 0; i < nr_cpu_kinds; ++i) {
          if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
            cpukind_index = i;
            break;
          }
        }
        if (cpukind_index >= 0) {
          hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
          hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
        }
      }
#endif
      index--;
    }
    obj = pu;
    prev = obj;
    while (obj != root && obj != NULL) {
      obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
      // NUMA nodes are handled differently since they are not within the
      // parent/child structure anymore. They are separate children of obj
      // (memory_first_child points to the first memory child).
      if (obj->memory_arity) {
        hwloc_obj_t memory;
        for (memory = obj->memory_first_child; memory;
             memory = hwloc_get_next_child(tp, obj, memory)) {
          if (memory->type == HWLOC_OBJ_NUMANODE)
            break;
        }
        if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
          sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
          if (included) {
            hw_thread.ids[index] = memory->logical_index;
            hw_thread.ids[index + 1] = sub_id;
            index--;
          }
          prev = memory;
        }
        prev = obj;
      }
#endif
      type = __kmp_hwloc_type_2_topology_type(obj);
      if (type != KMP_HW_UNKNOWN) {
        sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
        if (included) {
          hw_thread.ids[index] = obj->logical_index;
          hw_thread.ids[index + 1] = sub_id;
          index--;
        }
        prev = obj;
      }
    }
    if (included)
      hw_thread_index++;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Free the core types information
  if (cpukinds) {
    for (int idx = 0; idx < nr_cpu_kinds; ++idx)
      hwloc_bitmap_free(cpukinds[idx].mask);
    __kmp_free(cpukinds);
  }
#endif
  __kmp_topology->sort_ids();
  return true;
}
#endif // KMP_USE_HWLOC
// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread ids <-> processor ids.
static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
  }

  // Even if __kmp_affinity.type == affinity_none, this routine might still
  // be called to set __kmp_ncores, __kmp_nThreadsPerCore, nCoresPerPkg, and
  // nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    return true;
  }

  // Set the globals, then return if affinity is not enabled.
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i;
    hw_thread.ids[1] = 0;
    hw_thread.ids[2] = 0;
    avail_ct++;
  }
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }
  return true;
}
#if KMP_GROUP_AFFINITY
// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at level
// 1. This lets the threads float among all procs in a group when
// granularity=group (the default when there are multiple groups).
static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
  }

  // If we aren't affinity capable, then just set the globals
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    nPackages = __kmp_num_proc_groups;
    __kmp_nThreadsPerCore = 1;
    __kmp_ncores = __kmp_xproc;
    nCoresPerPkg = nPackages / __kmp_ncores;
    return true;
  }

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i / BITS_PER_GROUP;
    hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
  }
  return true;
}
#endif /* KMP_GROUP_AFFINITY */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

template <kmp_uint32 LSB, kmp_uint32 MSB>
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
  const kmp_uint32 SHIFT_RIGHT = LSB;
  kmp_uint32 retval = v;
  retval <<= SHIFT_LEFT;
  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
  return retval;
}
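
// Worked example: __kmp_extract_bits<24, 31>(0x12345678) shifts left by
// 32 - 1 - 31 = 0 and right by 24, yielding 0x12; __kmp_extract_bits<8, 15>
// of the same value yields 0x56. I.e., the inclusive bit range [LSB, MSB] is
// isolated and shifted down to bit 0.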
static int __kmp_cpuid_mask_width(int count) {
  int r = 0;
  while ((1 << r) < count)
    ++r;
  return r;
}
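
// Worked example: __kmp_cpuid_mask_width(6) == 3 since 1 << 3 = 8 is the
// first power of two >= 6; a package advertising at most 6 logical processors
// thus uses 3 APIC id bits for its core/thread sub-fields.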
class apicThreadInfo {
public:
  unsigned osId; // param to __kmp_affinity_bind_thread
  unsigned apicId; // from cpuid after binding
  unsigned maxCoresPerPkg; //      ""
  unsigned maxThreadsPerPkg; //      ""
  unsigned pkgId; // inferred from the above values
  unsigned coreId; //      ""
  unsigned threadId; //      ""
};

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}
class kmp_cache_info_t {
public:
  struct info_t {
    unsigned level, mask;
  };
  kmp_cache_info_t() : depth(0) { get_leaf4_levels(); }
  size_t get_depth() const { return depth; }
  info_t &operator[](size_t index) { return table[index]; }
  const info_t &operator[](size_t index) const { return table[index]; }

  static kmp_hw_t get_topology_type(unsigned level) {
    KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
    switch (level) {
    case 1:
      return KMP_HW_L1;
    case 2:
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

private:
  static const int MAX_CACHE_LEVEL = 3;

  size_t depth;
  info_t table[MAX_CACHE_LEVEL];

  void get_leaf4_levels() {
    unsigned level = 0;
    while (depth < MAX_CACHE_LEVEL) {
      unsigned cache_type, max_threads_sharing;
      unsigned cache_level, cache_mask_width;
      kmp_cpuid buf2;
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (!cache_type)
        break;
      // Skip instruction caches
      if (cache_type == 2) {
        level++;
        continue;
      }
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
      table[depth].level = cache_level;
      table[depth].mask = ((-1) << cache_mask_width);
      depth++;
      level++;
    }
  }
};
// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use an
// algorithm which cycles through the available os threads, setting the
// current thread's affinity mask to that thread, and then retrieves the Apic
// Id for each thread context using the cpuid instruction.
static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
  }

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return false;
  }

  // The algorithm below requires binding to each proc in turn. If we cannot
  // set affinity, infer the topology using only the data available from cpuid
  // on the current thread, and __kmp_xproc.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);

    // Get an upper bound on the number of threads per package using cpuid(1).
    // On some OS/chip combinations where HT is supported by the chip but
    // disabled, this value will be 2 for a single-core chip.
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // The num cores per pkg comes from cpuid(4); 1 must be added to the
    // encoded value. First check that cpuid(4) is supported: cpuid(0) must
    // report a highest leaf of at least 4.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // There is no reliable way to tell whether HT is enabled without issuing
    // cpuid from every thread and correlating the results, so assume 1
    // thread/core when the machine is not affinity capable.
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    return true;
  }

  // From here on, it is safe to call __kmp_get_system_affinity() and
  // __kmp_set_system_affinity(), even if __kmp_affinity.type == affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread to
  // each one and obtaining: the Apic Id (bits 24:31 of ebx from cpuid(1)),
  // max threads per package (bits 16:23 of ebx from cpuid(1)), and max cores
  // per package (bits 26:31 of eax from cpuid(4), plus 1). From these, deduce
  // the pkgId, coreId, and threadId fields of the apicThreadInfo struct.
  unsigned i;
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return false;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg comes from cpuid(4); 1 must be added to the encoded
    // value, and cpuid(4) support must be checked via cpuid(0) first.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only the info obtained locally.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // This would only happen if the cpuid info on a chip were truly
      // inconsistent.
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

    nApics++;
  }

  // We've collected all the info we need.
  // Restore the old affinity mask for this thread.
  previous_affinity.restore();

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);

  // Count the packages, cores, and threads, checking along the way that the
  // cpuid info is consistent within each package. pkgId's may be sparsely
  // assigned, so the counts below are derived from the sorted table rather
  // than from the id values themselves.
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;
  unsigned nPkgs = 1;
  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // Intra-pkg consistency check variables
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nPkgs++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package, so go on to the next iteration without
      // doing any consistency checks. Reset the consistency check vars, though.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return false;
    }

    // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
    // fields agree between all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return false;
    }
  }
  // Even when affinity is requested as "none", these globals must be set.
  nPackages = nPkgs;
  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;
  __kmp_ncores = nCores;
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);

  // Now that the number of packages, cores per package, and threads per core
  // are known, construct the data structure to be returned.
  int pkgLevel = 0;
  int coreLevel = 1;
  int threadLevel = 2;
  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
  int idx = 0;
  kmp_hw_t types[3];
  if (pkgLevel >= 0)
    types[idx++] = KMP_HW_SOCKET;
  if (coreLevel >= 0)
    types[idx++] = KMP_HW_CORE;
  if (threadLevel >= 0)
    types[idx++] = KMP_HW_THREAD;

  KMP_ASSERT(depth > 0);
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);

  for (i = 0; i < nApics; ++i) {
    idx = 0;
    unsigned os = threadInfo[i].osId;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();

    if (pkgLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].pkgId;
    }
    if (coreLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].coreId;
    }
    if (threadLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].threadId;
    }
    hw_thread.os_id = os;
  }

  __kmp_free(threadInfo);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
    return false;
  }
  return true;
}
// Hybrid cpu detection using CPUID.1A
// The thread should already be pinned to the processor being queried
static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
                                  unsigned *native_model_id) {
  kmp_cpuid buf;
  __kmp_x86_cpuid(0x1a, 0, &buf);
  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
  switch (*type) {
  case KMP_HW_CORE_TYPE_ATOM:
    *efficiency = 0;
    break;
  case KMP_HW_CORE_TYPE_CORE:
    *efficiency = 1;
    break;
  default:
    *efficiency = -1;
  }
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
}
// Intel(R) architectures support a newer interface for specifying x2APIC Ids,
// based on CPUID leaf B or leaf 1F, which enumerates topology level types.
enum {
  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
  INTEL_LEVEL_TYPE_SMT = 1,
  INTEL_LEVEL_TYPE_CORE = 2,
  INTEL_LEVEL_TYPE_MODULE = 3,
  INTEL_LEVEL_TYPE_TILE = 4,
  INTEL_LEVEL_TYPE_DIE = 5,
  INTEL_LEVEL_TYPE_LAST = 6,
};

struct cpuid_level_info_t {
  unsigned level_type, mask, mask_width, nitems, cache_mask;
};

static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
  switch (intel_type) {
  case INTEL_LEVEL_TYPE_INVALID:
    return KMP_HW_SOCKET;
  case INTEL_LEVEL_TYPE_SMT:
    return KMP_HW_THREAD;
  case INTEL_LEVEL_TYPE_CORE:
    return KMP_HW_CORE;
  case INTEL_LEVEL_TYPE_TILE:
    return KMP_HW_TILE;
  case INTEL_LEVEL_TYPE_MODULE:
    return KMP_HW_MODULE;
  case INTEL_LEVEL_TYPE_DIE:
    return KMP_HW_DIE;
  }
  return KMP_HW_UNKNOWN;
}

// This function takes the topology leaf, an array to store the levels
// detected, and a bitmap of the known level types.
// Returns the number of levels in the topology.
static unsigned
__kmp_x2apicid_get_levels(int leaf,
                          cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
                          kmp_uint64 known_levels) {
  unsigned level, levels_index;
  unsigned level_type, mask_width, nitems;
  kmp_cpuid buf;

  // Known topology layers act as the highest unknown topology layers when
  // unknown layers exist: e.g., with layers SMT <X> CORE <Y> <Z> PACKAGE
  // where <X> <Y> <Z> are unknown, SMT takes the characteristics of
  // (SMT x <X>) and CORE those of (CORE x <Y> x <Z>), preserving structure.
  level = levels_index = 0;
  do {
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
    if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
      return 0;

    if (known_levels & (1ull << level_type)) {
      // Add a new level to the topology
      KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
      levels[levels_index].level_type = level_type;
      levels[levels_index].mask_width = mask_width;
      levels[levels_index].nitems = nitems;
      levels_index++;
    } else {
      // If it is an unknown level, then logically move the previous layer up
      if (levels_index > 0) {
        levels[levels_index - 1].mask_width = mask_width;
        levels[levels_index - 1].nitems = nitems;
      }
    }
    level++;
  } while (level_type != INTEL_LEVEL_TYPE_INVALID);

  // If no levels were detected, then return 0
  if (levels_index == 0 || levels[0].level_type == INTEL_LEVEL_TYPE_INVALID)
    return 0;

  // Set the masks to & with the x2APIC id
  for (unsigned i = 0; i < levels_index; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((-1) << levels[i].mask_width);
      levels[i].cache_mask = (-1) << levels[i].mask_width;
      for (unsigned j = 0; j < i; ++j)
        levels[i].mask ^= levels[j].mask;
    } else {
      KMP_DEBUG_ASSERT(i > 0);
      levels[i].mask = (-1) << levels[i - 1].mask_width;
      levels[i].cache_mask = 0;
    }
  }
  return levels_index;
}
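
// Worked example (hardware dependent; a common 2-way SMT, 8-core package):
// leaf 11 typically reports SMT with mask_width 1 and CORE with mask_width 4,
// so levels[0].mask == 0x1 (thread bits) and levels[1].mask == 0xf ^ 0x1 ==
// 0xe (core bits with the SMT bits removed); the terminating invalid level
// gets mask == (-1) << 4, selecting the package bits of the x2APIC id.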
2532 static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *
const msg_id) {
2534 cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
2535 kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
2536 unsigned levels_index;
2538 kmp_uint64 known_levels;
2539 int topology_leaf, highest_leaf, apic_id;
2541 static int leaves[] = {0, 0};
2543 kmp_i18n_id_t leaf_message_id;
2545 KMP_BUILD_ASSERT(
sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);
2547 *msg_id = kmp_i18n_null;
2548 if (__kmp_affinity.flags.verbose) {
2549 KMP_INFORM(AffInfoStr,
"KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
2553 known_levels = 0ull;
2554 for (
int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
2555 if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
2556 known_levels |= (1ull << i);
2561 __kmp_x86_cpuid(0, 0, &buf);
2562 highest_leaf = buf.eax;
2567 if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
2570 leaf_message_id = kmp_i18n_str_NoLeaf11Support;
2571 }
else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
2574 leaf_message_id = kmp_i18n_str_NoLeaf31Support;
2579 leaf_message_id = kmp_i18n_str_NoLeaf11Support;
2583 __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
2585 for (
int i = 0; i < num_leaves; ++i) {
2586 int leaf = leaves[i];
2587 if (highest_leaf < leaf)
2589 __kmp_x86_cpuid(leaf, 0, &buf);
2592 topology_leaf = leaf;
2593 levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
2594 if (levels_index == 0)
2598 if (topology_leaf == -1 || levels_index == 0) {
2599 *msg_id = leaf_message_id;
2602 KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);
2609 if (!KMP_AFFINITY_CAPABLE()) {
2612 KMP_ASSERT(__kmp_affinity.type == affinity_none);
2613 for (
unsigned i = 0; i < levels_index; ++i) {
2614 if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
2615 __kmp_nThreadsPerCore = levels[i].nitems;
2616 }
else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
2617 nCoresPerPkg = levels[i].nitems;
2620 __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
2621 nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
2626 int depth = levels_index;
2627 for (
int i = depth - 1, j = 0; i >= 0; --i, ++j)
2628 types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
2630 kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);
2633 kmp_cache_info_t cache_info;
2634 for (
size_t i = 0; i < cache_info.get_depth(); ++i) {
2635 const kmp_cache_info_t::info_t &info = cache_info[i];
2636 unsigned cache_mask = info.mask;
2637 unsigned cache_level = info.level;
2638 for (
unsigned j = 0; j < levels_index; ++j) {
2639 unsigned hw_cache_mask = levels[j].cache_mask;
2640 kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
2641 if (hw_cache_mask == cache_mask && j < levels_index - 1) {
2643 __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
2644 __kmp_topology->set_equivalent_type(cache_type, type);
  // From here on, it is safe to call __kmp_get_system_affinity() and
  // __kmp_set_system_affinity().  Save the current thread's affinity mask so
  // it is restored when this object goes out of scope.
  kmp_affinity_raii_t previous_affinity;

  // Bind the current thread to each available OS proc in turn and read its
  // x2APIC id with cpuid.
  int hw_thread_index = 0;
  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
    cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
    unsigned my_levels_index;

    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(proc);

    // Extract the labeling from cpuid.
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    apic_id = buf.edx; // leaves 0xb/0x1f report the full x2APIC id in EDX
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    my_levels_index =
        __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
    if (my_levels_index == 0 || my_levels_index != levels_index) {
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }
    hw_thread.os_id = proc;
    // Put in topology information: mask out this level's bits, then shift
    // away the bits consumed by the levels below it.
    for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
      hw_thread.ids[idx] = apic_id & my_levels[j].mask;
      if (j > 0)
        hw_thread.ids[idx] >>= my_levels[j - 1].mask_width;
    }
    // Hybrid (P-core/E-core) information, if the CPU reports it.
    if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
      kmp_hw_core_type_t type;
      unsigned native_model_id;
      int efficiency;
      __kmp_get_hybrid_info(&type, &efficiency, &native_model_id);
      hw_thread.attrs.set_core_type(type);
      hw_thread.attrs.set_core_eff(efficiency);
    }
    hw_thread_index++;
  }
  KMP_ASSERT(hw_thread_index > 0);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
    return false;
  }
  return true;
}
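// Decoding example (illustrative): with the masks from the earlier example
// (SMT = 0x1, CORE = 0x1e, PACKAGE = ~0x1f), an apic_id of 0x2d yields
//   thread id  = 0x2d & 0x1          = 1
//   core id    = (0x2d & 0x1e) >> 1  = 6
//   package id = (0x2d & ~0x1f) >> 5 = 1
// i.e. SMT thread 1 of core 6 on package 1.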
// Indices into a parsed /proc/cpuinfo record for the fields we care about.
#define osIdIndex 0
#define threadIdIndex 1
#define coreIdIndex 2
#define pkgIdIndex 3
#define nodeIdIndex 4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;

static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
                                                  const void *b) {
  unsigned i;
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (i = maxIndex;; i--) {
    if (aa[i] < bb[i])
      return -1;
    if (aa[i] > bb[i])
      return 1;
    if (i == osIdIndex)
      break;
  }
  return 0;
}
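// The comparison walks from the most significant field (highest index: node,
// then package, core, thread) down to osIdIndex, so qsort orders records
// lexicographically by (node, pkg, core, thread, os id).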
#if KMP_USE_HIER_SCHED
// Set the array sizes for the hierarchy layers.
static void __kmp_dispatch_set_hierarchy_values() {
  // Set the maximum number of L1's to the number of cores, and the maximum
  // number of L2's to: number of cores / 2 for Intel(R) Xeon Phi(TM)
  // coprocessors, or the number of cores for everything else.
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&  \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
  else
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;

  // Set the number of threads per unit at each layer.
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
      __kmp_nThreadsPerCore;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&  \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        2 * __kmp_nThreadsPerCore;
  else
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
}
// Return the index into the hierarchy for this tid and layer type.  The +1
// maps the layer enum onto the arrays above, whose slot 0 is the thread base.
int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
  int index = type + 1;
  int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
  if (type == kmp_hier_layer_e::LAYER_THREAD)
    return tid;
  else if (type == kmp_hier_layer_e::LAYER_LOOP)
    return 0;
  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
  if (tid >= num_hw_threads)
    tid = tid % num_hw_threads;
  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
}
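// For example, on a machine with __kmp_nThreadsPerCore == 2 the L1 layer has
// __kmp_hier_threads_per == 2, so tids 0 and 1 map to L1 unit 0, tids 2 and 3
// to unit 1, and so on, wrapping modulo the number of units.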
// Return the number of t1's per t2.
int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
  int i1 = t1 + 1;
  int i2 = t2 + 1;
  KMP_DEBUG_ASSERT(i1 <= i2);
  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
  // (threads per t2) / (threads per t1) = number of t1 units inside each t2.
  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
}
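// For example, the number of L1 units per L3 unit is
// (nCoresPerPkg * __kmp_nThreadsPerCore) / __kmp_nThreadsPerCore
// == nCoresPerPkg, i.e. one L1 per core within the package.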
#endif // KMP_USE_HIER_SCHED
static inline const char *__kmp_cpuinfo_get_filename() {
  const char *filename;
  if (__kmp_cpuinfo_file != nullptr)
    filename = __kmp_cpuinfo_file;
  else
    filename = "/proc/cpuinfo";
  return filename;
}

static inline const char *__kmp_cpuinfo_get_envvar() {
  const char *envvar = nullptr;
  if (__kmp_cpuinfo_file != nullptr)
    envvar = "KMP_CPUINFO_FILE";
  return envvar;
}
// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
// affinity map.
static bool __kmp_affinity_create_cpuinfo_map(int *line,
                                              kmp_i18n_id_t *const msg_id) {
  const char *filename = __kmp_cpuinfo_get_filename();
  const char *envvar = __kmp_cpuinfo_get_envvar();
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
  }

  kmp_safe_raii_file_t f(filename, "r", envvar);

  // First scan: count the number of "processor" (osId) fields, and find the
  // highest value of <n> for any "node_<n> id" field.
  char buf[256];
  unsigned num_records = 0;
  while (!feof(f)) {
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
      // Read errors presumably because of EOF.
      break;
    }

    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      num_records++;
      continue;
    }

    unsigned level;
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // Validate the input before using it as an array offset.
      if (level > (unsigned)__kmp_xproc) { // level is too big
        level = __kmp_xproc;
      }
      if (nodeIdIndex + level >= maxIndex) {
        maxIndex = nodeIdIndex + level;
      }
      continue;
    }
  }

  // Check for empty file / no valid processor records, or too many.
  if (num_records == 0) {
    *msg_id = kmp_i18n_str_NoProcRecords;
    return false;
  }
  if (num_records > (unsigned)__kmp_xproc) {
    *msg_id = kmp_i18n_str_TooManyProcRecords;
    return false;
  }

  // Set the file pointer back to the beginning, so we can scan the file
  // again, this time performing a full parse of the data.
  if (fseek(f, 0, SEEK_SET) != 0) {
    *msg_id = kmp_i18n_str_CantRewindCpuinfo;
    return false;
  }

  // Allocate the array of records to store the proc info in.  The dummy
  // element at the end makes the logic in filling them out easier to code.
  unsigned i;
  unsigned **threadInfo =
      (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
  for (i = 0; i <= num_records; i++) {
    threadInfo[i] =
        (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  }

#define CLEANUP_THREAD_INFO                                                    \
  for (i = 0; i <= num_records; i++) {                                        \
    __kmp_free(threadInfo[i]);                                                 \
  }                                                                            \
  __kmp_free(threadInfo);

  // A value of UINT_MAX means that we didn't find the field.
  unsigned __index;

#define INIT_PROC_INFO(p)                                                      \
  for (__index = 0; __index <= maxIndex; __index++) {                         \
    (p)[__index] = UINT_MAX;                                                   \
  }

  for (i = 0; i <= num_records; i++) {
    INIT_PROC_INFO(threadInfo[i]);
  }
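// A typical record in the parsed file looks like (illustrative excerpt):
//   processor   : 12
//   physical id : 1
//   core id     : 2
//   thread id   : 0
// Each recognized field lands in threadInfo[num_avail][osIdIndex /
// pkgIdIndex / ...]; a blank line terminates the record.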
  // Second scan: parse each record's "processor", "physical id", "core id",
  // "thread id", and "node_<n> id" fields.
  unsigned num_avail = 0;
  *line = 0;
  while (!feof(f)) {
    // Create an inner scoping level, so that all the goto targets at the end
    // of the loop appear in an outer scoping level.
    {
      buf[sizeof(buf) - 1] = 1;
      bool long_line = false;
      if (!fgets(buf, sizeof(buf), f)) {
        // Read error, presumably EOF.  If there is valid data in
        // threadInfo[num_avail], fake a blank line so the last record still
        // gets processed.
        bool valid = false;
        for (i = 0; i <= maxIndex; i++) {
          if (threadInfo[num_avail][i] != UINT_MAX) {
            valid = true;
          }
        }
        if (!valid)
          break;
        buf[0] = 0;
      } else if (!buf[sizeof(buf) - 1]) {
        // The line is longer than the buffer.  Set a flag and don't emit an
        // error if the line turns out to be one we skip anyway.
        long_line = true;

#define CHECK_LINE                                                             \
  if (long_line) {                                                             \
    CLEANUP_THREAD_INFO;                                                       \
    *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
    return false;                                                              \
  }
      }
      (*line)++;

#if KMP_ARCH_LOONGARCH64
      // The parsing below depends on the blank lines between processor info
      // blocks, but on LoongArch a blank line also follows the unrelated
      // "system type" line before the first block; skip that one.
      if (*buf == '\n' && *line == 2)
        continue;
#endif

      char s1[] = "processor";
      if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s1) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
#if KMP_ARCH_AARCH64
          // Old AArch64 /proc/cpuinfo layouts list every "processor" entry in
          // a single section, so the usual duplicate-field check would always
          // fire; treat a repeat as the start of the next record instead.
          ++num_avail;
#else
          goto dup_field;
#endif
        threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
        char path[256];
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);

        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
        continue;
#else
      }
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][pkgIdIndex] = val;
        continue;
      }
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][coreIdIndex] = val;
        continue;
#endif // KMP_OS_LINUX && USE_SYSFS_INFO
      }
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][threadIdIndex] = val;
        continue;
      }
      unsigned level;
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        // Validate the input before using it as an array offset.
        if (level > (unsigned)__kmp_xproc) { // level is too big
          level = __kmp_xproc;
        }
        if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][nodeIdIndex + level] = val;
        continue;
      }

      // We didn't recognize the leading token on the line.  There are lots of
      // leading tokens that we don't recognize - if the line isn't empty, go
      // on to the next line.
      if ((*buf != 0) && (*buf != '\n')) {
        // If the line is longer than the buffer, read characters
        // until we find a newline.
        if (long_line) {
          int ch;
          while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
            ;
        }
        continue;
      }

      // A newline has signalled the end of the processor record.
      // Check that there aren't too many procs specified.
      if ((int)num_avail == __kmp_xproc) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_TooManyEntries;
        return false;
      }

      // Check for missing fields.  The osId field must be there; the physical
      // id field is also currently required.
      if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingProcField;
        return false;
      }
      if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingPhysicalIDField;
        return false;
      }

      // Skip this proc if it is not included in the machine model.
      if (KMP_AFFINITY_CAPABLE() &&
          !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
                         __kmp_affin_fullMask)) {
        INIT_PROC_INFO(threadInfo[num_avail]);
        continue;
      }

      // We have a successful parse of this proc's info.
      // Increment the counter, and prepare for the next proc.
      num_avail++;
      KMP_ASSERT(num_avail <= num_records);
      INIT_PROC_INFO(threadInfo[num_avail]);
    }
    continue;

  no_val:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_MissingValCpuinfo;
    return false;

  dup_field:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
    return false;
  }
  *line = 0;
#if KMP_MIC && REDUCE_TEAM_SIZE
  unsigned teamSize = 0;
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  // The table has now been filled in.
  KMP_ASSERT(num_avail > 0);
  KMP_ASSERT(num_avail <= num_records);

  // Sort the threadInfo table by physical id.
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields.  pkgIds may be sparsely assigned,
  // and thread ids may or may not be present, so count what is actually
  // there.
  unsigned *counts =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *maxCt =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *totals =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *lastId =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));

  bool assign_thread_ids = false;
  unsigned threadIdCt;
  unsigned index;
restart_radix_check:
  threadIdCt = 0;

  // Initialize the counter arrays with data from threadInfo[0].
  if (assign_thread_ids) {
    if (threadInfo[0][threadIdIndex] == UINT_MAX) {
      threadInfo[0][threadIdIndex] = threadIdCt++;
    } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
      threadIdCt = threadInfo[0][threadIdIndex] + 1;
    }
  }
  for (index = 0; index <= maxIndex; index++) {
    counts[index] = 1;
    maxCt[index] = 1;
    totals[index] = 1;
    lastId[index] = threadInfo[0][index];
  }

  // Run through the rest of the records, comparing each thread's value for
  // each field with the previous record's value.
  for (i = 1; i < num_avail; i++) {
    // Find the most significant index whose id differs from the previous
    // record's id.
    for (index = maxIndex; index >= threadIdIndex; index--) {
      if (assign_thread_ids && (index == threadIdIndex)) {
        // Auto-assign the thread id field if it wasn't specified.
        if (threadInfo[i][threadIdIndex] == UINT_MAX) {
          threadInfo[i][threadIdIndex] = threadIdCt++;
        }
        // Thread ids were specified for some records and not others; restart
        // the counter at the next higher thread id.
        else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
          threadIdCt = threadInfo[i][threadIdIndex] + 1;
        }
      }
      if (threadInfo[i][index] != lastId[index]) {
        // Reset the counts of all less significant fields to 1, updating the
        // running maxima, and bump the count and total at this level.
        unsigned index2;
        for (index2 = threadIdIndex; index2 < index; index2++) {
          totals[index2]++;
          if (counts[index2] > maxCt[index2]) {
            maxCt[index2] = counts[index2];
          }
          counts[index2] = 1;
          lastId[index2] = threadInfo[i][index2];
        }
        counts[index]++;
        totals[index]++;
        lastId[index] = threadInfo[i][index];

        if (assign_thread_ids && (index > threadIdIndex)) {
#if KMP_MIC && REDUCE_TEAM_SIZE
          // The default team size is the total #threads in the machine minus
          // 1 thread for every core that has 3 or more threads.
          teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE
          // We are on a new core; restart the thread id counter.
          threadIdCt = 0;
          if (threadInfo[i][threadIdIndex] == UINT_MAX) {
            threadInfo[i][threadIdIndex] = threadIdCt++;
          } else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
            threadIdCt = threadInfo[i][threadIdIndex] + 1;
          }
        }
        break;
      }
    }
    if (index < threadIdIndex) {
      // If thread ids were specified, it is an error if they are not unique.
      // Also check that we haven't already restarted the loop.
      if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
        return false;
      }
      // Otherwise, restart the scan and assign thread ids manually.
      assign_thread_ids = true;
      goto restart_radix_check;
    }
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (counts[index] > maxCt[index]) {
      maxCt[index] = counts[index];
    }
  }

  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
  nCoresPerPkg = maxCt[coreIdIndex];
  nPackages = totals[pkgIdIndex];

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, and
  // nPackages.  Make sure all these vars are set correctly, and return now if
  // affinity is not enabled.
  __kmp_ncores = totals[coreIdIndex];
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    return true;
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // Set the default team size.
  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
    __kmp_dflt_team_nth = teamSize;
    KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
                  "__kmp_dflt_team_nth = %d\n",
                  __kmp_dflt_team_nth));
  }
#endif // KMP_MIC && REDUCE_TEAM_SIZE
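// Counting example (illustrative): for records sorted as pkg/core/thread ids
// (0,0,0) (0,0,1) (0,1,0) (0,1,1) (1,0,0) (1,0,1), the scan above yields
// totals[pkgIdIndex] = 2 packages, maxCt[coreIdIndex] = 2 cores per package,
// and maxCt[threadIdIndex] = 2 threads per core.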
  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);

  // Count the number of levels which have more nodes at that level than at
  // the parent's level (with there being an implicit root node of the top
  // level).  This is equivalent to saying that there is at least one node at
  // this level which has a sibling.  These levels are in the map, and the
  // package level is always in the map.
  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
  for (index = threadIdIndex; index < maxIndex; index++) {
    KMP_ASSERT(totals[index] >= totals[index + 1]);
    inMap[index] = (totals[index] > totals[index + 1]);
  }
  inMap[maxIndex] = (totals[maxIndex] > 1);
  inMap[pkgIdIndex] = true;
  inMap[coreIdIndex] = true;
  inMap[threadIdIndex] = true;

  int depth = 0;
  int idx = 0;
  kmp_hw_t types[KMP_HW_LAST];
  int pkgLevel = -1;
  int coreLevel = -1;
  int threadLevel = -1;
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (inMap[index]) {
      depth++;
    }
  }
  if (inMap[pkgIdIndex]) {
    pkgLevel = idx;
    types[idx++] = KMP_HW_SOCKET;
  }
  if (inMap[coreIdIndex]) {
    coreLevel = idx;
    types[idx++] = KMP_HW_CORE;
  }
  if (inMap[threadIdIndex]) {
    threadLevel = idx;
    types[idx++] = KMP_HW_THREAD;
  }
  KMP_ASSERT(depth > 0);

  // Construct the topology table that is to be returned.
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);
  for (i = 0; i < num_avail; ++i) {
    unsigned os = threadInfo[i][osIdIndex];
    int src_index;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.os_id = os;

    idx = 0;
    for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
      if (!inMap[src_index]) {
        continue;
      }
      if (src_index == pkgIdIndex) {
        hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
      } else if (src_index == coreIdIndex) {
        hw_thread.ids[coreLevel] = threadInfo[i][src_index];
      } else if (src_index == threadIdIndex) {
        hw_thread.ids[threadLevel] = threadInfo[i][src_index];
      }
    }
  }

  __kmp_free(inMap);
  __kmp_free(lastId);
  __kmp_free(totals);
  __kmp_free(maxCt);
  __kmp_free(counts);
  CLEANUP_THREAD_INFO;
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
    return false;
  }
  return true;
}
// Create and return a table of affinity masks, indexed by OS thread id.
// This routine handles OR'ing together all the affinity masks of threads
// that are sufficiently close, if granularity > fine.
static void __kmp_create_os_id_masks(unsigned *numUnique,
                                     kmp_affinity_t &affinity) {
  // First form a table of affinity masks in order of OS thread id.
  int maxOsId;
  int i;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  int depth = __kmp_topology->get_depth();
  const char *env_var = affinity.env_var;
  KMP_ASSERT(numAddrs);
  KMP_DEBUG_ASSERT(depth);

  maxOsId = 0;
  for (i = numAddrs - 1;; --i) {
    int osId = __kmp_topology->at(i).os_id;
    if (osId > maxOsId) {
      maxOsId = osId;
    }
    if (i == 0)
      break;
  }
  affinity.num_os_id_masks = maxOsId + 1;
  KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks);
  KMP_ASSERT(affinity.gran_levels >= 0);
  if (affinity.flags.verbose && (affinity.gran_levels > 0)) {
    KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels);
  }
  if (affinity.gran_levels >= (int)depth) {
    KMP_AFF_WARNING(affinity, AffThreadsMayMigrate);
  }
  // Run through the table, forming the masks for all threads on each core.
  // Threads on the same core are adjacent after sort_ids().
  int unique = 0;
  int j = 0; // index of first thread on core
  int leader = 0;
  kmp_affin_mask_t *sum;
  KMP_CPU_ALLOC_ON_STACK(sum);
  KMP_CPU_ZERO(sum);
  KMP_CPU_SET(__kmp_topology->at(0).os_id, sum);
  for (i = 1; i < numAddrs; i++) {
    // If this thread is sufficiently close to the leader (within the
    // granularity setting), add its OS proc bit to the group's mask.
    if (__kmp_topology->is_close(leader, i, affinity.gran_levels)) {
      KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
      continue;
    }

    // For every thread in this group, copy the mask to the thread's entry in
    // the OS id mask table.  Mark the first address as a leader.
    for (; j < i; j++) {
      int osId = __kmp_topology->at(j).os_id;
      KMP_DEBUG_ASSERT(osId <= maxOsId);
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
      KMP_CPU_COPY(mask, sum);
      __kmp_topology->at(j).leader = (j == leader);
    }
    unique++;

    // Start a new mask.
    leader = i;
    KMP_CPU_ZERO(sum);
    KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  }

  // Copy the mask of the last group to each of its threads' entries.
  for (; j < i; j++) {
    int osId = __kmp_topology->at(j).os_id;
    KMP_DEBUG_ASSERT(osId <= maxOsId);
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
    KMP_CPU_COPY(mask, sum);
    __kmp_topology->at(j).leader = (j == leader);
  }
  unique++;
  KMP_CPU_FREE_FROM_STACK(sum);

  *numUnique = unique;
}
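// For example, with granularity=core on a 2-way SMT machine, the two hardware
// threads of each core share one mask with both of their OS proc bits set, so
// *numUnique equals the number of cores rather than the number of hardware
// threads.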
// Stuff for the affinity proclist parsers.  It's easier to declare these vars
// as file-scope variables than to pass them through the calling sequence of
// the recursive-descent OMP_PLACES parser.
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;

#define ADD_MASK(_mask)                                                        \
  {                                                                            \
    if (nextNewMask >= numNewMasks) {                                          \
      int i;                                                                   \
      numNewMasks *= 2;                                                        \
      kmp_affin_mask_t *temp;                                                  \
      KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
      for (i = 0; i < numNewMasks / 2; i++) {                                  \
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
        KMP_CPU_COPY(dest, src);                                               \
      }                                                                        \
      KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
      newMasks = temp;                                                         \
    }                                                                          \
    KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
    nextNewMask++;                                                             \
  }

#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
  {                                                                            \
    if (((_osId) > _maxOsId) ||                                                \
        (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId);                \
    } else {                                                                   \
      ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
    }                                                                          \
  }
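// ADD_MASK grows newMasks geometrically: when the array of numNewMasks masks
// is full, it is reallocated at twice the size and the old masks are copied
// over, so appending n masks costs O(n) copies overall.  ADD_MASK_OSID adds
// the pre-built mask for one OS proc id, warning about (and skipping) ids
// that are out of range or not present in the machine model.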
// Re-parse the proclist (for the explicit affinity type), building the list
// of affinity newMasks indexed by gtid.
static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) {
  int i;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *proclist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = proclist;
  const char *next = proclist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;
  kmp_affin_mask_t *sumMask;
  KMP_CPU_ALLOC(sumMask);
  int setSize = 0;

  for (;;) {
    int start, end, stride;

    SKIP_WS(scan);
    next = scan;
    if (*next == '\0') {
      break;
    }

    if (*next == '{') {
      int num;
      setSize = 0;
      next++; // skip '{'
      SKIP_WS(next);
      scan = next;

      // Read the first integer in the set.
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      SKIP_DIGITS(next);
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");

      // Copy the mask for that osId to the sum (union) mask.
      if ((num > maxOsId) ||
          (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
        KMP_CPU_ZERO(sumMask);
      } else {
        KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
        setSize = 1;
      }

      for (;;) {
        // Check for end of set.
        SKIP_WS(next);
        if (*next == '}') {
          next++; // skip '}'
          break;
        }

        // Skip optional comma.
        KMP_ASSERT2(*next == ',', "bad explicit proc list");
        next++; // skip ','
        SKIP_WS(next);

        // Read the next integer in the set.
        scan = next;
        KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                    "bad explicit proc list");
        SKIP_DIGITS(next);
        num = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(num >= 0, "bad explicit proc list");

        // Add the mask for that osId to the sum mask.
        if ((num > maxOsId) ||
            (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
        } else {
          KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
          setSize++;
        }
      }
      if (setSize > 0) {
        ADD_MASK(sumMask);
      }

      SKIP_WS(next);
      if (*next == ',') {
        next++; // skip ','
      }
      scan = next;
      continue;
    }

    // Read the first integer.
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
    SKIP_WS(next);

    // If this isn't a range, then add a mask to the list and go on.
    if (*next != '-') {
      ADD_MASK_OSID(start, osId2Mask, maxOsId);
      // Skip optional comma.
      if (*next == ',') {
        next++; // skip ','
      }
      scan = next;
      continue;
    }

    // This is a range.  Skip over the '-' and read in the 2nd int.
    next++; // skip '-'
    SKIP_WS(next);
    scan = next;
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    end = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(end >= 0, "bad explicit proc list");

    // Check for a stride parameter.
    stride = 1;
    SKIP_WS(next);
    if (*next == ':') {
      // A stride is specified.  Skip over the ':' and read the 3rd int.
      int sign = +1;
      next++; // skip ':'
      SKIP_WS(next);
      scan = next;
      if (*next == '-') {
        sign = -1;
        next++; // skip '-'
        SKIP_WS(next);
        scan = next;
      }
      KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                  "bad explicit proc list");
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
      stride *= sign;
    }

    // Do some range checks.
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    if (stride > 0) {
      KMP_ASSERT2(start <= end, "bad explicit proc list");
    } else {
      KMP_ASSERT2(start >= end, "bad explicit proc list");
    }
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");

    // Add the mask for each OS proc # to the list.
    if (stride > 0) {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start <= end);
    } else {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start >= end);
    }

    // Skip optional comma.
    SKIP_WS(next);
    if (*next == ',') {
      next++; // skip ','
    }
    scan = next;
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask);
}
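// Parsing example (illustrative): the proclist "0,3-7:2,{8,9}" produces the
// masks {0}, {3}, {5}, {7}, {8,9} - single ids and strided ranges yield one
// mask per OS proc, while a braced set is OR'ed into a single mask.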
// Re-parse one subplace of the OMP_PLACES list: num, num:count, or
// num:count:stride, OR'ing the selected OS procs into tempMask.
static void __kmp_process_subplace_list(const char **scan,
                                        kmp_affinity_t &affinity, int maxOsId,
                                        kmp_affin_mask_t *tempMask,
                                        int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;

  for (;;) {
    int start, count, stride, i;

    // Read in the starting proc id.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(start >= 0);
    *scan = next;

    // Valid follow sets are ',' ':' and '}'.
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
      } else {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
        (*setSize)++;
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read count parameter.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(count >= 0);
    *scan = next;

    // Valid follow sets are ',' ':' and '}'.
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start++;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read stride parameter.
    int sign = +1;
    for (;;) {
      SKIP_WS(*scan);
      if (**scan == '+') {
        (*scan)++; // skip '+'
        continue;
      }
      if (**scan == '-') {
        sign *= -1;
        (*scan)++; // skip '-'
        continue;
      }
      break;
    }
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    stride = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(stride >= 0);
    *scan = next;
    stride *= sign;

    // Valid follow sets are ',' and '}'.
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start += stride;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }
}
static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity,
                                int maxOsId, kmp_affin_mask_t *tempMask,
                                int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;

  // Valid follow sets are '{', '!' and num.
  SKIP_WS(*scan);
  if (**scan == '{') {
    (*scan)++; // skip '{'
    __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize);
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
    (*scan)++; // skip '}'
  } else if (**scan == '!') {
    (*scan)++; // skip '!'
    __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize);
    KMP_CPU_COMPLEMENT(maxOsId, tempMask);
  } else if ((**scan >= '0') && (**scan <= '9')) {
    next = *scan;
    SKIP_DIGITS(next);
    int num = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(num >= 0);
    if ((num > maxOsId) ||
        (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
    } else {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
      (*setSize)++;
    }
    *scan = next; // skip num
  } else {
    KMP_ASSERT2(0, "bad explicit places list");
  }
}
void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) {
  int i, j, count, stride, sign;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *placelist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = placelist;
  const char *next = placelist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;

  // tempMask is modified based on the previous or initial place to form the
  // current place; previousMask holds the previous place.
  kmp_affin_mask_t *tempMask;
  kmp_affin_mask_t *previousMask;
  KMP_CPU_ALLOC(tempMask);
  KMP_CPU_ZERO(tempMask);
  KMP_CPU_ALLOC(previousMask);
  KMP_CPU_ZERO(previousMask);
  int setSize = 0;

  for (;;) {
    __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize);

    // Valid follow sets are ',' ':' and EOL.
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      if (setSize > 0) {
        ADD_MASK(tempMask);
      }
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      if (*scan == '\0') {
        break;
      }
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(*scan == ':', "bad explicit places list");
    scan++; // skip ':'

    // Read count parameter.
    SKIP_WS(scan);
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
    next = scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(scan, *next);
    KMP_ASSERT(count >= 0);
    scan = next;

    // Valid follow sets are ',' ':' and EOL.
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      stride = +1;
    } else {
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
      scan++; // skip ':'

      // Read stride parameter.
      sign = +1;
      for (;;) {
        SKIP_WS(scan);
        if (*scan == '+') {
          scan++; // skip '+'
          continue;
        }
        if (*scan == '-') {
          sign *= -1;
          scan++; // skip '-'
          continue;
        }
        break;
      }
      SKIP_WS(scan);
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
                  "bad explicit places list");
      next = scan;
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_DEBUG_ASSERT(stride >= 0);
      scan = next;
      stride *= sign;
    }

    // Add places determined by initial_place : count : stride.
    for (i = 0; i < count; i++) {
      if (setSize == 0) {
        break;
      }
      // Add the current place, then build the next place (tempMask) from it.
      KMP_CPU_COPY(previousMask, tempMask);
      ADD_MASK(previousMask);
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      KMP_CPU_SET_ITERATE(j, previousMask) {
        if (!KMP_CPU_ISSET(j, previousMask)) {
          continue;
        }
        if ((j + stride > maxOsId) || (j + stride < 0) ||
            (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
            (!KMP_CPU_ISSET(j + stride,
                            KMP_CPU_INDEX(osId2Mask, j + stride)))) {
          if (i < count - 1) {
            KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, j + stride);
          }
          continue;
        }
        KMP_CPU_SET(j + stride, tempMask);
        setSize++;
      }
    }
    KMP_CPU_ZERO(tempMask);
    setSize = 0;

    // Valid follow sets are ',' and EOL.
    SKIP_WS(scan);
    if (*scan == '\0') {
      break;
    }
    if (*scan == ',') {
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}
#undef ADD_MASK
#undef ADD_MASK_OSID
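// Place-list example (illustrative): "{0,1}:4:2" starts from the place {0,1}
// and generates 4 places by repeatedly shifting every OS proc id in the
// previous place by the stride 2, yielding {0,1}, {2,3}, {4,5}, {6,7}.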
// Return the number of levels in the machine topology tree needed to obtain
// a sufficiently fine granularity of the core numbering.
static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
  int core_level = 0;
  for (int i = 0; i < nprocs; i++) {
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    for (int j = bottom_level; j > 0; j--) {
      if (hw_thread.ids[j] > 0) {
        if (core_level < (j - 1)) {
          core_level = j - 1;
        }
      }
    }
  }
  return core_level;
}

static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  return __kmp_topology->get_count(core_level);
}

// Find to which core (numbered at core_level) the given hw thread is bound.
static int __kmp_affinity_find_core(int proc, int bottom_level,
                                    int core_level) {
  int core = 0;
  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
  for (int i = 0; i <= proc; ++i) {
    if (i + 1 <= proc) {
      for (int j = 0; j <= core_level; ++j) {
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
          core++;
          break;
        }
      }
    }
  }
  return core;
}

// Find the maximal number of processing units bound to one core at the given
// level.
static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
                                            int core_level) {
  if (core_level >= bottom_level)
    return 1;
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
}
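// For example, on a uniform socket/core/thread topology with core_level
// pointing at the core layer, calculate_ratio(thread_level, core_level) is
// simply the SMT width, e.g. 2 hardware threads per core.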
static int *procarr = NULL;
static int __kmp_aff_depth = 0;
static int *__kmp_osid_to_hwthread_map = NULL;

static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask,
                                                  kmp_affinity_ids_t &ids,
                                                  kmp_affinity_attrs_t &attrs) {
  if (!KMP_AFFINITY_CAPABLE())
    return;

  // Initialize ids and attrs thread data.
  for (int i = 0; i < KMP_HW_LAST; ++i)
    ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
  attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  // Iterate through each os id within the mask and determine
  // the topology id and attribute information.
  int cpu;
  int depth = __kmp_topology->get_depth();
  KMP_CPU_SET_ITERATE(cpu, mask) {
    int osid_idx = __kmp_osid_to_hwthread_map[cpu];
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(osid_idx);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = __kmp_topology->get_type(level);
      int id = hw_thread.sub_ids[level];
      if (ids[type] == kmp_hw_thread_t::UNKNOWN_ID || ids[type] == id) {
        ids[type] = id;
      } else {
        // The mask spans multiple units at this level (and therefore at every
        // level below it); mark them all as such.
        ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        for (; level < depth; ++level) {
          kmp_hw_t type = __kmp_topology->get_type(level);
          ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        }
      }
    }
    if (!attrs.valid) {
      attrs.core_type = hw_thread.attrs.get_core_type();
      attrs.core_eff = hw_thread.attrs.get_core_eff();
      attrs.valid = 1;
    } else {
      // The mask spans multiple core attributes; report them as unknown.
      if (attrs.core_type != hw_thread.attrs.get_core_type())
        attrs.core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      if (attrs.core_eff != hw_thread.attrs.get_core_eff())
        attrs.core_eff = kmp_hw_attr_t::UNKNOWN_CORE_EFF;
    }
  }
}
static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  const kmp_affin_mask_t *mask = th->th.th_affin_mask;
  kmp_affinity_ids_t &ids = th->th.th_topology_ids;
  kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs;
  __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
}

// Assign the topology information to each place in the place list.  A thread
// can then grab not only its affinity mask, but the topology information
// associated with that mask, e.g. socket or core id.
static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  if (affinity.type != affinity_none) {
    KMP_ASSERT(affinity.num_os_id_masks);
    KMP_ASSERT(affinity.os_id_masks);
  }
  KMP_ASSERT(affinity.num_masks);
  KMP_ASSERT(affinity.masks);
  KMP_ASSERT(__kmp_affin_fullMask);

  int max_cpu = __kmp_affin_fullMask->get_max_cpu();
  int num_hw_threads = __kmp_topology->get_num_hw_threads();

  // Allocate thread topology information, if it doesn't exist yet.
  if (!affinity.ids) {
    affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate(
        sizeof(kmp_affinity_ids_t) * affinity.num_masks);
  }
  if (!affinity.attrs) {
    affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate(
        sizeof(kmp_affinity_attrs_t) * affinity.num_masks);
  }
  if (!__kmp_osid_to_hwthread_map) {
    // Want the +1 because max_cpu should be a valid index into the map.
    __kmp_osid_to_hwthread_map =
        (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1));
  }

  // Create the OS proc to hardware thread map.
  for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread)
    __kmp_osid_to_hwthread_map[__kmp_topology->at(hw_thread).os_id] =
        hw_thread;

  for (unsigned i = 0; i < affinity.num_masks; ++i) {
    kmp_affinity_ids_t &ids = affinity.ids[i];
    kmp_affinity_attrs_t &attrs = affinity.attrs[i];
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i);
    __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
  }
}

// Called when __kmp_affinity.type == affinity_none: build a single "place"
// containing the entire machine.
static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) {
  KMP_ASSERT(__kmp_affin_fullMask != NULL);
  KMP_ASSERT(affinity.type == affinity_none);
  affinity.num_masks = 1;
  KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);
  kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0);
  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
  __kmp_affinity_get_topology_info(affinity);
}
static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) {
  // Create the "full" mask - this defines all of the processors that we
  // consider to be in the machine model.  If respect is set, it is the
  // initialization thread's affinity mask; otherwise, it is all processors
  // that we know about on the machine.
  int verbose = affinity.flags.verbose;
  const char *env_var = affinity.env_var;

  // Already initialized.
  if (__kmp_affin_fullMask && __kmp_affin_origMask)
    return;

  if (__kmp_affin_fullMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_fullMask);
  }
  if (__kmp_affin_origMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_origMask);
  }
  if (KMP_AFFINITY_CAPABLE()) {
    __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
    // Make a copy before possibly expanding to the entire machine mask.
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
    if (affinity.flags.respect) {
      // Count the number of available processors.
      unsigned i;
      __kmp_avail_proc = 0;
      KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
          continue;
        }
        __kmp_avail_proc++;
      }
      if (__kmp_avail_proc > __kmp_xproc) {
        KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
        affinity.type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
      }

      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetRespect, env_var, buf);
      }
    } else {
      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetNotRespect, env_var, buf);
      }
      __kmp_avail_proc =
          __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups <= 1) {
        // Copy the expanded full mask if the topology has a single processor
        // group.
        __kmp_affin_origMask->copy(__kmp_affin_fullMask);
      }
      // Set the process affinity mask, since threads' affinity masks must be
      // a subset of the process mask on Windows* OS.
      __kmp_affin_fullMask->set_process_affinity(true);
#endif
    }
  }
}
static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) {
  bool success = false;
  const char *env_var = affinity.env_var;
  kmp_i18n_id_t msg_id = kmp_i18n_null;
  int verbose = affinity.flags.verbose;

  // For backward compatibility, setting KMP_CPUINFO_FILE implies
  // KMP_TOPOLOGY_METHOD=cpuinfo.
  if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
    __kmp_affinity_top_method = affinity_top_method_cpuinfo;
  }

  if (__kmp_affinity_top_method == affinity_top_method_all) {
    // In the default code path, errors are not fatal - we just try the next
    // method.  We only emit a message if verbose is set.
#if KMP_USE_HWLOC
    if (!success &&
        __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
      if (!__kmp_hwloc_error) {
        success = __kmp_affinity_create_hwloc_map(&msg_id);
        if (!success && verbose) {
          KMP_INFORM(AffIgnoringHwloc, env_var);
        }
      } else if (verbose) {
        KMP_INFORM(AffIgnoringHwloc, env_var);
      }
    }
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (!success) {
      success = __kmp_affinity_create_x2apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
    if (!success) {
      success = __kmp_affinity_create_apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#if KMP_OS_LINUX
    if (!success) {
      int line = 0;
      success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_OS_LINUX */

#if KMP_GROUP_AFFINITY
    if (!success && (__kmp_num_proc_groups > 1)) {
      success = __kmp_affinity_create_proc_group_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_GROUP_AFFINITY */

    if (!success) {
      success = __kmp_affinity_create_flat_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
      KMP_ASSERT(success);
    }
  }

// If the user has specified that a particular topology discovery method is
// to be used, then we abort if that method fails.
#if KMP_USE_HWLOC
  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
    KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
    success = __kmp_affinity_create_hwloc_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif // KMP_USE_HWLOC

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
           __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    success = __kmp_affinity_create_x2apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
    success = __kmp_affinity_create_apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
    int line = 0;
    success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      const char *filename = __kmp_cpuinfo_get_filename();
      if (line > 0) {
        KMP_FATAL(FileLineMsgExiting, filename, line,
                  __kmp_i18n_catgets(msg_id));
      } else {
        KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
      }
    }
  }

#if KMP_GROUP_AFFINITY
  else if (__kmp_affinity_top_method == affinity_top_method_group) {
    success = __kmp_affinity_create_proc_group_map(&msg_id);
    KMP_ASSERT(success);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_GROUP_AFFINITY */

  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
    success = __kmp_affinity_create_flat_map(&msg_id);
    // Should not fail.
    KMP_ASSERT(success);
  }

  // Early exit if the topology could not be created.
  if (!__kmp_topology) {
    if (KMP_AFFINITY_CAPABLE()) {
      KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
    }
    if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
        __kmp_ncores > 0) {
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
      if (verbose) {
        __kmp_topology->print(env_var);
      }
    }
    return false;
  }

  // Canonicalize the topology and possibly filter it based on KMP_HW_SUBSET.
  __kmp_topology->canonicalize();
  if (verbose)
    __kmp_topology->print(env_var);
  bool filtered = __kmp_topology->filter_hw_subset();
  if (filtered) {
#if KMP_OS_WINDOWS
    // Copy the filtered full mask if the topology has one processor group.
    if (__kmp_num_proc_groups <= 1)
#endif
      __kmp_affin_origMask->copy(__kmp_affin_fullMask);
  }
  if (filtered && verbose)
    __kmp_topology->print("KMP_HW_SUBSET");
  return success;
}
static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) {
  bool is_regular_affinity = (&affinity == &__kmp_affinity);
  bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity);
  const char *env_var = affinity.env_var;

  if (affinity.flags.initialized) {
    KMP_ASSERT(__kmp_affin_fullMask != NULL);
    return;
  }

  if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask))
    __kmp_aux_affinity_initialize_masks(affinity);

  if (is_regular_affinity && !__kmp_topology) {
    bool success = __kmp_aux_affinity_initialize_topology(affinity);
    if (success) {
      machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
      KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
    } else {
      affinity.type = affinity_none;
      KMP_AFFINITY_DISABLE();
    }
  }

  // If affinity is none, then create one mask with the entire machine in it.
  if (affinity.type == affinity_none) {
    __kmp_create_affinity_none_places(affinity);
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    affinity.flags.initialized = TRUE;
    return;
  }

  __kmp_topology->set_granularity(affinity);
  int depth = __kmp_topology->get_depth();

  // Create the table of masks, indexed by OS thread id.
  unsigned numUnique;
  __kmp_create_os_id_masks(&numUnique, affinity);
  if (affinity.gran_levels == 0) {
    KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
  }
  switch (affinity.type) {

  case affinity_explicit:
    KMP_DEBUG_ASSERT(affinity.proclist != NULL);
    if (is_hidden_helper_affinity ||
        __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
      __kmp_affinity_process_proclist(affinity);
    } else {
      __kmp_affinity_process_placelist(affinity);
    }
    if (affinity.num_masks == 0) {
      KMP_AFF_WARNING(affinity, AffNoValidProcID);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    }
    break;

  // The other affinity types rely on sorting the hardware threads according
  // to some permutation of the machine topology tree.  Set affinity.compact
  // and affinity.offset appropriately, then jump to the common sort code.
  case affinity_logical:
    affinity.compact = 0;
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_physical:
    if (__kmp_nThreadsPerCore > 1) {
      affinity.compact = 1;
      if (affinity.compact >= depth) {
        affinity.compact = 0;
      }
    } else {
      affinity.compact = 0;
    }
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_scatter:
    if (affinity.compact >= depth) {
      affinity.compact = 0;
    } else {
      affinity.compact = depth - 1 - affinity.compact;
    }
    goto sortTopology;
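  // Scatter is implemented as an inverted compact: e.g. with depth == 3
  // (socket/core/thread) and a requested compact of 0, scatter sets
  // affinity.compact = 2, so consecutive masks walk across sockets first
  // instead of packing neighbouring hardware threads together.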
  case affinity_compact:
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }
    goto sortTopology;

  case affinity_balanced:
    if (depth <= 1 || is_hidden_helper_affinity) {
      KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    } else if (!__kmp_topology->is_uniform()) {
      // Save the depth for further use.
      __kmp_aff_depth = depth;

      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);

      int nproc = ncores * maxprocpercore;
      if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
        KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
        affinity.type = affinity_none;
        __kmp_create_affinity_none_places(affinity);
        affinity.flags.initialized = TRUE;
        return;
      }

      procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        procarr[i] = -1;
      }

      // Fill procarr: one row of maxprocpercore slots per core.
      int lastcore = -1;
      int inlastcore = 0;
      for (int i = 0; i < __kmp_avail_proc; i++) {
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);

        if (core == lastcore) {
          inlastcore++;
        } else {
          inlastcore = 0;
        }
        lastcore = core;

        procarr[core * maxprocpercore + inlastcore] = proc;
      }
    }
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }

  sortTopology:
    // Allocate the gtid->affinity mask table.
    if (affinity.flags.dups) {
      affinity.num_masks = __kmp_avail_proc;
    } else {
      affinity.num_masks = numUnique;
    }

    if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
        (__kmp_affinity_num_places > 0) &&
        ((unsigned)__kmp_affinity_num_places < affinity.num_masks) &&
        !is_hidden_helper_affinity) {
      affinity.num_masks = __kmp_affinity_num_places;
    }

    KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);

    // Sort the topology table according to the current setting of
    // affinity.compact, then fill out affinity.masks.
    __kmp_topology->sort_compact(affinity);
    {
      int i;
      unsigned j;
      int num_hw_threads = __kmp_topology->get_num_hw_threads();
      for (i = 0, j = 0; i < num_hw_threads; i++) {
        if ((!affinity.flags.dups) && (!__kmp_topology->at(i).leader)) {
          continue;
        }
        int osId = __kmp_topology->at(i).os_id;

        kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId);
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j);
        KMP_ASSERT(KMP_CPU_ISSET(osId, src));
        KMP_CPU_COPY(dest, src);
        if (++j >= affinity.num_masks) {
          break;
        }
      }
      KMP_DEBUG_ASSERT(j == affinity.num_masks);
    }
    // Sort the topology back using ids.
    __kmp_topology->sort_ids();
    break;

  default:
    KMP_ASSERT2(0, "Unexpected affinity setting");
  }
  __kmp_affinity_get_topology_info(affinity);
  affinity.flags.initialized = TRUE;
}
void __kmp_affinity_initialize(kmp_affinity_t &affinity) {
  // Much of the code above was written assuming that if a machine was not
  // affinity capable, then affinity.type == affinity_none.  Temporarily slam
  // affinity_disabled to affinity_none for the duration of the real
  // initialization routine, then restore it.
  int disabled = (affinity.type == affinity_disabled);
  if (!KMP_AFFINITY_CAPABLE())
    KMP_ASSERT(disabled);
  if (disabled)
    affinity.type = affinity_none;
  __kmp_aux_affinity_initialize(affinity);
  if (disabled)
    affinity.type = affinity_disabled;
}
void __kmp_affinity_uninitialize(void) {
  for (kmp_affinity_t *affinity : __kmp_affinities) {
    if (affinity->masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks);
    if (affinity->os_id_masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks);
    if (affinity->proclist != NULL)
      __kmp_free(affinity->proclist);
    if (affinity->ids != NULL)
      __kmp_free(affinity->ids);
    if (affinity->attrs != NULL)
      __kmp_free(affinity->attrs);
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  }
  if (__kmp_affin_origMask != NULL) {
    if (KMP_AFFINITY_CAPABLE()) {
      __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
    }
    KMP_CPU_FREE(__kmp_affin_origMask);
    __kmp_affin_origMask = NULL;
  }
  __kmp_affinity_num_places = 0;
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
  if (__kmp_osid_to_hwthread_map) {
    __kmp_free(__kmp_osid_to_hwthread_map);
    __kmp_osid_to_hwthread_map = NULL;
  }
#if KMP_USE_HWLOC
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
#endif
  if (__kmp_hw_subset) {
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
    __kmp_hw_subset = nullptr;
  }
  if (__kmp_topology) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
  }
  KMPAffinity::destroy_api();
}
static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity,
                                      int *place, kmp_affin_mask_t **mask) {
  int mask_idx;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
  if (is_hidden_helper)
    // The first gtid is the regular primary thread, the second gtid is the
    // main thread of the hidden team, which does not execute tasks.
    mask_idx = gtid - 2;
  else
    mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
  KMP_DEBUG_ASSERT(affinity->num_masks > 0);
  *place = (mask_idx + affinity->offset) % affinity->num_masks;
  *mask = KMP_CPU_INDEX(affinity->masks, *place);
}
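// Placement is a simple round robin over the place list.  For example, with
// num_masks == 4 and offset == 1, successive mask_idx values 0,1,2,3,4,...
// select places 1,2,3,0,1,...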
// This function initializes the per-thread data concerning affinity,
// including the mask and topology information.
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  // Set the thread topology information to the default of unknown.
  for (int id = 0; id < KMP_HW_LAST; ++id)
    th->th.th_topology_ids[id] = kmp_hw_thread_t::UNKNOWN_ID;
  th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }

  // Copy the thread mask to the kmp_info_t structure.  If
  // __kmp_affinity.type == affinity_none, copy the "full" mask, i.e. one
  // that has all of the OS proc ids set, or if respect is set, the mask of
  // the initialization thread.
  kmp_affin_mask_t *mask;
  int i;
  const kmp_affinity_t *affinity;
  const char *env_var;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;
  env_var = affinity->env_var;

  if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) {
    if ((affinity->type == affinity_none) ||
        (affinity->type == affinity_balanced) ||
        KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  } else {
    if (!isa_root || __kmp_nested_proc_bind.bind_types[0] == proc_bind_false) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  }

  th->th.th_current_place = i;
  if (isa_root && !is_hidden_helper) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // When using a non-OMP_PROC_BIND affinity method, set all threads'
    // place-partition-var to the entire place list.
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  }
  // Copy the topology information associated with the place.
  if (i >= 0) {
    th->th.th_topology_ids = __kmp_affinity.ids[i];
    th->th.th_topology_attrs = __kmp_affinity.attrs[i];
  }

  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100,
             ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
              gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);

  if (affinity->flags.verbose &&
      (affinity->type == affinity_none ||
       (i != KMP_PLACE_ALL && affinity->type != affinity_balanced)) &&
      !KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
               gtid, buf);
  }

#if KMP_OS_WINDOWS
  // On Windows* OS, the process affinity mask might have changed.  If the
  // user didn't request affinity and this call fails, just continue silently.
  if (affinity->type == affinity_none) {
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else
#endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
void __kmp_affinity_set_place(int gtid) {
  // Hidden helper threads are not affected by OMP_PLACES/OMP_PROC_BIND.
  if (!KMP_AFFINITY_CAPABLE() || KMP_HIDDEN_HELPER_THREAD(gtid)) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));

  // Check that the new place is within this thread's partition.
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KMP_ASSERT(th->th.th_new_place >= 0);
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity.num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    // The partition wraps around the end of the place list.
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }

  // Copy the thread mask to the kmp_info_t structure,
  // and set this thread's affinity.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;
  // Copy the topology information associated with the place.
  th->th.th_topology_ids = __kmp_affinity.ids[th->th.th_new_place];
  th->th.th_topology_attrs = __kmp_affinity.attrs[th->th.th_new_place];

  if (__kmp_affinity.flags.verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
int __kmp_aux_set_affinity(void **mask) {
  int gtid;
  kmp_info_t *th;
  int retval;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    } else {
      unsigned proc;
      int num_procs = 0;

      KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
        if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
          continue;
        }
        num_procs++;
      }
      if (num_procs == 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }

#if KMP_GROUP_AFFINITY
      if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
#endif /* KMP_GROUP_AFFINITY */
    }
  }

  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  if (retval == 0) {
    KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  }

  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity.num_masks - 1;

  // Turn off 4.0 affinity due to the kmp_set_affinity() call.
  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

  return retval;
}
int __kmp_aux_get_affinity(void **mask) {
  int gtid;
  int retval;
#if KMP_OS_WINDOWS || KMP_DEBUG
  kmp_info_t *th;
#endif
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
#if KMP_OS_WINDOWS || KMP_DEBUG
  th = __kmp_threads[gtid];
#else
  (void)gtid; // unused variable
#endif
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  th->th.th_affin_mask);
        __kmp_debug_printf(
            "kmp_get_affinity: stored affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
    }
  }

#if !KMP_OS_WINDOWS
  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_get_affinity: system affinity mask for thread %d = %s\n",
            gtid, buf);
      });
  return retval;
#else
  (void)retval;
  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;
#endif /* !KMP_OS_WINDOWS */
}
int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  return __kmp_xproc;
}
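// With multiple Windows processor groups this deliberately over-approximates:
// e.g. 2 groups x 64 bits per DWORD_PTR = 128 proc slots, even if some group
// has fewer active processors.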
int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d "
                           "in affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return 0;
  }

  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
// Dynamic affinity settings - affinity balanced.
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  KMP_DEBUG_ASSERT(th);
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;
  const char *env_var = "KMP_AFFINITY";

  // Do not perform balanced affinity for the hidden helper threads.
  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
    return;

  switch (__kmp_affinity.gran) {
  case KMP_HW_THREAD:
    break;
  case KMP_HW_CORE:
    if (__kmp_nThreadsPerCore > 1) {
      fine_gran = false;
    }
    break;
  case KMP_HW_SOCKET:
    if (nCoresPerPkg > 1) {
      fine_gran = false;
    }
    break;
  default:
    fine_gran = false;
  }

  if (__kmp_topology->is_uniform()) {
    int coreID;
    int threadID;
    // Number of hyper threads per core in an HT machine.
    int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
    // Number of cores.
    int ncores = __kmp_ncores;
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      __kmp_nth_per_core = __kmp_avail_proc / nPackages;
      ncores = nPackages;
    }
    // How many threads will be bound to each core.
    int chunk = nthreads / ncores;
    // How many cores get an additional thread bound to them - "big cores".
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores.
    int big_nth = (chunk + 1) * big_cores;
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
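    // For example, nthreads = 10 on ncores = 4 gives chunk = 2, big_cores = 2
    // and big_nth = 6: tids 0-5 land on the two "big" cores (3 threads each)
    // and tids 6-9 on the remaining two cores (2 threads each).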
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    if (fine_gran) {
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
      KMP_CPU_SET(osID, mask);
    } else {
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        int osID;
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology
    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);

    // For performance, treat the special case nthreads == __kmp_avail_proc.
    if (nthreads == __kmp_avail_proc) {
      if (fine_gran) {
        int osID = __kmp_topology->at(tid).os_id;
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (int i = 0; i < __kmp_avail_proc; i++) {
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {
      int core = 0;
      for (int i = 0; i < ncores; i++) {
        // Check whether this core from procarr[] has any procs in the mask.
        int in_mask = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            in_mask = 1;
            break;
          }
        }
        if (in_mask) {
          if (tid == core) {
            for (int j = 0; j < nth_per_core; j++) {
              int osID = procarr[i * nth_per_core + j];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
                // For fine granularity it is enough to set the first
                // available osID for this core.
                if (fine_gran)
                  break;
              }
            }
            break;
          } else {
            core++;
          }
        }
      }
    } else { // nthreads > ncores
      // Array to save the number of processors at each core.
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      // Array to save the number of cores with "x" available processors.
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      // Array to save the number of cores with x to nth_per_core procs.
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));

      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }

      // Max number of processors.
      int nproc = nth_per_core * ncores;
      // An array to keep the number of threads per each context.
      int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        newarr[i] = 0;
      }

      // Distribute the threads over the contexts, filling each core's first
      // free context before doubling up (second pass onwards, flag != 0).
      int nth = nthreads;
      int flag = 0;
      while (nth > 0) {
        for (int j = 1; j <= nth_per_core; j++) {
          int cnt = ncores_with_x_to_max_procs[j];
          for (int i = 0; i < ncores; i++) {
            // Skip cores with no processors.
            if (nproc_at_core[i] == 0) {
              continue;
            }
            for (int k = 0; k < nth_per_core; k++) {
              if (procarr[i * nth_per_core + k] != -1) {
                if (newarr[i * nth_per_core + k] == 0) {
                  newarr[i * nth_per_core + k] = 1;
                  cnt--;
                  nth--;
                  break;
                } else {
                  if (flag != 0) {
                    newarr[i * nth_per_core + k]++;
                    cnt--;
                    nth--;
                    break;
                  }
                }
              }
            }
            if (cnt == 0 || nth == 0) {
              break;
            }
          }
          if (nth == 0) {
            break;
          }
        }
        flag = 1;
      }

      // Find which contexts this tid owns and build the mask.
      int sum = 0;
      for (int i = 0; i < nproc; i++) {
        sum += newarr[i];
        if (sum > tid) {
          if (fine_gran) {
            int osID = procarr[i];
            KMP_CPU_SET(osID, mask);
          } else {
            int coreID = i / nth_per_core;
            for (int ii = 0; ii < nth_per_core; ii++) {
              int osID = procarr[coreID * nth_per_core + ii];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          break;
        }
      }
      __kmp_free(newarr);
    }

    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  }
}
#if KMP_OS_LINUX || KMP_OS_FREEBSD
// We don't need this entry for Windows because there is the
// GetProcessAffinityMask() api.
//
// Returns 0 on success, -1 if we cannot bind the thread, and >0 (errno) if
// an error happened during binding.
extern "C" int kmp_set_thread_affinity_mask_initial() {
  int gtid = __kmp_get_gtid();
  if (gtid < 0) {
    // Do not touch non-OpenMP threads.
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
}
#endif

#endif // KMP_AFFINITY_SUPPORTED