/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "governor.h"
#include "main.h"
#include "thread_data.h"
#include "market.h"
#include "arena.h"
#include "dynamic_link.h"
#include "concurrent_monitor.h"

#include "oneapi/tbb/task_group.h"
#include "oneapi/tbb/global_control.h"
#include "oneapi/tbb/tbb_allocator.h"
#include "oneapi/tbb/info.h"

#include "task_dispatcher.h"

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <atomic>
#include <algorithm>

namespace tbb {
namespace detail {
namespace r1 {

void clear_address_waiter_table();

#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
//! global_control.cpp contains definition
bool remove_and_check_if_empty(d1::global_control& gc);
bool is_present(d1::global_control& gc);
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE

namespace rml {
tbb_server* make_private_server( tbb_client& client );
} // namespace rml

namespace system_topology {
    void destroy();
}

//------------------------------------------------------------------------
// governor
//------------------------------------------------------------------------

void governor::acquire_resources () {
#if __TBB_USE_POSIX
    int status = theTLS.create(auto_terminate);
#else
    int status = theTLS.create();
#endif
    if( status )
        handle_perror(status, "TBB failed to initialize task scheduler TLS\n");
    detect_cpu_features(cpu_features);

    is_rethrow_broken = gcc_rethrow_exception_broken();
}

void governor::release_resources () {
    theRMLServerFactory.close();
    destroy_process_mask();

    __TBB_ASSERT(!(__TBB_InitOnce::initialization_done() && theTLS.get()), "TBB is unloaded while thread data still alive?");

    int status = theTLS.destroy();
    if( status )
        runtime_warning("failed to destroy task scheduler TLS: %s", std::strerror(status));
    clear_address_waiter_table();

    system_topology::destroy();
    dynamic_unlink_all();
}

rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) {
    rml::tbb_server* server = NULL;
    if( !UsePrivateRML ) {
        ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client );
        if( status != ::rml::factory::st_success ) {
            UsePrivateRML = true;
            runtime_warning( "rml::tbb_factory::make_server failed with status %x, falling back on private rml", status );
        }
    }
    if ( !server ) {
        __TBB_ASSERT( UsePrivateRML, NULL );
        server = rml::make_private_server( client );
    }
    __TBB_ASSERT( server, "Failed to create RML server" );
    return server;
}

void governor::one_time_init() {
    if ( !__TBB_InitOnce::initialization_done() ) {
        DoOneTimeInitialization();
    }
}

/*
    There is no portable way to get the stack base address on POSIX systems; however,
    modern Linux versions provide the pthread_getattr_np API that can be used to obtain
    a thread's stack size and base address. Unfortunately even this function does not
    provide enough information for the main thread on the IA-64 architecture (the RSE
    spill area and the memory stack are allocated as two separate discontinuous chunks
    of memory), and there is no portable way to discern the main and the secondary
    threads. Thus for macOS* and for IA-64 Linux* OS we use the TBB worker stack size for
    all threads and use the current stack top as the stack base. This simplified
    approach is based on the following assumptions:
    1) If the default stack size is insufficient for the user app's needs, the
    required amount will be explicitly specified by the user at the point of TBB
    scheduler initialization (e.g., via the thread_stack_size parameter of
    tbb::global_control).
    2) When an external thread initializes the scheduler, it has enough space on its
    stack. Here "enough" means "at least as much as worker threads have".
    3) If the user app strives to conserve memory by cutting the stack size, it
    should do this for TBB workers too (as in #1).
*/
static std::uintptr_t get_stack_base(std::size_t stack_size) {
    // Stacks are growing top-down. Highest address is called "stack base",
    // and the lowest is "stack limit".
#if USE_WINTHREAD
    suppress_unused_warning(stack_size);
    NT_TIB* pteb = (NT_TIB*)NtCurrentTeb();
    __TBB_ASSERT(&pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB");
    return reinterpret_cast<std::uintptr_t>(pteb->StackBase);
#else /* USE_PTHREAD */
    // There is no portable way to get the stack base address in POSIX, so we use
    // a non-portable method (on all modern Linux) or the simplified approach
    // based on common-sense assumptions. The most important assumption
    // is that the main thread's stack size is not less than that of other threads.

    // Points to the lowest addressable byte of a stack.
    void* stack_limit = nullptr;
#if __linux__ && !__bg__
    size_t np_stack_size = 0;
    pthread_attr_t np_attr_stack;
    if (0 == pthread_getattr_np(pthread_self(), &np_attr_stack)) {
        if (0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size)) {
            __TBB_ASSERT( &stack_limit > stack_limit, "stack size must be positive" );
        }
        pthread_attr_destroy(&np_attr_stack);
    }
#endif /* __linux__ */
    std::uintptr_t stack_base{};
    if (stack_limit) {
        stack_base = reinterpret_cast<std::uintptr_t>(stack_limit) + stack_size;
    } else {
        // Use an anchor as a base stack address.
        int anchor{};
        stack_base = reinterpret_cast<std::uintptr_t>(&anchor);
    }
    return stack_base;
#endif /* USE_PTHREAD */
}

void governor::init_external_thread() {
    one_time_init();
    // Create new scheduler instance with arena
    int num_slots = default_num_threads();
    // TODO_REVAMP: support an external thread without an implicit arena
    int num_reserved_slots = 1;
    unsigned arena_priority_level = 1; // corresponds to tbb::task_arena::priority::normal
    std::size_t stack_size = 0;
    arena& a = *market::create_arena(num_slots, num_reserved_slots, arena_priority_level, stack_size);
    // We need an internal reference to the market. TODO: is it legacy?
    market::global_market(false);
    // External thread always occupies the first slot
    thread_data& td = *new(cache_aligned_allocate(sizeof(thread_data))) thread_data(0, false);
    td.attach_arena(a, /*slot index*/ 0);
    __TBB_ASSERT(td.my_inbox.is_idle_state(false), nullptr);

    stack_size = a.my_market->worker_stack_size();
    std::uintptr_t stack_base = get_stack_base(stack_size);
    task_dispatcher& task_disp = td.my_arena_slot->default_task_dispatcher();
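    // Note on the next call: the stealing threshold is an address partway down this
    // thread's stack; once dispatch recursion grows past it, the dispatcher stops
    // stealing additional work to reduce the risk of stack overflow. (Descriptive
    // comment only; the exact cut-off is computed by calculate_stealing_threshold().)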
    task_disp.set_stealing_threshold(calculate_stealing_threshold(stack_base, stack_size));
    td.attach_task_dispatcher(task_disp);

    td.my_arena_slot->occupy();
    a.my_market->add_external_thread(td);
    set_thread_data(td);
}

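// Tear-down path for a thread's TLS-bound scheduler state. Within this translation unit
// it is registered as the TLS destructor on POSIX builds (see acquire_resources()) and is
// also invoked directly from finalize_impl() for the calling external thread.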
void governor::auto_terminate(void* tls) {
    __TBB_ASSERT(get_thread_data_if_initialized() == nullptr ||
        get_thread_data_if_initialized() == tls, NULL);
    if (tls) {
        thread_data* td = static_cast<thread_data*>(tls);

        // Only an external thread can be inside an arena during termination.
        if (td->my_arena_slot) {
            arena* a = td->my_arena;
            market* m = a->my_market;

            a->my_observers.notify_exit_observers(td->my_last_observer, td->my_is_worker);

            td->my_task_dispatcher->m_stealing_threshold = 0;
            td->detach_task_dispatcher();
            td->my_arena_slot->release();
            // Release the arena
            a->on_thread_leaving<arena::ref_external>();

            m->remove_external_thread(*td);
            // If there was an associated arena, it added a public market reference
            m->release( /*is_public*/ true, /*blocking_terminate*/ false);
        }

        td->~thread_data();
        cache_aligned_deallocate(td);

        clear_thread_data();
    }
    __TBB_ASSERT(get_thread_data_if_initialized() == nullptr, NULL);
}

void governor::initialize_rml_factory () {
    ::rml::factory::status_type res = theRMLServerFactory.open();
    UsePrivateRML = res != ::rml::factory::st_success;
}

#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
void __TBB_EXPORTED_FUNC get(d1::task_scheduler_handle& handle) {
    handle.m_ctl = new(allocate_memory(sizeof(global_control))) global_control(global_control::scheduler_handle, 1);
}

void release_impl(d1::task_scheduler_handle& handle) {
    if (handle.m_ctl != nullptr) {
        handle.m_ctl->~global_control();
        deallocate_memory(handle.m_ctl);
        handle.m_ctl = nullptr;
    }
}

bool finalize_impl(d1::task_scheduler_handle& handle) {
    __TBB_ASSERT_RELEASE(handle, "trying to finalize with null handle");
    market::global_market_mutex_type::scoped_lock lock( market::theMarketMutex );
    bool ok = true; // ok if theMarket does not exist yet
    market* m = market::theMarket; // read the state of theMarket
    if (m != nullptr) {
        lock.release();
        __TBB_ASSERT(is_present(*handle.m_ctl), "finalize or release was already called on this object");
        thread_data* td = governor::get_thread_data_if_initialized();
        if (td) {
            task_dispatcher* task_disp = td->my_task_dispatcher;
            __TBB_ASSERT(task_disp, nullptr);
            if (task_disp->m_properties.outermost && !td->my_is_worker) { // is not inside a parallel region
                governor::auto_terminate(td);
            }
        }
        if (remove_and_check_if_empty(*handle.m_ctl)) {
            ok = m->release(/*is_public*/ true, /*blocking_terminate*/ true);
        } else {
            ok = false;
        }
    }
    return ok;
}

bool __TBB_EXPORTED_FUNC finalize(d1::task_scheduler_handle& handle, std::intptr_t mode) {
    if (mode == d1::release_nothrowing) {
        release_impl(handle);
        return true;
    } else {
        bool ok = finalize_impl(handle);
        // TODO: this is unsafe if finalize is called concurrently with a subsequent library unload
        release_impl(handle);
        if (mode == d1::finalize_throwing && !ok) {
            throw_exception(exception_id::unsafe_wait);
        }
        return ok;
    }
}
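
// Illustrative user-side flow for the exported functions above (a sketch, assuming the
// public oneapi::tbb::task_scheduler_handle interface of oneTBB 2021 that forwards to
// get()/finalize()):
//
//     tbb::task_scheduler_handle handle = tbb::task_scheduler_handle::get();
//     /* ... run parallel work ... */
//     // Waits for worker threads to finish; the nothrow form returns false instead of
//     // throwing if a safe wait cannot be guaranteed.
//     bool done = tbb::finalize(handle, std::nothrow);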
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE

#if __TBB_ARENA_BINDING

#if __TBB_WEAK_SYMBOLS_PRESENT
#pragma weak __TBB_internal_initialize_system_topology
#pragma weak __TBB_internal_destroy_system_topology
#pragma weak __TBB_internal_allocate_binding_handler
#pragma weak __TBB_internal_deallocate_binding_handler
#pragma weak __TBB_internal_apply_affinity
#pragma weak __TBB_internal_restore_affinity
#pragma weak __TBB_internal_get_default_concurrency

extern "C" {
void __TBB_internal_initialize_system_topology(
    size_t groups_num,
    int& numa_nodes_count, int*& numa_indexes_list,
    int& core_types_count, int*& core_types_indexes_list
);
void __TBB_internal_destroy_system_topology( );

//TODO: consider renaming to `create_binding_handler` and `destroy_binding_handler`
binding_handler* __TBB_internal_allocate_binding_handler( int slot_num, int numa_id, int core_type_id, int max_threads_per_core );
void __TBB_internal_deallocate_binding_handler( binding_handler* handler_ptr );

void __TBB_internal_apply_affinity( binding_handler* handler_ptr, int slot_num );
void __TBB_internal_restore_affinity( binding_handler* handler_ptr, int slot_num );

int __TBB_internal_get_default_concurrency( int numa_id, int core_type_id, int max_threads_per_core );
}
#endif /* __TBB_WEAK_SYMBOLS_PRESENT */

// Stubs that will be used if TBBbind library is unavailable.
static void dummy_destroy_system_topology ( ) { }
static binding_handler* dummy_allocate_binding_handler ( int, int, int, int ) { return nullptr; }
static void dummy_deallocate_binding_handler ( binding_handler* ) { }
static void dummy_apply_affinity ( binding_handler*, int ) { }
static void dummy_restore_affinity ( binding_handler*, int ) { }
static int dummy_get_default_concurrency( int, int, int ) { return governor::default_num_threads(); }

// Handlers for communication with TBBbind
static void (*initialize_system_topology_ptr)(
    size_t groups_num,
    int& numa_nodes_count, int*& numa_indexes_list,
    int& core_types_count, int*& core_types_indexes_list
) = nullptr;
static void (*destroy_system_topology_ptr)( ) = dummy_destroy_system_topology;

static binding_handler* (*allocate_binding_handler_ptr)( int slot_num, int numa_id, int core_type_id, int max_threads_per_core )
    = dummy_allocate_binding_handler;
static void (*deallocate_binding_handler_ptr)( binding_handler* handler_ptr )
    = dummy_deallocate_binding_handler;
static void (*apply_affinity_ptr)( binding_handler* handler_ptr, int slot_num )
    = dummy_apply_affinity;
static void (*restore_affinity_ptr)( binding_handler* handler_ptr, int slot_num )
    = dummy_restore_affinity;
int (*get_default_concurrency_ptr)( int numa_id, int core_type_id, int max_threads_per_core )
    = dummy_get_default_concurrency;

#if _WIN32 || _WIN64 || __unix__
// Table describing how to link the handlers.
static const dynamic_link_descriptor TbbBindLinkTable[] = {
    DLD(__TBB_internal_initialize_system_topology, initialize_system_topology_ptr),
    DLD(__TBB_internal_destroy_system_topology, destroy_system_topology_ptr),
    DLD(__TBB_internal_allocate_binding_handler, allocate_binding_handler_ptr),
    DLD(__TBB_internal_deallocate_binding_handler, deallocate_binding_handler_ptr),
    DLD(__TBB_internal_apply_affinity, apply_affinity_ptr),
    DLD(__TBB_internal_restore_affinity, restore_affinity_ptr),
    DLD(__TBB_internal_get_default_concurrency, get_default_concurrency_ptr)
};

static const unsigned LinkTableSize = sizeof(TbbBindLinkTable) / sizeof(dynamic_link_descriptor);

#if TBB_USE_DEBUG
#define DEBUG_SUFFIX "_debug"
#else
#define DEBUG_SUFFIX
#endif /* TBB_USE_DEBUG */

#if _WIN32 || _WIN64
#define LIBRARY_EXTENSION ".dll"
#define LIBRARY_PREFIX
#elif __unix__
#define LIBRARY_EXTENSION __TBB_STRING(.so.3)
#define LIBRARY_PREFIX "lib"
#endif /* __unix__ */

#define TBBBIND_NAME LIBRARY_PREFIX "tbbbind" DEBUG_SUFFIX LIBRARY_EXTENSION
#define TBBBIND_2_0_NAME LIBRARY_PREFIX "tbbbind_2_0" DEBUG_SUFFIX LIBRARY_EXTENSION

#define TBBBIND_2_5_NAME LIBRARY_PREFIX "tbbbind_2_5" DEBUG_SUFFIX LIBRARY_EXTENSION
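// For reference, derived from the macros above: on Linux these names expand to
// "libtbbbind_2_5.so.3", "libtbbbind_2_0.so.3" and "libtbbbind.so.3"; on Windows to
// "tbbbind_2_5.dll", "tbbbind_2_0.dll" and "tbbbind.dll" (with a "_debug" suffix before
// the extension when TBB_USE_DEBUG is set).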
#endif /* _WIN32 || _WIN64 || __unix__ */

// Representation of system hardware topology information on the TBB side.
// System topology may be initialized by third-party component (e.g. hwloc)
// or just filled in with default stubs.
namespace system_topology {

constexpr int automatic = -1;

static std::atomic<do_once_state> initialization_state;

namespace {
int  numa_nodes_count = 0;
int* numa_nodes_indexes = nullptr;

int  core_types_count = 0;
int* core_types_indexes = nullptr;

const char* load_tbbbind_shared_object() {
#if _WIN32 || _WIN64 || __unix__
#if _WIN32 && !_WIN64
    // For 32-bit Windows applications, process affinity masks can only support up to 32 logical CPUs.
    SYSTEM_INFO si;
    GetNativeSystemInfo(&si);
    if (si.dwNumberOfProcessors > 32) return nullptr;
#endif /* _WIN32 && !_WIN64 */
    for (const auto& tbbbind_version : {TBBBIND_2_5_NAME, TBBBIND_2_0_NAME, TBBBIND_NAME}) {
        if (dynamic_link(tbbbind_version, TbbBindLinkTable, LinkTableSize, nullptr, DYNAMIC_LINK_LOCAL_BINDING)) {
            return tbbbind_version;
        }
    }
#endif /* _WIN32 || _WIN64 || __unix__ */
    return nullptr;
}

int processor_groups_num() {
#if _WIN32
    return NumberOfProcessorGroups();
#else
    // Stub to improve code readability by reducing the number of compile-time conditions
    return 1;
#endif
}
} // internal namespace
// Tries to load the TBBbind shared library; on success, retrieves NUMA topology
// information from it, otherwise fills in the topology with stubs.
void initialization_impl() {
    governor::one_time_init();

    if (const char* tbbbind_name = load_tbbbind_shared_object()) {
        initialize_system_topology_ptr(
            processor_groups_num(),
            numa_nodes_count, numa_nodes_indexes,
            core_types_count, core_types_indexes
        );

        PrintExtraVersionInfo("TBBBIND", tbbbind_name);
        return;
    }

    static int dummy_index = automatic;

    numa_nodes_count = 1;
    numa_nodes_indexes = &dummy_index;

    core_types_count = 1;
    core_types_indexes = &dummy_index;

    PrintExtraVersionInfo("TBBBIND", "UNAVAILABLE");
}

void initialize() {
    atomic_do_once(initialization_impl, initialization_state);
}

void destroy() {
    destroy_system_topology_ptr();
}
} // namespace system_topology

binding_handler* construct_binding_handler(int slot_num, int numa_id, int core_type_id, int max_threads_per_core) {
    system_topology::initialize();
    return allocate_binding_handler_ptr(slot_num, numa_id, core_type_id, max_threads_per_core);
}

void destroy_binding_handler(binding_handler* handler_ptr) {
    __TBB_ASSERT(deallocate_binding_handler_ptr, "tbbbind loading was not performed");
    deallocate_binding_handler_ptr(handler_ptr);
}

void apply_affinity_mask(binding_handler* handler_ptr, int slot_index) {
    __TBB_ASSERT(slot_index >= 0, "Negative thread index");
    __TBB_ASSERT(apply_affinity_ptr, "tbbbind loading was not performed");
    apply_affinity_ptr(handler_ptr, slot_index);
}

void restore_affinity_mask(binding_handler* handler_ptr, int slot_index) {
    __TBB_ASSERT(slot_index >= 0, "Negative thread index");
    __TBB_ASSERT(restore_affinity_ptr, "tbbbind loading was not performed");
    restore_affinity_ptr(handler_ptr, slot_index);
}

unsigned __TBB_EXPORTED_FUNC numa_node_count() {
    system_topology::initialize();
    return system_topology::numa_nodes_count;
}

void __TBB_EXPORTED_FUNC fill_numa_indices(int* index_array) {
    system_topology::initialize();
    std::memcpy(index_array, system_topology::numa_nodes_indexes, system_topology::numa_nodes_count * sizeof(int));
}

int __TBB_EXPORTED_FUNC numa_default_concurrency(int node_id) {
    if (node_id >= 0) {
        system_topology::initialize();
        int result = get_default_concurrency_ptr(
            node_id,
            /*core_type*/system_topology::automatic,
            /*threads_per_core*/system_topology::automatic
        );
        if (result > 0) return result;
    }
    return governor::default_num_threads();
}
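
// Illustrative user-side counterpart of the exported queries above (a sketch; assumes
// the public tbb::info interface that is backed by these entry points):
//
//     std::vector<tbb::numa_node_id> nodes = tbb::info::numa_nodes();
//     for (tbb::numa_node_id id : nodes) {
//         int default_threads = tbb::info::default_concurrency(id);
//         /* ... */
//     }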

unsigned __TBB_EXPORTED_FUNC core_type_count(intptr_t /*reserved*/) {
    system_topology::initialize();
    return system_topology::core_types_count;
}

void __TBB_EXPORTED_FUNC fill_core_type_indices(int* index_array, intptr_t /*reserved*/) {
    system_topology::initialize();
    std::memcpy(index_array, system_topology::core_types_indexes, system_topology::core_types_count * sizeof(int));
}

void constraints_assertion(d1::constraints c) {
    bool is_topology_initialized = system_topology::initialization_state == do_once_state::initialized;
    __TBB_ASSERT_RELEASE(c.max_threads_per_core == system_topology::automatic || c.max_threads_per_core > 0,
        "Wrong max_threads_per_core constraints field value.");

    auto numa_nodes_begin = system_topology::numa_nodes_indexes;
    auto numa_nodes_end = system_topology::numa_nodes_indexes + system_topology::numa_nodes_count;
    __TBB_ASSERT_RELEASE(
        c.numa_id == system_topology::automatic ||
        (is_topology_initialized && std::find(numa_nodes_begin, numa_nodes_end, c.numa_id) != numa_nodes_end),
        "The constraints::numa_id value is not known to the library. Use tbb::info::numa_nodes() to get the list of possible values.");

    int* core_types_begin = system_topology::core_types_indexes;
    int* core_types_end = system_topology::core_types_indexes + system_topology::core_types_count;
    __TBB_ASSERT_RELEASE(c.core_type == system_topology::automatic ||
        (is_topology_initialized && std::find(core_types_begin, core_types_end, c.core_type) != core_types_end),
        "The constraints::core_type value is not known to the library. Use tbb::info::core_types() to get the list of possible values.");
}

int __TBB_EXPORTED_FUNC constraints_default_concurrency(const d1::constraints& c, intptr_t /*reserved*/) {
    constraints_assertion(c);

    if (c.numa_id >= 0 || c.core_type >= 0 || c.max_threads_per_core > 0) {
        system_topology::initialize();
        return get_default_concurrency_ptr(c.numa_id, c.core_type, c.max_threads_per_core);
    }
    return governor::default_num_threads();
}

int __TBB_EXPORTED_FUNC constraints_threads_per_core(const d1::constraints&, intptr_t /*reserved*/) {
    return system_topology::automatic;
}
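
// Illustrative use of the constraints machinery above (a sketch; assumes the public
// tbb::task_arena::constraints type that maps to d1::constraints and a constructor
// taking a NUMA node id):
//
//     for (tbb::numa_node_id id : tbb::info::numa_nodes()) {
//         tbb::task_arena arena(tbb::task_arena::constraints(id));
//         arena.execute([] { /* work bound to this NUMA node */ });
//     }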
#endif /* __TBB_ARENA_BINDING */

} // namespace r1
} // namespace detail
} // namespace tbb