/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2006-2020. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#if defined(__WIN32__)
#  include <windows.h>
#endif

#include "ethread_inline.h"
#include "erl_misc_utils.h"

#if !defined(__WIN32__) /* UNIX */
#  include <stdarg.h>
#  include <stdio.h>
#  include <sys/types.h>
#  include <sys/param.h>
#  include <limits.h>
#  include <dirent.h>
#  include <sys/stat.h>
#  include <fcntl.h>
#  ifdef HAVE_SYS_SELECT_H
#    include <sys/select.h>
#  endif
#  if TIME_WITH_SYS_TIME
#     include <sys/time.h>
#     include <time.h>
#  else
#     if HAVE_SYS_TIME_H
#         include <sys/time.h>
#     else
#         include <time.h>
#     endif
#  endif
#  include <string.h>
#  include <stdio.h>
#  ifdef HAVE_UNISTD_H
#    include <unistd.h>
#  endif
#  if defined(_SC_NPROC_CONF) && !defined(_SC_NPROCESSORS_CONF)
#    define _SC_NPROCESSORS_CONF _SC_NPROC_CONF
#  endif
#  if defined(_SC_NPROC_ONLN) && !defined(_SC_NPROCESSORS_ONLN)
#    define _SC_NPROCESSORS_ONLN _SC_NPROC_ONLN
#  endif
#  if (defined(NO_SYSCONF) || !defined(_SC_NPROCESSORS_CONF))
#    ifdef HAVE_SYS_SYSCTL_H
#      include <sys/sysctl.h>
#    endif
#  endif
#endif

#if defined(HAVE_SCHED_xETAFFINITY)
#  include <sched.h>
#  define ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__
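/*
 * Both the get and set wrappers below follow the convention used
 * throughout this file: they evaluate to 0 on success and to a
 * negative errno value on failure.
 */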
#define ERTS_MU_GET_PROC_AFFINITY__(CPUINFOP, CPUSET)			\
     (sched_getaffinity((CPUINFOP)->pid,				\
			sizeof(cpu_set_t),				\
			(CPUSET)) != 0 ? -errno : 0)
#define ERTS_MU_SET_THR_AFFINITY__(SETP)				\
     (sched_setaffinity(0, sizeof(cpu_set_t), (SETP)) != 0 ? -errno : 0)
#elif defined(HAVE_CPUSET_xETAFFINITY)
#  include <sys/param.h>
#  include <sys/cpuset.h>
#  define ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__
#define ERTS_MU_GET_PROC_AFFINITY__(CPUINFOP, CPUSET)			\
     (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,		\
			sizeof(cpuset_t),				\
			(CPUSET)) != 0 ? -errno : 0)
#define ERTS_MU_SET_THR_AFFINITY__(CPUSETP)				\
     (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,		\
			sizeof(cpuset_t),				\
			(CPUSETP)) != 0 ? -errno : 0)
#  define cpu_set_t cpuset_t
#elif defined(__WIN32__)
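/*
 * On Windows we emulate the POSIX cpu_set_t interface with a plain
 * DWORD bitmask, which limits the affinity handling here to the
 * first 32 logical processors.
 */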
#  define ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__
#  define cpu_set_t DWORD
#  define CPU_SETSIZE (sizeof(DWORD)*8)
#  define CPU_ZERO(SETP) (*(SETP) = (DWORD) 0)
#  define CPU_SET(CPU, SETP) (*(SETP) |= (((DWORD) 1) << (CPU)))
#  define CPU_CLR(CPU, SETP) (*(SETP) &= ~(((DWORD) 1) << (CPU)))
#  define CPU_ISSET(CPU, SETP) ((*(SETP) & (((DWORD) 1) << (CPU))) != (DWORD) 0)
#define ERTS_MU_GET_PROC_AFFINITY__ get_proc_affinity
#define ERTS_MU_SET_THR_AFFINITY__ set_thr_affinity
#endif
#ifdef HAVE_PSET_INFO
#  include <sys/pset.h>
#endif
#ifdef HAVE_PROCESSOR_BIND
#  include <sys/processor.h>
#  include <sys/procset.h>
#endif

#include <stdlib.h>
#include <errno.h> /* errno and the E* values used throughout this file */
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif

#ifdef __linux__
#  define ERTS_SYS_NODE_PATH	"/sys/devices/system/node"
#  define ERTS_SYS_CPU_PATH	"/sys/devices/system/cpu"
#endif

#ifdef __FreeBSD__
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

/* Platforms for which the static read_topology() below has a real
 * implementation */

#if defined(__linux__) || defined(HAVE_KSTAT) || defined(__WIN32__) || defined(__FreeBSD__)
#  define ERTS_CPU_TOPOLOGY_ENABLED (1)
#endif

static int read_topology(erts_cpu_info_t *cpuinfo);
static int read_cpu_quota(int limit);

#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
static int
cpu_sets_are_eq(cpu_set_t *x, cpu_set_t *y)
{
    int i;
    for (i = 0; i < CPU_SETSIZE; i++) {
        if (CPU_ISSET(i, x)) {
            if (!CPU_ISSET(i, y))
                return 0;
        }
        else {
            if (CPU_ISSET(i, y))
                return 0;
        }
    }
    return 1;
}

#endif
int
erts_milli_sleep(long ms)
{
    if (ms > 0) {
#ifdef __WIN32__
        Sleep((DWORD) ms);
#else
        struct timeval tv;
        tv.tv_sec = ms / 1000;
        tv.tv_usec = (ms % 1000) * 1000;
        if (select(0, NULL, NULL, NULL, &tv) < 0)
            return errno == EINTR ? 1 : -1;
#endif
    }
    return 0;
}

struct erts_cpu_info_t_ {
    int configured;
    int online;
    int available;
    int topology_size;
    int quota;
    erts_cpu_topology_t *topology;
#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
    char *affinity_str;
    char affinity_str_buf[CPU_SETSIZE/4+2];
    cpu_set_t cpuset;
#if defined(HAVE_SCHED_xETAFFINITY)
    pid_t pid;
#endif
#elif defined(HAVE_PSET_INFO)
    processorid_t *cpuids;
#endif
};

#if defined(__WIN32__)

static ETHR_FORCE_INLINE int
get_proc_affinity(erts_cpu_info_t *cpuinfo, cpu_set_t *cpuset)
{
    DWORD_PTR pamask;
    DWORD_PTR samask;
    if (GetProcessAffinityMask(GetCurrentProcess(), &pamask, &samask)) {
        *cpuset = (cpu_set_t) pamask;
        return 0;
    }
    else {
        *cpuset = (cpu_set_t) 0;
        return -erts_get_last_win_errno();
    }
}

static ETHR_FORCE_INLINE int
set_thr_affinity(cpu_set_t *set)
{
    if (*set == (cpu_set_t) 0)
        return -ENOTSUP;
    if (SetThreadAffinityMask(GetCurrentThread(), *set) == 0)
        return -erts_get_last_win_errno();
    else
        return 0;
}

#endif

erts_cpu_info_t *
erts_cpu_info_create(void)
{
    erts_cpu_info_t *cpuinfo = malloc(sizeof(erts_cpu_info_t));
    if (!cpuinfo)
        return NULL;
#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
    cpuinfo->affinity_str = NULL;
#if defined(HAVE_SCHED_xETAFFINITY)
    cpuinfo->pid = getpid();
#endif
#elif defined(HAVE_PSET_INFO)
    cpuinfo->cpuids = NULL;
#endif
    cpuinfo->topology_size = 0;
    cpuinfo->topology = NULL;
    cpuinfo->configured = -1;
    cpuinfo->online = -1;
    cpuinfo->available = -1;
    cpuinfo->quota = -1;
    erts_cpu_info_update(cpuinfo);
    return cpuinfo;
}

void
erts_cpu_info_destroy(erts_cpu_info_t *cpuinfo)
{
    if (cpuinfo) {
        cpuinfo->configured = 0;
        cpuinfo->online = 0;
        cpuinfo->available = 0;
#ifdef HAVE_PSET_INFO
        if (cpuinfo->cpuids)
            free(cpuinfo->cpuids);
#endif
        cpuinfo->topology_size = 0;
        if (cpuinfo->topology) {
            /* Free before clearing the pointer; the previous order
             * (clear, then free) called free(NULL) and leaked the
             * topology array. */
            free(cpuinfo->topology);
            cpuinfo->topology = NULL;
        }
        free(cpuinfo);
    }
}

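/*
 * Refresh all cached values. Returns non-zero when anything (the CPU
 * counts, the process affinity, the CPU quota, or the topology) has
 * changed since the previous update, and zero otherwise.
 */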
int
erts_cpu_info_update(erts_cpu_info_t *cpuinfo)
{
    int changed = 0;
    int configured = 0;
    int online = 0;
    int available = 0;
    int quota = 0;
    erts_cpu_topology_t *old_topology;
    int old_topology_size;
#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
    cpu_set_t cpuset;
#endif

#ifdef __WIN32__
    {
        int i;
        SYSTEM_INFO sys_info;
        GetSystemInfo(&sys_info);
        configured = (int) sys_info.dwNumberOfProcessors;
        for (i = 0; i < sizeof(DWORD)*8; i++)
            if (sys_info.dwActiveProcessorMask & (((DWORD) 1) << i))
                online++;
    }
#elif !defined(NO_SYSCONF) && (defined(_SC_NPROCESSORS_CONF) \
                               || defined(_SC_NPROCESSORS_ONLN))
#ifdef _SC_NPROCESSORS_CONF
    configured = (int) sysconf(_SC_NPROCESSORS_CONF);
    if (configured < 0)
        configured = 0;
#endif
#ifdef _SC_NPROCESSORS_ONLN
    online = (int) sysconf(_SC_NPROCESSORS_ONLN);
    if (online < 0)
        online = 0;
#endif
#elif defined(HAVE_SYS_SYSCTL_H) && defined(CTL_HW) && (defined(HW_NCPU) \
                                                        || defined(HW_AVAILCPU))
    {
        int mib[2];
        size_t len;

#ifdef HW_NCPU
        len = sizeof(int);
        mib[0] = CTL_HW;
        mib[1] = HW_NCPU;
        if (sysctl(&mib[0], 2, &configured, &len, NULL, 0) < 0)
            configured = 0;
#endif
#ifdef HW_AVAILCPU
        len = sizeof(int);
        mib[0] = CTL_HW;
        mib[1] = HW_AVAILCPU;
        if (sysctl(&mib[0], 2, &online, &len, NULL, 0) < 0)
            online = 0;
#endif
    }
#endif

    if (online > configured)
        online = configured;

    if (cpuinfo->configured != configured)
        changed = 1;
    if (cpuinfo->online != online)
        changed = 1;

#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
    if (ERTS_MU_GET_PROC_AFFINITY__(cpuinfo, &cpuset) == 0) {
        if (!changed && !cpu_sets_are_eq(&cpuset, &cpuinfo->cpuset))
            changed = 1;

        if (!changed)
            available = cpuinfo->available;
        else {
            int i, c, cn, si;

            memcpy((void *) &cpuinfo->cpuset,
                   (void *) &cpuset,
                   sizeof(cpu_set_t));

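            /*
             * Render the new affinity mask as a hexadecimal string.
             * The buffer is filled backwards, one hex digit per four
             * CPUs (CPU 0 ends up in the least significant bit of the
             * last digit); leading zeros are stripped afterwards.
             */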
            c = cn = 0;
            si = sizeof(cpuinfo->affinity_str_buf) - 1;
            cpuinfo->affinity_str_buf[si] = '\0';
            for (i = 0; i < CPU_SETSIZE; i++) {
                if (CPU_ISSET(i, &cpuinfo->cpuset)) {
                    c |= 1 << cn;
                    available++;
                }
                cn++;
                if (cn == 4) {
                    cpuinfo->affinity_str_buf[--si] = (c < 10
                                                       ? '0' + c
                                                       : 'A' + c - 10);
                    c = cn = 0;
                }
            }
            if (c)
                cpuinfo->affinity_str_buf[--si] = (c < 10
                                                   ? '0' + c
                                                   : 'A' + c - 10);
            while (cpuinfo->affinity_str_buf[si] == '0')
                si++;
            cpuinfo->affinity_str = &cpuinfo->affinity_str_buf[si];
        }
    }
#elif defined(HAVE_PSET_INFO)
    {
        processorid_t *cpuids;
        uint_t numcpus = configured;
        cpuids = malloc(sizeof(processorid_t)*numcpus);
        if (cpuids) {
            if (pset_info(PS_MYID, NULL, &numcpus, cpuids) == 0)
                available = (int) numcpus;
            if (available < 0) {
                free(cpuids);
                cpuids = NULL;
                available = 0;
            }
        }
        if (!cpuids) {
            if (cpuinfo->cpuids)
                changed = 1;
        }
        else {
            /* Changed if there previously was no id list, or if the
             * contents differ; comparing an absent old list with
             * memcmp() would be undefined behavior. */
            if (!cpuinfo->cpuids)
                changed = 1;
            else if (memcmp((void *) cpuinfo->cpuids,
                            (void *) cpuids,
                            sizeof(processorid_t)*numcpus) != 0)
                changed = 1;
        }
        if (!changed) {
            if (cpuids)
                free(cpuids);
        }
        else {
            if (cpuinfo->cpuids)
                free(cpuinfo->cpuids);
            cpuinfo->cpuids = cpuids;
        }
    }
#endif

    if (available > online)
        available = online;
    else if (available == 0) /* shouldn't happen */
        available = online;

    if (cpuinfo->available != available)
        changed = 1;

    quota = read_cpu_quota(online);
    if (cpuinfo->quota != quota)
        changed = 1;

    cpuinfo->configured = configured;
    cpuinfo->online = online;
    cpuinfo->available = available;
    cpuinfo->quota = quota;

    old_topology = cpuinfo->topology;
    old_topology_size = cpuinfo->topology_size;
    cpuinfo->topology = NULL;

    read_topology(cpuinfo);

    if (cpuinfo->topology_size != old_topology_size
        || (old_topology_size != 0
            && memcmp((void *) cpuinfo->topology,
                      (void *) old_topology,
                      (sizeof(erts_cpu_topology_t)
                       * old_topology_size)) != 0)) {
        changed = 1;
        if (old_topology)
            free(old_topology);
    }
    else {
        if (cpuinfo->topology)
            free(cpuinfo->topology);
        cpuinfo->topology = old_topology;
    }

    return changed;
}

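/*
 * The getters below return the cached value from the last update,
 * -EINVAL when cpuinfo is NULL, and -ENOTSUP when the value could not
 * be determined on this system.
 */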
int
erts_get_cpu_configured(erts_cpu_info_t *cpuinfo)
{
    if (!cpuinfo)
        return -EINVAL;
    if (cpuinfo->configured <= 0)
        return -ENOTSUP;
    return cpuinfo->configured;
}

int
erts_get_cpu_online(erts_cpu_info_t *cpuinfo)
{
    if (!cpuinfo)
        return -EINVAL;
    if (cpuinfo->online <= 0)
        return -ENOTSUP;
    return cpuinfo->online;
}

int
erts_get_cpu_available(erts_cpu_info_t *cpuinfo)
{
    if (!cpuinfo)
        return -EINVAL;
    if (cpuinfo->available <= 0)
        return -ENOTSUP;
    return cpuinfo->available;
}

int
erts_get_cpu_quota(erts_cpu_info_t *cpuinfo)
{
    if (!cpuinfo)
        return -EINVAL;
    if (cpuinfo->quota <= 0)
        return -ENOTSUP;
    return cpuinfo->quota;
}

char *
erts_get_unbind_from_cpu_str(erts_cpu_info_t *cpuinfo)
{
#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
    if (!cpuinfo)
        return "false";
    return cpuinfo->affinity_str;
#else
    return "true";
#endif
}

int
erts_get_available_cpu(erts_cpu_info_t *cpuinfo, int no)
{
    if (!cpuinfo || no < 1 || cpuinfo->available < no)
        return -EINVAL;
#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
    {
        cpu_set_t *allowed = &cpuinfo->cpuset;
        int ix, n;
        for (ix = 0, n = 1; ix < CPU_SETSIZE; ix++) {
            if (CPU_ISSET(ix, allowed)) {
                if (no == n)
                    return ix;
                n++;
            }
        }
    }
    return -EINVAL;
#elif defined(HAVE_PROCESSOR_BIND)
#if defined(HAVE_PSET_INFO)
    return (int) cpuinfo->cpuids[no-1];
#elif defined(HAVE_KSTAT)
    if (cpuinfo->topology && no <= cpuinfo->online) {
        /* May not be available, but this is the best we can do */
        return cpuinfo->topology[no-1].logical;
    }
    return -EINVAL;
#endif
#else
    return -ENOTSUP;
#endif
}

int
erts_is_cpu_available(erts_cpu_info_t *cpuinfo, int id)
{
    if (cpuinfo && 0 <= id) {
#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
        if (id < CPU_SETSIZE)
            return CPU_ISSET(id, &cpuinfo->cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
        int no;
#if defined(HAVE_PSET_INFO)
        for (no = 0; no < cpuinfo->available; no++)
            if (id == (int) cpuinfo->cpuids[no])
                return 1;
#elif defined(HAVE_KSTAT)
        if (cpuinfo->topology) {
            for (no = 0; no < cpuinfo->online; no++) {
                if (id == (int) cpuinfo->topology[no].logical) {
                    /* May not be available, but this is the best we can do... */
                    return 1;
                }
            }
        }
#endif
#endif
    }
    return 0;
}

int
erts_get_cpu_topology_size(erts_cpu_info_t *cpuinfo)
{
    return cpuinfo->topology_size;
}

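/*
 * Copy the cached topology into 'topology', which must have room for
 * at least erts_get_cpu_topology_size(cpuinfo) elements. Returns the
 * number of entries copied, or 0 when no topology is known.
 */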
int
erts_get_cpu_topology(erts_cpu_info_t *cpuinfo,
                      erts_cpu_topology_t *topology)
{
    if (!cpuinfo->topology)
        return 0;
    memcpy((void *) topology,
           (void *) cpuinfo->topology,
           cpuinfo->topology_size*sizeof(erts_cpu_topology_t));
    return cpuinfo->topology_size;
}

int
erts_bind_to_cpu(erts_cpu_info_t *cpuinfo, int cpu)
{
    /*
     * Caller can test for available functionality by
     * passing a negative cpu id. If functionality is
     * available -EINVAL is returned; otherwise,
     * -ENOTSUP.
     */
    if (!cpuinfo)
        return -EINVAL;
#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
    {
        cpu_set_t bind_set;
        if (cpu < 0)
            return -EINVAL;
        if (!CPU_ISSET(cpu, &cpuinfo->cpuset))
            return -EINVAL;

        CPU_ZERO(&bind_set);
        CPU_SET(cpu, &bind_set);
        return ERTS_MU_SET_THR_AFFINITY__(&bind_set);
    }
#elif defined(HAVE_PROCESSOR_BIND)
    if (cpu < 0)
        return -EINVAL;
    if (processor_bind(P_LWPID, P_MYID, (processorid_t) cpu, NULL) != 0)
        return -errno;
    return 0;
#else
    return -ENOTSUP;
#endif
}

int
erts_unbind_from_cpu(erts_cpu_info_t *cpuinfo)
{
    if (!cpuinfo)
        return -EINVAL;
#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
    return ERTS_MU_SET_THR_AFFINITY__(&cpuinfo->cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
    if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
        return -errno;
    return 0;
#else
    return -ENOTSUP;
#endif
}

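/*
 * 'str' is a hexadecimal affinity mask in the same format as the one
 * built by erts_cpu_info_update() above (and returned by
 * erts_get_unbind_from_cpu_str()): the last hex digit describes
 * CPUs 0-3, the digit before it CPUs 4-7, and so on.
 */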
int
erts_unbind_from_cpu_str(char *str)
{
#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
    char *c = str;
    int cpus = 0;
    int shft = 0;
    cpu_set_t cpuset;

    CPU_ZERO(&cpuset);

    if (!c)
        return -EINVAL;

    while (*c)
        c++;

    while (c != str) {
        int shft2;
        int mask = 0;
        c--;
        switch (*c) {
        case '0': mask = 0; break;
        case '1': mask = 1; break;
        case '2': mask = 2; break;
        case '3': mask = 3; break;
        case '4': mask = 4; break;
        case '5': mask = 5; break;
        case '6': mask = 6; break;
        case '7': mask = 7; break;
        case '8': mask = 8; break;
        case '9': mask = 9; break;
        case 'A': case 'a': mask = 10; break;
        case 'B': case 'b': mask = 11; break;
        case 'C': case 'c': mask = 12; break;
        case 'D': case 'd': mask = 13; break;
        case 'E': case 'e': mask = 14; break;
        case 'F': case 'f': mask = 15; break;
        default: return -EINVAL;
        }
        for (shft2 = 0; shft2 < 4; shft2++) {
            if (mask & (1 << shft2)) {
                int cpu = shft + shft2;
                if (cpu >= CPU_SETSIZE)
                    return -EINVAL;
                cpus++;
                CPU_SET(cpu, &cpuset);
            }
        }
        shft += 4;
    }

    if (!cpus)
        return -EINVAL;

    return ERTS_MU_SET_THR_AFFINITY__(&cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
    if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
        return -errno;
    return 0;
#else
    return -ENOTSUP;
#endif
}


#if defined(ERTS_CPU_TOPOLOGY_ENABLED)
static int
pn_cmp(const void *vx, const void *vy)
{
    erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
    erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;

    if (x->processor != y->processor)
        return x->processor - y->processor;
    if (x->node != y->node)
        return x->node - y->node;
    if (x->processor_node != y->processor_node)
        return x->processor_node - y->processor_node;
    if (x->core != y->core)
        return x->core - y->core;
    if (x->thread != y->thread)
        return x->thread - y->thread;
    if (x->logical != y->logical)
        return x->logical - y->logical;
    return 0;
}

static int
cpu_cmp(const void *vx, const void *vy)
{
    erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
    erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;

    if (x->node != y->node)
        return x->node - y->node;
    if (x->processor != y->processor)
        return x->processor - y->processor;
    if (x->processor_node != y->processor_node)
        return x->processor_node - y->processor_node;
    if (x->core != y->core)
        return x->core - y->core;
    if (x->thread != y->thread)
        return x->thread - y->thread;
    if (x->logical != y->logical)
        return x->logical - y->logical;
    return 0;
}

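/*
 * When the logical CPUs of one physical processor turn out to span
 * more than one NUMA node, rewrite those entries so that the node is
 * recorded as a node within the processor (processor_node) instead
 * of the processor being within a node.
 */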
static void
adjust_processor_nodes(erts_cpu_info_t *cpuinfo, int no_nodes)
{
    erts_cpu_topology_t *prev, *this, *last;
    if (no_nodes > 1) {
        int processor = -1;
        int processor_node = 0;
        int node = -1;

        qsort(cpuinfo->topology,
              cpuinfo->topology_size,
              sizeof(erts_cpu_topology_t),
              pn_cmp);

        prev = NULL;
        this = &cpuinfo->topology[0];
        last = &cpuinfo->topology[cpuinfo->topology_size-1];
        while (1) {
            if (processor == this->processor) {
                if (node != this->node)
                    processor_node = 1;
            }
            else {
                if (processor_node) {
                make_processor_node:
                    while (prev->processor == processor) {
                        prev->processor_node = prev->node;
                        prev->node = -1;
                        if (prev == &cpuinfo->topology[0])
                            break;
                        prev--;
                    }
                    processor_node = 0;
                }
                processor = this->processor;
                node = this->node;
            }
            if (this == last) {
                if (processor_node) {
                    prev = this;
                    goto make_processor_node;
                }
                break;
            }
            prev = this++;
        }
    }
}
#endif


#ifdef __linux__

static int
read_file(const char *path, char *buf, int size)
{
    int ix = 0;
    ssize_t sz;
    int fd = open(path, O_RDONLY);
    if (fd < 0)
        goto error;
    /* Leave room for the NUL terminator written below; reading a full
     * 'size' bytes would have placed the terminator one byte past the
     * end of the buffer. */
    while (ix < size - 1) {
        sz = read(fd, &buf[ix], size - 1 - ix);
        if (sz <= 0) {
            if (sz == 0)
                break;
            if (errno == EINTR)
                continue;
            goto error;
        }
        ix += sz;
    }
    buf[ix] = '\0';
    close(fd);
    return ix;

 error: {
        int saved_errno = errno;
        if (fd >= 0)
            close(fd);
        if (saved_errno)
            return -saved_errno;
        else
            return -EINVAL;
    }
}

/* Macro to convert an int to a string */
#define STR_INDIR(x) #x
#define STR(x) STR_INDIR(x)

static int
read_topology(erts_cpu_info_t *cpuinfo)
{
    /* Need to fit all of the path in these buffers... */
    char npath[MAXPATHLEN];
    char cpath[MAXPATHLEN];
    char tpath[MAXPATHLEN+5+30];
    char fpath[MAXPATHLEN];
    DIR *ndir = NULL;
    DIR *cdir = NULL;
    struct dirent *nde;
    int ix;
    int res = 0;
    int got_nodes = 0;
    int no_nodes = 0;

    errno = 0;

    if (cpuinfo->configured < 1)
        goto error;

    cpuinfo->topology = malloc(sizeof(erts_cpu_topology_t)
                               * cpuinfo->configured);
    if (!cpuinfo->topology)
        goto error;

    for (ix = 0; ix < cpuinfo->configured; ix++) {
        cpuinfo->topology[ix].node = -1;
        cpuinfo->topology[ix].processor = -1;
        cpuinfo->topology[ix].processor_node = -1;
        cpuinfo->topology[ix].core = -1;
        cpuinfo->topology[ix].thread = -1;
        cpuinfo->topology[ix].logical = -1;
    }

    ix = 0;

    if (realpath(ERTS_SYS_NODE_PATH, npath)) {
        ndir = opendir(npath);
        got_nodes = (ndir != NULL);
    }

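    /*
     * With NUMA information available we make one pass per node<N>
     * directory under /sys/devices/system/node; otherwise we make a
     * single pass over /sys/devices/system/cpu with node_id left
     * at -1.
     */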
    do {
        int node_id = -1;

        if (!got_nodes) {
            if (!realpath(ERTS_SYS_CPU_PATH, cpath))
                goto error;
        }
        else {

            nde = readdir(ndir);

            if (!nde)
                break;

            if (sscanf(nde->d_name, "node%d", &node_id) != 1)
                continue;

            no_nodes++;

            sprintf(tpath, "%." STR(MAXPATHLEN) "s/node%d", npath, node_id);

            if (!realpath(tpath, cpath))
                goto error;
        }

        cdir = opendir(cpath);
        if (!cdir)
            goto error;

        while (1) {
            int cpu_id;
            struct dirent *cde = readdir(cdir);
            if (!cde) {
                closedir(cdir);
                cdir = NULL;
                break;
            }

            if (sscanf(cde->d_name, "cpu%d", &cpu_id) == 1) {
                char buf[50]; /* Much more than enough for an integer */
                int processor_id, core_id;
                sprintf(tpath, "%." STR(MAXPATHLEN) "s/cpu%d/topology/physical_package_id",
                        cpath, cpu_id);
                if (!realpath(tpath, fpath))
                    continue;
                if (read_file(fpath, buf, sizeof(buf)) <= 0)
                    continue;
                if (sscanf(buf, "%d", &processor_id) != 1)
                    continue;
                sprintf(tpath, "%." STR(MAXPATHLEN) "s/cpu%d/topology/core_id",
                        cpath, cpu_id);
                if (!realpath(tpath, fpath))
                    continue;
                if (read_file(fpath, buf, sizeof(buf)) <= 0)
                    continue;
                if (sscanf(buf, "%d", &core_id) != 1)
                    continue;

                /*
                 * The number of CPUs that sysfs presents can be
                 * greater than the number of CPUs configured in
                 * sysconf. This has been known to happen in docker.
                 * When this happens we refuse to give a CPU topology.
                 */
                if (ix >= cpuinfo->configured)
                    goto error;

                /*
                 * We now know node id, processor id, and
                 * core id of the logical processor with
                 * the cpu id 'cpu_id'.
                 */
                cpuinfo->topology[ix].node      = node_id;
                cpuinfo->topology[ix].processor = processor_id;
                cpuinfo->topology[ix].processor_node = -1; /* Fixed later */
                cpuinfo->topology[ix].core      = core_id;
                cpuinfo->topology[ix].thread    = 0; /* we'll enumerate later */
                cpuinfo->topology[ix].logical   = cpu_id;
                ix++;

            }
        }
    } while (got_nodes);

    res = ix;

    if (!res || res < cpuinfo->online)
        res = 0;
    else {
        erts_cpu_topology_t *prev, *this, *last;

        cpuinfo->topology_size = res;

        if (cpuinfo->topology_size != cpuinfo->configured) {
            void *t = realloc(cpuinfo->topology, (sizeof(erts_cpu_topology_t)
                                                  * cpuinfo->topology_size));
            if (t)
                cpuinfo->topology = t;
        }

        adjust_processor_nodes(cpuinfo, no_nodes);

        qsort(cpuinfo->topology,
              cpuinfo->topology_size,
              sizeof(erts_cpu_topology_t),
              cpu_cmp);

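        /*
         * Number the hardware threads within each core; this relies
         * on the sort order just established by cpu_cmp().
         */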
        this = &cpuinfo->topology[0];
        this->thread = 0;

        if (res > 1) {
            prev = this++;
            last = &cpuinfo->topology[cpuinfo->topology_size-1];

            while (1) {
                this->thread = ((this->node == prev->node
                                 && this->processor == prev->processor
                                 && this->processor_node == prev->processor_node
                                 && this->core == prev->core)
                                ? prev->thread + 1
                                : 0);
                if (this == last)
                    break;
                prev = this++;
            }
        }
    }

 error:

    if (res == 0) {
        cpuinfo->topology_size = 0;
        if (cpuinfo->topology) {
            free(cpuinfo->topology);
            cpuinfo->topology = NULL;
        }
        if (errno)
            res = -errno;
        else
            res = -EINVAL;
    }

    if (ndir)
        closedir(ndir);
    if (cdir)
        closedir(cdir);

    return res;
}

static int
csv_contains(const char *haystack,
             const char *element,
             char separator) {
    size_t element_len;
    const char *ptr;

    element_len = strlen(element);
    ptr = strstr(haystack, element);

    while (ptr) {
        if (!ptr[element_len] || ptr[element_len] == separator) {
            if (ptr == haystack || ptr[-1] == separator) {
                return 1;
            }
        }

        ptr = strstr(&ptr[1], element);
    }

    return 0;
}

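/*
 * Concatenate two strings into a freshly malloc()ed buffer. Returns
 * NULL on allocation failure, so callers must check the result.
 */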
static const char*
str_combine(const char *a, const char *b) {
    size_t a_len, b_len;
    char *result;

    a_len = strlen(a);
    b_len = strlen(b);

    result = malloc(a_len + b_len + 1);
    if (!result)
        return NULL;

    memcpy(&result[0], a, a_len);
    memcpy(&result[a_len], b, b_len + 1);

    return result;
}

static const char*
get_cgroup_v1_base_dir(const char *controller) {
    char line_buf[5 << 10];
    FILE *var_file;

    var_file = fopen("/proc/self/cgroup", "r");

    if (var_file == NULL) {
        return NULL;
    }

    while (fgets(line_buf, sizeof(line_buf), var_file)) {
        /* sscanf_s requires C11, so we use hardcoded sizes (rather than rely
         * on macros like MAXPATHLEN) so we can specify them directly in the
         * format string. */
        char base_dir[4 << 10];
        char controllers[256];

        if (sscanf(line_buf, "%*d:%255[^:]:%4095s\n",
                   controllers, base_dir) != 2) {
            continue;
        }

        if (csv_contains(controllers, controller, ',')) {
            fclose(var_file);
            return strdup(base_dir);
        }
    }

    fclose(var_file);
    return NULL;
}

enum cgroup_version_t {
    ERTS_CGROUP_NONE,
    ERTS_CGROUP_V1,
    ERTS_CGROUP_V2
};

static enum cgroup_version_t
get_cgroup_path(const char *controller, const char **path) {
    char line_buf[10 << 10];
    FILE *var_file;

    var_file = fopen("/proc/self/mountinfo", "r");

    if (var_file == NULL) {
        return ERTS_CGROUP_NONE;
    }

    while (fgets(line_buf, sizeof(line_buf), var_file)) {
        char mount_path[4 << 10];
        char root_path[4 << 10];
        char fs_flags[512];
        char fs_type[64];

        /* Format:
         *    [Mount id] [Parent id] [Major] [Minor] [Root] [Mounted at]    \
         *    [Mount flags] ... (options terminated by a single hyphen) ... \
         *    [FS type] [Mount source] [Flags]
         *
         * (See proc(5) for a more complete description.)
         *
         * This fails if any of the fs options contain a hyphen, but this is
         * not likely to happen on a cgroup, so we just skip such lines. */
        if (sscanf(line_buf,
                   "%*d %*d %*d:%*d %4095s %4095s %*s %*[^-]- "
                   "%63s %*s %511[^\n]\n",
                   root_path, mount_path,
                   fs_type, fs_flags) != 4) {
            continue;
        }

        if (!strcmp(fs_type, "cgroup2")) {
            char controllers[256];
            const char *cgc_path;

            cgc_path = str_combine(mount_path, "/cgroup.controllers");
            if (cgc_path &&
                read_file(cgc_path, controllers, sizeof(controllers)) > 0) {
                if (csv_contains(controllers, controller, ' ')) {
                    free((void*)cgc_path);
                    fclose(var_file);

                    *path = strdup(mount_path);
                    return *path ? ERTS_CGROUP_V2 : ERTS_CGROUP_NONE;
                }
            }
            free((void*)cgc_path);
        } else if (!strcmp(fs_type, "cgroup")) {
            if (csv_contains(fs_flags, controller, ',')) {
                const char *base_dir = get_cgroup_v1_base_dir(controller);

                if (base_dir) {
                    if (strcmp(root_path, base_dir)) {
                        *path = str_combine(mount_path, base_dir);
                    } else {
                        *path = strdup(mount_path);
                    }

                    free((void*)base_dir);
                    fclose(var_file);

                    return *path ? ERTS_CGROUP_V1 : ERTS_CGROUP_NONE;
                }
            }
        }
    }

    fclose(var_file);

    return ERTS_CGROUP_NONE;
}

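/*
 * Open and scan a single cgroup interface file (for example
 * "/cpu.max" under the given group path) with the supplied fscanf()
 * format. Returns 1 when all arg_count items were converted, and 0
 * otherwise.
 */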
static int read_cgroup_interface(const char *group_path, const char *if_name,
                                 int arg_count, const char *format, ...) {
    const char *var_path;
    int res;

    var_path = str_combine(group_path, if_name);
    res = 0;

    if (var_path) {
        FILE *var_file;

        var_file = fopen(var_path, "r");
        free((void*)var_path);

        if (var_file) {
            va_list va_args;

            va_start(va_args, format);

            if (vfscanf(var_file, format, va_args) == arg_count) {
                res = 1;
            }

            va_end(va_args);

            fclose(var_file);
        }
    }

    return res;
}

/* CPU quotas are read from the cgroup configuration, which can be pretty hairy
 * as we need to support both v1 and v2, and it's possible for both versions to
 * be active at the same time. */
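/*
 * For reference: cgroup v1 splits the quota over the
 * cpu.cfs_quota_us and cpu.cfs_period_us files, while cgroup v2
 * keeps both in cpu.max ("<quota> <period>", e.g. "200000 100000"
 * for a two-CPU quota). A v2 quota of "max" means unlimited; it
 * makes the scan below fail, which we treat as "no quota."
 */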

static int
read_cpu_quota(int limit)
{
    ssize_t cfs_period_us, cfs_quota_us;
    const char *cgroup_path;
    int succeeded;

    switch (get_cgroup_path("cpu", &cgroup_path)) {
    case ERTS_CGROUP_V1:
        succeeded = read_cgroup_interface(cgroup_path, "/cpu.cfs_quota_us",
                        1, "%zi", &cfs_quota_us) &&
                    read_cgroup_interface(cgroup_path, "/cpu.cfs_period_us",
                        1, "%zi", &cfs_period_us);

        free((void*)cgroup_path);
        break;
    case ERTS_CGROUP_V2:
        succeeded = read_cgroup_interface(cgroup_path, "/cpu.max",
                        2, "%zi %zi", &cfs_quota_us, &cfs_period_us);

        free((void*)cgroup_path);
        break;
    default:
        succeeded = 0;
        break;
    }

    if (succeeded) {
        if (cfs_period_us > 0 && cfs_quota_us > 0) {
            size_t quota = cfs_quota_us / cfs_period_us;

            if (quota == 0) {
                quota = 1;
            }

            if (quota > 0 && quota <= (size_t)limit) {
                return quota;
            }
        }

        return limit;
    }

    return 0;
}

#elif defined(HAVE_KSTAT) /* SunOS kstat */

#include <kstat.h>

static int
data_lookup_int(kstat_t *ks, char *what)
{
    int res;
    kstat_named_t *ks_n;

    ks_n = kstat_data_lookup(ks, what);
    if (!ks_n)
        return 0;

    switch (ks_n->data_type) {
    case KSTAT_DATA_CHAR:
        res = atoi(ks_n->value.c);
        break;
    case KSTAT_DATA_INT32:
        res = (int) ks_n->value.i32;
        break;
    case KSTAT_DATA_UINT32:
        res = (int) ks_n->value.ui32;
        break;
    case KSTAT_DATA_INT64:
        res = (int) ks_n->value.i64;
        break;
    case KSTAT_DATA_UINT64:
        res = (int) ks_n->value.ui64;
        break;
    default:
        res = 0;
        break;
    }
    return res;
}

static int
read_topology(erts_cpu_info_t *cpuinfo)
{
    int res = 0;
    int ix;
    kstat_ctl_t *ks_ctl;
    kstat_t *ks;

    errno = 0;

    if (cpuinfo->configured < 1)
        goto error;

    cpuinfo->topology = malloc(sizeof(erts_cpu_topology_t)
                               * cpuinfo->configured);
    if (!cpuinfo->topology)
        goto error;

    for (ix = 0; ix < cpuinfo->configured; ix++) {
        cpuinfo->topology[ix].node = -1;
        cpuinfo->topology[ix].processor = -1;
        cpuinfo->topology[ix].processor_node = -1;
        cpuinfo->topology[ix].core = -1;
        cpuinfo->topology[ix].thread = -1;
        cpuinfo->topology[ix].logical = -1;
    }

    ks_ctl = kstat_open();
    if (!ks_ctl)
        goto error;

    ix = 0;
    for (ks = ks_ctl->kc_chain; ks; ks = ks->ks_next) {
        if (strcmp("cpu_info", ks->ks_module) == 0) {
            kstat_read(ks_ctl, ks, NULL);
            if (ks->ks_type == KSTAT_TYPE_NAMED) {
                /*
                 * Don't know how to figure numa nodes out;
                 * hope there is none...
                 */
                cpuinfo->topology[ix].node = -1;
                cpuinfo->topology[ix].processor = data_lookup_int(ks,"chip_id");
                cpuinfo->topology[ix].processor_node = -1;
                cpuinfo->topology[ix].core = data_lookup_int(ks, "core_id");
                cpuinfo->topology[ix].thread = 0; /* we'll enumerate later */
                cpuinfo->topology[ix].logical = ks->ks_instance;
                if (++ix == cpuinfo->configured)
                    break;
            }
        }
    }

    kstat_close(ks_ctl);

    res = ix;

    if (!res || res < cpuinfo->online)
        res = 0;
    else {
        erts_cpu_topology_t *prev, *this, *last;

        cpuinfo->topology_size = res;

        if (cpuinfo->topology_size != cpuinfo->configured) {
            void *t = realloc(cpuinfo->topology, (sizeof(erts_cpu_topology_t)
                                                  * cpuinfo->topology_size));
            if (t)
                cpuinfo->topology = t;
        }

        qsort(cpuinfo->topology,
              cpuinfo->topology_size,
              sizeof(erts_cpu_topology_t),
              cpu_cmp);

        this = &cpuinfo->topology[0];
        this->thread = 0;

        if (res > 1) {
            prev = this++;
            last = &cpuinfo->topology[cpuinfo->topology_size-1];

            while (1) {
                this->thread = ((this->node == prev->node
                                 && this->processor == prev->processor
                                 && this->processor_node == prev->processor_node
                                 && this->core == prev->core)
                                ? prev->thread + 1
                                : 0);
                if (this == last)
                    break;
                prev = this++;
            }
        }
    }

    adjust_processor_nodes(cpuinfo, 1);

 error:

    if (res == 0) {
        cpuinfo->topology_size = 0;
        if (cpuinfo->topology) {
            free(cpuinfo->topology);
            cpuinfo->topology = NULL;
        }
        if (errno)
            res = -errno;
        else
            res = -EINVAL;
    }

    return res;

}

static int
read_cpu_quota(int limit)
{
    (void)limit;
    return 0;
}

#elif defined(__WIN32__)

/*
 * We cannot use the Relation* enumerators out of the box, since not
 * all of them are present in every version of the
 * LOGICAL_PROCESSOR_RELATIONSHIP enum. Their values are, however,
 * documented as follows...
 */
#define ERTS_MU_RELATION_PROCESSOR_CORE       0 /* RelationProcessorCore */
#define ERTS_MU_RELATION_NUMA_NODE            1 /* RelationNumaNode */
#define ERTS_MU_RELATION_CACHE                2 /* RelationCache */
#define ERTS_MU_RELATION_PROCESSOR_PACKAGE    3 /* RelationProcessorPackage */

static ETHR_FORCE_INLINE int
rel_cmp_val(int r)
{
    switch (r) {
    case ERTS_MU_RELATION_NUMA_NODE:         return 0;
    case ERTS_MU_RELATION_PROCESSOR_PACKAGE: return 1;
    case ERTS_MU_RELATION_PROCESSOR_CORE:    return 2;
    default: /* currently not used */        return 3;
    }
}

static int
slpi_cmp(const void *vx, const void *vy)
{
    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION x, y;
    x = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION) vx;
    y = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION) vy;

    if ((int) x->Relationship != (int) y->Relationship)
        return (rel_cmp_val((int) x->Relationship)
                - rel_cmp_val((int) y->Relationship));

    switch ((int) x->Relationship) {
    case ERTS_MU_RELATION_NUMA_NODE:
        if (x->NumaNode.NodeNumber == y->NumaNode.NodeNumber)
            break;
        return ((int) x->NumaNode.NodeNumber) - ((int) y->NumaNode.NodeNumber);
    case ERTS_MU_RELATION_PROCESSOR_CORE:
    case ERTS_MU_RELATION_PROCESSOR_PACKAGE:
    default:
        break;
    }

    if (x->ProcessorMask == y->ProcessorMask)
        return 0;
    return x->ProcessorMask < y->ProcessorMask ? -1 : 1;
}

typedef BOOL (WINAPI *glpi_t)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);

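/*
 * GetLogicalProcessorInformation() is looked up dynamically, so that
 * this code also loads on old Windows versions that lack it; in that
 * case the topology is simply reported as unsupported.
 */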
static int
read_topology(erts_cpu_info_t *cpuinfo)
{
    int res = 0;
    glpi_t glpi;
    int *core_id = NULL;
    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION slpip = NULL;
    int wix, rix, max_l, l, packages, nodes, no_slpi;
    DWORD slpi_size = 0;


    glpi = (glpi_t) GetProcAddress(GetModuleHandle("kernel32"),
                                   "GetLogicalProcessorInformation");
    if (!glpi)
        return -ENOTSUP;

    cpuinfo->topology = NULL;

    if (cpuinfo->configured < 1 || sizeof(ULONG_PTR)*8 < cpuinfo->configured)
        goto error;

    while (1) {
        DWORD werr;
        if (TRUE == glpi(slpip, &slpi_size))
            break;
        werr = GetLastError();
        if (werr != ERROR_INSUFFICIENT_BUFFER) {
            res = -erts_map_win_error_to_errno(werr);
            goto error;
        }
        if (slpip)
            free(slpip);
        slpip = malloc(slpi_size);
        if (!slpip) {
            res = -ENOMEM;
            goto error;
        }
    }

    no_slpi = (int) slpi_size/sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);

    qsort(slpip,
          no_slpi,
          sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION),
          slpi_cmp);

    /*
     * Now numa node relations appear before package relations which
     * appear before core relations which appear before relations
     * we aren't interested in...
     */

    max_l = 0;
    packages = 0;
    nodes = 0;
    for (rix = 0; rix < no_slpi; rix++) {
        PSYSTEM_LOGICAL_PROCESSOR_INFORMATION this = &slpip[rix];
        for (l = sizeof(ULONG_PTR)*8 - 1; l > 0; l--) {
            if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
                if (max_l < l)
                    max_l = l;
                break;
            }
        }
        if ((int) slpip[rix].Relationship == ERTS_MU_RELATION_PROCESSOR_PACKAGE)
            packages++;
        if ((int) slpip[rix].Relationship == ERTS_MU_RELATION_NUMA_NODE)
            nodes++;
    }

    if (!packages) {
        packages = 1;
    }
    core_id = malloc(sizeof(int)*packages);
    if (!core_id) {
        res = -ENOMEM;
        goto error;
    }

    for (rix = 0; rix < packages; rix++)
        core_id[rix] = 0;

    cpuinfo->topology_size = max_l + 1;
    cpuinfo->topology = malloc(sizeof(erts_cpu_topology_t)
                               * cpuinfo->topology_size);
    if (!cpuinfo->topology) {
        res = -ENOMEM;
        goto error;
    }

    for (wix = 0; wix < cpuinfo->topology_size; wix++) {
        cpuinfo->topology[wix].node = -1;
        cpuinfo->topology[wix].processor = -1;
        cpuinfo->topology[wix].processor_node = -1;
        cpuinfo->topology[wix].core = -1;
        cpuinfo->topology[wix].thread = -1;
        cpuinfo->topology[wix].logical = -1;
    }

    nodes = 0;
    packages = 0;

    for (rix = 0; rix < no_slpi; rix++) {

        switch ((int) slpip[rix].Relationship) {
        case ERTS_MU_RELATION_NUMA_NODE:
            for (l = 0; l < sizeof(ULONG_PTR)*8; l++) {
                if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
                    cpuinfo->topology[l].logical = l;
                    cpuinfo->topology[l].node = slpip[rix].NumaNode.NodeNumber;
                }
            }
            nodes++;
            break;
        case ERTS_MU_RELATION_PROCESSOR_PACKAGE:
            for (l = 0; l < sizeof(ULONG_PTR)*8; l++) {
                if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
                    cpuinfo->topology[l].logical = l;
                    cpuinfo->topology[l].processor = packages;
                }
            }
            packages++;
            break;
        case ERTS_MU_RELATION_PROCESSOR_CORE: {
            int thread = 0;
            int processor = -1;
            for (l = 0; l < sizeof(ULONG_PTR)*8; l++) {
                /*
                 * Nodes and packages may not be supported; pretend
                 * that there is one if this is the case...
                 */
                if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
                    if (!nodes) {
                        cpuinfo->topology[l].node = 0;
                    }
                    if (!packages) {
                        cpuinfo->topology[l].processor = 0;
                    }
                    if (processor < 0) {
                        processor = cpuinfo->topology[l].processor;
                        if (processor < 0) {
                            res = -EINVAL;
                            goto error;
                        }
                    }
                    else if (processor != cpuinfo->topology[l].processor) {
                        res = -EINVAL;
                        goto error;
                    }
                    cpuinfo->topology[l].logical = l;
                    cpuinfo->topology[l].thread = thread;
                    cpuinfo->topology[l].core = core_id[processor];
                    thread++;
                }
            }
            core_id[processor]++;
            break;
        }
        default:
            /*
             * We have reached the end of the relationships
             * that we (currently) are interested in...
             */
            goto relationships_done;
        }
    }

 relationships_done:

    /*
     * There may be unused entries; remove them...
     */
    for (rix = wix = 0; rix < cpuinfo->topology_size; rix++) {
        if (cpuinfo->topology[rix].logical >= 0) {
            if (wix != rix)
                cpuinfo->topology[wix] = cpuinfo->topology[rix];
            wix++;
        }
    }

    if (cpuinfo->topology_size != wix) {
        erts_cpu_topology_t *new = cpuinfo->topology;
        new = realloc(cpuinfo->topology,
                      sizeof(erts_cpu_topology_t)*wix);
        if (!new) {
            res = -ENOMEM;
            goto error;
        }
        cpuinfo->topology = new;
        cpuinfo->topology_size = wix;
    }

    res = wix;

    adjust_processor_nodes(cpuinfo, nodes);

    qsort(cpuinfo->topology,
          cpuinfo->topology_size,
          sizeof(erts_cpu_topology_t),
          cpu_cmp);

    if (res < cpuinfo->online)
        res = -EINVAL;

 error:

    if (res <= 0) {
        cpuinfo->topology_size = 0;
        if (cpuinfo->topology) {
            free(cpuinfo->topology);
            cpuinfo->topology = NULL;
        }
    }

    if (slpip)
        free(slpip);
    if (core_id)
        free(core_id);

    return res;
}

static int
read_cpu_quota(int limit)
{
    (void)limit;
    return 0;
}

#elif defined(__FreeBSD__)

/**
 * FreeBSD topology detection is based on kern.sched.topology_spec XML as
 * exposed by the ULE scheduler and described in SMP(4). It is available in
 * 8.0 and higher.
 *
 * Threads are identified in this XML chunk with a THREAD flag. The function
 * (simplistically) distinguishes cores and processors by the amount of cache
 * they share (0 => processor, otherwise => core). Nodes are not identified
 * (ULE doesn't handle NUMA yet, I believe).
 */

/**
 * Recursively parse a topology_spec <group> tag.
 */
static
const char* parse_topology_spec_group(erts_cpu_info_t *cpuinfo, const char* xml, int parentCacheLevel, int* processor_p, int* core_p, int* index_procs_p) {
    int error = 0;
    int cacheLevel = parentCacheLevel;
    const char* next_group_start = strstr(xml + 1, "<group");
    int is_thread_group = 0;
    const char* next_cache_level;
    const char* next_thread_flag;
    const char* next_group_end;
    const char* next_children;
    const char* next_children_end;

    /* parse the cache level */
    next_cache_level = strstr(xml, "cache-level=\"");
    if (next_cache_level && (next_group_start == NULL || next_cache_level < next_group_start)) {
        sscanf(next_cache_level, "cache-level=\"%i\"", &cacheLevel);
    }

    /* parse the threads flag */
    next_thread_flag = strstr(xml, "THREAD");
    if (next_thread_flag && (next_group_start == NULL || next_thread_flag < next_group_start))
        is_thread_group = 1;

    /* Determine if it's a leaf with the position of the next children tag */
    next_group_end = strstr(xml, "</group>");
    next_children = strstr(xml, "<children>");
    next_children_end = strstr(xml, "</children>");
    if (next_children == NULL || next_group_end < next_children) {
        do {
            const char* next_cpu_start;
            const char* next_cpu_cdata;
            const char* next_cpu_end;
            int cpu_str_size;
            char* cpu_str;
            char* cpu_crsr;
            char* brkb;
            int thread = 0;
            int index_procs = *index_procs_p;

            next_cpu_start = strstr(xml, "<cpu");
            if (!next_cpu_start) {
                error = 1;
                break;
            }
            /* Check the strstr() result before stepping past the '>';
             * checking the pointer after adding 1 (as done previously)
             * could never detect a failed search. */
            next_cpu_cdata = strstr(next_cpu_start, ">");
            if (!next_cpu_cdata) {
                error = 1;
                break;
            }
            next_cpu_cdata++;
            next_cpu_end = strstr(next_cpu_cdata, "</cpu>");
            if (!next_cpu_end) {
                error = 1;
                break;
            }
            cpu_str_size = next_cpu_end - next_cpu_cdata;
            cpu_str = (char*) malloc(cpu_str_size + 1);
            if (!cpu_str) {
                error = 1;
                break;
            }
            memcpy(cpu_str, (const char*) next_cpu_cdata, cpu_str_size);
            cpu_str[cpu_str_size] = 0;
            for (cpu_crsr = strtok_r(cpu_str, " \t,", &brkb); cpu_crsr; cpu_crsr = strtok_r(NULL, " \t,", &brkb)) {
                int cpu_id;
                if (index_procs >= cpuinfo->configured) {
                    void* t = realloc(cpuinfo->topology, (sizeof(erts_cpu_topology_t) * (index_procs + 1)));
                    if (t) {
                        cpuinfo->topology = t;
                    } else {
                        error = 1;
                        break;
                    }
                }
                cpu_id = atoi(cpu_crsr);
                cpuinfo->topology[index_procs].node = -1;
                cpuinfo->topology[index_procs].processor = *processor_p;
                cpuinfo->topology[index_procs].processor_node = -1;
                cpuinfo->topology[index_procs].core = *core_p;
                cpuinfo->topology[index_procs].thread = thread;
                cpuinfo->topology[index_procs].logical = cpu_id;
                if (is_thread_group) {
                    thread++;
                } else {
                    *core_p = (*core_p) + 1;
                }
                index_procs++;
            }
            *index_procs_p = index_procs;
            free(cpu_str);
        } while (0);
        xml = next_group_end;
    } else {
        while (next_group_start != NULL && next_group_start < next_children_end) {
            xml = parse_topology_spec_group(cpuinfo, next_group_start, cacheLevel, processor_p, core_p, index_procs_p);
            if (!xml)
                break;
            next_group_start = strstr(xml, "<group");
            next_children_end = strstr(xml, "</children>");
        }
    }

    if (parentCacheLevel == 0) {
        *core_p = 0;
        *processor_p = (*processor_p) + 1;
    } else {
        *core_p = (*core_p) + 1;
    }

    if (error)
        xml = NULL;

    return xml;
}

/**
 * Parse the topology_spec. Return the number of CPUs, or 0 if parsing failed.
 */
static
int parse_topology_spec(erts_cpu_info_t *cpuinfo, const char* xml) {
    int res = 1;
    int index_procs = 0;
    int core = 0;
    int processor = 0;
    xml = strstr(xml, "<groups");
    if (!xml)
        return -1;

    xml += 7;
    xml = strstr(xml, "<group");
    while (xml) {
        xml = parse_topology_spec_group(cpuinfo, xml, 0, &processor, &core, &index_procs);
        if (!xml) {
            res = 0;
            break;
        }
        xml = strstr(xml, "<group");
    }

    if (res)
        res = index_procs;

    return res;
}

static int
read_topology(erts_cpu_info_t *cpuinfo)
{
    int ix;
    int res = 0;
    size_t topology_spec_size = 0;
    void* topology_spec = NULL;

    errno = 0;

    if (cpuinfo->configured < 1)
        goto error;

    cpuinfo->topology_size = cpuinfo->configured;
    cpuinfo->topology = malloc(sizeof(erts_cpu_topology_t)
                               * cpuinfo->configured);
    if (!cpuinfo->topology) {
        res = -ENOMEM;
        goto error;
    }

    for (ix = 0; ix < cpuinfo->configured; ix++) {
        cpuinfo->topology[ix].node = -1;
        cpuinfo->topology[ix].processor = -1;
        cpuinfo->topology[ix].processor_node = -1;
        cpuinfo->topology[ix].core = -1;
        cpuinfo->topology[ix].thread = -1;
        cpuinfo->topology[ix].logical = -1;
    }

    if (!sysctlbyname("kern.sched.topology_spec", NULL, &topology_spec_size, NULL, 0)) {
        topology_spec = malloc(topology_spec_size);
        if (!topology_spec) {
            res = -ENOMEM;
            goto error;
        }

        if (sysctlbyname("kern.sched.topology_spec", topology_spec, &topology_spec_size, NULL, 0)) {
            goto error;
        }

        res = parse_topology_spec(cpuinfo, topology_spec);
        if (!res || res < cpuinfo->online)
            res = 0;
        else {
            cpuinfo->topology_size = res;

            if (cpuinfo->topology_size != cpuinfo->configured) {
                void *t = realloc(cpuinfo->topology, (sizeof(erts_cpu_topology_t)
                                                      * cpuinfo->topology_size));
                if (t)
                    cpuinfo->topology = t;
            }

            adjust_processor_nodes(cpuinfo, 1);

            qsort(cpuinfo->topology,
                  cpuinfo->topology_size,
                  sizeof(erts_cpu_topology_t),
                  cpu_cmp);
        }
    }

error:

    if (res == 0) {
        cpuinfo->topology_size = 0;
        if (cpuinfo->topology) {
            free(cpuinfo->topology);
            cpuinfo->topology = NULL;
        }
        if (errno)
            res = -errno;
        else
            res = -EINVAL;
    }

    if (topology_spec)
        free(topology_spec);

    return res;
}

static int
read_cpu_quota(int limit)
{
    (void)limit;
    return 0;
}

#else

static int
read_cpu_quota(int limit)
{
    (void)limit;
    return 0;
}

static int
read_topology(erts_cpu_info_t *cpuinfo)
{
    (void)cpuinfo;
    return -ENOTSUP;
}

#endif

#if defined(__WIN32__)

int
erts_map_win_error_to_errno(DWORD win_error)
{
    switch (win_error) {
    case ERROR_INVALID_FUNCTION:                return EINVAL;   /* 1    */
    case ERROR_FILE_NOT_FOUND:                  return ENOENT;   /* 2    */
    case ERROR_PATH_NOT_FOUND:                  return ENOENT;   /* 3    */
    case ERROR_TOO_MANY_OPEN_FILES:             return EMFILE;   /* 4    */
    case ERROR_ACCESS_DENIED:                   return EACCES;   /* 5    */
    case ERROR_INVALID_HANDLE:                  return EBADF;    /* 6    */
    case ERROR_ARENA_TRASHED:                   return ENOMEM;   /* 7    */
    case ERROR_NOT_ENOUGH_MEMORY:               return ENOMEM;   /* 8    */
    case ERROR_INVALID_BLOCK:                   return ENOMEM;   /* 9    */
    case ERROR_BAD_ENVIRONMENT:                 return E2BIG;    /* 10   */
    case ERROR_BAD_FORMAT:                      return ENOEXEC;  /* 11   */
    case ERROR_INVALID_ACCESS:                  return EINVAL;   /* 12   */
    case ERROR_INVALID_DATA:                    return EINVAL;   /* 13   */
    case ERROR_OUTOFMEMORY:                     return ENOMEM;   /* 14   */
    case ERROR_INVALID_DRIVE:                   return ENOENT;   /* 15   */
    case ERROR_CURRENT_DIRECTORY:               return EACCES;   /* 16   */
    case ERROR_NOT_SAME_DEVICE:                 return EXDEV;    /* 17   */
    case ERROR_NO_MORE_FILES:                   return ENOENT;   /* 18   */
    case ERROR_WRITE_PROTECT:                   return EACCES;   /* 19   */
    case ERROR_BAD_UNIT:                        return EACCES;   /* 20   */
    case ERROR_NOT_READY:                       return EACCES;   /* 21   */
    case ERROR_BAD_COMMAND:                     return EACCES;   /* 22   */
    case ERROR_CRC:                             return EACCES;   /* 23   */
    case ERROR_BAD_LENGTH:                      return EACCES;   /* 24   */
    case ERROR_SEEK:                            return EACCES;   /* 25   */
    case ERROR_NOT_DOS_DISK:                    return EACCES;   /* 26   */
    case ERROR_SECTOR_NOT_FOUND:                return EACCES;   /* 27   */
    case ERROR_OUT_OF_PAPER:                    return EACCES;   /* 28   */
    case ERROR_WRITE_FAULT:                     return EACCES;   /* 29   */
    case ERROR_READ_FAULT:                      return EACCES;   /* 30   */
    case ERROR_GEN_FAILURE:                     return EACCES;   /* 31   */
    case ERROR_SHARING_VIOLATION:               return EACCES;   /* 32   */
    case ERROR_LOCK_VIOLATION:                  return EACCES;   /* 33   */
    case ERROR_WRONG_DISK:                      return EACCES;   /* 34   */
    case ERROR_SHARING_BUFFER_EXCEEDED:         return EACCES;   /* 36   */
    case ERROR_BAD_NETPATH:                     return ENOENT;   /* 53   */
    case ERROR_NETWORK_ACCESS_DENIED:           return EACCES;   /* 65   */
    case ERROR_BAD_NET_NAME:                    return ENOENT;   /* 67   */
    case ERROR_FILE_EXISTS:                     return EEXIST;   /* 80   */
    case ERROR_CANNOT_MAKE:                     return EACCES;   /* 82   */
    case ERROR_FAIL_I24:                        return EACCES;   /* 83   */
    case ERROR_INVALID_PARAMETER:               return EINVAL;   /* 87   */
    case ERROR_NO_PROC_SLOTS:                   return EAGAIN;   /* 89   */
    case ERROR_DRIVE_LOCKED:                    return EACCES;   /* 108  */
    case ERROR_BROKEN_PIPE:                     return EPIPE;    /* 109  */
    case ERROR_DISK_FULL:                       return ENOSPC;   /* 112  */
    case ERROR_INVALID_TARGET_HANDLE:           return EBADF;    /* 114  */
    case ERROR_WAIT_NO_CHILDREN:                return ECHILD;   /* 128  */
    case ERROR_CHILD_NOT_COMPLETE:              return ECHILD;   /* 129  */
    case ERROR_DIRECT_ACCESS_HANDLE:            return EBADF;    /* 130  */
    case ERROR_NEGATIVE_SEEK:                   return EINVAL;   /* 131  */
    case ERROR_SEEK_ON_DEVICE:                  return EACCES;   /* 132  */
    case ERROR_DIR_NOT_EMPTY:                   return ENOTEMPTY;/* 145  */
    case ERROR_NOT_LOCKED:                      return EACCES;   /* 158  */
    case ERROR_BAD_PATHNAME:                    return ENOENT;   /* 161  */
    case ERROR_MAX_THRDS_REACHED:               return EAGAIN;   /* 164  */
    case ERROR_LOCK_FAILED:                     return EACCES;   /* 167  */
    case ERROR_ALREADY_EXISTS:                  return EEXIST;   /* 183  */
    case ERROR_INVALID_STARTING_CODESEG:        return ENOEXEC;  /* 188  */
    case ERROR_INVALID_STACKSEG:                return ENOEXEC;  /* 189  */
    case ERROR_INVALID_MODULETYPE:              return ENOEXEC;  /* 190  */
    case ERROR_INVALID_EXE_SIGNATURE:           return ENOEXEC;  /* 191  */
    case ERROR_EXE_MARKED_INVALID:              return ENOEXEC;  /* 192  */
    case ERROR_BAD_EXE_FORMAT:                  return ENOEXEC;  /* 193  */
    case ERROR_ITERATED_DATA_EXCEEDS_64k:       return ENOEXEC;  /* 194  */
    case ERROR_INVALID_MINALLOCSIZE:            return ENOEXEC;  /* 195  */
    case ERROR_DYNLINK_FROM_INVALID_RING:       return ENOEXEC;  /* 196  */
    case ERROR_IOPL_NOT_ENABLED:                return ENOEXEC;  /* 197  */
    case ERROR_INVALID_SEGDPL:                  return ENOEXEC;  /* 198  */
    case ERROR_AUTODATASEG_EXCEEDS_64k:         return ENOEXEC;  /* 199  */
    case ERROR_RING2SEG_MUST_BE_MOVABLE:        return ENOEXEC;  /* 200  */
    case ERROR_RELOC_CHAIN_XEEDS_SEGLIM:        return ENOEXEC;  /* 201  */
    case ERROR_INFLOOP_IN_RELOC_CHAIN:          return ENOEXEC;  /* 202  */
    case ERROR_FILENAME_EXCED_RANGE:            return ENOENT;   /* 206  */
    case ERROR_NESTING_NOT_ALLOWED:             return EAGAIN;   /* 215  */
    case ERROR_NOT_ENOUGH_QUOTA:                return ENOMEM;   /* 1816 */
    default:                                    return EINVAL;
    }
}

int
erts_get_last_win_errno(void)
{
    return erts_map_win_error_to_errno(GetLastError());
}


#endif