/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
 * EPOLL* counterparts. We use the POLL* variants in this file because that
 * is what libuv uses elsewhere and it avoids a dependency on <sys/epoll.h>.
 */
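/* For illustration, a build that does include <sys/epoll.h> could verify the
 * correspondence with a sanity check along these lines:
 *
 *   assert(POLLIN == EPOLLIN && POLLOUT == EPOLLOUT &&
 *          POLLERR == EPOLLERR && POLLHUP == EPOLLHUP);
 */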

#include "uv.h"
#include "internal.h"

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <net/if.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>

#define HAVE_IFADDRS_H 1

#ifdef __UCLIBC__
/* ifaddrs.h is only usable from uClibc 0.9.32 onwards. The old check
 * compared __UCLIBC_MAJOR__ < 0, which can never be true, so it never fired.
 */
# if __UCLIBC_MAJOR__ == 0 && \
     (__UCLIBC_MINOR__ < 9 || \
      (__UCLIBC_MINOR__ == 9 && __UCLIBC_SUBLEVEL__ < 32))
#  undef HAVE_IFADDRS_H
# endif
#endif

#ifdef HAVE_IFADDRS_H
# if defined(__ANDROID__)
#  include "android-ifaddrs.h"
# else
#  include <ifaddrs.h>
# endif
# include <sys/socket.h>
# include <net/ethernet.h>
# include <netpacket/packet.h>
#endif /* HAVE_IFADDRS_H */

/* Available from 2.6.32 onwards. */
#ifndef CLOCK_MONOTONIC_COARSE
# define CLOCK_MONOTONIC_COARSE 6
#endif

/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
 * include that file because it conflicts with <time.h>. We'll just have to
 * define it ourselves.
 */
#ifndef CLOCK_BOOTTIME
# define CLOCK_BOOTTIME 7
#endif

static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
static int read_times(FILE* statfile_fp,
                      unsigned int numcpus,
                      uv_cpu_info_t* ci);
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static unsigned long read_cpufreq(unsigned int cpunum);

int uv__platform_loop_init(uv_loop_t* loop) {
  int fd;

  fd = uv__epoll_create1(UV__EPOLL_CLOEXEC);

  /* epoll_create1() can fail either because it's not implemented (old kernel)
   * or because it doesn't understand the EPOLL_CLOEXEC flag.
   */
  if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
    fd = uv__epoll_create(256);

    if (fd != -1)
      uv__cloexec(fd, 1);
  }

  loop->backend_fd = fd;
  loop->inotify_fd = -1;
  loop->inotify_watchers = NULL;

  if (fd == -1)
    return UV__ERR(errno);

  return 0;
}


int uv__io_fork(uv_loop_t* loop) {
  int err;
  void* old_watchers;

  old_watchers = loop->inotify_watchers;

  uv__close(loop->backend_fd);
  loop->backend_fd = -1;
  uv__platform_loop_delete(loop);

  err = uv__platform_loop_init(loop);
  if (err)
    return err;

  return uv__inotify_fork(loop, old_watchers);
}


void uv__platform_loop_delete(uv_loop_t* loop) {
  if (loop->inotify_fd == -1) return;
  uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
  uv__close(loop->inotify_fd);
  loop->inotify_fd = -1;
}


void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct uv__epoll_event* events;
  struct uv__epoll_event dummy;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);

  events = (struct uv__epoll_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events != NULL)
    /* Invalidate events with the same file descriptor */
    for (i = 0; i < nfds; i++)
      if ((int) events[i].data == fd)
        events[i].data = -1;

  /* Remove the file descriptor from the epoll set.
   * This avoids a problem where the same file description remains open
   * in another process, causing repeated junk epoll events.
   *
   * We pass in a dummy epoll_event, to work around a bug in old kernels.
   */
  if (loop->backend_fd >= 0) {
    /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
     * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
     */
    memset(&dummy, 0, sizeof(dummy));
    uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &dummy);
  }
}


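/* Check whether |fd| can be registered with epoll: EPOLL_CTL_ADD failing
 * with EEXIST means the descriptor is already in the set, which proves it
 * is epoll-able all the same.
 */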
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct uv__epoll_event e;
  int rc;

  e.events = POLLIN;
  e.data = -1;

  rc = 0;
  if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_ADD, fd, &e))
    if (errno != EEXIST)
      rc = UV__ERR(errno);

  if (rc == 0)
    if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &e))
      abort();

  return rc;
}


void uv__io_poll(uv_loop_t* loop, int timeout) {
  /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
   * effectively infinite on 32-bit architectures. To avoid blocking
   * indefinitely, we cap the timeout and poll again if necessary.
   *
   * Note that "30 minutes" is a simplification because it depends on
   * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
   * that being the largest value I have seen in the wild (and only once).
   */
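  /* (With CONFIG_HZ=1200 the cap works out to INT_MAX / 1200 =
   * 2147483647 / 1200 ~= 1789569 ms, i.e. just under 30 minutes.)
   */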
  static const int max_safe_timeout = 1789569;
  static int no_epoll_pwait;
  static int no_epoll_wait;
  struct uv__epoll_event events[1024];
  struct uv__epoll_event* pe;
  struct uv__epoll_event e;
  int real_timeout;
  QUEUE* q;
  uv__io_t* w;
  sigset_t sigset;
  uint64_t sigmask;
  uint64_t base;
  int have_signals;
  int nevents;
  int count;
  int nfds;
  int fd;
  int op;
  int i;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    e.events = w->pevents;
    e.data = w->fd;

    if (w->events == 0)
      op = UV__EPOLL_CTL_ADD;
    else
      op = UV__EPOLL_CTL_MOD;

    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
     * events, skip the syscall and squelch the events after epoll_wait().
     */
    if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
      if (errno != EEXIST)
        abort();

      assert(op == UV__EPOLL_CTL_ADD);

      /* We've reactivated a file descriptor that's been watched before. */
      if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e))
        abort();
    }

    w->events = w->pevents;
  }

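  /* Two representations of the same mask: |sigset| for the pthread_sigmask()
   * fallback and |sigmask|, the raw kernel layout in which signal N occupies
   * bit N-1, for uv__epoll_pwait().
   */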
  sigmask = 0;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    sigemptyset(&sigset);
    sigaddset(&sigset, SIGPROF);
    sigmask |= 1 << (SIGPROF - 1);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */
  real_timeout = timeout;

  for (;;) {
    /* See the comment for max_safe_timeout for an explanation of why
     * this is necessary. Executive summary: kernel bug workaround.
     */
    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
      timeout = max_safe_timeout;

    if (sigmask != 0 && no_epoll_pwait != 0)
      if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
        abort();

    if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
      nfds = uv__epoll_pwait(loop->backend_fd,
                             events,
                             ARRAY_SIZE(events),
                             timeout,
                             sigmask);
      if (nfds == -1 && errno == ENOSYS)
        no_epoll_pwait = 1;
    } else {
      nfds = uv__epoll_wait(loop->backend_fd,
                            events,
                            ARRAY_SIZE(events),
                            timeout);
      if (nfds == -1 && errno == ENOSYS)
        no_epoll_wait = 1;
    }

    if (sigmask != 0 && no_epoll_pwait != 0)
      if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
        abort();

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);

      if (timeout == 0)
        return;

      /* We may have been inside the system call for longer than |timeout|
       * milliseconds so we need to update the timestamp to avoid drift.
       */
      goto update_timeout;
    }

    if (nfds == -1) {
      if (errno == ENOSYS) {
        /* epoll_wait() or epoll_pwait() failed, try the other system call. */
        assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
        continue;
      }

      if (errno != EINTR)
        abort();

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->data;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when a previous callback invocation in this loop has
       * stopped the current watcher. Also filters out events that the user
       * has not asked us to watch.
       */
      pe->events &= w->pevents | POLLERR | POLLHUP;

      /* Work around an epoll quirk where it sometimes reports just the
       * EPOLLERR or EPOLLHUP event. In order to force the event loop to
       * move forward, we merge in the read/write events that the watcher
       * is interested in; uv__read() and uv__write() will then deal with
       * the error or hangup in the usual fashion.
       *
       * Note to self: this happens when epoll reports EPOLLIN|EPOLLHUP, the
       * user reads the available data, calls uv_read_stop(), then sometime
       * later calls uv_read_start() again. By then, libuv has forgotten about
       * the hangup and the kernel won't report EPOLLIN again because there's
       * nothing left to read. If anything, libuv is to blame here. The
       * current hack is just a quick bandaid; to properly fix it, libuv
       * needs to remember the error/hangup event. We should get that for
       * free when we switch over to edge-triggered I/O.
       */
      if (pe->events == POLLERR || pe->events == POLLHUP)
        pe->events |= w->pevents & (POLLIN | POLLOUT | UV__POLLPRI);

      if (pe->events != 0) {
        /* Run signal watchers last. This also affects child process watchers
         * because those are implemented in terms of signal watchers.
         */
        if (w == &loop->signal_io_watcher)
          have_signals = 1;
        else
          w->cb(loop, w, pe->events);

        nevents++;
      }
    }

    if (have_signals != 0)
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    real_timeout -= (loop->time - base);
    if (real_timeout <= 0)
      return;

    timeout = real_timeout;
  }
}


uint64_t uv__hrtime(uv_clocktype_t type) {
  static clock_t fast_clock_id = -1;
  struct timespec t;
  clock_t clock_id;

  /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
   * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
   * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
   * decide to make a costly system call.
   */
  /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
   * when it has microsecond granularity or better (unlikely).
   */
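  /* clock_getres() reports the clock's tick size; a resolution of 1 ms
   * (1,000,000 ns) or finer is considered good enough for UV_CLOCK_FAST.
   */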
  if (type == UV_CLOCK_FAST && fast_clock_id == -1) {
    if (clock_getres(CLOCK_MONOTONIC_COARSE, &t) == 0 &&
        t.tv_nsec <= 1 * 1000 * 1000) {
      fast_clock_id = CLOCK_MONOTONIC_COARSE;
    } else {
      fast_clock_id = CLOCK_MONOTONIC;
    }
  }

  clock_id = CLOCK_MONOTONIC;
  if (type == UV_CLOCK_FAST)
    clock_id = fast_clock_id;

  if (clock_gettime(clock_id, &t))
    return 0;  /* Not really possible. */

  return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}


int uv_resident_set_memory(size_t* rss) {
  char buf[1024];
  const char* s;
  ssize_t n;
  long val;
  int fd;
  int i;

  do
    fd = open("/proc/self/stat", O_RDONLY);
  while (fd == -1 && errno == EINTR);

  if (fd == -1)
    return UV__ERR(errno);

  do
    n = read(fd, buf, sizeof(buf) - 1);
  while (n == -1 && errno == EINTR);

  uv__close(fd);
  if (n == -1)
    return UV__ERR(errno);
  buf[n] = '\0';

  s = strchr(buf, ' ');
  if (s == NULL)
    goto err;

  s += 1;
  if (*s != '(')
    goto err;

  s = strchr(s, ')');
  if (s == NULL)
    goto err;

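  /* We are now at the closing ')' of field 2, "comm". Skip ahead to field 24,
   * rss, which proc(5) documents as the resident set size in pages.
   */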
  for (i = 1; i <= 22; i++) {
    s = strchr(s + 1, ' ');
    if (s == NULL)
      goto err;
  }

  errno = 0;
  val = strtol(s, NULL, 10);
  if (errno != 0)
    goto err;
  if (val < 0)
    goto err;

  *rss = val * getpagesize();
  return 0;

err:
  return UV_EINVAL;
}


int uv_uptime(double* uptime) {
  static volatile int no_clock_boottime;
  struct timespec now;
  int r;

  /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
   * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
   * is suspended.
   */
  if (no_clock_boottime) {
  retry:
    r = clock_gettime(CLOCK_MONOTONIC, &now);
  }
  else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
    no_clock_boottime = 1;
    goto retry;
  }

  if (r)
    return UV__ERR(errno);

  *uptime = now.tv_sec;
  return 0;
}


static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
  unsigned int num;
  char buf[1024];

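  /* The first line of /proc/stat is the aggregate "cpu " line; skip it and
   * count the per-CPU "cpu0".."cpuN" lines that follow.
   */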
  if (!fgets(buf, sizeof(buf), statfile_fp))
    return UV_EIO;

  num = 0;
  while (fgets(buf, sizeof(buf), statfile_fp)) {
    if (strncmp(buf, "cpu", 3))
      break;
    num++;
  }

  if (num == 0)
    return UV_EIO;

  *numcpus = num;
  return 0;
}


int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  unsigned int numcpus;
  uv_cpu_info_t* ci;
  int err;
  FILE* statfile_fp;

  *cpu_infos = NULL;
  *count = 0;

  statfile_fp = uv__open_file("/proc/stat");
  if (statfile_fp == NULL)
    return UV__ERR(errno);

  err = uv__cpu_num(statfile_fp, &numcpus);
  if (err < 0)
    goto out;

  err = UV_ENOMEM;
  ci = uv__calloc(numcpus, sizeof(*ci));
  if (ci == NULL)
    goto out;

  err = read_models(numcpus, ci);
  if (err == 0)
    err = read_times(statfile_fp, numcpus, ci);

  if (err) {
    uv_free_cpu_info(ci, numcpus);
    goto out;
  }

  /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
   * We don't check for errors here. Worst case, the field is left zero.
   */
  if (ci[0].speed == 0)
    read_speeds(numcpus, ci);

  *cpu_infos = ci;
  *count = numcpus;
  err = 0;

out:

  if (fclose(statfile_fp))
    if (errno != EINTR && errno != EINPROGRESS)
      abort();

  return err;
}


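/* scaling_cur_freq is reported by sysfs in kHz; dividing by 1000 yields the
 * MHz figure that uv_cpu_info_t carries.
 */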
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
  unsigned int num;

  for (num = 0; num < numcpus; num++)
    ci[num].speed = read_cpufreq(num) / 1000;
}


/* Also reads the CPU frequency on x86. The other architectures only have
 * a BogoMIPS field, which may not be very accurate.
 *
 * Note: this simply returns on error; uv_cpu_info() takes care of cleanup.
 */
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
  static const char model_marker[] = "model name\t: ";
  static const char speed_marker[] = "cpu MHz\t\t: ";
  const char* inferred_model;
  unsigned int model_idx;
  unsigned int speed_idx;
  char buf[1024];
  char* model;
  FILE* fp;

  /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
  (void) &model_marker;
  (void) &speed_marker;
  (void) &speed_idx;
  (void) &model;
  (void) &buf;
  (void) &fp;

  model_idx = 0;
  speed_idx = 0;

#if defined(__arm__) || \
    defined(__i386__) || \
    defined(__mips__) || \
    defined(__x86_64__)
  fp = uv__open_file("/proc/cpuinfo");
  if (fp == NULL)
    return UV__ERR(errno);

  while (fgets(buf, sizeof(buf), fp)) {
    if (model_idx < numcpus) {
      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
        model = buf + sizeof(model_marker) - 1;
        model = uv__strndup(model, strlen(model) - 1);  /* Strip newline. */
        if (model == NULL) {
          fclose(fp);
          return UV_ENOMEM;
        }
        ci[model_idx++].model = model;
        continue;
      }
    }
#if defined(__arm__) || defined(__mips__)
    if (model_idx < numcpus) {
#if defined(__arm__)
      /* Fallback for pre-3.8 kernels. */
      static const char model_marker[] = "Processor\t: ";
#else /* defined(__mips__) */
      static const char model_marker[] = "cpu model\t\t: ";
#endif
      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
        model = buf + sizeof(model_marker) - 1;
        model = uv__strndup(model, strlen(model) - 1);  /* Strip newline. */
        if (model == NULL) {
          fclose(fp);
          return UV_ENOMEM;
        }
        ci[model_idx++].model = model;
        continue;
      }
    }
#else /* !__arm__ && !__mips__ */
    if (speed_idx < numcpus) {
      if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
        ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
        continue;
      }
    }
#endif /* __arm__ || __mips__ */
  }

  fclose(fp);
#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */

  /* Now we want to make sure that all the models contain *something* because
   * it's not safe to leave them as null. Copy the last entry unless there
   * isn't one, in which case we simply put "unknown" into everything.
   */
  inferred_model = "unknown";
  if (model_idx > 0)
    inferred_model = ci[model_idx - 1].model;

  while (model_idx < numcpus) {
    model = uv__strndup(inferred_model, strlen(inferred_model));
    if (model == NULL)
      return UV_ENOMEM;
    ci[model_idx++].model = model;
  }

  return 0;
}


static int read_times(FILE* statfile_fp,
                      unsigned int numcpus,
                      uv_cpu_info_t* ci) {
  unsigned long clock_ticks;
  struct uv_cpu_times_s ts;
  unsigned long user;
  unsigned long nice;
  unsigned long sys;
  unsigned long idle;
  unsigned long dummy;
  unsigned long irq;
  unsigned int num;
  unsigned int len;
  char buf[1024];

  clock_ticks = sysconf(_SC_CLK_TCK);
  assert(clock_ticks != (unsigned long) -1);
  assert(clock_ticks != 0);

  rewind(statfile_fp);

  if (!fgets(buf, sizeof(buf), statfile_fp))
    abort();

  num = 0;

  while (fgets(buf, sizeof(buf), statfile_fp)) {
    if (num >= numcpus)
      break;

    if (strncmp(buf, "cpu", 3))
      break;

    /* skip "cpu<num> " marker */
    {
      unsigned int n;
      int r = sscanf(buf, "cpu%u ", &n);
      assert(r == 1);
      (void) r;  /* silence build warning */
      for (len = sizeof("cpu0"); n /= 10; len++);
    }
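    /* E.g. for "cpu12 ...", sizeof("cpu0") == 5 covers "cpuN " with one
     * digit, and the extra digit of 12 brings len to 6, the offset of the
     * first counter.
     */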

    /* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
     * guest, guest_nice but we're only interested in the first four + irq.
     *
     * Don't use %*s to skip fields or %ll to read straight into the uint64_t
     * fields, they're not allowed in C89 mode.
     */
    if (6 != sscanf(buf + len,
                    "%lu %lu %lu %lu %lu %lu",
                    &user,
                    &nice,
                    &sys,
                    &idle,
                    &dummy,
                    &irq))
      abort();

    ts.user = clock_ticks * user;
    ts.nice = clock_ticks * nice;
    ts.sys = clock_ticks * sys;
    ts.idle = clock_ticks * idle;
    ts.irq = clock_ticks * irq;
    ci[num++].cpu_times = ts;
  }
  assert(num == numcpus);

  return 0;
}


static unsigned long read_cpufreq(unsigned int cpunum) {
  unsigned long val;
  char buf[1024];
  FILE* fp;

  snprintf(buf,
           sizeof(buf),
           "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
           cpunum);

  fp = uv__open_file(buf);
  if (fp == NULL)
    return 0;

  if (fscanf(fp, "%lu", &val) != 1)
    val = 0;

  fclose(fp);

  return val;
}


void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(cpu_infos[i].model);
  }

  uv__free(cpu_infos);
}

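/* Decide whether |ent| should be skipped. Interfaces that are down or have
 * no address are always skipped; PF_PACKET entries are skipped on the
 * address pass (UV__EXCLUDE_IFADDR) while everything else is skipped on the
 * physical-address pass (UV__EXCLUDE_IFPHYS).
 */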
static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
  if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
    return 1;
  if (ent->ifa_addr == NULL)
    return 1;
  /*
   * On Linux getifaddrs returns information related to the raw underlying
   * devices. We're not interested in this information yet.
   */
  if (ent->ifa_addr->sa_family == PF_PACKET)
    return exclude_type;
  return !exclude_type;
}

int uv_interface_addresses(uv_interface_address_t** addresses,
                           int* count) {
#ifndef HAVE_IFADDRS_H
  return UV_ENOSYS;
#else
  struct ifaddrs *addrs, *ent;
  uv_interface_address_t* address;
  int i;
  struct sockaddr_ll *sll;

  if (getifaddrs(&addrs))
    return UV__ERR(errno);

  *count = 0;
  *addresses = NULL;

  /* Count the number of interfaces */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;

    (*count)++;
  }

  if (*count == 0) {
    freeifaddrs(addrs);
    return 0;
  }

  *addresses = uv__malloc(*count * sizeof(**addresses));
  if (!(*addresses)) {
    freeifaddrs(addrs);
    return UV_ENOMEM;
  }

  address = *addresses;

  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;

    address->name = uv__strdup(ent->ifa_name);

    if (ent->ifa_addr->sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
    }

    /* ifa_netmask can be NULL; check before dereferencing. */
    if (ent->ifa_netmask == NULL) {
      memset(&address->netmask, 0, sizeof(address->netmask));
    } else if (ent->ifa_netmask->sa_family == AF_INET6) {
      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
    } else {
      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
    }

    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);

    address++;
  }

  /* Fill in physical addresses for each interface */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
      continue;

    address = *addresses;

    for (i = 0; i < (*count); i++) {
      if (strcmp(address->name, ent->ifa_name) == 0) {
        sll = (struct sockaddr_ll*) ent->ifa_addr;
        memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
      }
      address++;
    }
  }

  freeifaddrs(addrs);

  return 0;
#endif
}


void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(addresses[i].name);
  }

  uv__free(addresses);
}


void uv__set_process_title(const char* title) {
#if defined(PR_SET_NAME)
  prctl(PR_SET_NAME, title);  /* Copies at most 16 bytes, incl. the nul. */
#endif
}