/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

6 #if defined(_PR_PTHREADS)
7
8 #error "This file should not be compiled"
9
10 #else /* defined(_PR_PTHREADS) */
11
12 #include "primpl.h"
13
14 #include <sys/time.h>
15
16 #include <fcntl.h>
17 #ifdef _PR_USE_POLL
18 #include <poll.h>
19 #endif
20
#if defined(_PR_USE_POLL)
/*
 * NativeThreadPoll
 *
 * Wait for I/O readiness on behalf of a native (kernel-level) thread,
 * using the system poll() function.
 *
 * pds/npds describe the NSPR poll descriptors; timeout is an NSPR
 * interval (PR_INTERVAL_NO_WAIT and PR_INTERVAL_NO_TIMEOUT are
 * handled specially).  Returns the number of ready descriptors, 0 on
 * timeout, or -1 on error (with the NSPR error set).  Each pd's
 * out_flags is filled in on return.
 */
static PRInt32 NativeThreadPoll(
    PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
{
    /*
     * This function is mostly duplicated from ptio.c's PR_Poll().
     */
    PRInt32 ready = 0;
    /*
     * For restarting poll() if it is interrupted by a signal.
     * We use these variables to figure out how much time has
     * elapsed and how much of the timeout still remains.
     */
    PRIntn index, msecs;
    struct pollfd *syspoll = NULL;
    PRIntervalTime start, elapsed, remaining;

    syspoll = (struct pollfd*)PR_MALLOC(npds * sizeof(struct pollfd));
    if (NULL == syspoll)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return -1;
    }
    for (index = 0; index < npds; ++index)
    {
        PRFileDesc *bottom;
        PRInt16 in_flags_read = 0, in_flags_write = 0;
        PRInt16 out_flags_read = 0, out_flags_write = 0;

        if ((NULL != pds[index].fd) && (0 != pds[index].in_flags))
        {
            /*
             * Ask each layer's poll method separately for read and
             * write interest; a layer may report that the operation
             * can already proceed without blocking.
             */
            if (pds[index].in_flags & PR_POLL_READ)
            {
                in_flags_read = (pds[index].fd->methods->poll)(
                    pds[index].fd,
                    pds[index].in_flags & ~PR_POLL_WRITE,
                    &out_flags_read);
            }
            if (pds[index].in_flags & PR_POLL_WRITE)
            {
                in_flags_write = (pds[index].fd->methods->poll)(
                    pds[index].fd,
                    pds[index].in_flags & ~PR_POLL_READ,
                    &out_flags_write);
            }
            if ((0 != (in_flags_read & out_flags_read))
                || (0 != (in_flags_write & out_flags_write)))
            {
                /* this one is ready right now */
                if (0 == ready)
                {
                    /*
                     * We will return without calling the system
                     * poll function. So zero the out_flags
                     * fields of all the poll descriptors before
                     * this one.
                     */
                    int i;
                    for (i = 0; i < index; i++)
                    {
                        pds[i].out_flags = 0;
                    }
                }
                ready += 1;
                pds[index].out_flags = out_flags_read | out_flags_write;
            }
            else
            {
                pds[index].out_flags = 0;  /* pre-condition */
                /* now locate the NSPR layer at the bottom of the stack */
                bottom = PR_GetIdentitiesLayer(pds[index].fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);  /* what to do about that? */
                if ((NULL != bottom)
                    && (_PR_FILEDESC_OPEN == bottom->secret->state))
                {
                    if (0 == ready)
                    {
                        /*
                         * Translate the layered in_flags into system
                         * poll events, remembering in out_flags which
                         * system event maps back to which NSPR flag.
                         */
                        syspoll[index].fd = bottom->secret->md.osfd;
                        syspoll[index].events = 0;  /* pre-condition */
                        if (in_flags_read & PR_POLL_READ)
                        {
                            pds[index].out_flags |=
                                _PR_POLL_READ_SYS_READ;
                            syspoll[index].events |= POLLIN;
                        }
                        if (in_flags_read & PR_POLL_WRITE)
                        {
                            pds[index].out_flags |=
                                _PR_POLL_READ_SYS_WRITE;
                            syspoll[index].events |= POLLOUT;
                        }
                        if (in_flags_write & PR_POLL_READ)
                        {
                            pds[index].out_flags |=
                                _PR_POLL_WRITE_SYS_READ;
                            syspoll[index].events |= POLLIN;
                        }
                        if (in_flags_write & PR_POLL_WRITE)
                        {
                            pds[index].out_flags |=
                                _PR_POLL_WRITE_SYS_WRITE;
                            syspoll[index].events |= POLLOUT;
                        }
                        if (pds[index].in_flags & PR_POLL_EXCEPT)
                            syspoll[index].events |= POLLPRI;
                    }
                }
                else
                {
                    /* bottom layer is missing or closed */
                    if (0 == ready)
                    {
                        int i;
                        for (i = 0; i < index; i++)
                        {
                            pds[i].out_flags = 0;
                        }
                    }
                    ready += 1;  /* this will cause an abrupt return */
                    pds[index].out_flags = PR_POLL_NVAL;  /* bogii */
                }
            }
        }
        else
        {
            /* make poll() ignore this entry */
            syspoll[index].fd = -1;
            syspoll[index].events = 0;
            pds[index].out_flags = 0;
        }
    }

    if (0 == ready)
    {
        switch (timeout)
        {
            case PR_INTERVAL_NO_WAIT: msecs = 0; break;
            case PR_INTERVAL_NO_TIMEOUT: msecs = -1; break;
            default:
                msecs = PR_IntervalToMilliseconds(timeout);
                start = PR_IntervalNow();
                break;
        }

retry:
        ready = _MD_POLL(syspoll, npds, msecs);
        if (-1 == ready)
        {
            PRIntn oserror = errno;

            if (EINTR == oserror)
            {
                /* Restart with whatever time remains of the timeout. */
                if (timeout == PR_INTERVAL_NO_TIMEOUT) goto retry;
                else if (timeout == PR_INTERVAL_NO_WAIT) ready = 0;
                else
                {
                    elapsed = (PRIntervalTime)(PR_IntervalNow() - start);
                    if (elapsed > timeout) ready = 0;  /* timed out */
                    else
                    {
                        remaining = timeout - elapsed;
                        msecs = PR_IntervalToMilliseconds(remaining);
                        goto retry;
                    }
                }
            }
            else _PR_MD_MAP_POLL_ERROR(oserror);
        }
        else if (ready > 0)
        {
            for (index = 0; index < npds; ++index)
            {
                PRInt16 out_flags = 0;
                if ((NULL != pds[index].fd) && (0 != pds[index].in_flags))
                {
                    if (0 != syspoll[index].revents)
                    {
                        /*
                        ** Set up the out_flags so that it contains the
                        ** bits that the highest layer thinks are nice
                        ** to have. Then the client of that layer will
                        ** call the appropriate I/O function and maybe
                        ** the protocol will make progress.
                        */
                        if (syspoll[index].revents & POLLIN)
                        {
                            if (pds[index].out_flags
                                & _PR_POLL_READ_SYS_READ)
                            {
                                out_flags |= PR_POLL_READ;
                            }
                            if (pds[index].out_flags
                                & _PR_POLL_WRITE_SYS_READ)
                            {
                                out_flags |= PR_POLL_WRITE;
                            }
                        }
                        if (syspoll[index].revents & POLLOUT)
                        {
                            if (pds[index].out_flags
                                & _PR_POLL_READ_SYS_WRITE)
                            {
                                out_flags |= PR_POLL_READ;
                            }
                            if (pds[index].out_flags
                                & _PR_POLL_WRITE_SYS_WRITE)
                            {
                                out_flags |= PR_POLL_WRITE;
                            }
                        }
                        if (syspoll[index].revents & POLLPRI)
                            out_flags |= PR_POLL_EXCEPT;
                        if (syspoll[index].revents & POLLERR)
                            out_flags |= PR_POLL_ERR;
                        if (syspoll[index].revents & POLLNVAL)
                            out_flags |= PR_POLL_NVAL;
                        if (syspoll[index].revents & POLLHUP)
                            out_flags |= PR_POLL_HUP;
                    }
                }
                pds[index].out_flags = out_flags;
            }
        }
    }

    PR_DELETE(syspoll);
    return ready;

}  /* NativeThreadPoll */
#endif  /* defined(_PR_USE_POLL) */

250 #if !defined(_PR_USE_POLL)
NativeThreadSelect(PRPollDesc * pds,PRIntn npds,PRIntervalTime timeout)251 static PRInt32 NativeThreadSelect(
252 PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
253 {
254 /*
255 * This code is almost a duplicate of w32poll.c's _PR_MD_PR_POLL().
256 */
257 fd_set rd, wt, ex;
258 PRFileDesc *bottom;
259 PRPollDesc *pd, *epd;
260 PRInt32 maxfd = -1, ready, err;
261 PRIntervalTime remaining, elapsed, start;
262
263 struct timeval tv, *tvp = NULL;
264
265 FD_ZERO(&rd);
266 FD_ZERO(&wt);
267 FD_ZERO(&ex);
268
269 ready = 0;
270 for (pd = pds, epd = pd + npds; pd < epd; pd++)
271 {
272 PRInt16 in_flags_read = 0, in_flags_write = 0;
273 PRInt16 out_flags_read = 0, out_flags_write = 0;
274
275 if ((NULL != pd->fd) && (0 != pd->in_flags))
276 {
277 if (pd->in_flags & PR_POLL_READ)
278 {
279 in_flags_read = (pd->fd->methods->poll)(
280 pd->fd, pd->in_flags & ~PR_POLL_WRITE, &out_flags_read);
281 }
282 if (pd->in_flags & PR_POLL_WRITE)
283 {
284 in_flags_write = (pd->fd->methods->poll)(
285 pd->fd, pd->in_flags & ~PR_POLL_READ, &out_flags_write);
286 }
287 if ((0 != (in_flags_read & out_flags_read))
288 || (0 != (in_flags_write & out_flags_write)))
289 {
290 /* this one's ready right now */
291 if (0 == ready)
292 {
293 /*
294 * We will have to return without calling the
295 * system poll/select function. So zero the
296 * out_flags fields of all the poll descriptors
297 * before this one.
298 */
299 PRPollDesc *prev;
300 for (prev = pds; prev < pd; prev++)
301 {
302 prev->out_flags = 0;
303 }
304 }
305 ready += 1;
306 pd->out_flags = out_flags_read | out_flags_write;
307 }
308 else
309 {
310 pd->out_flags = 0; /* pre-condition */
311
312 /* make sure this is an NSPR supported stack */
313 bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
314 PR_ASSERT(NULL != bottom); /* what to do about that? */
315 if ((NULL != bottom)
316 && (_PR_FILEDESC_OPEN == bottom->secret->state))
317 {
318 if (0 == ready)
319 {
320 PRInt32 osfd = bottom->secret->md.osfd;
321 if (osfd > maxfd) maxfd = osfd;
322 if (in_flags_read & PR_POLL_READ)
323 {
324 pd->out_flags |= _PR_POLL_READ_SYS_READ;
325 FD_SET(osfd, &rd);
326 }
327 if (in_flags_read & PR_POLL_WRITE)
328 {
329 pd->out_flags |= _PR_POLL_READ_SYS_WRITE;
330 FD_SET(osfd, &wt);
331 }
332 if (in_flags_write & PR_POLL_READ)
333 {
334 pd->out_flags |= _PR_POLL_WRITE_SYS_READ;
335 FD_SET(osfd, &rd);
336 }
337 if (in_flags_write & PR_POLL_WRITE)
338 {
339 pd->out_flags |= _PR_POLL_WRITE_SYS_WRITE;
340 FD_SET(osfd, &wt);
341 }
342 if (pd->in_flags & PR_POLL_EXCEPT) FD_SET(osfd, &ex);
343 }
344 }
345 else
346 {
347 if (0 == ready)
348 {
349 PRPollDesc *prev;
350 for (prev = pds; prev < pd; prev++)
351 {
352 prev->out_flags = 0;
353 }
354 }
355 ready += 1; /* this will cause an abrupt return */
356 pd->out_flags = PR_POLL_NVAL; /* bogii */
357 }
358 }
359 }
360 else
361 {
362 pd->out_flags = 0;
363 }
364 }
365
366 if (0 != ready) return ready; /* no need to block */
367
368 remaining = timeout;
369 start = PR_IntervalNow();
370
371 retry:
372 if (timeout != PR_INTERVAL_NO_TIMEOUT)
373 {
374 PRInt32 ticksPerSecond = PR_TicksPerSecond();
375 tv.tv_sec = remaining / ticksPerSecond;
376 tv.tv_usec = PR_IntervalToMicroseconds( remaining % ticksPerSecond );
377 tvp = &tv;
378 }
379
380 ready = _MD_SELECT(maxfd + 1, &rd, &wt, &ex, tvp);
381
382 if (ready == -1 && errno == EINTR)
383 {
384 if (timeout == PR_INTERVAL_NO_TIMEOUT) goto retry;
385 else
386 {
387 elapsed = (PRIntervalTime) (PR_IntervalNow() - start);
388 if (elapsed > timeout) ready = 0; /* timed out */
389 else
390 {
391 remaining = timeout - elapsed;
392 goto retry;
393 }
394 }
395 }
396
397 /*
398 ** Now to unravel the select sets back into the client's poll
399 ** descriptor list. Is this possibly an area for pissing away
400 ** a few cycles or what?
401 */
402 if (ready > 0)
403 {
404 ready = 0;
405 for (pd = pds, epd = pd + npds; pd < epd; pd++)
406 {
407 PRInt16 out_flags = 0;
408 if ((NULL != pd->fd) && (0 != pd->in_flags))
409 {
410 PRInt32 osfd;
411 bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
412 PR_ASSERT(NULL != bottom);
413
414 osfd = bottom->secret->md.osfd;
415
416 if (FD_ISSET(osfd, &rd))
417 {
418 if (pd->out_flags & _PR_POLL_READ_SYS_READ)
419 out_flags |= PR_POLL_READ;
420 if (pd->out_flags & _PR_POLL_WRITE_SYS_READ)
421 out_flags |= PR_POLL_WRITE;
422 }
423 if (FD_ISSET(osfd, &wt))
424 {
425 if (pd->out_flags & _PR_POLL_READ_SYS_WRITE)
426 out_flags |= PR_POLL_READ;
427 if (pd->out_flags & _PR_POLL_WRITE_SYS_WRITE)
428 out_flags |= PR_POLL_WRITE;
429 }
430 if (FD_ISSET(osfd, &ex)) out_flags |= PR_POLL_EXCEPT;
431 }
432 pd->out_flags = out_flags;
433 if (out_flags) ready++;
434 }
435 PR_ASSERT(ready > 0);
436 }
437 else if (ready < 0)
438 {
439 err = _MD_ERRNO();
440 if (err == EBADF)
441 {
442 /* Find the bad fds */
443 ready = 0;
444 for (pd = pds, epd = pd + npds; pd < epd; pd++)
445 {
446 pd->out_flags = 0;
447 if ((NULL != pd->fd) && (0 != pd->in_flags))
448 {
449 bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
450 if (fcntl(bottom->secret->md.osfd, F_GETFL, 0) == -1)
451 {
452 pd->out_flags = PR_POLL_NVAL;
453 ready++;
454 }
455 }
456 }
457 PR_ASSERT(ready > 0);
458 }
459 else _PR_MD_MAP_SELECT_ERROR(err);
460 }
461
462 return ready;
463 } /* NativeThreadSelect */
464 #endif /* !defined(_PR_USE_POLL) */
465
LocalThreads(PRPollDesc * pds,PRIntn npds,PRIntervalTime timeout)466 static PRInt32 LocalThreads(
467 PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
468 {
469 PRPollDesc *pd, *epd;
470 PRInt32 ready, pdcnt;
471 _PRUnixPollDesc *unixpds, *unixpd;
472
473 /*
474 * XXX
475 * PRPollDesc has a PRFileDesc field, fd, while the IOQ
476 * is a list of PRPollQueue structures, each of which contains
477 * a _PRUnixPollDesc. A _PRUnixPollDesc struct contains
478 * the OS file descriptor, osfd, and not a PRFileDesc.
479 * So, we have allocate memory for _PRUnixPollDesc structures,
480 * copy the flags information from the pds list and have pq
481 * point to this list of _PRUnixPollDesc structures.
482 *
483 * It would be better if the memory allocation can be avoided.
484 */
485
486 unixpd = unixpds = (_PRUnixPollDesc*)
487 PR_MALLOC(npds * sizeof(_PRUnixPollDesc));
488 if (NULL == unixpds)
489 {
490 PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
491 return -1;
492 }
493
494 ready = 0;
495 for (pdcnt = 0, pd = pds, epd = pd + npds; pd < epd; pd++)
496 {
497 PRFileDesc *bottom;
498 PRInt16 in_flags_read = 0, in_flags_write = 0;
499 PRInt16 out_flags_read = 0, out_flags_write = 0;
500
501 if ((NULL != pd->fd) && (0 != pd->in_flags))
502 {
503 if (pd->in_flags & PR_POLL_READ)
504 {
505 in_flags_read = (pd->fd->methods->poll)(
506 pd->fd, pd->in_flags & ~PR_POLL_WRITE, &out_flags_read);
507 }
508 if (pd->in_flags & PR_POLL_WRITE)
509 {
510 in_flags_write = (pd->fd->methods->poll)(
511 pd->fd, pd->in_flags & ~PR_POLL_READ, &out_flags_write);
512 }
513 if ((0 != (in_flags_read & out_flags_read))
514 || (0 != (in_flags_write & out_flags_write)))
515 {
516 /* this one's ready right now */
517 if (0 == ready)
518 {
519 /*
520 * We will have to return without calling the
521 * system poll/select function. So zero the
522 * out_flags fields of all the poll descriptors
523 * before this one.
524 */
525 PRPollDesc *prev;
526 for (prev = pds; prev < pd; prev++)
527 {
528 prev->out_flags = 0;
529 }
530 }
531 ready += 1;
532 pd->out_flags = out_flags_read | out_flags_write;
533 }
534 else
535 {
536 pd->out_flags = 0; /* pre-condition */
537 bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
538 PR_ASSERT(NULL != bottom); /* what to do about that? */
539 if ((NULL != bottom)
540 && (_PR_FILEDESC_OPEN == bottom->secret->state))
541 {
542 if (0 == ready)
543 {
544 unixpd->osfd = bottom->secret->md.osfd;
545 unixpd->in_flags = 0;
546 if (in_flags_read & PR_POLL_READ)
547 {
548 unixpd->in_flags |= _PR_UNIX_POLL_READ;
549 pd->out_flags |= _PR_POLL_READ_SYS_READ;
550 }
551 if (in_flags_read & PR_POLL_WRITE)
552 {
553 unixpd->in_flags |= _PR_UNIX_POLL_WRITE;
554 pd->out_flags |= _PR_POLL_READ_SYS_WRITE;
555 }
556 if (in_flags_write & PR_POLL_READ)
557 {
558 unixpd->in_flags |= _PR_UNIX_POLL_READ;
559 pd->out_flags |= _PR_POLL_WRITE_SYS_READ;
560 }
561 if (in_flags_write & PR_POLL_WRITE)
562 {
563 unixpd->in_flags |= _PR_UNIX_POLL_WRITE;
564 pd->out_flags |= _PR_POLL_WRITE_SYS_WRITE;
565 }
566 if ((in_flags_read | in_flags_write) & PR_POLL_EXCEPT)
567 {
568 unixpd->in_flags |= _PR_UNIX_POLL_EXCEPT;
569 }
570 unixpd++; pdcnt++;
571 }
572 }
573 else
574 {
575 if (0 == ready)
576 {
577 PRPollDesc *prev;
578 for (prev = pds; prev < pd; prev++)
579 {
580 prev->out_flags = 0;
581 }
582 }
583 ready += 1; /* this will cause an abrupt return */
584 pd->out_flags = PR_POLL_NVAL; /* bogii */
585 }
586 }
587 }
588 }
589
590 if (0 != ready)
591 {
592 /* no need to block */
593 PR_DELETE(unixpds);
594 return ready;
595 }
596
597 ready = _PR_WaitForMultipleFDs(unixpds, pdcnt, timeout);
598
599 /*
600 * Copy the out_flags from the _PRUnixPollDesc structures to the
601 * user's PRPollDesc structures and free the allocated memory
602 */
603 unixpd = unixpds;
604 for (pd = pds, epd = pd + npds; pd < epd; pd++)
605 {
606 PRInt16 out_flags = 0;
607 if ((NULL != pd->fd) && (0 != pd->in_flags))
608 {
609 /*
610 * take errors from the poll operation,
611 * the R/W bits from the request
612 */
613 if (0 != unixpd->out_flags)
614 {
615 if (unixpd->out_flags & _PR_UNIX_POLL_READ)
616 {
617 if (pd->out_flags & _PR_POLL_READ_SYS_READ)
618 out_flags |= PR_POLL_READ;
619 if (pd->out_flags & _PR_POLL_WRITE_SYS_READ)
620 out_flags |= PR_POLL_WRITE;
621 }
622 if (unixpd->out_flags & _PR_UNIX_POLL_WRITE)
623 {
624 if (pd->out_flags & _PR_POLL_READ_SYS_WRITE)
625 out_flags |= PR_POLL_READ;
626 if (pd->out_flags & _PR_POLL_WRITE_SYS_WRITE)
627 out_flags |= PR_POLL_WRITE;
628 }
629 if (unixpd->out_flags & _PR_UNIX_POLL_EXCEPT)
630 out_flags |= PR_POLL_EXCEPT;
631 if (unixpd->out_flags & _PR_UNIX_POLL_ERR)
632 out_flags |= PR_POLL_ERR;
633 if (unixpd->out_flags & _PR_UNIX_POLL_NVAL)
634 out_flags |= PR_POLL_NVAL;
635 if (unixpd->out_flags & _PR_UNIX_POLL_HUP)
636 out_flags |= PR_POLL_HUP;
637 }
638 unixpd++;
639 }
640 pd->out_flags = out_flags;
641 }
642
643 PR_DELETE(unixpds);
644
645 return ready;
646 } /* LocalThreads */
647
/*
 * NativeThreads selects the implementation used for native threads:
 * poll()-based when _PR_USE_POLL is defined, select()-based otherwise.
 */
#if defined(_PR_USE_POLL)
#define NativeThreads NativeThreadPoll
#else
#define NativeThreads NativeThreadSelect
#endif

_MD_pr_poll(PRPollDesc * pds,PRIntn npds,PRIntervalTime timeout)654 PRInt32 _MD_pr_poll(PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
655 {
656 PRInt32 rv = 0;
657 PRThread *me = _PR_MD_CURRENT_THREAD();
658
659 if (_PR_PENDING_INTERRUPT(me))
660 {
661 me->flags &= ~_PR_INTERRUPT;
662 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
663 return -1;
664 }
665 if (0 == npds) PR_Sleep(timeout);
666 else if (_PR_IS_NATIVE_THREAD(me))
667 rv = NativeThreads(pds, npds, timeout);
668 else rv = LocalThreads(pds, npds, timeout);
669
670 return rv;
671 } /* _MD_pr_poll */
672
673 #endif /* defined(_PR_PTHREADS) */
674
675 /* uxpoll.c */
676
677