1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include <assert.h>
23 #include <io.h>
24 
25 #include "uv.h"
26 #include "internal.h"
27 #include "handle-inl.h"
28 #include "req-inl.h"
29 
30 
/* GUIDs of Winsock provider catalog entries that are implemented directly
 * by MSAFD. A socket created through one of these providers exposes an AFD
 * handle, which allows the fast (AFD/IOCP based) poll mechanism below to be
 * used. NOTE(review): presumably the MSAFD Tcpip TCP/UDP/RAW providers —
 * confirm against the Winsock provider catalog. */
static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
  {0xe70f1aa0, 0xab8b, 0x11cf,
      {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
  {0xf9eab0c0, 0x26d4, 0x11d0,
      {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
  {0x9fc48064, 0x7298, 0x43e4,
      {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}
};
39 
/* A single-socket fd_set look-alike. Its layout matches the head of a
 * Winsock fd_set, so a pointer to it can be cast to fd_set* when calling
 * select() with at most one socket per set (see
 * uv__slow_poll_thread_proc). Do not change the member order or types. */
typedef struct uv_single_fd_set_s {
  unsigned int fd_count;
  SOCKET fd_array[1];
} uv_single_fd_set_t;
44 
45 
/* Shared OVERLAPPED used for poll-cancel requests whose completion must be
 * ignored; lazily initialized exactly once via the guard below. */
static OVERLAPPED overlapped_dummy_;
static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;

/* Scratch output buffer for cancel requests; its contents are never read. */
static AFD_POLL_INFO afd_poll_info_dummy_;
50 
51 
uv__init_overlapped_dummy(void)52 static void uv__init_overlapped_dummy(void) {
53   HANDLE event;
54 
55   event = CreateEvent(NULL, TRUE, TRUE, NULL);
56   if (event == NULL)
57     uv_fatal_error(GetLastError(), "CreateEvent");
58 
59   memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
60   overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
61 }
62 
63 
/* Returns the shared dummy OVERLAPPED, initializing it on first use
 * (thread-safe via uv_once). */
static OVERLAPPED* uv__get_overlapped_dummy(void) {
  uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
  return &overlapped_dummy_;
}
68 
69 
/* Returns the shared scratch AFD_POLL_INFO used as the output buffer of
 * cancel requests; callers never inspect its contents. */
static AFD_POLL_INFO* uv__get_afd_poll_info_dummy(void) {
  return &afd_poll_info_dummy_;
}
73 
74 
/* Submits one AFD poll request for `handle`, using whichever of the two
 * per-handle request slots is currently free. Two requests may be in
 * flight at once; the mask_events_* fields record which events the *other*
 * outstanding request already covers, so duplicate reports can be filtered
 * out in uv__fast_poll_process_poll_req. */
static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  AFD_POLL_INFO* info;
  uv_req_t* req;
  DWORD afd_events;
  DWORD result;

  /* Pick a free request slot. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    /* Both requests are outstanding. One of them is about to return; when
     * it does, uv__fast_poll_process_poll_req runs and resubmits with the
     * then-current interest set, so there is nothing to do here. */
    return;
  }

  /* Translate libuv poll interest into AFD event flags. */
  afd_events = 0;
  if (handle->events & UV_READABLE) {
    afd_events |= AFD_POLL_RECEIVE | AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT |
                  AFD_POLL_ABORT;
  } else if (handle->events & UV_DISCONNECT) {
    afd_events |= AFD_POLL_DISCONNECT;
  }
  if (handle->events & UV_WRITABLE) {
    afd_events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
  }

  /* Exclusive == TRUE makes any other outstanding poll request on the same
   * socket return immediately. */
  info->Exclusive = TRUE;
  info->NumberOfHandles = 1;
  info->Timeout.QuadPart = INT64_MAX;
  info->Handles[0].Handle = (HANDLE) handle->socket;
  info->Handles[0].Status = 0;
  info->Handles[0].Events = afd_events;

  memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);

  result = uv_msafd_poll((SOCKET) handle->peer_socket,
                         info,
                         info,
                         &req->u.io.overlapped);
  if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
    /* Submission failed outright: queue the req ourselves, carrying the
     * error, so it is still processed by the loop. */
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, req);
  }
}
135 
136 
/* Forces any outstanding AFD poll requests on `handle->socket` to return
 * by issuing one more exclusive poll request against the socket itself.
 * Returns 0 on success, otherwise the Winsock error code. */
static int uv__fast_poll_cancel_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  AFD_POLL_INFO info;
  DWORD result;
  DWORD error;

  info.Exclusive = TRUE;
  info.NumberOfHandles = 1;
  info.Timeout.QuadPart = INT64_MAX;
  info.Handles[0].Handle = (HANDLE) handle->socket;
  info.Handles[0].Status = 0;
  info.Handles[0].Events = AFD_POLL_ALL;

  /* The results of this request are irrelevant, so route them into the
   * shared dummy output buffers. */
  result = uv_msafd_poll(handle->socket,
                         &info,
                         uv__get_afd_poll_info_dummy(),
                         uv__get_overlapped_dummy());

  if (result != SOCKET_ERROR)
    return 0;

  error = WSAGetLastError();
  if (error == WSA_IO_PENDING)
    return 0;

  return error;
}
161 
162 
/* Handles completion of a fast-poll (AFD) request: translates reported AFD
 * events into UV_* events, invokes the user callback, resubmits if there is
 * still interest not covered by an outstanding request, and starts endgame
 * when the handle is closing and both requests have drained. */
static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  AFD_POLL_INFO* afd_poll_info;

  /* Determine which of the two request slots completed and mark it free.
   * mask_events holds the events already covered by the other request, so
   * they are not reported twice. */
  if (req == &handle->poll_req_1) {
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  /* Report an error unless the select was just interrupted. */
  if (!REQ_SUCCESS(req)) {
    DWORD error = GET_REQ_SOCK_ERROR(req);
    if (error != WSAEINTR && handle->events != 0) {
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(error), 0);
    }

  } else if (afd_poll_info->NumberOfHandles >= 1) {
    unsigned char events = 0;

    /* Map AFD event flags back onto the UV_* event bits. */
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
      events |= UV_READABLE;
      if ((afd_poll_info->Handles[0].Events & AFD_POLL_DISCONNECT) != 0) {
        events |= UV_DISCONNECT;
      }
    }
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
        AFD_POLL_CONNECT_FAIL)) != 0) {
      events |= UV_WRITABLE;
    }

    /* Only report events the user still wants and that the other
     * outstanding request has not already been told about. */
    events &= handle->events & ~mask_events;

    if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
      /* Stop polling. */
      handle->events = 0;
      if (uv__is_active(handle))
        uv__handle_stop(handle);
    }

    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  /* Resubmit if there is interest not covered by an outstanding request;
   * otherwise, finish the close once both requests have drained. */
  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__fast_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV__HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
227 
228 
/* Updates the set of events the user is interested in (fast-poll path).
 * Passing 0 stops the watcher. Always returns 0. */
static int uv__fast_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
  assert(handle->type == UV_POLL);
  assert(!(handle->flags & UV__HANDLE_CLOSING));
  assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0);

  handle->events = events;

  if (events == 0)
    uv__handle_stop(handle);
  else
    uv__handle_start(handle);

  /* Submit a new request if some requested events are not covered by an
   * already-outstanding one. */
  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0)
    uv__fast_poll_submit_poll_req(handle->loop, handle);

  return 0;
}
249 
250 
/* Begins closing a fast-poll handle. If poll requests are still
 * outstanding, issues a cancel request that forces them to return; the
 * endgame is then triggered from uv__fast_poll_process_poll_req. */
static int uv__fast_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  handle->events = 0;
  uv__handle_closing(handle);

  if (handle->submitted_events_1 != 0 ||
      handle->submitted_events_2 != 0) {
    /* Cancel outstanding poll requests by executing another, unique poll
     * request that forces the outstanding ones to return. */
    return uv__fast_poll_cancel_poll_req(loop, handle);
  }

  uv_want_endgame(loop, (uv_handle_t*) handle);
  return 0;
}
265 
266 
uv__fast_poll_create_peer_socket(HANDLE iocp,WSAPROTOCOL_INFOW * protocol_info)267 static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
268     WSAPROTOCOL_INFOW* protocol_info) {
269   SOCKET sock = 0;
270 
271   sock = WSASocketW(protocol_info->iAddressFamily,
272                     protocol_info->iSocketType,
273                     protocol_info->iProtocol,
274                     protocol_info,
275                     0,
276                     WSA_FLAG_OVERLAPPED);
277   if (sock == INVALID_SOCKET) {
278     return INVALID_SOCKET;
279   }
280 
281   if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
282     goto error;
283   };
284 
285   if (CreateIoCompletionPort((HANDLE) sock,
286                              iocp,
287                              (ULONG_PTR) sock,
288                              0) == NULL) {
289     goto error;
290   }
291 
292   return sock;
293 
294  error:
295   closesocket(sock);
296   return INVALID_SOCKET;
297 }
298 
299 
uv__fast_poll_get_peer_socket(uv_loop_t * loop,WSAPROTOCOL_INFOW * protocol_info)300 static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
301     WSAPROTOCOL_INFOW* protocol_info) {
302   int index, i;
303   SOCKET peer_socket;
304 
305   index = -1;
306   for (i = 0; (size_t) i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
307     if (memcmp((void*) &protocol_info->ProviderId,
308                (void*) &uv_msafd_provider_ids[i],
309                sizeof protocol_info->ProviderId) == 0) {
310       index = i;
311     }
312   }
313 
314   /* Check if the protocol uses an msafd socket. */
315   if (index < 0) {
316     return INVALID_SOCKET;
317   }
318 
319   /* If we didn't (try) to create a peer socket yet, try to make one. Don't */
320   /* try again if the peer socket creation failed earlier for the same */
321   /* protocol. */
322   peer_socket = loop->poll_peer_sockets[index];
323   if (peer_socket == 0) {
324     peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
325     loop->poll_peer_sockets[index] = peer_socket;
326   }
327 
328   return peer_socket;
329 }
330 
331 
uv__slow_poll_thread_proc(void * arg)332 static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
333   uv_req_t* req = (uv_req_t*) arg;
334   uv_poll_t* handle = (uv_poll_t*) req->data;
335   unsigned char reported_events;
336   int r;
337   uv_single_fd_set_t rfds, wfds, efds;
338   struct timeval timeout;
339 
340   assert(handle->type == UV_POLL);
341   assert(req->type == UV_POLL_REQ);
342 
343   if (handle->events & UV_READABLE) {
344     rfds.fd_count = 1;
345     rfds.fd_array[0] = handle->socket;
346   } else {
347     rfds.fd_count = 0;
348   }
349 
350   if (handle->events & UV_WRITABLE) {
351     wfds.fd_count = 1;
352     wfds.fd_array[0] = handle->socket;
353     efds.fd_count = 1;
354     efds.fd_array[0] = handle->socket;
355   } else {
356     wfds.fd_count = 0;
357     efds.fd_count = 0;
358   }
359 
360   /* Make the select() time out after 3 minutes. If select() hangs because */
361   /* the user closed the socket, we will at least not hang indefinitely. */
362   timeout.tv_sec = 3 * 60;
363   timeout.tv_usec = 0;
364 
365   r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
366   if (r == SOCKET_ERROR) {
367     /* Queue this req, reporting an error. */
368     SET_REQ_ERROR(&handle->poll_req_1, WSAGetLastError());
369     POST_COMPLETION_FOR_REQ(handle->loop, req);
370     return 0;
371   }
372 
373   reported_events = 0;
374 
375   if (r > 0) {
376     if (rfds.fd_count > 0) {
377       assert(rfds.fd_count == 1);
378       assert(rfds.fd_array[0] == handle->socket);
379       reported_events |= UV_READABLE;
380     }
381 
382     if (wfds.fd_count > 0) {
383       assert(wfds.fd_count == 1);
384       assert(wfds.fd_array[0] == handle->socket);
385       reported_events |= UV_WRITABLE;
386     } else if (efds.fd_count > 0) {
387       assert(efds.fd_count == 1);
388       assert(efds.fd_array[0] == handle->socket);
389       reported_events |= UV_WRITABLE;
390     }
391   }
392 
393   SET_REQ_SUCCESS(req);
394   req->u.io.overlapped.InternalHigh = (DWORD) reported_events;
395   POST_COMPLETION_FOR_REQ(handle->loop, req);
396 
397   return 0;
398 }
399 
400 
/* Submits one slow-poll request for `handle` on a thread-pool worker,
 * using whichever of the two per-handle request slots is free. Callers
 * guarantee at least one slot is free (asserted). */
static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;

  /* Pick a free request slot; the mask_events_* fields record which events
   * the other outstanding request already covers. */
  if (handle->submitted_events_1 == 0) {
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
    req = &handle->poll_req_1;
  } else if (handle->submitted_events_2 == 0) {
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
    req = &handle->poll_req_2;
  } else {
    assert(0);
    return;
  }

  /* The blocking select() runs on a worker thread. */
  if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
                         (void*) req,
                         WT_EXECUTELONGFUNCTION)) {
    /* Could not queue the work item; make this req pending with an error
     * so it is still processed by the loop. */
    SET_REQ_ERROR(req, GetLastError());
    uv_insert_pending_req(loop, req);
  }
}
428 
429 
430 
/* Handles completion of a slow-poll request: reports the events carried in
 * overlapped.InternalHigh to the user callback, resubmits if uncovered
 * interest remains, and starts endgame when closing and fully drained. */
static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char masked;
  int err;

  /* Determine which request slot completed and mark it free; `masked` is
   * the set of events the other outstanding request already covers. */
  if (req == &handle->poll_req_1) {
    handle->submitted_events_1 = 0;
    masked = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    handle->submitted_events_2 = 0;
    masked = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  if (REQ_SUCCESS(req)) {
    /* Got some events; report only those still wanted and not already
     * covered by the other request. */
    int events = req->u.io.overlapped.InternalHigh & handle->events & ~masked;
    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  } else if (handle->events != 0) {
    /* Error. */
    err = GET_REQ_ERROR(req);
    handle->events = 0; /* Stop the watcher */
    handle->poll_cb(handle, uv_translate_sys_error(err), 0);
  }

  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__slow_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV__HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
471 
472 
/* Updates the set of events the user is interested in (slow-poll path).
 * Passing 0 stops the watcher. Always returns 0. Note: UV_DISCONNECT is
 * not supported on this path. */
static int uv__slow_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
  assert(handle->type == UV_POLL);
  assert(!(handle->flags & UV__HANDLE_CLOSING));
  assert((events & ~(UV_READABLE | UV_WRITABLE)) == 0);

  handle->events = events;

  if (events == 0)
    uv__handle_stop(handle);
  else
    uv__handle_start(handle);

  /* Submit a new request if some requested events are not covered by an
   * already-outstanding one. */
  if ((handle->events &
      ~(handle->submitted_events_1 | handle->submitted_events_2)) != 0)
    uv__slow_poll_submit_poll_req(handle->loop, handle);

  return 0;
}
493 
494 
/* Begins closing a slow-poll handle. Worker threads cannot be cancelled,
 * so when requests are outstanding the endgame is deferred until they
 * drain (see uv__slow_poll_process_poll_req). Always returns 0. */
static int uv__slow_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  handle->events = 0;
  uv__handle_closing(handle);

  if (handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0)
    uv_want_endgame(loop, (uv_handle_t*) handle);

  return 0;
}
506 
507 
/* Initializes a poll handle from a CRT file descriptor by translating it
 * to the underlying OS socket handle. */
int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
  SOCKET sock;

  sock = (SOCKET) uv__get_osfhandle(fd);
  return uv_poll_init_socket(loop, handle, sock);
}
511 
512 
/* Initializes a poll handle for `socket`: puts the socket in nonblocking
 * mode, resolves its base handle where possible, and selects the fast
 * (AFD) or slow (select-on-worker-thread) poll mechanism depending on
 * whether the socket's protocol provider is MSAFD.
 * Returns 0 on success or a translated system error. */
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
    uv_os_sock_t socket) {
  WSAPROTOCOL_INFOW protocol_info;
  int len;
  SOCKET peer_socket, base_socket;
  DWORD bytes;
  DWORD yes = 1;

  /* Set the socket to nonblocking mode */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR)
    return uv_translate_sys_error(WSAGetLastError());

  /* Try to obtain a base handle for the socket. This increases this chances
   * that we find an AFD handle and are able to use the fast poll mechanism.
   * This will always fail on windows XP/2k3, since they don't support the
   * SIO_BASE_HANDLE ioctl. */
#ifndef NDEBUG
  /* Poison the value in debug builds so the assert below can catch a
   * WSAIoctl that reports success without writing the output buffer. */
  base_socket = INVALID_SOCKET;
#endif

  if (WSAIoctl(socket,
               SIO_BASE_HANDLE,
               NULL,
               0,
               &base_socket,
               sizeof base_socket,
               &bytes,
               NULL,
               NULL) == 0) {
    assert(base_socket != 0 && base_socket != INVALID_SOCKET);
    /* Poll the base handle rather than any LSP-layered one. */
    socket = base_socket;
  }

  uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
  handle->socket = socket;
  handle->events = 0;

  /* Obtain protocol information about the socket. */
  len = sizeof protocol_info;
  if (getsockopt(socket,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &len) != 0) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  /* Get the peer socket that is needed to enable fast poll. If the returned
   * value is INVALID_SOCKET, the protocol is not implemented by MSAFD and
   * we'll have to use slow mode. */
  peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);

  if (peer_socket != INVALID_SOCKET) {
    /* Initialize fast poll specific fields. */
    handle->peer_socket = peer_socket;
  } else {
    /* Initialize slow poll specific fields. */
    handle->flags |= UV_HANDLE_POLL_SLOW;
  }

  /* Initialize 2 poll reqs. */
  handle->submitted_events_1 = 0;
  UV_REQ_INIT(&handle->poll_req_1, UV_POLL_REQ);
  handle->poll_req_1.data = handle;

  handle->submitted_events_2 = 0;
  UV_REQ_INIT(&handle->poll_req_2, UV_POLL_REQ);
  handle->poll_req_2.data = handle;

  return 0;
}
584 
585 
/* Starts (or updates) polling for `events`, invoking `cb` on activity.
 * Dispatches to the fast or slow implementation depending on how the
 * handle was initialized. Returns 0 or a translated system error. */
int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
  int err;

  if (handle->flags & UV_HANDLE_POLL_SLOW)
    err = uv__slow_poll_set(handle->loop, handle, events);
  else
    err = uv__fast_poll_set(handle->loop, handle, events);

  if (err)
    return uv_translate_sys_error(err);

  handle->poll_cb = cb;
  return 0;
}
603 
604 
/* Stops polling on the handle by clearing its event interest. Returns 0 or
 * a translated system error. */
int uv_poll_stop(uv_poll_t* handle) {
  int err;

  if (handle->flags & UV_HANDLE_POLL_SLOW)
    err = uv__slow_poll_set(handle->loop, handle, 0);
  else
    err = uv__fast_poll_set(handle->loop, handle, 0);

  return uv_translate_sys_error(err);
}
616 
617 
/* Loop-side dispatcher for completed poll requests; routes to the fast or
 * slow completion handler. */
void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
  if (handle->flags & UV_HANDLE_POLL_SLOW)
    uv__slow_poll_process_poll_req(loop, handle, req);
  else
    uv__fast_poll_process_poll_req(loop, handle, req);
}
625 
626 
/* Begins closing a poll handle; dispatches to the fast or slow close
 * routine. Returns 0 or a Winsock error from the cancel request. */
int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  if (handle->flags & UV_HANDLE_POLL_SLOW)
    return uv__slow_poll_close(loop, handle);

  return uv__fast_poll_close(loop, handle);
}
634 
635 
/* Final stage of closing a poll handle. Runs only after both poll requests
 * have drained (asserted below). */
void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
  assert(handle->flags & UV__HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));

  assert(handle->submitted_events_1 == 0);
  assert(handle->submitted_events_2 == 0);

  uv__handle_close(handle);
}
645