1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 #include <assert.h>
23 #include <io.h>
24
25 #include "uv.h"
26 #include "internal.h"
27 #include "handle-inl.h"
28 #include "req-inl.h"
29
30
/* Winsock catalog provider GUIDs that are known to be implemented by MSAFD
 * and therefore support the AFD poll ioctl used by the fast poll path.
 * uv__fast_poll_get_peer_socket compares a socket's ProviderId against this
 * table. NOTE(review): these look like the MSAFD Tcpip provider ids —
 * confirm against the installed Winsock catalog before extending. */
static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
  {0xe70f1aa0, 0xab8b, 0x11cf,
      {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
  {0xf9eab0c0, 0x26d4, 0x11d0,
      {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
  {0x9fc48064, 0x7298, 0x43e4,
      {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}
};
39
/* An fd_set-compatible layout that holds exactly one socket. The slow poll
 * path casts this to fd_set* when calling select(), avoiding the full
 * FD_SETSIZE-sized array for a single-socket watch. */
typedef struct uv_single_fd_set_s {
  unsigned int fd_count;
  SOCKET fd_array[1];
} uv_single_fd_set_t;
44
45
/* Shared dummy OVERLAPPED used by the cancellation poll issued in
 * uv_poll_close; lazily initialized exactly once via the guard below. */
static OVERLAPPED overlapped_dummy_;
static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;

/* Scratch output buffer for the cancellation poll; its contents are never
 * inspected. */
static AFD_POLL_INFO afd_poll_info_dummy_;
50
51
uv__init_overlapped_dummy(void)52 static void uv__init_overlapped_dummy(void) {
53 HANDLE event;
54
55 event = CreateEvent(NULL, TRUE, TRUE, NULL);
56 if (event == NULL)
57 uv_fatal_error(GetLastError(), "CreateEvent");
58
59 memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
60 overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
61 }
62
63
/* Returns the shared dummy OVERLAPPED, initializing it on first use. */
static OVERLAPPED* uv__get_overlapped_dummy(void) {
  uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
  return &overlapped_dummy_;
}
68
69
/* Returns the shared scratch AFD_POLL_INFO used as the output buffer for
 * the cancellation poll; callers never read what is written into it. */
static AFD_POLL_INFO* uv__get_afd_poll_info_dummy(void) {
  return &afd_poll_info_dummy_;
}
73
74
/* Submits an AFD poll for the handle's currently watched events.
 *
 * Each handle owns two poll reqs so a new request can be issued while the
 * previous one is still outstanding. The mask_events_* fields record which
 * events the *other* request was already covering, so its eventual
 * completion does not double-report them. */
static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;
  AFD_POLL_INFO* afd_poll_info;
  int result;

  /* Find a yet unsubmitted req to submit. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    /* Just wait until there's an unsubmitted req. This will happen almost
     * immediately as one of the 2 outstanding requests is about to return.
     * When this happens, uv__fast_poll_process_poll_req will be called, and
     * the pending events, if needed, will be processed in a subsequent
     * request. */
    return;
  }

  /* Setting Exclusive to TRUE makes the other poll request return if there is
   * any. */
  afd_poll_info->Exclusive = TRUE;
  afd_poll_info->NumberOfHandles = 1;
  afd_poll_info->Timeout.QuadPart = INT64_MAX; /* Effectively no timeout. */
  afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info->Handles[0].Status = 0;
  afd_poll_info->Handles[0].Events = 0;

  /* Translate the uv event mask into AFD poll flags. */
  if (handle->events & UV_READABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT;
  } else {
    if (handle->events & UV_DISCONNECT) {
      afd_poll_info->Handles[0].Events |= AFD_POLL_DISCONNECT;
    }
  }
  if (handle->events & UV_WRITABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
  }

  memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);

  /* Issue the ioctl on the peer socket; the same info buffer is used for
   * both input and output. */
  result = uv_msafd_poll((SOCKET) handle->peer_socket,
                         afd_poll_info,
                         afd_poll_info,
                         &req->u.io.overlapped);
  if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
    /* Queue this req, reporting an error. */
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, req);
  }
}
135
136
/* Handles completion of a fast (AFD) poll req: reports events through the
 * user callback, resubmits while events remain watched, and finishes the
 * close sequence once both reqs have drained. */
static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  AFD_POLL_INFO* afd_poll_info;

  /* Figure out which of the two reqs completed and mark it free again. */
  if (req == &handle->poll_req_1) {
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  /* Report an error unless the select was just interrupted. */
  if (!REQ_SUCCESS(req)) {
    DWORD error = GET_REQ_SOCK_ERROR(req);
    if (error != WSAEINTR && handle->events != 0) {
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(error), 0);
    }

  } else if (afd_poll_info->NumberOfHandles >= 1) {
    unsigned char events = 0;

    /* Map AFD poll flags back onto the uv event mask. */
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
      events |= UV_READABLE;
      if ((afd_poll_info->Handles[0].Events & AFD_POLL_DISCONNECT) != 0) {
        events |= UV_DISCONNECT;
      }
    }
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
        AFD_POLL_CONNECT_FAIL)) != 0) {
      events |= UV_WRITABLE;
    }

    /* Suppress events the other (still outstanding) req already covers,
     * and anything the user is no longer interested in. */
    events &= handle->events & ~mask_events;

    if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
      /* Stop polling. */
      handle->events = 0;
      if (uv__is_active(handle))
        uv__handle_stop(handle);
    }

    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  /* Resubmit if some watched events aren't covered by an outstanding req;
   * otherwise, if the handle is closing and fully drained, finish the
   * close. */
  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__fast_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
      handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
201
202
uv__fast_poll_create_peer_socket(HANDLE iocp,WSAPROTOCOL_INFOW * protocol_info)203 static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
204 WSAPROTOCOL_INFOW* protocol_info) {
205 SOCKET sock = 0;
206
207 sock = WSASocketW(protocol_info->iAddressFamily,
208 protocol_info->iSocketType,
209 protocol_info->iProtocol,
210 protocol_info,
211 0,
212 WSA_FLAG_OVERLAPPED);
213 if (sock == INVALID_SOCKET) {
214 return INVALID_SOCKET;
215 }
216
217 if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
218 goto error;
219 };
220
221 if (CreateIoCompletionPort((HANDLE) sock,
222 iocp,
223 (ULONG_PTR) sock,
224 0) == NULL) {
225 goto error;
226 }
227
228 return sock;
229
230 error:
231 closesocket(sock);
232 return INVALID_SOCKET;
233 }
234
235
uv__fast_poll_get_peer_socket(uv_loop_t * loop,WSAPROTOCOL_INFOW * protocol_info)236 static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
237 WSAPROTOCOL_INFOW* protocol_info) {
238 int index, i;
239 SOCKET peer_socket;
240
241 index = -1;
242 for (i = 0; (size_t) i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
243 if (memcmp((void*) &protocol_info->ProviderId,
244 (void*) &uv_msafd_provider_ids[i],
245 sizeof protocol_info->ProviderId) == 0) {
246 index = i;
247 }
248 }
249
250 /* Check if the protocol uses an msafd socket. */
251 if (index < 0) {
252 return INVALID_SOCKET;
253 }
254
255 /* If we didn't (try) to create a peer socket yet, try to make one. Don't try
256 * again if the peer socket creation failed earlier for the same protocol. */
257 peer_socket = loop->poll_peer_sockets[index];
258 if (peer_socket == 0) {
259 peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
260 loop->poll_peer_sockets[index] = peer_socket;
261 }
262
263 return peer_socket;
264 }
265
266
uv__slow_poll_thread_proc(void * arg)267 static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
268 uv_req_t* req = (uv_req_t*) arg;
269 uv_poll_t* handle = (uv_poll_t*) req->data;
270 unsigned char reported_events;
271 int r;
272 uv_single_fd_set_t rfds, wfds, efds;
273 struct timeval timeout;
274
275 assert(handle->type == UV_POLL);
276 assert(req->type == UV_POLL_REQ);
277
278 if (handle->events & UV_READABLE) {
279 rfds.fd_count = 1;
280 rfds.fd_array[0] = handle->socket;
281 } else {
282 rfds.fd_count = 0;
283 }
284
285 if (handle->events & UV_WRITABLE) {
286 wfds.fd_count = 1;
287 wfds.fd_array[0] = handle->socket;
288 efds.fd_count = 1;
289 efds.fd_array[0] = handle->socket;
290 } else {
291 wfds.fd_count = 0;
292 efds.fd_count = 0;
293 }
294
295 /* Make the select() time out after 3 minutes. If select() hangs because the
296 * user closed the socket, we will at least not hang indefinitely. */
297 timeout.tv_sec = 3 * 60;
298 timeout.tv_usec = 0;
299
300 r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
301 if (r == SOCKET_ERROR) {
302 /* Queue this req, reporting an error. */
303 SET_REQ_ERROR(&handle->poll_req_1, WSAGetLastError());
304 POST_COMPLETION_FOR_REQ(handle->loop, req);
305 return 0;
306 }
307
308 reported_events = 0;
309
310 if (r > 0) {
311 if (rfds.fd_count > 0) {
312 assert(rfds.fd_count == 1);
313 assert(rfds.fd_array[0] == handle->socket);
314 reported_events |= UV_READABLE;
315 }
316
317 if (wfds.fd_count > 0) {
318 assert(wfds.fd_count == 1);
319 assert(wfds.fd_array[0] == handle->socket);
320 reported_events |= UV_WRITABLE;
321 } else if (efds.fd_count > 0) {
322 assert(efds.fd_count == 1);
323 assert(efds.fd_array[0] == handle->socket);
324 reported_events |= UV_WRITABLE;
325 }
326 }
327
328 SET_REQ_SUCCESS(req);
329 req->u.io.overlapped.InternalHigh = (DWORD) reported_events;
330 POST_COMPLETION_FOR_REQ(handle->loop, req);
331
332 return 0;
333 }
334
335
/* Submits a slow-mode poll by queueing uv__slow_poll_thread_proc on the
 * system thread pool. Mirrors the two-req bookkeeping of the fast path:
 * mask_events_* records what the other outstanding req already covers.
 * Unlike the fast path, callers must never submit when both reqs are
 * already outstanding (asserted below). */
static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;

  /* Find a yet unsubmitted req to submit. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    assert(0);
    return;
  }

  if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
                         (void*) req,
                         WT_EXECUTELONGFUNCTION)) {
    /* Make this req pending, reporting an error. */
    SET_REQ_ERROR(req, GetLastError());
    uv_insert_pending_req(loop, req);
  }
}
363
364
365
/* Handles completion of a slow (select-based) poll req: reports the events
 * recorded by the worker thread, resubmits while events remain watched,
 * and finishes the close sequence once both reqs have drained. */
static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  int err;

  /* Figure out which of the two reqs completed and mark it free again. */
  if (req == &handle->poll_req_1) {
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  if (!REQ_SUCCESS(req)) {
    /* Error. */
    if (handle->events != 0) {
      err = GET_REQ_ERROR(req);
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(err), 0);
    }
  } else {
    /* Got some events. The worker stored the event mask in InternalHigh;
     * filter out events the other outstanding req already covers and
     * anything the user is no longer interested in. */
    int events = req->u.io.overlapped.InternalHigh & handle->events & ~mask_events;
    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  /* Resubmit if some watched events aren't covered by an outstanding req;
   * otherwise, if the handle is closing and fully drained, finish the
   * close. */
  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__slow_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
      handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
406
407
/* Initializes a poll handle for a CRT file descriptor by translating it to
 * the underlying OS socket handle and delegating to the socket variant. */
int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
  SOCKET sock;

  sock = (SOCKET) uv__get_osfhandle(fd);
  return uv_poll_init_socket(loop, handle, sock);
}
411
412
/* Initializes a poll handle for the given socket. Chooses between fast
 * (AFD-ioctl based) and slow (select on a worker thread) polling depending
 * on whether the socket's Winsock provider is implemented by MSAFD.
 * Returns 0 on success or a translated system error code. */
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
    uv_os_sock_t socket) {
  WSAPROTOCOL_INFOW protocol_info;
  int len;
  SOCKET peer_socket, base_socket;
  DWORD bytes;
  DWORD yes = 1;

  /* Set the socket to nonblocking mode */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR)
    return uv_translate_sys_error(WSAGetLastError());

/* Try to obtain a base handle for the socket. This increases the chances
 * that we find an AFD handle and are able to use the fast poll mechanism.
 * This will always fail on windows XP/2k3, since they don't support the
 * SIO_BASE_HANDLE ioctl. */
#ifndef NDEBUG
  base_socket = INVALID_SOCKET;
#endif

  if (WSAIoctl(socket,
               SIO_BASE_HANDLE,
               NULL,
               0,
               &base_socket,
               sizeof base_socket,
               &bytes,
               NULL,
               NULL) == 0) {
    assert(base_socket != 0 && base_socket != INVALID_SOCKET);
    socket = base_socket;
  }

  uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
  handle->socket = socket;
  handle->events = 0;

  /* Obtain protocol information about the socket. */
  len = sizeof protocol_info;
  if (getsockopt(socket,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &len) != 0) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  /* Get the peer socket that is needed to enable fast poll. If the returned
   * value is NULL, the protocol is not implemented by MSAFD and we'll have to
   * use slow mode. */
  peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);

  if (peer_socket != INVALID_SOCKET) {
    /* Initialize fast poll specific fields. */
    handle->peer_socket = peer_socket;
  } else {
    /* Initialize slow poll specific fields. */
    handle->flags |= UV_HANDLE_POLL_SLOW;
  }

  /* Initialize 2 poll reqs. */
  handle->submitted_events_1 = 0;
  UV_REQ_INIT(&handle->poll_req_1, UV_POLL_REQ);
  handle->poll_req_1.data = handle;

  handle->submitted_events_2 = 0;
  UV_REQ_INIT(&handle->poll_req_2, UV_POLL_REQ);
  handle->poll_req_2.data = handle;

  return 0;
}
484
485
/* Applies a new event mask and callback to the handle. A zero mask stops
 * the watcher; a non-zero mask starts it and, when some requested events
 * aren't already covered by an outstanding poll req, submits a new one via
 * the fast or slow path as appropriate. Always returns 0. */
static int uv__poll_set(uv_poll_t* handle, int events, uv_poll_cb cb) {
  int outstanding;

  assert(handle->type == UV_POLL);
  assert(!(handle->flags & UV_HANDLE_CLOSING));
  assert(0 == (events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)));

  handle->poll_cb = cb;
  handle->events = events;

  if (events == 0) {
    uv__handle_stop(handle);
    return 0;
  }

  uv__handle_start(handle);

  outstanding = handle->submitted_events_1 | handle->submitted_events_2;
  if ((handle->events & ~outstanding) != 0) {
    if (handle->flags & UV_HANDLE_POLL_SLOW)
      uv__slow_poll_submit_poll_req(handle->loop, handle);
    else
      uv__fast_poll_submit_poll_req(handle->loop, handle);
  }

  return 0;
}
514
515
/* Starts (or reconfigures) polling for `events`, invoking `cb` on activity. */
int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
  return uv__poll_set(handle, events, cb);
}
519
520
/* Stops polling by clearing the event mask; the existing callback pointer
 * is passed through unchanged. */
int uv_poll_stop(uv_poll_t* handle) {
  return uv__poll_set(handle, 0, handle->poll_cb);
}
524
525
/* Dispatches a completed poll req to the slow or fast completion handler,
 * matching the mode the handle was initialized with. */
void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
  if (handle->flags & UV_HANDLE_POLL_SLOW)
    uv__slow_poll_process_poll_req(loop, handle, req);
  else
    uv__fast_poll_process_poll_req(loop, handle, req);
}
533
534
/* Begins closing a poll handle. When no poll reqs are outstanding the
 * handle moves to the endgame immediately. Otherwise, for fast-mode
 * handles, one more exclusive AFD poll is issued to force the outstanding
 * requests to complete; slow-mode select() calls can't be canceled and are
 * simply waited out (bounded by the worker's 3-minute select timeout). */
int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  AFD_POLL_INFO afd_poll_info;
  DWORD error;
  int result;

  handle->events = 0;
  uv__handle_closing(handle);

  if (handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
    return 0;
  }

  if (handle->flags & UV_HANDLE_POLL_SLOW)
    return 0;

  /* Cancel outstanding poll requests by executing another, unique poll
   * request that forces the outstanding ones to return. */
  afd_poll_info.Exclusive = TRUE;
  afd_poll_info.NumberOfHandles = 1;
  afd_poll_info.Timeout.QuadPart = INT64_MAX;
  afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info.Handles[0].Status = 0;
  afd_poll_info.Handles[0].Events = AFD_POLL_ALL;

  /* The result of this poll is discarded: it is written into the dummy
   * info buffer, and the dummy OVERLAPPED suppresses completion-port
   * notification. */
  result = uv_msafd_poll(handle->socket,
                         &afd_poll_info,
                         uv__get_afd_poll_info_dummy(),
                         uv__get_overlapped_dummy());

  if (result == SOCKET_ERROR) {
    error = WSAGetLastError();
    if (error != WSA_IO_PENDING)
      return uv_translate_sys_error(error);
  }

  return 0;
}
574
575
/* Final close step: runs once both poll reqs have drained; marks the
 * handle fully closed via uv__handle_close. */
void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));

  assert(handle->submitted_events_1 == 0);
  assert(handle->submitted_events_2 == 0);

  uv__handle_close(handle);
}
585