1 /* w32-estream.c - es_poll support on W32.
2 * Copyright (C) 2000 Werner Koch (dd9jn)
3 * Copyright (C) 2001, 2002, 2003, 2004, 2007, 2010, 2016 g10 Code GmbH
4 *
5 * This file is part of libgpg-error.
6 *
7 * libgpg-error is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public License
9 * as published by the Free Software Foundation; either version 2.1 of
10 * the License, or (at your option) any later version.
11 *
12 * libgpg-error is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this program; if not, see <https://www.gnu.org/licenses/>.
19 */
20
21 /*
22 * This file is based on GPGME's w32-io.c started in 2001.
23 */
24
25 #ifdef HAVE_CONFIG_H
26 #include <config.h>
27 #endif
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <fcntl.h>
33 #ifdef HAVE_SYS_TIME_H
34 # include <sys/time.h>
35 #endif
36 #ifdef HAVE_SYS_TYPES_H
37 # include <sys/types.h>
38 #endif
39 #include <io.h>
40 #include <windows.h>
41
42 /* Enable tracing. The value is the module name to be printed. */
43 /*#define ENABLE_TRACING "estream" */
44
45 #include "gpgrt-int.h"
46
47 /*
48 * In order to support es_poll on Windows, we create a proxy shim that
49 * we use as the estream I/O functions. This shim creates reader and
50 * writer threads that use the original I/O functions.
51 */
52
53
54 /* Calculate array dimension. */
55 #ifndef DIM
56 #define DIM(array) (sizeof (array) / sizeof (*array))
57 #endif
58
59 #define READBUF_SIZE 8192
60 #define WRITEBUF_SIZE 8192
61
62
typedef struct estream_cookie_w32_pollable *estream_cookie_w32_pollable_t;

/* Context shared between a reader thread and the stream side.  The
 * object is allocated with the standard calloc and finally released
 * by the reader thread itself (see reader and create_reader).  */
struct reader_context_s
{
  estream_cookie_w32_pollable_t pcookie; /* Back link to the pollable cookie. */
  HANDLE thread_hd;                      /* Handle of the reader thread.  */

  CRITICAL_SECTION mutex;  /* Protects the flags and buffer positions.  */

  int stop_me;       /* Ask the thread to terminate.  */
  int eof;           /* EOF seen on the wrapped stream.  */
  int eof_shortcut;  /* EOF already reported to the consumer.  */
  int error;         /* A read error occurred; details in ERROR_CODE.  */
  int error_code;

  /* This is manually reset.  */
  HANDLE have_data_ev;
  /* This is automatically reset.  */
  HANDLE have_space_ev;
  /* This is manually reset but actually only triggered once.  */
  HANDLE close_ev;

  /* Ring buffer.  One slot is always kept free so that
     READPOS == WRITEPOS unambiguously means "empty".  */
  size_t readpos, writepos;
  char buffer[READBUF_SIZE];
};
88
/* Context shared between a writer thread and the stream side.  The
 * object is allocated with the standard calloc and finally released
 * by the writer thread itself (see writer and create_writer).  */
struct writer_context_s
{
  estream_cookie_w32_pollable_t pcookie; /* Back link to the pollable cookie. */
  HANDLE thread_hd;                      /* Handle of the writer thread.  */

  CRITICAL_SECTION mutex;  /* Protects the flags and NBYTES.  */

  int stop_me;     /* Ask the thread to terminate.  */
  int error;       /* A write error occurred; details in ERROR_CODE.  */
  int error_code;

  /* This is manually reset.  */
  HANDLE have_data;
  HANDLE is_empty;   /* Manual-reset; signaled once BUFFER is flushed.  */
  HANDLE close_ev;   /* Signaled once at destruction time.  */
  size_t nbytes;     /* Number of pending bytes in BUFFER.  */
  char buffer[WRITEBUF_SIZE];
};
107
/* Cookie for pollable objects.  It wraps the original stream cookie
 * and its I/O functions; READER and WRITER hold the lazily created
 * shim thread contexts and stay NULL until first use.  */
struct estream_cookie_w32_pollable
{
  unsigned int modeflags;  /* O_NONBLOCK is the flag inspected here.  */

  /* The wrapped (original) I/O function table and its cookie.  */
  struct cookie_io_functions_s next_functions;
  void *next_cookie;

  struct reader_context_s *reader;
  struct writer_context_s *writer;
};
119
120
/* The reader thread.  It pulls data from the wrapped stream's read
 * function into CTX's ring buffer so that data availability can be
 * signaled through HAVE_DATA_EV.  The thread terminates on read
 * error, EOF, or when CTX->stop_me is set; it then waits for
 * CLOSE_EV and finally releases CTX itself.  */
static DWORD CALLBACK
reader (void *arg)
{
  struct reader_context_s *ctx = arg;
  int nbytes;
  ssize_t nread;

  trace (("%p: reader starting", ctx));

  for (;;)
    {
      EnterCriticalSection (&ctx->mutex);
      /* Leave a 1 byte gap so that we can see whether it is empty or
         full.  */
      while ((ctx->writepos + 1) % READBUF_SIZE == ctx->readpos)
        {
          /* Wait for space.  */
          if (!ResetEvent (ctx->have_space_ev))
            trace (("%p: ResetEvent failed: ec=%d", ctx, (int)GetLastError()));
          LeaveCriticalSection (&ctx->mutex);
          trace (("%p: waiting for space", ctx));
          WaitForSingleObject (ctx->have_space_ev, INFINITE);
          trace (("%p: got space", ctx));
          EnterCriticalSection (&ctx->mutex);
        }
      gpgrt_assert (((ctx->writepos + 1) % READBUF_SIZE != ctx->readpos));
      if (ctx->stop_me)
        {
          LeaveCriticalSection (&ctx->mutex);
          break;
        }
      /* Compute the free space up to the end of the buffer so the
         read below never wraps around within a single call.  */
      nbytes = (ctx->readpos + READBUF_SIZE
                - ctx->writepos - 1) % READBUF_SIZE;
      gpgrt_assert (nbytes);
      if (nbytes > READBUF_SIZE - ctx->writepos)
        nbytes = READBUF_SIZE - ctx->writepos;
      LeaveCriticalSection (&ctx->mutex);

      trace (("%p: reading up to %d bytes", ctx, nbytes));

      /* Possibly blocking read from the wrapped stream; done without
         holding the mutex.  */
      nread = ctx->pcookie->next_functions.public.func_read
        (ctx->pcookie->next_cookie, ctx->buffer + ctx->writepos, nbytes);
      trace (("%p: got %d bytes", ctx, nread));
      if (nread < 0)
        {
          ctx->error_code = (int) errno;
          /* NOTE (W32CE): Do not ignore ERROR_BUSY!  Check at
             least stop_me if that happens.  */
          /* NOTE(review): ERROR_BROKEN_PIPE is a W32 error code but
             ERROR_CODE was taken from errno — this only works if the
             wrapped read function stores W32 codes in errno; confirm.  */
          if (ctx->error_code == ERROR_BROKEN_PIPE)
            {
              ctx->eof = 1;
              trace (("%p: got EOF (broken pipe)", ctx));
            }
          else
            {
              ctx->error = 1;
              trace (("%p: read error: ec=%d", ctx, ctx->error_code));
            }
          break;
        }

      EnterCriticalSection (&ctx->mutex);
      if (ctx->stop_me)
        {
          LeaveCriticalSection (&ctx->mutex);
          break;
        }
      if (!nread)
        {
          ctx->eof = 1;
          trace (("%p: got eof", ctx));
          LeaveCriticalSection (&ctx->mutex);
          break;
        }

      /* Advance the write position and signal data availability.  */
      ctx->writepos = (ctx->writepos + nread) % READBUF_SIZE;
      if (!SetEvent (ctx->have_data_ev))
        trace (("%p: SetEvent (%p) failed: ec=%d",
                ctx, ctx->have_data_ev, (int)GetLastError ()));
      LeaveCriticalSection (&ctx->mutex);
    }
  /* Indicate that we have an error or EOF.  */
  if (!SetEvent (ctx->have_data_ev))
    trace (("%p: SetEvent (%p) failed: ec=%d",
            ctx, ctx->have_data_ev, (int)GetLastError ()));

  trace (("%p: waiting for close", ctx));
  WaitForSingleObject (ctx->close_ev, INFINITE);

  /* The thread owns CTX from here on and destroys it.  */
  CloseHandle (ctx->close_ev);
  CloseHandle (ctx->have_data_ev);
  CloseHandle (ctx->have_space_ev);
  CloseHandle (ctx->thread_hd);
  DeleteCriticalSection (&ctx->mutex);
  free (ctx); /* Standard free!  See comment in create_reader.  */

  return 0;
}
219
220
221 static struct reader_context_s *
create_reader(estream_cookie_w32_pollable_t pcookie)222 create_reader (estream_cookie_w32_pollable_t pcookie)
223 {
224 struct reader_context_s *ctx;
225 SECURITY_ATTRIBUTES sec_attr;
226 DWORD tid;
227
228 memset (&sec_attr, 0, sizeof sec_attr);
229 sec_attr.nLength = sizeof sec_attr;
230 sec_attr.bInheritHandle = FALSE;
231
232 /* The CTX must be allocated in standard system memory so that we
233 * won't use any custom allocation handler which may use our lock
234 * primitives for its implementation. The problem here is that the
235 * syscall clamp mechanism (e.g. nPth) would be called recursively:
236 * 1. For example by the caller of _gpgrt_w32_poll and 2. by
237 * gpgrt_lock_lock on behalf of the the custom allocation and free
238 * functions. */
239 ctx = calloc (1, sizeof *ctx);
240 if (!ctx)
241 {
242 return NULL;
243 }
244
245 ctx->pcookie = pcookie;
246
247 ctx->have_data_ev = CreateEvent (&sec_attr, TRUE, FALSE, NULL);
248 if (ctx->have_data_ev)
249 ctx->have_space_ev = CreateEvent (&sec_attr, FALSE, TRUE, NULL);
250 if (ctx->have_space_ev)
251 ctx->close_ev = CreateEvent (&sec_attr, TRUE, FALSE, NULL);
252 if (!ctx->have_data_ev || !ctx->have_space_ev || !ctx->close_ev)
253 {
254 trace (("%p: CreateEvent failed: ec=%d", ctx, (int)GetLastError ()));
255 if (ctx->have_data_ev)
256 CloseHandle (ctx->have_data_ev);
257 if (ctx->have_space_ev)
258 CloseHandle (ctx->have_space_ev);
259 if (ctx->close_ev)
260 CloseHandle (ctx->close_ev);
261 _gpgrt_free (ctx);
262 return NULL;
263 }
264
265 InitializeCriticalSection (&ctx->mutex);
266
267 #ifdef HAVE_W32CE_SYSTEM
268 ctx->thread_hd = CreateThread (&sec_attr, 64 * 1024, reader, ctx,
269 STACK_SIZE_PARAM_IS_A_RESERVATION, &tid);
270 #else
271 ctx->thread_hd = CreateThread (&sec_attr, 0, reader, ctx, 0, &tid);
272 #endif
273
274 if (!ctx->thread_hd)
275 {
276 trace (("%p: CreateThread failed: ec=%d", ctx, (int)GetLastError ()));
277 DeleteCriticalSection (&ctx->mutex);
278 if (ctx->have_data_ev)
279 CloseHandle (ctx->have_data_ev);
280 if (ctx->have_space_ev)
281 CloseHandle (ctx->have_space_ev);
282 if (ctx->close_ev)
283 CloseHandle (ctx->close_ev);
284 _gpgrt_free (ctx);
285 return NULL;
286 }
287 else
288 {
289 #if 0
290 /* We set the priority of the thread higher because we know that
291 it only runs for a short time. This greatly helps to
292 increase the performance of the I/O. */
293 SetThreadPriority (ctx->thread_hd, get_desired_thread_priority ());
294 #endif
295 }
296
297 return ctx;
298 }
299
300
/* Initiate destruction of the reader thread for CTX: request it to
   stop, wake it up if it is blocked waiting for buffer space, and
   signal CLOSE_EV.  The reader thread performs the actual cleanup
   and frees CTX itself, so CTX must not be dereferenced anymore
   after this call.  */
static void
destroy_reader (struct reader_context_s *ctx)
{
  EnterCriticalSection (&ctx->mutex);
  ctx->stop_me = 1;
  /* Wake the thread in case it is waiting for space.  */
  if (ctx->have_space_ev)
    SetEvent (ctx->have_space_ev);
  LeaveCriticalSection (&ctx->mutex);

#ifdef HAVE_W32CE_SYSTEM
  /* Scenario: We never create a full pipe, but already started
     reading.  Then we need to unblock the reader in the pipe driver
     to make our reader thread notice that we want it to go away.  */

  /* NOTE(review): struct reader_context_s has no FILE_HD member, so
     this W32CE-only block looks like it would not compile if
     HAVE_W32CE_SYSTEM were defined — verify before enabling.  */
  if (ctx->file_hd != INVALID_HANDLE_VALUE)
    {
      if (!DeviceIoControl (ctx->file_hd, GPGCEDEV_IOCTL_UNBLOCK,
                            NULL, 0, NULL, 0, NULL, NULL))
        {
          trace (("%p: unblock control call failed: ec=%d",
                  ctx, (int)GetLastError ()));
        }
    }
#endif

  /* XXX is it feasible to unblock the thread?  */

  /* After setting this event CTX is void.  */
  SetEvent (ctx->close_ev);
}
334
335
/*
 * Read function for pollable objects.  Consumes data from the ring
 * buffer filled by the reader thread, creating that thread on first
 * use.  With O_NONBLOCK set and no buffered data it fails with
 * EAGAIN; otherwise it blocks on HAVE_DATA_EV.  Returns the number
 * of bytes read, 0 on EOF, or -1 with errno set.
 */
static gpgrt_ssize_t
func_w32_pollable_read (void *cookie, void *buffer, size_t count)
{
  estream_cookie_w32_pollable_t pcookie = cookie;
  gpgrt_ssize_t nread;
  struct reader_context_s *ctx;

  trace (("%p: enter buffer=%p count=%u", cookie, buffer, count));

  /* FIXME: implement pending check if COUNT==0 */

  ctx = pcookie->reader;
  if (ctx == NULL)
    {
      /* Lazily create the reader thread on first read.  */
      pcookie->reader = ctx = create_reader (pcookie);
      if (!ctx)
        {
          _gpg_err_set_errno (EBADF);
          nread = -1;
          goto leave;
        }
      trace (("%p: new reader %p", cookie, pcookie->reader));
    }

  /* EOF was already delivered once; keep returning it.  */
  if (ctx->eof_shortcut)
    {
      nread = 0;
      goto leave;
    }

  EnterCriticalSection (&ctx->mutex);
  trace (("%p: readpos: %d, writepos %d", cookie, ctx->readpos, ctx->writepos));
  if (ctx->readpos == ctx->writepos && !ctx->error)
    {
      /* No data available.  */
      int eof = ctx->eof;

      LeaveCriticalSection (&ctx->mutex);

      if (pcookie->modeflags & O_NONBLOCK && ! eof)
        {
          _gpg_err_set_errno (EAGAIN);
          nread = -1;
          goto leave;
        }

      trace (("%p: waiting for data", cookie));
      WaitForSingleObject (ctx->have_data_ev, INFINITE);
      trace (("%p: data available", cookie));
      EnterCriticalSection (&ctx->mutex);
    }

  if (ctx->readpos == ctx->writepos || ctx->error)
    {
      /* Still no data: the reader thread hit EOF or an error.
         NOTE(review): CTX flags are inspected after leaving the
         critical section here, and the two early returns below
         bypass the trace at LEAVE — presumably intentional, but
         worth confirming.  */
      LeaveCriticalSection (&ctx->mutex);
      ctx->eof_shortcut = 1;
      if (ctx->eof)
        return 0;
      if (!ctx->error)
        {
          trace (("%p: EOF but ctx->eof flag not set", cookie));
          nread = 0;
          goto leave;
        }
      _gpg_err_set_errno (ctx->error_code);
      return -1;
    }

  /* Copy out the contiguous chunk up to the end of the ring buffer;
     a wrapped region is delivered by a subsequent call.  */
  nread = ctx->readpos < ctx->writepos
    ? ctx->writepos - ctx->readpos
    : READBUF_SIZE - ctx->readpos;
  if (nread > count)
    nread = count;
  memcpy (buffer, ctx->buffer + ctx->readpos, nread);
  ctx->readpos = (ctx->readpos + nread) % READBUF_SIZE;
  if (ctx->readpos == ctx->writepos && !ctx->eof)
    {
      /* Buffer drained: clear the manual-reset data event.  */
      if (!ResetEvent (ctx->have_data_ev))
        {
          trace (("%p: ResetEvent failed: ec=%d",
                  cookie, (int)GetLastError ()));
          LeaveCriticalSection (&ctx->mutex);
          /* FIXME: Should translate the error code.  */
          _gpg_err_set_errno (EIO);
          nread = -1;
          goto leave;
        }
    }
  /* Tell the reader thread that there is space again.  */
  if (!SetEvent (ctx->have_space_ev))
    {
      trace (("%p: SetEvent (%p) failed: ec=%d",
              cookie, ctx->have_space_ev, (int)GetLastError ()));
      LeaveCriticalSection (&ctx->mutex);
      /* FIXME: Should translate the error code.  */
      _gpg_err_set_errno (EIO);
      nread = -1;
      goto leave;
    }
  LeaveCriticalSection (&ctx->mutex);

 leave:
  trace_errno (nread==-1,("%p: leave nread=%d", cookie, (int)nread));
  return nread;
}
443
444
/* The writer thread.  It uses a simple single-buffer strategy so that
   we are informed about write errors as soon as possible, i.e. with
   the next call to the write function: the stream side copies data
   into CTX->buffer and signals HAVE_DATA; this thread pushes it to
   the wrapped stream's write function and signals IS_EMPTY once the
   buffer has been flushed.  The thread terminates on write error or
   when CTX->stop_me is set and the buffer is empty; it then waits
   for CLOSE_EV and finally releases CTX itself.  */
static DWORD CALLBACK
writer (void *arg)
{
  struct writer_context_s *ctx = arg;
  ssize_t nwritten;

  trace (("%p: writer starting", ctx));

  for (;;)
    {
      EnterCriticalSection (&ctx->mutex);
      if (ctx->stop_me && !ctx->nbytes)
        {
          LeaveCriticalSection (&ctx->mutex);
          break;
        }
      if (!ctx->nbytes)
        {
          /* Buffer flushed; inform the stream side and wait for more
             data.  */
          if (!SetEvent (ctx->is_empty))
            trace (("%p: SetEvent failed: ec=%d", ctx, (int)GetLastError ()));
          if (!ResetEvent (ctx->have_data))
            trace (("%p: ResetEvent failed: ec=%d", ctx, (int)GetLastError ()));
          LeaveCriticalSection (&ctx->mutex);
          trace (("%p: idle", ctx));
          WaitForSingleObject (ctx->have_data, INFINITE);
          trace (("%p: got data to write", ctx));
          EnterCriticalSection (&ctx->mutex);
        }
      if (ctx->stop_me && !ctx->nbytes)
        {
          LeaveCriticalSection (&ctx->mutex);
          break;
        }
      LeaveCriticalSection (&ctx->mutex);

      trace (("%p: writing up to %d bytes", ctx, ctx->nbytes));

      /* Possibly blocking write to the wrapped stream; done without
         holding the mutex.  */
      nwritten = ctx->pcookie->next_functions.public.func_write
        (ctx->pcookie->next_cookie, ctx->buffer, ctx->nbytes);
      trace (("%p: wrote %d bytes", ctx, nwritten));
      if (nwritten < 1)
        {
          /* XXX */
          /* NOTE(review): ERROR_BUSY is a W32 error code compared
             against errno — this only works if the wrapped write
             function stores W32 codes in errno; confirm.  */
          if (errno == ERROR_BUSY)
            {
              /* Probably stop_me is set now.  */
              trace (("%p: pipe busy (unblocked?)", ctx));
              continue;
            }

          ctx->error_code = errno;
          ctx->error = 1;
          trace (("%p: write error: ec=%d", ctx, ctx->error_code));
          break;
        }

      EnterCriticalSection (&ctx->mutex);
      ctx->nbytes -= nwritten;
      LeaveCriticalSection (&ctx->mutex);
    }
  /* Indicate that we have an error.  */
  if (!SetEvent (ctx->is_empty))
    trace (("%p: SetEvent failed: ec=%d", ctx, (int)GetLastError ()));

  trace (("%p: waiting for close", ctx));
  WaitForSingleObject (ctx->close_ev, INFINITE);

  if (ctx->nbytes)
    trace (("%p: still %d bytes in buffer at close time", ctx, ctx->nbytes));

  /* The thread owns CTX from here on and destroys it.  */
  CloseHandle (ctx->close_ev);
  CloseHandle (ctx->have_data);
  CloseHandle (ctx->is_empty);
  CloseHandle (ctx->thread_hd);
  DeleteCriticalSection (&ctx->mutex);
  trace (("%p: writer is destroyed", ctx));
  free (ctx); /* Standard free!  See comment in create_writer.  */

  return 0;
}
528
529
530 static struct writer_context_s *
create_writer(estream_cookie_w32_pollable_t pcookie)531 create_writer (estream_cookie_w32_pollable_t pcookie)
532 {
533 struct writer_context_s *ctx;
534 SECURITY_ATTRIBUTES sec_attr;
535 DWORD tid;
536
537 memset (&sec_attr, 0, sizeof sec_attr);
538 sec_attr.nLength = sizeof sec_attr;
539 sec_attr.bInheritHandle = FALSE;
540
541 /* See comment at create_reader. */
542 ctx = calloc (1, sizeof *ctx);
543 if (!ctx)
544 {
545 return NULL;
546 }
547
548 ctx->pcookie = pcookie;
549
550 ctx->have_data = CreateEvent (&sec_attr, TRUE, FALSE, NULL);
551 if (ctx->have_data)
552 ctx->is_empty = CreateEvent (&sec_attr, TRUE, TRUE, NULL);
553 if (ctx->is_empty)
554 ctx->close_ev = CreateEvent (&sec_attr, TRUE, FALSE, NULL);
555 if (!ctx->have_data || !ctx->is_empty || !ctx->close_ev)
556 {
557 trace (("%p: CreateEvent failed: ec=%d", ctx, (int)GetLastError ()));
558 if (ctx->have_data)
559 CloseHandle (ctx->have_data);
560 if (ctx->is_empty)
561 CloseHandle (ctx->is_empty);
562 if (ctx->close_ev)
563 CloseHandle (ctx->close_ev);
564 _gpgrt_free (ctx);
565 return NULL;
566 }
567
568 InitializeCriticalSection (&ctx->mutex);
569
570 #ifdef HAVE_W32CE_SYSTEM
571 ctx->thread_hd = CreateThread (&sec_attr, 64 * 1024, writer, ctx,
572 STACK_SIZE_PARAM_IS_A_RESERVATION, &tid);
573 #else
574 ctx->thread_hd = CreateThread (&sec_attr, 0, writer, ctx, 0, &tid );
575 #endif
576
577 if (!ctx->thread_hd)
578 {
579 trace (("%p: CreateThread failed: ec=%d", ctx, (int)GetLastError ()));
580 DeleteCriticalSection (&ctx->mutex);
581 if (ctx->have_data)
582 CloseHandle (ctx->have_data);
583 if (ctx->is_empty)
584 CloseHandle (ctx->is_empty);
585 if (ctx->close_ev)
586 CloseHandle (ctx->close_ev);
587 _gpgrt_free (ctx);
588 return NULL;
589 }
590 else
591 {
592 #if 0
593 /* We set the priority of the thread higher because we know
594 that it only runs for a short time. This greatly helps to
595 increase the performance of the I/O. */
596 SetThreadPriority (ctx->thread_hd, get_desired_thread_priority ());
597 #endif
598 }
599
600 return ctx;
601 }
602
603
/* Initiate destruction of the writer thread for CTX: request it to
   stop, wake it up, wait for the buffer to be flushed, and signal
   CLOSE_EV.  The writer thread performs the actual cleanup and frees
   CTX itself, so CTX must not be dereferenced anymore after this
   call.  */
static void
destroy_writer (struct writer_context_s *ctx)
{
  trace (("%p: enter pollable_destroy_writer", ctx));
  EnterCriticalSection (&ctx->mutex);
  trace (("%p: setting stopme", ctx));
  ctx->stop_me = 1;
  /* Wake the thread in case it is idle waiting for data.  */
  if (ctx->have_data)
    SetEvent (ctx->have_data);
  LeaveCriticalSection (&ctx->mutex);

  trace (("%p: waiting for empty", ctx));

  /* Give the writer a chance to flush the buffer.  */
  WaitForSingleObject (ctx->is_empty, INFINITE);

#ifdef HAVE_W32CE_SYSTEM
  /* Scenario: We never create a full pipe, but already started
     writing more than the pipe buffer.  Then we need to unblock the
     writer in the pipe driver to make our writer thread notice that
     we want it to go away.  */

  /* NOTE(review): struct writer_context_s has no FILE_HD member, so
     this W32CE-only block looks like it would not compile if
     HAVE_W32CE_SYSTEM were defined — verify before enabling.  */
  if (!DeviceIoControl (ctx->file_hd, GPGCEDEV_IOCTL_UNBLOCK,
                        NULL, 0, NULL, 0, NULL, NULL))
    {
      trace (("%p: unblock control call failed: ec=%d",
              ctx, (int)GetLastError ()));
    }
#endif

  /* After setting this event CTX is void.  */
  trace (("%p: set close_ev", ctx));
  SetEvent (ctx->close_ev);
  trace (("%p: leave pollable_destroy_writer", ctx));
}
639
640
/*
 * Write function for pollable objects.  Copies up to WRITEBUF_SIZE
 * bytes into the writer thread's buffer, creating that thread on
 * first use.  If a previous chunk is still pending it fails with
 * EAGAIN in non-blocking mode, otherwise it waits for the buffer to
 * drain.  Returns the number of bytes accepted (a short write if
 * COUNT exceeded WRITEBUF_SIZE) or -1 with errno set.
 */
static gpgrt_ssize_t
func_w32_pollable_write (void *cookie, const void *buffer, size_t count)
{
  estream_cookie_w32_pollable_t pcookie = cookie;
  struct writer_context_s *ctx = pcookie->writer;
  int nwritten;

  trace (("%p: enter buffer: %p count: %d", cookie, buffer, count));
  if (count == 0)
    {
      nwritten = 0;
      goto leave;
    }

  if (ctx == NULL)
    {
      /* Lazily create the writer thread on first write.  */
      pcookie->writer = ctx = create_writer (pcookie);
      if (!ctx)
        {
          nwritten = -1;
          goto leave;
        }
      trace (("%p: new writer %p", cookie, pcookie->writer));
    }

  EnterCriticalSection (&ctx->mutex);
  trace (("%p: buffer: %p, count: %d, nbytes: %d",
          cookie, buffer, count, ctx->nbytes));
  if (!ctx->error && ctx->nbytes)
    {
      /* Bytes are pending for send.  */

      /* Reset the is_empty event.  Better safe than sorry.  */
      if (!ResetEvent (ctx->is_empty))
        {
          trace (("%p: ResetEvent failed: ec=%d",
                  cookie, (int)GetLastError ()));
          LeaveCriticalSection (&ctx->mutex);
          /* FIXME: Should translate the error code.  */
          _gpg_err_set_errno (EIO);
          nwritten = -1;
          goto leave;
        }
      LeaveCriticalSection (&ctx->mutex);

      if (pcookie->modeflags & O_NONBLOCK)
        {
          trace (("%p: would block", cookie));
          _gpg_err_set_errno (EAGAIN);
          nwritten = -1;
          goto leave;
        }

      trace (("%p: waiting for empty buffer", cookie));
      WaitForSingleObject (ctx->is_empty, INFINITE);
      trace (("%p: buffer is empty", cookie));
      EnterCriticalSection (&ctx->mutex);
    }

  if (ctx->error)
    {
      /* Report the error recorded by the writer thread.
         NOTE(review): ERROR_NO_DATA is a W32 error code while
         ERROR_CODE was taken from errno — confirm the wrapped write
         function stores W32 codes in errno.  */
      LeaveCriticalSection (&ctx->mutex);
      if (ctx->error_code == ERROR_NO_DATA)
        _gpg_err_set_errno (EPIPE);
      else
        _gpg_err_set_errno (EIO);
      nwritten = -1;
      goto leave;
    }

  /* If no error occurred, the number of bytes in the buffer must be
     zero.  */
  gpgrt_assert (!ctx->nbytes);

  /* Hand over the data; anything beyond WRITEBUF_SIZE is reported as
     a short write.  */
  if (count > WRITEBUF_SIZE)
    count = WRITEBUF_SIZE;
  memcpy (ctx->buffer, buffer, count);
  ctx->nbytes = count;

  /* We have to reset the is_empty event early, because it is also
     used by the select() implementation to probe the channel.  */
  if (!ResetEvent (ctx->is_empty))
    {
      trace (("%p: ResetEvent failed: ec=%d", cookie, (int)GetLastError ()));
      LeaveCriticalSection (&ctx->mutex);
      /* FIXME: Should translate the error code.  */
      _gpg_err_set_errno (EIO);
      nwritten = -1;
      goto leave;
    }
  if (!SetEvent (ctx->have_data))
    {
      trace (("%p: SetEvent failed: ec=%d", cookie, (int)GetLastError ()));
      LeaveCriticalSection (&ctx->mutex);
      /* FIXME: Should translate the error code.  */
      _gpg_err_set_errno (EIO);
      nwritten = -1;
      goto leave;
    }
  LeaveCriticalSection (&ctx->mutex);

  nwritten = count;

 leave:
  trace_errno (nwritten==-1,("%p: leave nwritten=%d", cookie, nwritten));
  return nwritten;
}
751
752
/* This is the core of _gpgrt_poll.  The caller needs to make sure that
 * the syscall clamp has been engaged.  For each non-ignored pollable
 * stream in FDS, the reader/writer shim thread is created on demand
 * and its event object (HAVE_DATA_EV resp. IS_EMPTY) is collected for
 * a single WaitForMultipleObjects call.  TIMEOUT is in milliseconds,
 * -1 meaning an infinite wait.  Returns the number of streams with an
 * event set, 0 on timeout, or -1 on error with errno set.  */
int
_gpgrt_w32_poll (gpgrt_poll_t *fds, size_t nfds, int timeout)
{
  HANDLE waitbuf[MAXIMUM_WAIT_OBJECTS];
  int waitidx[MAXIMUM_WAIT_OBJECTS];  /* Maps wait slots back to FDS.  */
#ifdef ENABLE_TRACING
  char waitinfo[MAXIMUM_WAIT_OBJECTS];
#endif
  unsigned int code;
  int nwait;
  int i;
  int any;
  int count;

#if 0
 restart:
#endif

  /* Pass 1: collect one wait object per stream.  */
  any = 0;
  nwait = 0;
  count = 0;
  for (i = 0; i < nfds; i++)
    {
      struct estream_cookie_w32_pollable *pcookie;

      if (fds[i].ignore)
        continue;

      if (fds[i].stream->intern->kind != BACKEND_W32_POLLABLE)
        {
          /* This stream does not support polling.  */
          fds[i].got_err = 1;
          continue;
        }

      pcookie = fds[i].stream->intern->cookie;

      if (fds[i].want_read || fds[i].want_write)
        {
          /* XXX: What if one wants read and write, is that supported?  */
          if (fds[i].want_read)
            {
              /* Readability is signaled by the reader thread's
                 HAVE_DATA_EV; create the thread if needed.  */
              struct reader_context_s *ctx = pcookie->reader;
              if (ctx == NULL)
                {
                  pcookie->reader = ctx = create_reader (pcookie);
                  if (!ctx)
                    {
                      /* FIXME: Is the error code appropriate?  */
                      _gpg_err_set_errno (EBADF);
                      return -1;
                    }
                  trace (("%p: new reader %p", pcookie, pcookie->reader));
                }
              trace (("%p: using reader %p", pcookie, pcookie->reader));

              if (nwait >= DIM (waitbuf))
                {
                  trace (("oops: too many objects for WFMO"));
                  /* FIXME: Should translate the error code.  */
                  _gpg_err_set_errno (EIO);
                  return -1;
                }
              waitidx[nwait] = i;
#ifdef ENABLE_TRACING
              waitinfo[nwait] = 'r';
#endif /*ENABLE_TRACING*/
              waitbuf[nwait++] = ctx->have_data_ev;
              any = 1;
            }
          else if (fds[i].want_write)
            {
              /* Writability is signaled by the writer thread's
                 IS_EMPTY event; create the thread if needed.  */
              struct writer_context_s *ctx = pcookie->writer;
              if (ctx == NULL)
                {
                  pcookie->writer = ctx = create_writer (pcookie);
                  if (!ctx)
                    {
                      trace (("oops: create writer failed"));
                      /* FIXME: Is the error code appropriate?  */
                      _gpg_err_set_errno (EBADF);
                      return -1;
                    }
                  trace (("%p: new writer %p", pcookie, pcookie->writer));
                }
              trace (("%p: using writer %p", pcookie, pcookie->writer));

              if (nwait >= DIM (waitbuf))
                {
                  trace (("oops: Too many objects for WFMO"));
                  /* FIXME: Should translate the error code.  */
                  _gpg_err_set_errno (EIO);
                  return -1;
                }
              waitidx[nwait] = i;
#ifdef ENABLE_TRACING
              waitinfo[nwait] = 'w';
#endif /*ENABLE_TRACING*/
              waitbuf[nwait++] = ctx->is_empty;
              any = 1;
            }
        }
    }
#ifdef ENABLE_TRACING
  trace_start (("poll on [ "));
  for (i = 0; i < nwait; i++)
    trace_append (("%d/%c ", waitidx[i], waitinfo[i]));
  trace_finish (("]"));
#endif /*ENABLE_TRACING*/

  /* Pass 2: wait for the collected objects.  */
  if (!any)
    {
      /* WFMO needs at least one object, thus we use use sleep here.
       * INFINITE wait does not make any sense in this case, so we
       * error out.  */
      if (timeout == -1)
        {
          _gpg_err_set_errno (EINVAL);
          return -1;
        }
      if (timeout)
        Sleep (timeout);
      code = WAIT_TIMEOUT;
    }
  else
    code = WaitForMultipleObjects (nwait, waitbuf, 0,
                                   timeout == -1 ? INFINITE : timeout);

  /* Pass 3: translate the wait result back into the FDS flags.  */
  if (code < WAIT_OBJECT_0 + nwait)
    {
      /* This WFMO is a really silly function:  It does return either
         the index of the signaled object or if 2 objects have been
         signalled at the same time, the index of the object with the
         lowest object is returned - so and how do we find out how
         many objects have been signaled???.  The only solution I can
         imagine is to test each object starting with the returned
         index individually - how dull.  */
      any = 0;
      for (i = code - WAIT_OBJECT_0; i < nwait; i++)
        {
          /* A zero-timeout wait probes the object without blocking.  */
          if (WaitForSingleObject (waitbuf[i], 0) == WAIT_OBJECT_0)
            {
              gpgrt_assert (waitidx[i] >=0 && waitidx[i] < nfds);
              /* XXX: What if one wants read and write, is that
                 supported?  */
              if (fds[waitidx[i]].want_read)
                fds[waitidx[i]].got_read = 1;
              else if (fds[waitidx[i]].want_write)
                fds[waitidx[i]].got_write = 1;
              any = 1;
              count++;
            }
        }
      if (!any)
        {
          trace (("no signaled objects found after WFMO"));
          count = -1;
        }
    }
  else if (code == WAIT_TIMEOUT)
    trace (("WFMO timed out"));
  else if (code == WAIT_FAILED)
    {
      trace (("WFMO failed: ec=%d", (int)GetLastError ()));
#if 0
      if (GetLastError () == ERROR_INVALID_HANDLE)
        {
          int k;
          int j = handle_to_fd (waitbuf[i]);

          trace (("WFMO invalid handle %d removed", j));
          for (k = 0 ; k < nfds; k++)
            {
              if (fds[k].fd == j)
                {
                  fds[k].want_read = fds[k].want_write = 0;
                  goto restart;
                }
            }
          trace ((" oops, or not???"));
        }
#endif
      count = -1;
    }
  else
    {
      trace (("WFMO returned %u", code));
      count = -1;
    }

  if (count > 0)
    {
      trace_start (("poll OK [ "));
      for (i = 0; i < nfds; i++)
        {
          if (fds[i].ignore)
            continue;
          if (fds[i].got_read || fds[i].got_write)
            trace_append (("%c%d ", fds[i].want_read ? 'r' : 'w', i));
        }
      trace_finish (("]"));
    }

  if (count < 0)
    {
      /* FIXME: Should determine a proper error code.  */
      _gpg_err_set_errno (EIO);
    }

  return count;
}
966
967
968
969 /*
970 * Implementation of pollable I/O on Windows.
971 */
972
973 /*
974 * Constructor for pollable objects.
975 */
976 int
_gpgrt_w32_pollable_create(void * _GPGRT__RESTRICT * _GPGRT__RESTRICT cookie,unsigned int modeflags,struct cookie_io_functions_s next_functions,void * next_cookie)977 _gpgrt_w32_pollable_create (void *_GPGRT__RESTRICT *_GPGRT__RESTRICT cookie,
978 unsigned int modeflags,
979 struct cookie_io_functions_s next_functions,
980 void *next_cookie)
981 {
982 estream_cookie_w32_pollable_t pcookie;
983 int err;
984
985 pcookie = _gpgrt_malloc (sizeof *pcookie);
986 if (!pcookie)
987 err = -1;
988 else
989 {
990 pcookie->modeflags = modeflags;
991 pcookie->next_functions = next_functions;
992 pcookie->next_cookie = next_cookie;
993 pcookie->reader = NULL;
994 pcookie->writer = NULL;
995 *cookie = pcookie;
996 err = 0;
997 }
998
999 trace_errno (err,("cookie=%p", *cookie));
1000 return err;
1001 }
1002
1003
1004 /*
1005 * Seek function for pollable objects.
1006 */
1007 static int
func_w32_pollable_seek(void * cookie,gpgrt_off_t * offset,int whence)1008 func_w32_pollable_seek (void *cookie, gpgrt_off_t *offset, int whence)
1009 {
1010 estream_cookie_w32_pollable_t pcookie = cookie;
1011 (void) pcookie;
1012 (void) offset;
1013 (void) whence;
1014 /* XXX */
1015 _gpg_err_set_errno (EOPNOTSUPP);
1016 return -1;
1017 }
1018
1019
1020 /*
1021 * The IOCTL function for pollable objects.
1022 */
1023 static int
func_w32_pollable_ioctl(void * cookie,int cmd,void * ptr,size_t * len)1024 func_w32_pollable_ioctl (void *cookie, int cmd, void *ptr, size_t *len)
1025 {
1026 estream_cookie_w32_pollable_t pcookie = cookie;
1027 cookie_ioctl_function_t func_ioctl = pcookie->next_functions.func_ioctl;
1028
1029 if (cmd == COOKIE_IOCTL_NONBLOCK)
1030 {
1031 if (ptr)
1032 pcookie->modeflags |= O_NONBLOCK;
1033 else
1034 pcookie->modeflags &= ~O_NONBLOCK;
1035 return 0;
1036 }
1037
1038 if (func_ioctl)
1039 return func_ioctl (pcookie->next_cookie, cmd, ptr, len);
1040
1041 _gpg_err_set_errno (EOPNOTSUPP);
1042 return -1;
1043 }
1044
1045
1046 /*
1047 * The destroy function for pollable objects.
1048 */
1049 static int
func_w32_pollable_destroy(void * cookie)1050 func_w32_pollable_destroy (void *cookie)
1051 {
1052 estream_cookie_w32_pollable_t pcookie = cookie;
1053
1054 if (cookie)
1055 {
1056 if (pcookie->reader)
1057 destroy_reader (pcookie->reader);
1058 if (pcookie->writer)
1059 destroy_writer (pcookie->writer);
1060 pcookie->next_functions.public.func_close (pcookie->next_cookie);
1061 _gpgrt_free (pcookie);
1062 }
1063 return 0;
1064 }
1065
/*
 * Access object for the pollable functions.  This table is installed
 * as the stream's I/O functions by the pollable stream creation code;
 * the embedded struct holds read/write/seek/close, followed by the
 * ioctl handler.
 */
struct cookie_io_functions_s _gpgrt_functions_w32_pollable =
  {
    {
      func_w32_pollable_read,
      func_w32_pollable_write,
      func_w32_pollable_seek,
      func_w32_pollable_destroy,
    },
    func_w32_pollable_ioctl,
  };
1079