1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
24 */
25
26 /* Stuff that reads stuff from the server. */
27
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31
32 #include <assert.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <stdio.h>
37 #include <errno.h>
38
39 #if USE_POLL
40 #include <poll.h>
41 #endif
42 #ifndef _WIN32
43 #include <sys/select.h>
44 #include <sys/socket.h>
45 #endif
46
47 #ifdef _WIN32
48 #include "xcb_windefs.h"
49 #endif /* _WIN32 */
50
51 #include "xcb.h"
52 #include "xcbext.h"
53 #include "xcbint.h"
54
55 #define XCB_ERROR 0
56 #define XCB_REPLY 1
57 #define XCB_XGE_EVENT 35
58
/* Node in a singly-linked FIFO of events; the queue owns the event
 * buffer until it is handed to the caller. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};
63
/* Per-listener queue for XGE events registered via
 * xcb_register_for_special_xge(). */
struct xcb_special_event {

    struct xcb_special_event *next;

    /* Match XGE events for the specific extension and event ID (the
     * first 32 bit word after evtype)
     */
    uint8_t extension;
    uint32_t eid;
    uint32_t *stamp;    /* if non-NULL, incremented once per queued event */

    struct event_list *events;         /* FIFO of events awaiting pickup */
    struct event_list **events_tail;   /* tail pointer for O(1) append */

    pthread_cond_t special_event_cond; /* signalled when an event is queued */
};
80
/* Singly-linked list of reply/error buffers accumulated for a request. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};
85
/* Record for a request (or contiguous range of requests) whose
 * responses need special handling: a server workaround, discard-reply,
 * checked-error, or fd-carrying semantics (see XCB_REQUEST_* flags). */
typedef struct pending_reply {
    uint64_t first_request;      /* first sequence number covered */
    uint64_t last_request;       /* last sequence number covered */
    enum workarounds workaround;
    int flags;                   /* XCB_REQUEST_* bits */
    struct pending_reply *next;
} pending_reply;
93
/* A thread blocked waiting for a reply; the list is kept sorted by
 * sequence number (see insert_reader). */
typedef struct reader_list {
    uint64_t request;        /* sequence number being waited for */
    pthread_cond_t *data;    /* condition variable used to wake the waiter */
    struct reader_list *next;
} reader_list;
99
/* A thread blocked in xcb_wait_for_special_event for a given listener. */
typedef struct special_list {
    xcb_special_event_t *se;
    struct special_list *next;
} special_list;
104
/* Pop every reader whose awaited request has already completed and wake
 * it: whatever responses exist for it are fully available by now. */
static void remove_finished_readers(reader_list **prev_reader, uint64_t completed)
{
    reader_list *head;

    while((head = *prev_reader) != NULL &&
          XCB_SEQUENCE_COMPARE(head->request, <=, completed))
    {
        /* If you don't have what you're looking for now, you never
         * will. Wake up and leave me alone. */
        pthread_cond_signal(head->data);
        *prev_reader = head->next;
    }
}
115
116 #if HAVE_SENDMSG
/* Hand out nfd received file descriptors from the connection's buffer.
 * Returns 1 and advances the cursor on success; returns 0 without
 * consuming anything if fewer than nfd descriptors are buffered. */
static int read_fds(xcb_connection_t *c, int *fds, int nfd)
{
    int available = c->in.in_fd.nfd - c->in.in_fd.ifd;

    if (available < nfd)
        return 0;
    memcpy(fds, &c->in.in_fd.fd[c->in.in_fd.ifd], nfd * sizeof (int));
    c->in.in_fd.ifd += nfd;
    return 1;
}
128 #endif
129
/* Layout of the fixed 32-byte header of an XGE (Generic) event,
 * exposing the fields used for special-event matching. */
typedef struct xcb_ge_special_event_t {
    uint8_t response_type; /**< always XCB_XGE_EVENT for these */
    uint8_t extension;     /**< major opcode of the generating extension */
    uint16_t sequence;     /**< */
    uint32_t length;       /**< extra payload length in 4-byte units */
    uint16_t evtype;       /**< */
    uint8_t pad0[2];       /**< */
    uint32_t eid;          /**< event ID matched against listeners */
    uint8_t pad1[16];      /**< */
} xcb_ge_special_event_t;
140
/* Try to route an event to a registered special-event listener.
 * Returns 1 if the event was queued on a listener (ownership
 * transferred, waiter signalled), 0 if the caller should deliver it
 * through the normal event queue. */
static int event_special(xcb_connection_t *c,
                         struct event_list *event)
{
    struct xcb_special_event *special_event;
    struct xcb_ge_special_event_t *ges = (void *) event->event;

    /* Special events are always XGE events */
    if ((ges->response_type & 0x7f) != XCB_XGE_EVENT)
        return 0;

    for (special_event = c->in.special_events;
         special_event;
         special_event = special_event->next)
    {
        /* Match on extension major opcode and event ID. */
        if (ges->extension == special_event->extension &&
            ges->eid == special_event->eid)
        {
            /* Append to the listener's FIFO and wake any waiter. */
            *special_event->events_tail = event;
            special_event->events_tail = &event->next;
            if (special_event->stamp)
                ++(*special_event->stamp);
            pthread_cond_signal(&special_event->special_event_cond);
            return 1;
        }
    }

    return 0;
}
169
/* Consume one complete packet (reply, error, or event) from the input
 * queue and dispatch it to the appropriate consumer.  Returns 1 if a
 * packet was consumed, 0 if more data is needed or a failure shut the
 * connection down.  Caller holds the iolock. */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    uint64_t length = 32;
    uint64_t eventlength = 0; /* length after first 32 bytes for GenericEvents */
    int nfd = 0; /* Number of file descriptors attached to the reply */
    uint64_t bufsize;
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet.  KeymapNotify is
     * the one event that carries no sequence number, so it must not
     * advance the sequence tracking. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        /* Widen the packet's 16-bit sequence against the last one read,
         * accounting for wraparound. */
        uint64_t lastread = c->in.request_read;
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* Responses for a new request have started: archive the
             * previous request's reply chain into the replies map. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Drop pending_reply records whose range is fully completed
         * (external-socket-owner records are kept until resolved). */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error is the final response to its request. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;

        remove_finished_readers(&c->in.readers, c->in.request_completed);
    }

    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        /* Find the pending_reply record (if any) covering this packet's
         * sequence number. */
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Buggy server reports the wrong reply length; recompute it
             * from the counts inside the reply. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;

        /* XXX a bit of a hack -- we "know" that all FD replys place
         * the number of fds in the pad0 byte */
        if (pend && pend->flags & XCB_REQUEST_REPLY_FDS)
            nfd = genrep.pad0;
    }

    /* XGE events may have sizes > 32 */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;

    /* Non-reply packets get an extra uint32_t for full_sequence. */
    bufsize = length + eventlength + nfd * sizeof(int) +
        (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t));
    if (bufsize < INT32_MAX)
        buf = malloc((size_t) bufsize);
    else
        buf = NULL;
    if(!buf)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

#if HAVE_SENDMSG
    if (nfd)
    {
        /* Received fds are appended after the reply payload; see
         * xcb_get_reply_fds(). */
        if (!read_fds(c, (int *) &((char *) buf)[length], nfd))
        {
            free(buf);
            return 0;
        }
    }
#endif

    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
        (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the reader waiting on exactly this request, if any. */
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;

    /* Special-event listeners get first pick; otherwise queue normally. */
    if (!event_special(c, event)) {
        *c->in.events_tail = event;
        c->in.events_tail = &event->next;
        pthread_cond_signal(&c->in.event_cond);
    }
    return 1; /* I have something for you... */
}
342
get_event(xcb_connection_t * c)343 static xcb_generic_event_t *get_event(xcb_connection_t *c)
344 {
345 struct event_list *cur = c->in.events;
346 xcb_generic_event_t *ret;
347 if(!c->in.events)
348 return 0;
349 ret = cur->event;
350 c->in.events = cur->next;
351 if(!cur->next)
352 c->in.events_tail = &c->in.events;
353 free(cur);
354 return ret;
355 }
356
free_reply_list(struct reply_list * head)357 static void free_reply_list(struct reply_list *head)
358 {
359 while(head)
360 {
361 struct reply_list *cur = head;
362 head = cur->next;
363 free(cur->reply);
364 free(cur);
365 }
366 }
367
/* Read exactly len bytes from fd into buf, blocking in poll/select
 * whenever the (non-blocking) socket would block.  Returns len on
 * success, or the failing recv/poll/select result (<= 0) on error. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;
    while(done < len)
    {
        int ret = recv(fd, ((char *) buf) + done, len - done, 0);
        if(ret > 0)
            done += ret;
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_Win32 */
        {
            /* Socket would block: wait until it becomes readable. */
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}
407
/* Non-blocking check for a response to `request`.  Returns 1 if the
 * question is settled (a reply/error is delivered through *reply/*error,
 * or provably none remains), 0 if the caller must wait for more data.
 * Caller holds the iolock and takes ownership of the returned buffer. */
static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* Keep the rest of the chain in the map for subsequent polls. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Route errors to *error (or free them if the caller doesn't
         * want them); real replies go to *reply. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
462
/* Link a reader into the wait list, keeping it sorted by sequence
 * number so the thread waiting for the oldest request is woken first. */
static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond)
{
    /* Advance past every entry waiting on this request or an older one. */
    for(; *prev_reader; prev_reader = &(*prev_reader)->next)
        if(XCB_SEQUENCE_COMPARE((*prev_reader)->request, >, request))
            break;
    reader->request = request;
    reader->data = cond;
    reader->next = *prev_reader;
    *prev_reader = reader;
}
472
/* Unlink `reader` from the sorted wait list.  The search can stop once
 * entries with a larger sequence number begin, since the list is kept
 * ordered by insert_reader().
 *
 * Bug fix: the previous version never advanced prev_reader when the
 * current node was not `reader`, so it looped forever (and failed to
 * remove the entry) whenever `reader` was not at the head of the list. */
static void remove_reader(reader_list **prev_reader, reader_list *reader)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request))
    {
        if(*prev_reader == reader)
        {
            *prev_reader = (*prev_reader)->next;
            break;
        }
        prev_reader = &(*prev_reader)->next;
    }
}
482
/* Push a special-event waiter onto the front of the waiter list. */
static void insert_special(special_list **list_head, special_list *node, xcb_special_event_t *se)
{
    node->se = se;
    node->next = *list_head;
    *list_head = node;
}
489
/* Unlink the given waiter from the special-waiter list, if present. */
static void remove_special(special_list **list_head, special_list *node)
{
    special_list **cursor;

    for(cursor = list_head; *cursor; cursor = &(*cursor)->next)
    {
        if(*cursor == node)
        {
            *cursor = node->next;
            return;
        }
    }
}
502
/* Block until a response to `request` is available (or provably will
 * never arrive) and return it; errors are delivered through *e.  Caller
 * holds the iolock; the lock is released and re-acquired inside
 * _xcb_conn_wait while blocking. */
static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
    void *ret = 0;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;

        /* Register as a reader so read_packet can wake us when our
         * response arrives. */
        insert_reader(&c->in.readers, &reader, request, &cond);

        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    }

    /* Pass the read baton to the next waiting thread. */
    _xcb_in_wake_up_next_reader(c);
    return ret;
}
526
/* Reconstruct the full 64-bit sequence number from a 32-bit one, on the
 * premise that the request is no newer than the latest request issued
 * on this connection. */
static uint64_t widen(xcb_connection_t *c, unsigned int request)
{
    uint64_t const epoch = c->out.request & UINT64_C(0xffffffff00000000);
    uint64_t full = epoch | request;

    /* A value beyond the newest issued request means the low 32 bits
     * wrapped since `request` was generated; step back one epoch. */
    if(full > c->out.request)
        full -= UINT64_C(1) << 32;
    return full;
}
534
535 /* Public interface */
536
/* Public entry: block for the reply to a 32-bit-sequence request.
 * Returns the reply buffer (caller frees) or NULL; *e receives any
 * error when non-NULL. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    void *reply;

    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);
    reply = wait_for_reply(c, widen(c, request), e);
    pthread_mutex_unlock(&c->iolock);
    return reply;
}
550
/* Public entry: like xcb_wait_for_reply but with a full 64-bit
 * sequence number, so no widening is required. */
void *xcb_wait_for_reply64(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
    void *reply;

    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);
    reply = wait_for_reply(c, request, e);
    pthread_mutex_unlock(&c->iolock);
    return reply;
}
564
/* Return the file descriptors attached to a reply; read_packet stores
 * them immediately after the reply's payload (reply_size bytes in). */
int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t reply_size)
{
    char *base = reply;
    return (int *) (base + reply_size);
}
569
/* Create a discard-reply record for request `seq` and link it at
 * *prev_next.  On allocation failure the connection is shut down and
 * nothing is inserted. */
static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
{
    pending_reply *node = malloc(sizeof(*node));

    if(!node)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return;
    }

    node->first_request = seq;
    node->last_request = seq;
    node->workaround = 0;
    node->flags = XCB_REQUEST_DISCARD_REPLY;
    node->next = *prev_next;
    *prev_next = node;

    /* Inserting at the end moves the tail pointer. */
    if(!node->next)
        c->in.pending_replies_tail = &node->next;
}
590
/* Arrange for all responses to `request` to be thrown away: free what
 * has already arrived, and flag (or create) a pending_reply so future
 * responses are dropped in read_packet.  Caller holds the iolock. */
static void discard_reply(xcb_connection_t *c, uint64_t request)
{
    void *reply;
    pending_reply **prev_pend;

    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)
        free(reply);

    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
        return;

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        /* The list is ordered; once past `request` there is no match. */
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
            break;

        if((*prev_pend)->first_request == request)
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
}
622
/* Public entry: discard all responses to a 32-bit-sequence request.
 * A zero sequence means the request failed when issued, so there is
 * nothing to discard; broken connections are a no-op too. */
void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
{
    if(c->has_error)
        return;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!sequence)
        return;

    pthread_mutex_lock(&c->iolock);
    discard_reply(c, widen(c, sequence));
    pthread_mutex_unlock(&c->iolock);
}
636
/* Public entry: 64-bit-sequence variant of xcb_discard_reply. */
void xcb_discard_reply64(xcb_connection_t *c, uint64_t sequence)
{
    if(c->has_error)
        return;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!sequence)
        return;

    pthread_mutex_lock(&c->iolock);
    discard_reply(c, sequence);
    pthread_mutex_unlock(&c->iolock);
}
650
xcb_poll_for_reply(xcb_connection_t * c,unsigned int request,void ** reply,xcb_generic_error_t ** error)651 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
652 {
653 int ret;
654 if(c->has_error)
655 {
656 *reply = 0;
657 if(error)
658 *error = 0;
659 return 1; /* would not block */
660 }
661 assert(reply != 0);
662 pthread_mutex_lock(&c->iolock);
663 ret = poll_for_reply(c, widen(c, request), reply, error);
664 if(!ret && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
665 ret = poll_for_reply(c, widen(c, request), reply, error);
666 pthread_mutex_unlock(&c->iolock);
667 return ret;
668 }
669
xcb_poll_for_reply64(xcb_connection_t * c,uint64_t request,void ** reply,xcb_generic_error_t ** error)670 int xcb_poll_for_reply64(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
671 {
672 int ret;
673 if(c->has_error)
674 {
675 *reply = 0;
676 if(error)
677 *error = 0;
678 return 1; /* would not block */
679 }
680 assert(reply != 0);
681 pthread_mutex_lock(&c->iolock);
682 ret = poll_for_reply(c, request, reply, error);
683 if(!ret && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
684 ret = poll_for_reply(c, request, reply, error);
685 pthread_mutex_unlock(&c->iolock);
686 return ret;
687 }
688
xcb_wait_for_event(xcb_connection_t * c)689 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
690 {
691 xcb_generic_event_t *ret;
692 if(c->has_error)
693 return 0;
694 pthread_mutex_lock(&c->iolock);
695 /* get_event returns 0 on empty list. */
696 while(!(ret = get_event(c)))
697 if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
698 break;
699
700 _xcb_in_wake_up_next_reader(c);
701 pthread_mutex_unlock(&c->iolock);
702 return ret;
703 }
704
poll_for_next_event(xcb_connection_t * c,int queued)705 static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued)
706 {
707 xcb_generic_event_t *ret = 0;
708 if(!c->has_error)
709 {
710 pthread_mutex_lock(&c->iolock);
711 /* FIXME: follow X meets Z architecture changes. */
712 ret = get_event(c);
713 if(!ret && !queued && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
714 ret = get_event(c);
715 pthread_mutex_unlock(&c->iolock);
716 }
717 return ret;
718 }
719
xcb_poll_for_event(xcb_connection_t * c)720 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
721 {
722 return poll_for_next_event(c, 0);
723 }
724
xcb_poll_for_queued_event(xcb_connection_t * c)725 xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
726 {
727 return poll_for_next_event(c, 1);
728 }
729
/* Block until the void request identified by `cookie` has completed and
 * return its error, if any (caller frees the returned error; NULL means
 * success). */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    uint64_t request;
    xcb_generic_error_t *ret = 0;
    void *reply;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
       && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
    {
        /* No later response is expected that would prove this request
         * completed, so force a round trip with a sync request. */
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    }
    reply = wait_for_reply(c, request, &ret);
    /* A void request can only produce an error, never a reply. */
    assert(!reply);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
750
get_special_event(xcb_connection_t * c,xcb_special_event_t * se)751 static xcb_generic_event_t *get_special_event(xcb_connection_t *c,
752 xcb_special_event_t *se)
753 {
754 xcb_generic_event_t *event = NULL;
755 struct event_list *events;
756
757 if ((events = se->events) != NULL) {
758 event = events->event;
759 if (!(se->events = events->next))
760 se->events_tail = &se->events;
761 free (events);
762 }
763 return event;
764 }
765
xcb_poll_for_special_event(xcb_connection_t * c,xcb_special_event_t * se)766 xcb_generic_event_t *xcb_poll_for_special_event(xcb_connection_t *c,
767 xcb_special_event_t *se)
768 {
769 xcb_generic_event_t *event;
770
771 if(c->has_error)
772 return 0;
773 pthread_mutex_lock(&c->iolock);
774 event = get_special_event(c, se);
775 if(!event && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
776 event = get_special_event(c, se);
777 pthread_mutex_unlock(&c->iolock);
778 return event;
779 }
780
/* Block until an event arrives on the given special-event listener and
 * return it (caller frees), or NULL if the connection fails while
 * waiting. */
xcb_generic_event_t *xcb_wait_for_special_event(xcb_connection_t *c,
                                                xcb_special_event_t *se)
{
    special_list special;
    xcb_generic_event_t *event;

    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);

    /* Register as a special-event waiter so the reading thread knows to
     * signal this listener's condition variable. */
    insert_special(&c->in.special_waiters, &special, se);

    /* get_special_event returns 0 on empty list. */
    while(!(event = get_special_event(c, se)))
        if(!_xcb_conn_wait(c, &se->special_event_cond, 0, 0))
            break;

    remove_special(&c->in.special_waiters, &special);

    /* Pass the read baton to the next waiting thread. */
    _xcb_in_wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return event;
}
804
/* Register a listener for XGE events of extension `ext` with event ID
 * `eid`.  If `stamp` is non-NULL it is incremented each time an event is
 * queued.  Returns NULL on error, on an unknown extension, or if a
 * listener for the same (extension, eid) pair already exists.  Release
 * with xcb_unregister_for_special_event(). */
xcb_special_event_t *
xcb_register_for_special_xge(xcb_connection_t *c,
                             xcb_extension_t *ext,
                             uint32_t eid,
                             uint32_t *stamp)
{
    xcb_special_event_t *se;
    const xcb_query_extension_reply_t *ext_reply;

    if(c->has_error)
        return NULL;
    ext_reply = xcb_get_extension_data(c, ext);
    if (!ext_reply)
        return NULL;
    pthread_mutex_lock(&c->iolock);
    /* Refuse duplicate registrations for the same (opcode, eid) pair. */
    for (se = c->in.special_events; se; se = se->next) {
        if (se->extension == ext_reply->major_opcode &&
            se->eid == eid) {
            pthread_mutex_unlock(&c->iolock);
            return NULL;
        }
    }
    se = calloc(1, sizeof(xcb_special_event_t));
    if (!se) {
        pthread_mutex_unlock(&c->iolock);
        return NULL;
    }

    se->extension = ext_reply->major_opcode;
    se->eid = eid;

    se->events = NULL;
    se->events_tail = &se->events;
    se->stamp = stamp;

    pthread_cond_init(&se->special_event_cond, 0);

    /* Link onto the connection's listener list. */
    se->next = c->in.special_events;
    c->in.special_events = se;
    pthread_mutex_unlock(&c->iolock);
    return se;
}
847
/* Unregister a special-event listener, freeing it together with any
 * events still queued on it.  NULL and broken connections are no-ops. */
void
xcb_unregister_for_special_event(xcb_connection_t *c,
                                 xcb_special_event_t *se)
{
    xcb_special_event_t *s, **prev;
    struct event_list *events, *next;

    if (!se)
        return;

    if (c->has_error)
        return;

    pthread_mutex_lock(&c->iolock);

    for (prev = &c->in.special_events; (s = *prev) != NULL; prev = &(s->next)) {
        if (s == se) {
            *prev = se->next;
            /* Drop any events that were never collected. */
            for (events = se->events; events; events = next) {
                next = events->next;
                free (events->event);
                free (events);
            }
            pthread_cond_destroy(&se->special_event_cond);
            free (se);
            break;
        }
    }
    pthread_mutex_unlock(&c->iolock);
}
878
879 /* Private interface */
880
/* Initialize the input side of a connection.  Returns 1 on success, 0
 * if the condition variable or the reply map could not be created.
 * NOTE(review): fields not set here (events, pending_replies, readers,
 * ...) are presumably zeroed by the caller — confirm in xcb_conn.c. */
int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    /* Empty lists: each tail pointer refers to its own head slot. */
    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}
902
/* Tear down the input side of a connection, releasing every queued
 * reply, event, and pending_reply record. */
void _xcb_in_destroy(_xcb_in *in)
{
    struct event_list *ev;
    pending_reply *pend;

    pthread_cond_destroy(&in->event_cond);
    free_reply_list(in->current_reply);
    _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
    while((ev = in->events) != NULL)
    {
        in->events = ev->next;
        free(ev->event);
        free(ev);
    }
    while((pend = in->pending_replies) != NULL)
    {
        in->pending_replies = pend->next;
        free(pend);
    }
}
922
/* Hand the read baton to the next interested thread: reply waiters
 * first, then special-event waiters, then plain event waiters. */
void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
{
    int pthreadret;
    if(c->in.readers)
        pthreadret = pthread_cond_signal(c->in.readers->data);
    else if(c->in.special_waiters)
        pthreadret = pthread_cond_signal(&c->in.special_waiters->se->special_event_cond);
    else
        pthreadret = pthread_cond_signal(&c->in.event_cond);
    assert(pthreadret == 0);
}
934
/* Record that `request` needs special response handling (a workaround
 * and/or XCB_REQUEST_* flags).  Appends to the pending_replies tail.
 * Returns 1 on success, 0 after shutting the connection down on OOM. */
int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
{
    pending_reply *record = malloc(sizeof(pending_reply));

    /* A record with neither a workaround nor flags would be useless. */
    assert(workaround != WORKAROUND_NONE || flags != 0);
    if(!record)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return 0;
    }
    record->first_request = request;
    record->last_request = request;
    record->workaround = workaround;
    record->flags = flags;
    record->next = 0;
    *c->in.pending_replies_tail = record;
    c->in.pending_replies_tail = &record->next;
    return 1;
}
952
/* Resolve the tail WORKAROUND_EXTERNAL_SOCKET_OWNER pending_reply (if
 * one exists): bound it to the requests actually issued, or remove it
 * entirely when no requests were sent while the socket was taken. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        /* The tail pointer addresses the last node's `next` field;
         * recover the node itself. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            if (XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->out.request)) {
                /* Requests were sent: close the open-ended range. */
                pend->last_request = c->out.request;
                pend->workaround = WORKAROUND_NONE;
            } else {
                /* The socket was taken, but no requests were actually sent
                 * so just discard the pending_reply that was created.
                 */
                struct pending_reply **prev_next = &c->in.pending_replies;
                while (*prev_next != pend)
                    prev_next = &(*prev_next)->next;
                *prev_next = NULL;
                c->in.pending_replies_tail = prev_next;
                free(pend);
            }
        }
    }
}
978
/* Perform one read from the socket into the input queue, collect any
 * passed file descriptors (SCM_RIGHTS), and dispatch every complete
 * packet now buffered.  Returns 1 while the connection remains usable
 * (including EAGAIN/EINTR), 0 after shutting the connection down. */
int _xcb_in_read(xcb_connection_t *c)
{
    int n;

#if HAVE_SENDMSG
    struct iovec iov = {
        .iov_base = c->in.queue + c->in.queue_len,
        .iov_len = sizeof(c->in.queue) - c->in.queue_len,
    };
    union {
        struct cmsghdr cmsghdr;
        char buf[CMSG_SPACE(XCB_MAX_PASS_FD * sizeof(int))];
    } cmsgbuf;
    struct msghdr msg = {
        .msg_name = NULL,
        .msg_namelen = 0,
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = cmsgbuf.buf,
        .msg_controllen = CMSG_SPACE(sizeof(int) * (XCB_MAX_PASS_FD - c->in.in_fd.nfd)),
    };
    n = recvmsg(c->fd, &msg, 0);

    /* Check for truncation errors. Only MSG_CTRUNC is
     * probably possible here, which would indicate that
     * the sender tried to transmit more than XCB_MAX_PASS_FD
     * file descriptors.
     */
    if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);
        return 0;
    }
#else
    n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, 0);
#endif
    if(n > 0) {
#if HAVE_SENDMSG
        struct cmsghdr *hdr;

        /* Append any SCM_RIGHTS descriptors to the connection's fd
         * buffer for later pickup by read_fds(). */
        if (msg.msg_controllen >= sizeof (struct cmsghdr)) {
            for (hdr = CMSG_FIRSTHDR(&msg); hdr; hdr = CMSG_NXTHDR(&msg, hdr)) {
                if (hdr->cmsg_level == SOL_SOCKET && hdr->cmsg_type == SCM_RIGHTS) {
                    int nfd = (hdr->cmsg_len - CMSG_LEN(0)) / sizeof (int);
                    memcpy(&c->in.in_fd.fd[c->in.in_fd.nfd], CMSG_DATA(hdr), nfd * sizeof (int));
                    c->in.in_fd.nfd += nfd;
                }
            }
        }
#endif
        c->in.total_read += n;
        c->in.queue_len += n;
    }
    /* Dispatch every complete packet currently buffered. */
    while(read_packet(c))
        /* empty */;
#if HAVE_SENDMSG
    if (c->in.in_fd.nfd) {
        /* Compact the fd buffer, dropping descriptors already consumed. */
        c->in.in_fd.nfd -= c->in.in_fd.ifd;
        memmove(&c->in.in_fd.fd[0],
                &c->in.in_fd.fd[c->in.in_fd.ifd],
                c->in.in_fd.nfd * sizeof (int));
        c->in.in_fd.ifd = 0;

        /* If we have any left-over file descriptors after emptying
         * the input buffer, then the server sent some that we weren't
         * expecting. Close them and mark the connection as broken;
         */
        if (c->in.queue_len == 0 && c->in.in_fd.nfd != 0) {
            int i;
            for (i = 0; i < c->in.in_fd.nfd; i++)
                close(c->in.in_fd.fd[i]);
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);
            return 0;
        }
    }
#endif
#ifndef _WIN32
    if((n > 0) || (n < 0 && (errno == EAGAIN || errno == EINTR)))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    _xcb_conn_shutdown(c, XCB_CONN_ERROR);
    return 0;
}
1063
/* Copy exactly len bytes into buf, draining the already-buffered input
 * queue first and blocking in read_block for the remainder.  Returns
 * len on success; on failure shuts the connection down and returns the
 * failing result (<= 0). */
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
{
    /* Take up to len bytes from the queue. */
    int done = c->in.queue_len;
    if(len < done)
        done = len;

    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    if(len > done)
    {
        int ret = read_block(c->fd, (char *) buf + done, len - done);
        if(ret <= 0)
        {
            _xcb_conn_shutdown(c, XCB_CONN_ERROR);
            return ret;
        }
    }

    return len;
}
1086