1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
24 */
25
26 /* Stuff that sends stuff to the server. */
27
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31
32 #include <assert.h>
33 #include <stdlib.h>
34 #include <unistd.h>
35 #include <string.h>
36
37 #include "xcb.h"
38 #include "xcbext.h"
39 #include "xcbint.h"
40 #include "bigreq.h"
41
/* Assign a sequence number to one request and queue its bytes for output.
 * Buffers into c->out.queue when the data fits; otherwise hands the whole
 * thing (buffered bytes first) to _xcb_out_send. Must be called with the
 * iolock held. */
static inline void send_request(xcb_connection_t *c, int isvoid, enum workarounds workaround, int flags, struct iovec *vector, int count)
{
    /* A connection in the error state accepts no further requests. */
    if(c->has_error)
        return;

    /* Claim the next sequence number for this request. */
    ++c->out.request;
    if(!isvoid)
        /* A reply (or error) will arrive for this request; let the input
         * side know which sequence number to expect. */
        c->in.request_expected = c->out.request;
    if(workaround != WORKAROUND_NONE || flags != 0)
        _xcb_in_expect_reply(c, c->out.request, workaround, flags);

    /* Copy leading iovecs into the output buffer for as long as they fit,
     * consuming each one fully before moving on. */
    while(count && c->out.queue_len + vector[0].iov_len <= sizeof(c->out.queue))
    {
        memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
        c->out.queue_len += vector[0].iov_len;
        vector[0].iov_base = (char *) vector[0].iov_base + vector[0].iov_len;
        vector[0].iov_len = 0;
        ++vector, --count;
    }
    if(!count)
        return;

    /* Leftover data does not fit in the queue. Step one iovec backwards —
     * callers reserve a writable slot in front of the vector they pass
     * (see send_sync) — and point it at the already-buffered bytes so the
     * buffered data and the remainder go out in order in one send. */
    --vector, ++count;
    vector[0].iov_base = c->out.queue;
    vector[0].iov_len = c->out.queue_len;
    c->out.queue_len = 0;
    _xcb_out_send(c, vector, count);
}
70
/* Queue a GetInputFocus request whose reply is discarded. Used purely for
 * its sequence-number side effects (see the 64k/32-bit-wrap logic in
 * xcb_send_request_with_fds64). Must be called with the iolock held. */
static void send_sync(xcb_connection_t *c)
{
    /* A complete 4-byte GetInputFocus request; the union keeps it
     * word-aligned and sized exactly like the wire packet. */
    static const union {
        struct {
            uint8_t major;
            uint8_t pad;
            uint16_t len;
        } fields;
        uint32_t packet;
    } sync_req = { { /* GetInputFocus */ 43, 0, 1 } };
    struct iovec vector[2];
    /* Only vector[1] is filled in; vector + 1 is passed so send_request
     * has a spare writable slot (vector[0]) in front of it, which it may
     * use to prepend already-buffered queue data. */
    vector[1].iov_base = (char *) &sync_req;
    vector[1].iov_len = sizeof(sync_req);
    send_request(c, 0, WORKAROUND_NONE, XCB_REQUEST_DISCARD_REPLY, vector + 1, 1);
}
86
/* Reclaim the write side of the connection from an external socket owner
 * (see xcb_take_socket). Must be called with the iolock held; returns with
 * XCB owning the socket again (or with no external owner to begin with). */
static void get_socket_back(xcb_connection_t *c)
{
    /* If another thread is already running the return_socket callback,
     * wait for it to finish rather than invoking the callback twice. */
    while(c->out.return_socket && c->out.socket_moving)
        pthread_cond_wait(&c->out.socket_cond, &c->iolock);
    if(!c->out.return_socket)
        return;

    /* Mark the hand-back as in progress, then drop the lock: the callback
     * runs caller-supplied code which may re-enter XCB. */
    c->out.socket_moving = 1;
    pthread_mutex_unlock(&c->iolock);
    c->out.return_socket(c->out.socket_closure);
    pthread_mutex_lock(&c->iolock);
    c->out.socket_moving = 0;

    /* Wake any threads parked in the wait loop above, then clear the
     * external-owner state. */
    pthread_cond_broadcast(&c->out.socket_cond);
    c->out.return_socket = 0;
    c->out.socket_closure = 0;
    _xcb_in_replies_done(c);
}
105
/* Block until this thread may safely append to out.queue: XCB must own
 * the socket and no other thread may be mid-write. Called with the iolock
 * held; may drop and reacquire it while waiting. */
static void prepare_socket_request(xcb_connection_t *c)
{
    /* We're about to append data to out.queue, so we need to
     * atomically test for an external socket owner *and* some other
     * thread currently writing.
     *
     * If we have an external socket owner, we have to get the socket back
     * before we can use it again.
     *
     * If some other thread is writing to the socket, we assume it's
     * writing from out.queue, and so we can't stick data there.
     *
     * We satisfy this condition by first calling get_socket_back
     * (which may drop the lock, but will return when XCB owns the
     * socket again) and then checking for another writing thread and
     * escaping the loop if we're ready to go.
     */
    for (;;) {
        if(c->has_error)
            return;
        get_socket_back(c);
        if (!c->out.writing)
            break;
        /* Another thread is writing; wait and re-run both checks, since
         * the socket may have been taken again while we slept. */
        pthread_cond_wait(&c->out.cond, &c->iolock);
    }
}
132
133 /* Public interface */
134
/* Start (but do not wait for) discovery of the server's maximum request
 * length. When the BIG-REQUESTS extension is present, a BigRequestsEnable
 * request is issued and its cookie stashed for later resolution by
 * xcb_get_maximum_request_length; otherwise the core setup limit is
 * recorded immediately. Safe to call repeatedly. */
void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
{
    const xcb_query_extension_reply_t *bigreq;

    if(c->has_error)
        return;

    pthread_mutex_lock(&c->out.reqlenlock);
    if(c->out.maximum_request_length_tag != LAZY_NONE)
    {
        /* Already prefetched or resolved; nothing to do. */
        pthread_mutex_unlock(&c->out.reqlenlock);
        return;
    }

    bigreq = xcb_get_extension_data(c, &xcb_big_requests_id);
    if(bigreq && bigreq->present)
    {
        /* Extension available: fire off the enable request and resolve
         * the actual limit lazily when first needed. */
        c->out.maximum_request_length_tag = LAZY_COOKIE;
        c->out.maximum_request_length.cookie = xcb_big_requests_enable(c);
    }
    else
    {
        /* No BIG-REQUESTS: the core protocol limit is final. */
        c->out.maximum_request_length_tag = LAZY_FORCED;
        c->out.maximum_request_length.value = c->setup->maximum_request_length;
    }
    pthread_mutex_unlock(&c->out.reqlenlock);
}
157
/* Return the maximum request length in 4-byte units, resolving any
 * outstanding BigRequestsEnable cookie first. The first call may block on
 * the extension reply; later calls return the cached value. Returns 0 on
 * an errored connection. */
uint32_t xcb_get_maximum_request_length(xcb_connection_t *c)
{
    uint32_t limit;

    if(c->has_error)
        return 0;
    xcb_prefetch_maximum_request_length(c);
    pthread_mutex_lock(&c->out.reqlenlock);
    if(c->out.maximum_request_length_tag == LAZY_COOKIE)
    {
        /* Resolve the prefetched BigRequestsEnable request now. */
        xcb_big_requests_enable_reply_t *reply =
            xcb_big_requests_enable_reply(c, c->out.maximum_request_length.cookie, 0);
        c->out.maximum_request_length_tag = LAZY_FORCED;
        if(reply)
        {
            c->out.maximum_request_length.value = reply->maximum_request_length;
            free(reply);
        }
        else
            /* The reply failed; fall back to the core protocol limit. */
            c->out.maximum_request_length.value = c->setup->maximum_request_length;
    }
    limit = c->out.maximum_request_length.value;
    pthread_mutex_unlock(&c->out.reqlenlock);
    return limit;
}
179
/* Close every file descriptor in fds[0..num_fds-1]. */
static void close_fds(int *fds, unsigned int num_fds)
{
    unsigned int i = 0;
    while (i < num_fds)
        close(fds[i++]);
}
185
/* Queue file descriptors for transmission alongside upcoming requests.
 * Takes ownership of the fds: each is either stashed in c->out.out_fd
 * (to be passed via sendmsg ancillary data) or closed. Must be called
 * with the iolock held. */
static void send_fds(xcb_connection_t *c, int *fds, unsigned int num_fds)
{
#if HAVE_SENDMSG
    /* Calling _xcb_out_flush_to() can drop the iolock and wait on a condition
     * variable if another thread is currently writing (c->out.writing > 0).
     * This call waits for writers to be done and thus _xcb_out_flush_to() will
     * do the work itself (in which case we are a writer and
     * prepare_socket_request() will wait for us to be done if another threads
     * tries to send fds, too). Thanks to this, we can atomically write out FDs.
     */
    prepare_socket_request(c);

    while (num_fds > 0) {
        /* The fd buffer holds at most XCB_MAX_PASS_FD entries; flush it
         * out before adding more. */
        while (c->out.out_fd.nfd == XCB_MAX_PASS_FD && !c->has_error) {
            /* XXX: if c->out.writing > 0, this releases the iolock and
             * potentially allows other threads to interfere with their own fds.
             */
            _xcb_out_flush_to(c, c->out.request);

            if (c->out.out_fd.nfd == XCB_MAX_PASS_FD) {
                /* We need some request to send FDs with */
                _xcb_out_send_sync(c);
            }
        }
        if (c->has_error)
            break;

        /* Take ownership of fds[0] and advance past it. */
        c->out.out_fd.fd[c->out.out_fd.nfd++] = fds[0];
        fds++;
        num_fds--;
    }
#endif
    /* Without sendmsg support none of the fds can be passed; with it, an
     * error may leave some unconsumed. Either way, close the remainder
     * (fds/num_fds now describe only what was not stashed above). */
    close_fds(fds, num_fds);
}
220
/* Assemble one X request and queue it for output, passing num_fds file
 * descriptors with it. Computes the length field (switching to the
 * BIG-REQUESTS encoding when needed), fills in the opcode(s), applies the
 * GLX server-bug workaround, and injects sync requests to keep sequence
 * numbering unambiguous. Takes ownership of the fds (stored or closed, even
 * on failure). Returns the request's 64-bit sequence number, or 0 on error. */
uint64_t xcb_send_request_with_fds64(xcb_connection_t *c, int flags, struct iovec *vector,
                                     const xcb_protocol_request_t *req, unsigned int num_fds, int *fds)
{
    uint64_t request;
    uint32_t prefix[2];
    int veclen;
    enum workarounds workaround = WORKAROUND_NONE;

    /* Validate arguments before any dereference. (Previously these asserts
     * ran only after c->has_error and req->count had already been read,
     * making the null checks useless.) */
    assert(c != 0);
    assert(vector != 0);
    assert(req->count > 0);
    veclen = req->count;

    if(c->has_error) {
        close_fds(fds, num_fds);
        return 0;
    }

    if(!(flags & XCB_REQUEST_RAW))
    {
        static const char pad[3];
        unsigned int i;
        uint16_t shortlen = 0;
        size_t longlen = 0;
        assert(vector[0].iov_len >= 4);
        /* set the major opcode, and the minor opcode for extensions */
        if(req->ext)
        {
            const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
            if(!(extension && extension->present))
            {
                close_fds(fds, num_fds);
                _xcb_conn_shutdown(c, XCB_CONN_CLOSED_EXT_NOTSUPPORTED);
                return 0;
            }
            ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
            ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
        }
        else
            ((uint8_t *) vector[0].iov_base)[0] = req->opcode;

        /* put together the length field, possibly using BIGREQUESTS */
        for(i = 0; i < req->count; ++i)
        {
            longlen += vector[i].iov_len;
            if(!vector[i].iov_base)
            {
                /* A null iov_base stands for up to 3 bytes of zero padding. */
                vector[i].iov_base = (char *) pad;
                assert(vector[i].iov_len <= sizeof(pad));
            }
        }
        assert((longlen & 3) == 0);
        longlen >>= 2;  /* the wire length field is in 4-byte units */

        if(longlen <= c->setup->maximum_request_length)
        {
            /* we don't need BIGREQUESTS. */
            shortlen = longlen;
            longlen = 0;
        }
        else if(longlen > xcb_get_maximum_request_length(c))
        {
            close_fds(fds, num_fds);
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_REQ_LEN_EXCEED);
            return 0; /* server can't take this; maybe need BIGREQUESTS? */
        }

        /* set the length field. */
        ((uint16_t *) vector[0].iov_base)[1] = shortlen;
        if(!shortlen)
        {
            /* BIG-REQUESTS encoding: a zero 16-bit length plus a 32-bit
             * length inserted after the first 4 header bytes. Split those
             * bytes off into prefix[] and prepend it as a new iovec. */
            prefix[0] = ((uint32_t *) vector[0].iov_base)[0];
            prefix[1] = ++longlen;
            vector[0].iov_base = (uint32_t *) vector[0].iov_base + 1;
            vector[0].iov_len -= sizeof(uint32_t);
            --vector, ++veclen;
            vector[0].iov_base = prefix;
            vector[0].iov_len = sizeof(prefix);
        }
    }
    flags &= ~XCB_REQUEST_RAW;

    /* do we need to work around the X server bug described in glx.xml? */
    /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
     * configuration, but that should be handled here anyway. */
    if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
            ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
             req->opcode == 21))
        workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;

    /* get a sequence number and arrange for delivery. */
    pthread_mutex_lock(&c->iolock);

    /* send FDs before establishing a good request number, because this might
     * call send_sync(), too
     */
    send_fds(c, fds, num_fds);

    prepare_socket_request(c);

    /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
     * a reply.
     * Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
     * applications see sequence 0 as that is used to indicate
     * an error in sending the request
     */

    while ((req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2) ||
           (unsigned int) (c->out.request + 1) == 0)
    {
        send_sync(c);
        prepare_socket_request(c);
    }

    send_request(c, req->isvoid, workaround, flags, vector, veclen);
    request = c->has_error ? 0 : c->out.request;
    pthread_mutex_unlock(&c->iolock);
    return request;
}
339
340 /* request number are actually uint64_t internally but keep API compat with unsigned int */
/* Compatibility wrapper around xcb_send_request_with_fds64: same contract,
 * but the 64-bit internal sequence number is truncated to unsigned int for
 * the historical API. */
unsigned int xcb_send_request_with_fds(xcb_connection_t *c, int flags, struct iovec *vector,
                                       const xcb_protocol_request_t *req, unsigned int num_fds, int *fds)
{
    uint64_t sequence = xcb_send_request_with_fds64(c, flags, vector, req, num_fds, fds);
    return (unsigned int) sequence;
}
346
/* Send a request carrying no file descriptors; returns the full 64-bit
 * sequence number (0 on error). */
uint64_t xcb_send_request64(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
{
    uint64_t sequence = xcb_send_request_with_fds64(c, flags, vector, req, 0, NULL);
    return sequence;
}
351
352 /* request number are actually uint64_t internally but keep API compat with unsigned int */
/* Historical API: as xcb_send_request64, with the sequence number
 * truncated to unsigned int. */
unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
{
    return (unsigned int) xcb_send_request64(c, flags, vector, req);
}
357
358 void
xcb_send_fd(xcb_connection_t * c,int fd)359 xcb_send_fd(xcb_connection_t *c, int fd)
360 {
361 int fds[1] = { fd };
362
363 if (c->has_error) {
364 close(fd);
365 return;
366 }
367 pthread_mutex_lock(&c->iolock);
368 send_fds(c, &fds[0], 1);
369 pthread_mutex_unlock(&c->iolock);
370 }
371
/* Hand ownership of the connection's write side to an external caller.
 * return_socket(closure) will be invoked when XCB needs the socket back.
 * On success, returns 1 and stores the last sequence number XCB wrote in
 * *sent; returns 0 on error (connection errored or flush failed). */
int xcb_take_socket(xcb_connection_t *c, void (*return_socket)(void *closure), void *closure, int flags, uint64_t *sent)
{
    int ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    /* Reclaim the socket from any previous external owner first. */
    get_socket_back(c);

    /* _xcb_out_flush may drop the iolock allowing other threads to
     * write requests, so keep flushing until we're done
     */
    do
        ret = _xcb_out_flush_to(c, c->out.request);
    while (ret && c->out.request != c->out.request_written);
    if(ret)
    {
        c->out.return_socket = return_socket;
        c->out.socket_closure = closure;
        if(flags) {
            /* c->out.request + 1 will be the first request sent by the external
             * socket owner. If the socket is returned before this request is sent
             * it will be detected in _xcb_in_replies_done and this pending_reply
             * will be discarded.
             */
            _xcb_in_expect_reply(c, c->out.request + 1, WORKAROUND_EXTERNAL_SOCKET_OWNER, flags);
        }
        /* Everything XCB queued must be on the wire before handing over. */
        assert(c->out.request == c->out.request_written);
        *sent = c->out.request;
    }
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
404
/* For external socket owners: write `vector` (containing `requests`
 * complete requests) straight to the connection, advancing the sequence
 * counter accordingly. Returns 1 on success, 0 on failure. */
int xcb_writev(xcb_connection_t *c, struct iovec *vector, int count, uint64_t requests)
{
    int ok;

    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);
    /* Account for the requests the external owner composed itself. */
    c->out.request += requests;
    ok = _xcb_out_send(c, vector, count);
    pthread_mutex_unlock(&c->iolock);
    return ok;
}
416
/* Flush all buffered output through the current sequence number.
 * Returns 1 on success, 0 on failure or if the connection has errored. */
int xcb_flush(xcb_connection_t *c)
{
    int ok;

    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);
    ok = _xcb_out_flush_to(c, c->out.request);
    pthread_mutex_unlock(&c->iolock);
    return ok;
}
427
428 /* Private interface */
429
/* Initialize the output-side state of a connection. Returns 1 on success,
 * 0 if any pthread object could not be created (the caller tears the
 * connection down in that case). */
int _xcb_out_init(_xcb_out *out)
{
    /* Plain fields: no external socket owner, nothing queued or written. */
    out->return_socket = 0;
    out->socket_closure = 0;
    out->socket_moving = 0;
    out->writing = 0;
    out->queue_len = 0;
    out->request = 0;
    out->request_written = 0;
    out->maximum_request_length_tag = LAZY_NONE;

    if(pthread_cond_init(&out->socket_cond, 0))
        return 0;
    if(pthread_cond_init(&out->cond, 0))
        return 0;
    if(pthread_mutex_init(&out->reqlenlock, 0))
        return 0;

    return 1;
}
453
/* Release the synchronization objects created by _xcb_out_init. */
void _xcb_out_destroy(_xcb_out *out)
{
    /* socket_cond is initialized in _xcb_out_init but was previously never
     * destroyed here, leaking condition-variable resources on platforms
     * where pthread_cond_destroy releases anything. */
    pthread_cond_destroy(&out->socket_cond);
    pthread_cond_destroy(&out->cond);
    pthread_mutex_destroy(&out->reqlenlock);
}
459
/* Write `vector` to the socket, blocking as needed. _xcb_conn_wait is
 * handed the addresses of vector and count so it can advance them as
 * partial writes complete; loop until everything is out or an error
 * occurs. Must be called with the iolock held (may drop it while waiting).
 * Returns 1 on success, 0 on failure. */
int _xcb_out_send(xcb_connection_t *c, struct iovec *vector, int count)
{
    int ret = 1;
    while(ret && count)
        ret = _xcb_conn_wait(c, &c->out.cond, &vector, &count);
    /* Everything queued so far is now on the wire (or the connection has
     * failed); record that and wake anyone waiting on output progress. */
    c->out.request_written = c->out.request;
    pthread_cond_broadcast(&c->out.cond);
    _xcb_in_wake_up_next_reader(c);
    return ret;
}
470
/* Inject a sync (GetInputFocus) request, first ensuring XCB owns the
 * socket and no other thread is mid-write. Must be called with the iolock
 * held; the order of the two calls is essential. */
void _xcb_out_send_sync(xcb_connection_t *c)
{
    prepare_socket_request(c);
    send_sync(c);
}
476
/* Ensure every request through sequence number `request` has been written
 * to the socket. Must be called with the iolock held; may drop it while
 * blocking. Returns 1 on success, 0 on write failure. */
int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request)
{
    /* Callers may only flush up to requests that actually exist. */
    assert(XCB_SEQUENCE_COMPARE(request, <=, c->out.request));
    if(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request))
        return 1;  /* already on the wire */
    if(c->out.queue_len)
    {
        /* Unwritten bytes are sitting in out.queue; send them all. */
        struct iovec vec;
        vec.iov_base = c->out.queue;
        vec.iov_len = c->out.queue_len;
        c->out.queue_len = 0;
        return _xcb_out_send(c, &vec, 1);
    }
    /* Queue is empty but request_written still lags: another thread is in
     * the middle of writing. Wait for it to finish on our behalf. */
    while(c->out.writing)
        pthread_cond_wait(&c->out.cond, &c->iolock);
    assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));
    return 1;
}
495