1 /* 2 * QEMU I/O channels 3 * 4 * Copyright (c) 2015 Red Hat, Inc. 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 * 19 */ 20 21 #ifndef QIO_CHANNEL_H 22 #define QIO_CHANNEL_H 23 24 #include "qom/object.h" 25 #include "qemu/coroutine-core.h" 26 #include "block/aio.h" 27 28 #define TYPE_QIO_CHANNEL "qio-channel" 29 OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass, 30 QIO_CHANNEL) 31 32 33 #define QIO_CHANNEL_ERR_BLOCK -2 34 35 #define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1 36 37 #define QIO_CHANNEL_READ_FLAG_MSG_PEEK 0x1 38 39 typedef enum QIOChannelFeature QIOChannelFeature; 40 41 enum QIOChannelFeature { 42 QIO_CHANNEL_FEATURE_FD_PASS, 43 QIO_CHANNEL_FEATURE_SHUTDOWN, 44 QIO_CHANNEL_FEATURE_LISTEN, 45 QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY, 46 QIO_CHANNEL_FEATURE_READ_MSG_PEEK, 47 QIO_CHANNEL_FEATURE_SEEKABLE, 48 }; 49 50 51 typedef enum QIOChannelShutdown QIOChannelShutdown; 52 53 enum QIOChannelShutdown { 54 QIO_CHANNEL_SHUTDOWN_READ = 1, 55 QIO_CHANNEL_SHUTDOWN_WRITE = 2, 56 QIO_CHANNEL_SHUTDOWN_BOTH = 3, 57 }; 58 59 typedef gboolean (*QIOChannelFunc)(QIOChannel *ioc, 60 GIOCondition condition, 61 gpointer data); 62 63 /** 64 * QIOChannel: 65 * 66 * The QIOChannel defines the core API for a generic I/O channel 67 * class hierarchy. 
It is inspired by GIOChannel, but has the 68 * following differences 69 * 70 * - Use QOM to properly support arbitrary subclassing 71 * - Support use of iovecs for efficient I/O with multiple blocks 72 * - None of the character set translation, binary data exclusively 73 * - Direct support for QEMU Error object reporting 74 * - File descriptor passing 75 * 76 * This base class is abstract so cannot be instantiated. There 77 * will be subclasses for dealing with sockets, files, and higher 78 * level protocols such as TLS, WebSocket, etc. 79 */ 80 81 struct QIOChannel { 82 Object parent; 83 unsigned int features; /* bitmask of QIOChannelFeatures */ 84 char *name; 85 AioContext *read_ctx; 86 Coroutine *read_coroutine; 87 AioContext *write_ctx; 88 Coroutine *write_coroutine; 89 bool follow_coroutine_ctx; 90 #ifdef _WIN32 91 HANDLE event; /* For use with GSource on Win32 */ 92 #endif 93 }; 94 95 /** 96 * QIOChannelClass: 97 * 98 * This class defines the contract that all subclasses 99 * must follow to provide specific channel implementations. 100 * The first five callbacks are mandatory to support, others 101 * provide additional optional features. 102 * 103 * Consult the corresponding public API docs for a description 104 * of the semantics of each callback. io_shutdown in particular 105 * must be thread-safe, terminate quickly and must not block. 
106 */ 107 struct QIOChannelClass { 108 ObjectClass parent; 109 110 /* Mandatory callbacks */ 111 ssize_t (*io_writev)(QIOChannel *ioc, 112 const struct iovec *iov, 113 size_t niov, 114 int *fds, 115 size_t nfds, 116 int flags, 117 Error **errp); 118 ssize_t (*io_readv)(QIOChannel *ioc, 119 const struct iovec *iov, 120 size_t niov, 121 int **fds, 122 size_t *nfds, 123 int flags, 124 Error **errp); 125 int (*io_close)(QIOChannel *ioc, 126 Error **errp); 127 GSource * (*io_create_watch)(QIOChannel *ioc, 128 GIOCondition condition); 129 int (*io_set_blocking)(QIOChannel *ioc, 130 bool enabled, 131 Error **errp); 132 133 /* Optional callbacks */ 134 int (*io_shutdown)(QIOChannel *ioc, 135 QIOChannelShutdown how, 136 Error **errp); 137 void (*io_set_cork)(QIOChannel *ioc, 138 bool enabled); 139 void (*io_set_delay)(QIOChannel *ioc, 140 bool enabled); 141 off_t (*io_seek)(QIOChannel *ioc, 142 off_t offset, 143 int whence, 144 Error **errp); 145 void (*io_set_aio_fd_handler)(QIOChannel *ioc, 146 AioContext *read_ctx, 147 IOHandler *io_read, 148 AioContext *write_ctx, 149 IOHandler *io_write, 150 void *opaque); 151 int (*io_flush)(QIOChannel *ioc, 152 Error **errp); 153 }; 154 155 /* General I/O handling functions */ 156 157 /** 158 * qio_channel_has_feature: 159 * @ioc: the channel object 160 * @feature: the feature to check support of 161 * 162 * Determine whether the channel implementation supports 163 * the optional feature named in @feature. 164 * 165 * Returns: true if supported, false otherwise. 166 */ 167 bool qio_channel_has_feature(QIOChannel *ioc, 168 QIOChannelFeature feature); 169 170 /** 171 * qio_channel_set_feature: 172 * @ioc: the channel object 173 * @feature: the feature to set support for 174 * 175 * Add channel support for the feature named in @feature. 
 */
void qio_channel_set_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_name:
 * @ioc: the channel object
 * @name: the name of the channel
 *
 * Sets the name of the channel, which serves as an aid
 * to debugging. The name is used when creating GSource
 * watches for this channel.
 */
void qio_channel_set_name(QIOChannel *ioc,
                          const char *name);

/**
 * qio_channel_readv_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @flags: read flags (QIO_CHANNEL_READ_FLAG_*)
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * It is not required for all @iov to be filled with
 * data. If the channel is in blocking mode, at least
 * one byte of data will be read, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data is available, it will return QIO_CHANNEL_ERR_BLOCK
 *
 * If the channel has passed any file descriptors,
 * the @fds array pointer will be allocated and
 * the elements filled with the received file
 * descriptors. The @nfds pointer will be updated
 * to indicate the size of the @fds array that
 * was allocated. It is the caller's responsibility
 * to call close() on each file descriptor and to
 * call g_free() on the array pointer in @fds.
222 * 223 * It is an error to pass a non-NULL @fds parameter 224 * unless qio_channel_has_feature() returns a true 225 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant. 226 * 227 * Returns: the number of bytes read, or -1 on error, 228 * or QIO_CHANNEL_ERR_BLOCK if no data is available 229 * and the channel is non-blocking 230 */ 231 ssize_t qio_channel_readv_full(QIOChannel *ioc, 232 const struct iovec *iov, 233 size_t niov, 234 int **fds, 235 size_t *nfds, 236 int flags, 237 Error **errp); 238 239 240 /** 241 * qio_channel_writev_full: 242 * @ioc: the channel object 243 * @iov: the array of memory regions to write data from 244 * @niov: the length of the @iov array 245 * @fds: an array of file handles to send 246 * @nfds: number of file handles in @fds 247 * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*) 248 * @errp: pointer to a NULL-initialized error object 249 * 250 * Write data to the IO channel, reading it from the 251 * memory regions referenced by @iov. Each element 252 * in the @iov will be fully sent, before the next 253 * one is used. The @niov parameter specifies the 254 * total number of elements in @iov. 255 * 256 * It is not required for all @iov data to be fully 257 * sent. If the channel is in blocking mode, at least 258 * one byte of data will be sent, but no more is 259 * guaranteed. If the channel is non-blocking and no 260 * data can be sent, it will return QIO_CHANNEL_ERR_BLOCK 261 * 262 * If there are file descriptors to send, the @fds 263 * array should be non-NULL and provide the handles. 264 * All file descriptors will be sent if at least one 265 * byte of data was sent. 266 * 267 * It is an error to pass a non-NULL @fds parameter 268 * unless qio_channel_has_feature() returns a true 269 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant. 
 *
 * Returns: the number of bytes sent, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data can be sent
 * and the channel is non-blocking
 */
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                int flags,
                                Error **errp);

/**
 * qio_channel_readv_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before any data is read,
 * no error is reported; otherwise, if it occurs
 * before all requested data has been read, an error
 * will be reported.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 * occurs without data, or -1 on error
 */
int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc,
                                                 const struct iovec *iov,
                                                 size_t niov,
                                                 Error **errp);

/**
 * qio_channel_readv_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
325 * 326 * The function will wait for all requested data 327 * to be read, yielding from the current coroutine 328 * if required. 329 * 330 * If end-of-file occurs before all requested data 331 * has been read, an error will be reported. 332 * 333 * Returns: 0 if all bytes were read, or -1 on error 334 */ 335 int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc, 336 const struct iovec *iov, 337 size_t niov, 338 Error **errp); 339 340 341 /** 342 * qio_channel_writev_all: 343 * @ioc: the channel object 344 * @iov: the array of memory regions to write data from 345 * @niov: the length of the @iov array 346 * @errp: pointer to a NULL-initialized error object 347 * 348 * Write data to the IO channel, reading it from the 349 * memory regions referenced by @iov. Each element 350 * in the @iov will be fully sent, before the next 351 * one is used. The @niov parameter specifies the 352 * total number of elements in @iov. 353 * 354 * The function will wait for all requested data 355 * to be written, yielding from the current coroutine 356 * if required. 357 * 358 * Returns: 0 if all bytes were written, or -1 on error 359 */ 360 int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc, 361 const struct iovec *iov, 362 size_t niov, 363 Error **errp); 364 365 /** 366 * qio_channel_readv: 367 * @ioc: the channel object 368 * @iov: the array of memory regions to read data into 369 * @niov: the length of the @iov array 370 * @errp: pointer to a NULL-initialized error object 371 * 372 * Behaves as qio_channel_readv_full() but does not support 373 * receiving of file handles. 
 */
ssize_t qio_channel_readv(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);

/**
 * qio_channel_writev:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles.
 */
ssize_t qio_channel_writev(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_read:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles, and only supports reading into
 * a single memory region.
 */
ssize_t qio_channel_read(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write:
 * @ioc: the channel object
 * @buf: the memory region to send data from
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles, and only supports writing from a
 * single memory region.
 */
ssize_t qio_channel_write(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);

/**
 * qio_channel_read_all_eof:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs immediately it is not an error, but if it occurs after
 * data has been read it will return an error rather than a
 * short-read. Otherwise behaves as qio_channel_read().
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file occurs
 * without data, or -1 on error
 */
int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc,
                                                char *buf,
                                                size_t buflen,
                                                Error **errp);

/**
 * qio_channel_read_all:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs it will return an error rather than a short-read. Otherwise
 * behaves as qio_channel_read().
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc,
                                            char *buf,
                                            size_t buflen,
                                            Error **errp);

/**
 * qio_channel_write_all:
 * @ioc: the channel object
 * @buf: the memory region to write data from
 * @buflen: the number of bytes to write from @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Writes @buflen bytes from @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is written. Otherwise
 * behaves as qio_channel_write().
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc,
                                             const char *buf,
                                             size_t buflen,
                                             Error **errp);

/**
 * qio_channel_set_blocking:
 * @ioc: the channel object
 * @enabled: the blocking flag state
 * @errp: pointer to a NULL-initialized error object
 *
 * If @enabled is true, then the channel is put into
 * blocking mode, otherwise it will be non-blocking.
 *
 * In non-blocking mode, read/write operations may
 * return QIO_CHANNEL_ERR_BLOCK if they would otherwise
 * block on I/O
 */
int qio_channel_set_blocking(QIOChannel *ioc,
                             bool enabled,
                             Error **errp);

/**
 * qio_channel_set_follow_coroutine_ctx:
 * @ioc: the channel object
 * @enabled: whether or not to follow the coroutine's AioContext
 *
 * If @enabled is true, calls to qio_channel_yield() use the current
 * coroutine's AioContext. Usually this is desirable.
 *
 * If @enabled is false, calls to qio_channel_yield() use the global iohandler
 * AioContext. This may be used by coroutines that run in the main loop and
 * do not wish to respond to I/O during nested event loops. This is the
 * default for compatibility with code that is not aware of AioContexts.
 */
void qio_channel_set_follow_coroutine_ctx(QIOChannel *ioc, bool enabled);

/**
 * qio_channel_close:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Close the channel, flushing any pending I/O
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_close(QIOChannel *ioc,
                      Error **errp);

/**
 * qio_channel_shutdown:
 * @ioc: the channel object
 * @how: the direction to shutdown
 * @errp: pointer to a NULL-initialized error object
 *
 * Shuts down transmission and/or receiving of data
 * without closing the underlying transport.
540 * 541 * Not all implementations will support this facility, 542 * so may report an error. To avoid errors, the 543 * caller may check for the feature flag 544 * QIO_CHANNEL_FEATURE_SHUTDOWN prior to calling 545 * this method. 546 * 547 * This function is thread-safe, terminates quickly and does not block. 548 * 549 * Returns: 0 on success, -1 on error 550 */ 551 int qio_channel_shutdown(QIOChannel *ioc, 552 QIOChannelShutdown how, 553 Error **errp); 554 555 /** 556 * qio_channel_set_delay: 557 * @ioc: the channel object 558 * @enabled: the new flag state 559 * 560 * Controls whether the underlying transport is 561 * permitted to delay writes in order to merge 562 * small packets. If @enabled is true, then the 563 * writes may be delayed in order to opportunistically 564 * merge small packets into larger ones. If @enabled 565 * is false, writes are dispatched immediately with 566 * no delay. 567 * 568 * When @enabled is false, applications may wish to 569 * use the qio_channel_set_cork() method to explicitly 570 * control write merging. 571 * 572 * On channels which are backed by a socket, this 573 * API corresponds to the inverse of TCP_NODELAY flag, 574 * controlling whether the Nagle algorithm is active. 575 * 576 * This setting is merely a hint, so implementations are 577 * free to ignore this without it being considered an 578 * error. 579 */ 580 void qio_channel_set_delay(QIOChannel *ioc, 581 bool enabled); 582 583 /** 584 * qio_channel_set_cork: 585 * @ioc: the channel object 586 * @enabled: the new flag state 587 * 588 * Controls whether the underlying transport is 589 * permitted to dispatch data that is written. 590 * If @enabled is true, then any data written will 591 * be queued in local buffers until @enabled is 592 * set to false once again. 593 * 594 * This feature is typically used when the automatic 595 * write coalescing facility is disabled via the 596 * qio_channel_set_delay() method. 
 *
 * On channels which are backed by a socket, this
 * API corresponds to the TCP_CORK flag.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_cork(QIOChannel *ioc,
                          bool enabled);


/**
 * qio_channel_io_seek:
 * @ioc: the channel object
 * @offset: the position to seek to, relative to @whence
 * @whence: one of the (POSIX) SEEK_* constants listed below
 * @errp: pointer to a NULL-initialized error object
 *
 * Moves the current I/O position within the channel
 * @ioc, to be @offset. The value of @offset is
 * interpreted relative to @whence:
 *
 * SEEK_SET - the position is set to @offset bytes
 * SEEK_CUR - the position is moved by @offset bytes
 * SEEK_END - the position is set to end of the file plus @offset bytes
 *
 * Not all implementations will support this facility,
 * so may report an error.
 *
 * Returns: the new position on success, (off_t)-1 on failure
 */
off_t qio_channel_io_seek(QIOChannel *ioc,
                          off_t offset,
                          int whence,
                          Error **errp);


/**
 * qio_channel_create_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. Typically the
 * qio_channel_add_watch() method would be used instead
 * of this, since it directly attaches a callback to
 * the source
 *
 * Returns: the new main loop source.
 */
GSource *qio_channel_create_watch(QIOChannel *ioc,
                                  GIOCondition condition);

/**
 * qio_channel_add_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. The callback @func
 * will be registered against the source, to be invoked
 * when the source becomes ready. The optional @user_data
 * will be passed to @func when it is invoked. The @notify
 * callback will be used to free @user_data when the
 * watch is deleted
 *
 * The returned source ID can be used with g_source_remove()
 * to remove and free the source when no longer required.
 * Alternatively the @func callback can return a FALSE
 * value.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch(QIOChannel *ioc,
                            GIOCondition condition,
                            QIOChannelFunc func,
                            gpointer user_data,
                            GDestroyNotify notify);

/**
 * qio_channel_add_watch_full:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: the context to run the watch source
 *
 * Similar to qio_channel_add_watch(), but allows specifying the
 * context in which to run the watch source.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch_full(QIOChannel *ioc,
                                 GIOCondition condition,
                                 QIOChannelFunc func,
                                 gpointer user_data,
                                 GDestroyNotify notify,
                                 GMainContext *context);

/**
 * qio_channel_add_watch_source:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: gcontext to bind the source to
 *
 * Similar to qio_channel_add_watch(), but allows specifying the
 * context in which to run the watch source, and returns the
 * GSource object instead of a tag ID, with the GSource
 * referenced already.
 *
 * Note: the caller is responsible for unreffing the source when
 * it is no longer needed.
 *
 * Returns: the source pointer
 */
GSource *qio_channel_add_watch_source(QIOChannel *ioc,
                                      GIOCondition condition,
                                      QIOChannelFunc func,
                                      gpointer user_data,
                                      GDestroyNotify notify,
                                      GMainContext *context);

/**
 * qio_channel_yield:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Yields execution from the current coroutine until the condition
 * indicated by @condition becomes available. @condition must
 * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In
 * addition, no two coroutines can be waiting on the same condition
 * and channel at the same time.
 *
 * This must only be called from coroutine context. It is safe to
 * reenter the coroutine externally while it is waiting; in this
 * case the function will return even if @condition is not yet
 * available.
740 */ 741 void coroutine_fn qio_channel_yield(QIOChannel *ioc, 742 GIOCondition condition); 743 744 /** 745 * qio_channel_wake_read: 746 * @ioc: the channel object 747 * 748 * If qio_channel_yield() is currently waiting for the channel to become 749 * readable, interrupt it and reenter immediately. This function is safe to call 750 * from any thread. 751 */ 752 void qio_channel_wake_read(QIOChannel *ioc); 753 754 /** 755 * qio_channel_wait: 756 * @ioc: the channel object 757 * @condition: the I/O condition to wait for 758 * 759 * Block execution from the current thread until 760 * the condition indicated by @condition becomes 761 * available. 762 * 763 * This will enter a nested event loop to perform 764 * the wait. 765 */ 766 void qio_channel_wait(QIOChannel *ioc, 767 GIOCondition condition); 768 769 /** 770 * qio_channel_set_aio_fd_handler: 771 * @ioc: the channel object 772 * @read_ctx: the AioContext to set the read handler on or NULL 773 * @io_read: the read handler 774 * @write_ctx: the AioContext to set the write handler on or NULL 775 * @io_write: the write handler 776 * @opaque: the opaque value passed to the handler 777 * 778 * This is used internally by qio_channel_yield(). It can 779 * be used by channel implementations to forward the handlers 780 * to another channel (e.g. from #QIOChannelTLS to the 781 * underlying socket). 782 * 783 * When @read_ctx is NULL, don't touch the read handler. When @write_ctx is 784 * NULL, don't touch the write handler. Note that setting the read handler 785 * clears the write handler, and vice versa, if they share the same AioContext. 786 * Therefore the caller must pass both handlers together when sharing the same 787 * AioContext. 
 */
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
                                    AioContext *read_ctx,
                                    IOHandler *io_read,
                                    AioContext *write_ctx,
                                    IOHandler *io_write,
                                    void *opaque);

/**
 * qio_channel_readv_full_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data to
 * @niov: the length of the @iov array
 * @fds: an array of file handles to read
 * @nfds: number of file handles in @fds
 * @errp: pointer to a NULL-initialized error object
 *
 *
 * Performs same function as qio_channel_readv_all_eof.
 * Additionally, attempts to read file descriptors shared
 * over the channel. The function will wait for all
 * requested data to be read, yielding from the current
 * coroutine if required. Here, data refers to both file
 * descriptors and the iovs.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 * occurs without data, or -1 on error
 */

int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
                                                      const struct iovec *iov,
                                                      size_t niov,
                                                      int **fds, size_t *nfds,
                                                      Error **errp);

/**
 * qio_channel_readv_full_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data to
 * @niov: the length of the @iov array
 * @fds: an array of file handles to read
 * @nfds: number of file handles in @fds
 * @errp: pointer to a NULL-initialized error object
 *
 *
 * Performs same function as qio_channel_readv_all.
 * Additionally, attempts to read file descriptors shared
 * over the channel. The function will wait for all
 * requested data to be read, yielding from the current
 * coroutine if required. Here, data refers to both file
 * descriptors and the iovs.
839 * 840 * Returns: 0 if all bytes were read, or -1 on error 841 */ 842 843 int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc, 844 const struct iovec *iov, 845 size_t niov, 846 int **fds, size_t *nfds, 847 Error **errp); 848 849 /** 850 * qio_channel_writev_full_all: 851 * @ioc: the channel object 852 * @iov: the array of memory regions to write data from 853 * @niov: the length of the @iov array 854 * @fds: an array of file handles to send 855 * @nfds: number of file handles in @fds 856 * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*) 857 * @errp: pointer to a NULL-initialized error object 858 * 859 * 860 * Behaves like qio_channel_writev_full but will attempt 861 * to send all data passed (file handles and memory regions). 862 * The function will wait for all requested data 863 * to be written, yielding from the current coroutine 864 * if required. 865 * 866 * If QIO_CHANNEL_WRITE_FLAG_ZERO_COPY is passed in flags, 867 * instead of waiting for all requested data to be written, 868 * this function will wait until it's all queued for writing. 869 * In this case, if the buffer gets changed between queueing and 870 * sending, the updated buffer will be sent. If this is not a 871 * desired behavior, it's suggested to call qio_channel_flush() 872 * before reusing the buffer. 873 * 874 * Returns: 0 if all bytes were written, or -1 on error 875 */ 876 877 int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc, 878 const struct iovec *iov, 879 size_t niov, 880 int *fds, size_t nfds, 881 int flags, Error **errp); 882 883 /** 884 * qio_channel_flush: 885 * @ioc: the channel object 886 * @errp: pointer to a NULL-initialized error object 887 * 888 * Will block until every packet queued with 889 * qio_channel_writev_full() + QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 890 * is sent, or return in case of any error. 891 * 892 * If not implemented, acts as a no-op, and returns 0. 
893 * 894 * Returns -1 if any error is found, 895 * 1 if every send failed to use zero copy. 896 * 0 otherwise. 897 */ 898 899 int qio_channel_flush(QIOChannel *ioc, 900 Error **errp); 901 902 #endif /* QIO_CHANNEL_H */ 903