1 /* 2 * Copyright Red Hat 3 * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws> 4 * 5 * Network Block Device Server Side 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; under version 2 of the License. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 22 #include "block/block_int.h" 23 #include "block/export.h" 24 #include "block/dirty-bitmap.h" 25 #include "qapi/error.h" 26 #include "qemu/queue.h" 27 #include "trace.h" 28 #include "nbd-internal.h" 29 #include "qemu/units.h" 30 #include "qemu/memalign.h" 31 32 #define NBD_META_ID_BASE_ALLOCATION 0 33 #define NBD_META_ID_ALLOCATION_DEPTH 1 34 /* Dirty bitmaps use 'NBD_META_ID_DIRTY_BITMAP + i', so keep this id last. */ 35 #define NBD_META_ID_DIRTY_BITMAP 2 36 37 /* 38 * NBD_MAX_BLOCK_STATUS_EXTENTS: 1 MiB of extents data. An empirical 39 * constant. If an increase is needed, note that the NBD protocol 40 * recommends no larger than 32 mb, so that the client won't consider 41 * the reply as a denial of service attack. 42 */ 43 #define NBD_MAX_BLOCK_STATUS_EXTENTS (1 * MiB / 8) 44 45 static int system_errno_to_nbd_errno(int err) 46 { 47 switch (err) { 48 case 0: 49 return NBD_SUCCESS; 50 case EPERM: 51 case EROFS: 52 return NBD_EPERM; 53 case EIO: 54 return NBD_EIO; 55 case ENOMEM: 56 return NBD_ENOMEM; 57 #ifdef EDQUOT 58 case EDQUOT: 59 #endif 60 case EFBIG: 61 case ENOSPC: 62 return NBD_ENOSPC; 63 case EOVERFLOW: 64 return NBD_EOVERFLOW; 65 case ENOTSUP: 66 #if ENOTSUP != EOPNOTSUPP 67 case EOPNOTSUPP: 68 #endif 69 return NBD_ENOTSUP; 70 case ESHUTDOWN: 71 return NBD_ESHUTDOWN; 72 case EINVAL: 73 default: 74 return NBD_EINVAL; 75 } 76 } 77 78 /* Definitions for opaque data types */ 79 80 typedef struct NBDRequestData NBDRequestData; 81 82 struct NBDRequestData { 83 NBDClient *client; 84 uint8_t *data; 85 bool complete; 86 }; 87 88 struct NBDExport { 89 BlockExport common; 90 91 char *name; 92 char *description; 93 uint64_t size; 94 uint16_t nbdflags; 95 QTAILQ_HEAD(, NBDClient) clients; 96 QTAILQ_ENTRY(NBDExport) next; 97 98 BlockBackend *eject_notifier_blk; 99 Notifier eject_notifier; 100 101 bool allocation_depth; 102 BdrvDirtyBitmap **export_bitmaps; 103 size_t nr_export_bitmaps; 104 }; 105 106 static QTAILQ_HEAD(, NBDExport) exports = QTAILQ_HEAD_INITIALIZER(exports); 107 108 /* 109 * NBDMetaContexts represents a list of meta contexts in use, 110 * as selected by NBD_OPT_SET_META_CONTEXT. Also used for 111 * NBD_OPT_LIST_META_CONTEXT. 
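 * One flag is tracked per context that can be negotiated: base:allocation,
 * qemu:allocation-depth, and one entry per exported dirty bitmap.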
112 */ 113 struct NBDMetaContexts { 114 const NBDExport *exp; /* associated export */ 115 size_t count; /* number of negotiated contexts */ 116 bool base_allocation; /* export base:allocation context (block status) */ 117 bool allocation_depth; /* export qemu:allocation-depth */ 118 bool *bitmaps; /* 119 * export qemu:dirty-bitmap:<export bitmap name>, 120 * sized by exp->nr_export_bitmaps 121 */ 122 }; 123 124 struct NBDClient { 125 int refcount; /* atomic */ 126 void (*close_fn)(NBDClient *client, bool negotiated); 127 128 NBDExport *exp; 129 QCryptoTLSCreds *tlscreds; 130 char *tlsauthz; 131 QIOChannelSocket *sioc; /* The underlying data channel */ 132 QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */ 133 134 Coroutine *recv_coroutine; 135 136 CoMutex send_lock; 137 Coroutine *send_coroutine; 138 139 bool read_yielding; 140 bool quiescing; 141 142 QTAILQ_ENTRY(NBDClient) next; 143 int nb_requests; 144 bool closing; 145 146 uint32_t check_align; /* If non-zero, check for aligned client requests */ 147 148 NBDMode mode; 149 NBDMetaContexts contexts; /* Negotiated meta contexts */ 150 151 uint32_t opt; /* Current option being negotiated */ 152 uint32_t optlen; /* remaining length of data in ioc for the option being 153 negotiated now */ 154 }; 155 156 static void nbd_client_receive_next_request(NBDClient *client); 157 158 /* Basic flow for negotiation 159 160 Server Client 161 Negotiate 162 163 or 164 165 Server Client 166 Negotiate #1 167 Option 168 Negotiate #2 169 170 ---- 171 172 followed by 173 174 Server Client 175 Request 176 Response 177 Request 178 Response 179 ... 180 ... 181 Request (type == 2) 182 183 */ 184 185 static inline void set_be_option_rep(NBDOptionReply *rep, uint32_t option, 186 uint32_t type, uint32_t length) 187 { 188 stq_be_p(&rep->magic, NBD_REP_MAGIC); 189 stl_be_p(&rep->option, option); 190 stl_be_p(&rep->type, type); 191 stl_be_p(&rep->length, length); 192 } 193 194 /* Send a reply header, including length, but no payload. 195 * Return -errno on error, 0 on success. */ 196 static int nbd_negotiate_send_rep_len(NBDClient *client, uint32_t type, 197 uint32_t len, Error **errp) 198 { 199 NBDOptionReply rep; 200 201 trace_nbd_negotiate_send_rep_len(client->opt, nbd_opt_lookup(client->opt), 202 type, nbd_rep_lookup(type), len); 203 204 assert(len < NBD_MAX_BUFFER_SIZE); 205 206 set_be_option_rep(&rep, client->opt, type, len); 207 return nbd_write(client->ioc, &rep, sizeof(rep), errp); 208 } 209 210 /* Send a reply header with default 0 length. 211 * Return -errno on error, 0 on success. */ 212 static int nbd_negotiate_send_rep(NBDClient *client, uint32_t type, 213 Error **errp) 214 { 215 return nbd_negotiate_send_rep_len(client, type, 0, errp); 216 } 217 218 /* Send an error reply. 219 * Return -errno on error, 0 on success. */ 220 static int G_GNUC_PRINTF(4, 0) 221 nbd_negotiate_send_rep_verr(NBDClient *client, uint32_t type, 222 Error **errp, const char *fmt, va_list va) 223 { 224 ERRP_GUARD(); 225 g_autofree char *msg = NULL; 226 int ret; 227 size_t len; 228 229 msg = g_strdup_vprintf(fmt, va); 230 len = strlen(msg); 231 assert(len < NBD_MAX_STRING_SIZE); 232 trace_nbd_negotiate_send_rep_err(msg); 233 ret = nbd_negotiate_send_rep_len(client, type, len, errp); 234 if (ret < 0) { 235 return ret; 236 } 237 if (nbd_write(client->ioc, msg, len, errp) < 0) { 238 error_prepend(errp, "write failed (error message): "); 239 return -EIO; 240 } 241 242 return 0; 243 } 244 245 /* 246 * Return a malloc'd copy of @name suitable for use in an error reply. 
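 * Names longer than 80 bytes are truncated and suffixed with "...", keeping
 * an overlong client-supplied name from bloating the error reply.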
247 */ 248 static char * 249 nbd_sanitize_name(const char *name) 250 { 251 if (strnlen(name, 80) < 80) { 252 return g_strdup(name); 253 } 254 /* XXX Should we also try to sanitize any control characters? */ 255 return g_strdup_printf("%.80s...", name); 256 } 257 258 /* Send an error reply. 259 * Return -errno on error, 0 on success. */ 260 static int G_GNUC_PRINTF(4, 5) 261 nbd_negotiate_send_rep_err(NBDClient *client, uint32_t type, 262 Error **errp, const char *fmt, ...) 263 { 264 va_list va; 265 int ret; 266 267 va_start(va, fmt); 268 ret = nbd_negotiate_send_rep_verr(client, type, errp, fmt, va); 269 va_end(va); 270 return ret; 271 } 272 273 /* Drop remainder of the current option, and send a reply with the 274 * given error type and message. Return -errno on read or write 275 * failure; or 0 if connection is still live. */ 276 static int G_GNUC_PRINTF(4, 0) 277 nbd_opt_vdrop(NBDClient *client, uint32_t type, Error **errp, 278 const char *fmt, va_list va) 279 { 280 int ret = nbd_drop(client->ioc, client->optlen, errp); 281 282 client->optlen = 0; 283 if (!ret) { 284 ret = nbd_negotiate_send_rep_verr(client, type, errp, fmt, va); 285 } 286 return ret; 287 } 288 289 static int G_GNUC_PRINTF(4, 5) 290 nbd_opt_drop(NBDClient *client, uint32_t type, Error **errp, 291 const char *fmt, ...) 292 { 293 int ret; 294 va_list va; 295 296 va_start(va, fmt); 297 ret = nbd_opt_vdrop(client, type, errp, fmt, va); 298 va_end(va); 299 300 return ret; 301 } 302 303 static int G_GNUC_PRINTF(3, 4) 304 nbd_opt_invalid(NBDClient *client, Error **errp, const char *fmt, ...) 305 { 306 int ret; 307 va_list va; 308 309 va_start(va, fmt); 310 ret = nbd_opt_vdrop(client, NBD_REP_ERR_INVALID, errp, fmt, va); 311 va_end(va); 312 313 return ret; 314 } 315 316 /* Read size bytes from the unparsed payload of the current option. 317 * If @check_nul, require that no NUL bytes appear in buffer. 318 * Return -errno on I/O error, 0 if option was completely handled by 319 * sending a reply about inconsistent lengths, or 1 on success. */ 320 static int nbd_opt_read(NBDClient *client, void *buffer, size_t size, 321 bool check_nul, Error **errp) 322 { 323 if (size > client->optlen) { 324 return nbd_opt_invalid(client, errp, 325 "Inconsistent lengths in option %s", 326 nbd_opt_lookup(client->opt)); 327 } 328 client->optlen -= size; 329 if (qio_channel_read_all(client->ioc, buffer, size, errp) < 0) { 330 return -EIO; 331 } 332 333 if (check_nul && strnlen(buffer, size) != size) { 334 return nbd_opt_invalid(client, errp, 335 "Unexpected embedded NUL in option %s", 336 nbd_opt_lookup(client->opt)); 337 } 338 return 1; 339 } 340 341 /* Drop size bytes from the unparsed payload of the current option. 342 * Return -errno on I/O error, 0 if option was completely handled by 343 * sending a reply about inconsistent lengths, or 1 on success. */ 344 static int nbd_opt_skip(NBDClient *client, size_t size, Error **errp) 345 { 346 if (size > client->optlen) { 347 return nbd_opt_invalid(client, errp, 348 "Inconsistent lengths in option %s", 349 nbd_opt_lookup(client->opt)); 350 } 351 client->optlen -= size; 352 return nbd_drop(client->ioc, size, errp) < 0 ? -EIO : 1; 353 } 354 355 /* nbd_opt_read_name 356 * 357 * Read a string with the format: 358 * uint32_t len (<= NBD_MAX_STRING_SIZE) 359 * len bytes string (not 0-terminated) 360 * 361 * On success, @name will be allocated. 362 * If @length is non-null, it will be set to the actual string length. 
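 * For example, a hypothetical 6-byte name "drive0" would arrive as the
 * big-endian 32-bit length 6 followed by the six name bytes, with no
 * trailing NUL.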
363 * 364 * Return -errno on I/O error, 0 if option was completely handled by 365 * sending a reply about inconsistent lengths, or 1 on success. 366 */ 367 static int nbd_opt_read_name(NBDClient *client, char **name, uint32_t *length, 368 Error **errp) 369 { 370 int ret; 371 uint32_t len; 372 g_autofree char *local_name = NULL; 373 374 *name = NULL; 375 ret = nbd_opt_read(client, &len, sizeof(len), false, errp); 376 if (ret <= 0) { 377 return ret; 378 } 379 len = cpu_to_be32(len); 380 381 if (len > NBD_MAX_STRING_SIZE) { 382 return nbd_opt_invalid(client, errp, 383 "Invalid name length: %" PRIu32, len); 384 } 385 386 local_name = g_malloc(len + 1); 387 ret = nbd_opt_read(client, local_name, len, true, errp); 388 if (ret <= 0) { 389 return ret; 390 } 391 local_name[len] = '\0'; 392 393 if (length) { 394 *length = len; 395 } 396 *name = g_steal_pointer(&local_name); 397 398 return 1; 399 } 400 401 /* Send a single NBD_REP_SERVER reply to NBD_OPT_LIST, including payload. 402 * Return -errno on error, 0 on success. */ 403 static int nbd_negotiate_send_rep_list(NBDClient *client, NBDExport *exp, 404 Error **errp) 405 { 406 ERRP_GUARD(); 407 size_t name_len, desc_len; 408 uint32_t len; 409 const char *name = exp->name ? exp->name : ""; 410 const char *desc = exp->description ? exp->description : ""; 411 QIOChannel *ioc = client->ioc; 412 int ret; 413 414 trace_nbd_negotiate_send_rep_list(name, desc); 415 name_len = strlen(name); 416 desc_len = strlen(desc); 417 assert(name_len <= NBD_MAX_STRING_SIZE && desc_len <= NBD_MAX_STRING_SIZE); 418 len = name_len + desc_len + sizeof(len); 419 ret = nbd_negotiate_send_rep_len(client, NBD_REP_SERVER, len, errp); 420 if (ret < 0) { 421 return ret; 422 } 423 424 len = cpu_to_be32(name_len); 425 if (nbd_write(ioc, &len, sizeof(len), errp) < 0) { 426 error_prepend(errp, "write failed (name length): "); 427 return -EINVAL; 428 } 429 430 if (nbd_write(ioc, name, name_len, errp) < 0) { 431 error_prepend(errp, "write failed (name buffer): "); 432 return -EINVAL; 433 } 434 435 if (nbd_write(ioc, desc, desc_len, errp) < 0) { 436 error_prepend(errp, "write failed (description buffer): "); 437 return -EINVAL; 438 } 439 440 return 0; 441 } 442 443 /* Process the NBD_OPT_LIST command, with a potential series of replies. 444 * Return -errno on error, 0 on success. */ 445 static int nbd_negotiate_handle_list(NBDClient *client, Error **errp) 446 { 447 NBDExport *exp; 448 assert(client->opt == NBD_OPT_LIST); 449 450 /* For each export, send a NBD_REP_SERVER reply. */ 451 QTAILQ_FOREACH(exp, &exports, next) { 452 if (nbd_negotiate_send_rep_list(client, exp, errp)) { 453 return -EINVAL; 454 } 455 } 456 /* Finish with a NBD_REP_ACK. */ 457 return nbd_negotiate_send_rep(client, NBD_REP_ACK, errp); 458 } 459 460 static void nbd_check_meta_export(NBDClient *client, NBDExport *exp) 461 { 462 if (exp != client->contexts.exp) { 463 client->contexts.count = 0; 464 } 465 } 466 467 /* Send a reply to NBD_OPT_EXPORT_NAME. 468 * Return -errno on error, 0 on success. */ 469 static int nbd_negotiate_handle_export_name(NBDClient *client, bool no_zeroes, 470 Error **errp) 471 { 472 ERRP_GUARD(); 473 g_autofree char *name = NULL; 474 char buf[NBD_REPLY_EXPORT_NAME_SIZE] = ""; 475 size_t len; 476 int ret; 477 uint16_t myflags; 478 479 /* Client sends: 480 [20 .. xx] export name (length bytes) 481 Server replies: 482 [ 0 .. 7] size 483 [ 8 .. 9] export flags 484 [10 .. 
133] reserved (0) [unless no_zeroes] 485 */ 486 trace_nbd_negotiate_handle_export_name(); 487 if (client->mode >= NBD_MODE_EXTENDED) { 488 error_setg(errp, "Extended headers already negotiated"); 489 return -EINVAL; 490 } 491 if (client->optlen > NBD_MAX_STRING_SIZE) { 492 error_setg(errp, "Bad length received"); 493 return -EINVAL; 494 } 495 name = g_malloc(client->optlen + 1); 496 if (nbd_read(client->ioc, name, client->optlen, "export name", errp) < 0) { 497 return -EIO; 498 } 499 name[client->optlen] = '\0'; 500 client->optlen = 0; 501 502 trace_nbd_negotiate_handle_export_name_request(name); 503 504 client->exp = nbd_export_find(name); 505 if (!client->exp) { 506 error_setg(errp, "export not found"); 507 return -EINVAL; 508 } 509 nbd_check_meta_export(client, client->exp); 510 511 myflags = client->exp->nbdflags; 512 if (client->mode >= NBD_MODE_STRUCTURED) { 513 myflags |= NBD_FLAG_SEND_DF; 514 } 515 if (client->mode >= NBD_MODE_EXTENDED && client->contexts.count) { 516 myflags |= NBD_FLAG_BLOCK_STAT_PAYLOAD; 517 } 518 trace_nbd_negotiate_new_style_size_flags(client->exp->size, myflags); 519 stq_be_p(buf, client->exp->size); 520 stw_be_p(buf + 8, myflags); 521 len = no_zeroes ? 10 : sizeof(buf); 522 ret = nbd_write(client->ioc, buf, len, errp); 523 if (ret < 0) { 524 error_prepend(errp, "write failed: "); 525 return ret; 526 } 527 528 QTAILQ_INSERT_TAIL(&client->exp->clients, client, next); 529 blk_exp_ref(&client->exp->common); 530 531 return 0; 532 } 533 534 /* Send a single NBD_REP_INFO, with a buffer @buf of @length bytes. 535 * The buffer does NOT include the info type prefix. 536 * Return -errno on error, 0 if ready to send more. */ 537 static int nbd_negotiate_send_info(NBDClient *client, 538 uint16_t info, uint32_t length, void *buf, 539 Error **errp) 540 { 541 int rc; 542 543 trace_nbd_negotiate_send_info(info, nbd_info_lookup(info), length); 544 rc = nbd_negotiate_send_rep_len(client, NBD_REP_INFO, 545 sizeof(info) + length, errp); 546 if (rc < 0) { 547 return rc; 548 } 549 info = cpu_to_be16(info); 550 if (nbd_write(client->ioc, &info, sizeof(info), errp) < 0) { 551 return -EIO; 552 } 553 if (nbd_write(client->ioc, buf, length, errp) < 0) { 554 return -EIO; 555 } 556 return 0; 557 } 558 559 /* nbd_reject_length: Handle any unexpected payload. 560 * @fatal requests that we quit talking to the client, even if we are able 561 * to successfully send an error reply. 562 * Return: 563 * -errno transmission error occurred or @fatal was requested, errp is set 564 * 0 error message successfully sent to client, errp is not set 565 */ 566 static int nbd_reject_length(NBDClient *client, bool fatal, Error **errp) 567 { 568 int ret; 569 570 assert(client->optlen); 571 ret = nbd_opt_invalid(client, errp, "option '%s' has unexpected length", 572 nbd_opt_lookup(client->opt)); 573 if (fatal && !ret) { 574 error_setg(errp, "option '%s' has unexpected length", 575 nbd_opt_lookup(client->opt)); 576 return -EINVAL; 577 } 578 return ret; 579 } 580 581 /* Handle NBD_OPT_INFO and NBD_OPT_GO. 582 * Return -errno on error, 0 if ready for next option, and 1 to move 583 * into transmission phase. 
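 * (Only NBD_OPT_GO can return 1; NBD_OPT_INFO always stays in the
 * negotiation phase.)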
*/ 584 static int nbd_negotiate_handle_info(NBDClient *client, Error **errp) 585 { 586 int rc; 587 g_autofree char *name = NULL; 588 NBDExport *exp; 589 uint16_t requests; 590 uint16_t request; 591 uint32_t namelen = 0; 592 bool sendname = false; 593 bool blocksize = false; 594 uint32_t sizes[3]; 595 char buf[sizeof(uint64_t) + sizeof(uint16_t)]; 596 uint32_t check_align = 0; 597 uint16_t myflags; 598 599 /* Client sends: 600 4 bytes: L, name length (can be 0) 601 L bytes: export name 602 2 bytes: N, number of requests (can be 0) 603 N * 2 bytes: N requests 604 */ 605 rc = nbd_opt_read_name(client, &name, &namelen, errp); 606 if (rc <= 0) { 607 return rc; 608 } 609 trace_nbd_negotiate_handle_export_name_request(name); 610 611 rc = nbd_opt_read(client, &requests, sizeof(requests), false, errp); 612 if (rc <= 0) { 613 return rc; 614 } 615 requests = be16_to_cpu(requests); 616 trace_nbd_negotiate_handle_info_requests(requests); 617 while (requests--) { 618 rc = nbd_opt_read(client, &request, sizeof(request), false, errp); 619 if (rc <= 0) { 620 return rc; 621 } 622 request = be16_to_cpu(request); 623 trace_nbd_negotiate_handle_info_request(request, 624 nbd_info_lookup(request)); 625 /* We care about NBD_INFO_NAME and NBD_INFO_BLOCK_SIZE; 626 * everything else is either a request we don't know or 627 * something we send regardless of request */ 628 switch (request) { 629 case NBD_INFO_NAME: 630 sendname = true; 631 break; 632 case NBD_INFO_BLOCK_SIZE: 633 blocksize = true; 634 break; 635 } 636 } 637 if (client->optlen) { 638 return nbd_reject_length(client, false, errp); 639 } 640 641 exp = nbd_export_find(name); 642 if (!exp) { 643 g_autofree char *sane_name = nbd_sanitize_name(name); 644 645 return nbd_negotiate_send_rep_err(client, NBD_REP_ERR_UNKNOWN, 646 errp, "export '%s' not present", 647 sane_name); 648 } 649 if (client->opt == NBD_OPT_GO) { 650 nbd_check_meta_export(client, exp); 651 } 652 653 /* Don't bother sending NBD_INFO_NAME unless client requested it */ 654 if (sendname) { 655 rc = nbd_negotiate_send_info(client, NBD_INFO_NAME, namelen, name, 656 errp); 657 if (rc < 0) { 658 return rc; 659 } 660 } 661 662 /* Send NBD_INFO_DESCRIPTION only if available, regardless of 663 * client request */ 664 if (exp->description) { 665 size_t len = strlen(exp->description); 666 667 assert(len <= NBD_MAX_STRING_SIZE); 668 rc = nbd_negotiate_send_info(client, NBD_INFO_DESCRIPTION, 669 len, exp->description, errp); 670 if (rc < 0) { 671 return rc; 672 } 673 } 674 675 /* Send NBD_INFO_BLOCK_SIZE always, but tweak the minimum size 676 * according to whether the client requested it, and according to 677 * whether this is OPT_INFO or OPT_GO. */ 678 /* minimum - 1 for back-compat, or actual if client will obey it. */ 679 if (client->opt == NBD_OPT_INFO || blocksize) { 680 check_align = sizes[0] = blk_get_request_alignment(exp->common.blk); 681 } else { 682 sizes[0] = 1; 683 } 684 assert(sizes[0] <= NBD_MAX_BUFFER_SIZE); 685 /* preferred - Hard-code to 4096 for now. 686 * TODO: is blk_bs(blk)->bl.opt_transfer appropriate? */ 687 sizes[1] = MAX(4096, sizes[0]); 688 /* maximum - At most 32M, but smaller as appropriate. 
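 * This is the smaller of the backend's maximum transfer length and
 * NBD_MAX_BUFFER_SIZE.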
*/ 689 sizes[2] = MIN(blk_get_max_transfer(exp->common.blk), NBD_MAX_BUFFER_SIZE); 690 trace_nbd_negotiate_handle_info_block_size(sizes[0], sizes[1], sizes[2]); 691 sizes[0] = cpu_to_be32(sizes[0]); 692 sizes[1] = cpu_to_be32(sizes[1]); 693 sizes[2] = cpu_to_be32(sizes[2]); 694 rc = nbd_negotiate_send_info(client, NBD_INFO_BLOCK_SIZE, 695 sizeof(sizes), sizes, errp); 696 if (rc < 0) { 697 return rc; 698 } 699 700 /* Send NBD_INFO_EXPORT always */ 701 myflags = exp->nbdflags; 702 if (client->mode >= NBD_MODE_STRUCTURED) { 703 myflags |= NBD_FLAG_SEND_DF; 704 } 705 if (client->mode >= NBD_MODE_EXTENDED && 706 (client->contexts.count || client->opt == NBD_OPT_INFO)) { 707 myflags |= NBD_FLAG_BLOCK_STAT_PAYLOAD; 708 } 709 trace_nbd_negotiate_new_style_size_flags(exp->size, myflags); 710 stq_be_p(buf, exp->size); 711 stw_be_p(buf + 8, myflags); 712 rc = nbd_negotiate_send_info(client, NBD_INFO_EXPORT, 713 sizeof(buf), buf, errp); 714 if (rc < 0) { 715 return rc; 716 } 717 718 /* 719 * If the client is just asking for NBD_OPT_INFO, but forgot to 720 * request block sizes in a situation that would impact 721 * performance, then return an error. But for NBD_OPT_GO, we 722 * tolerate all clients, regardless of alignments. 723 */ 724 if (client->opt == NBD_OPT_INFO && !blocksize && 725 blk_get_request_alignment(exp->common.blk) > 1) { 726 return nbd_negotiate_send_rep_err(client, 727 NBD_REP_ERR_BLOCK_SIZE_REQD, 728 errp, 729 "request NBD_INFO_BLOCK_SIZE to " 730 "use this export"); 731 } 732 733 /* Final reply */ 734 rc = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp); 735 if (rc < 0) { 736 return rc; 737 } 738 739 if (client->opt == NBD_OPT_GO) { 740 client->exp = exp; 741 client->check_align = check_align; 742 QTAILQ_INSERT_TAIL(&client->exp->clients, client, next); 743 blk_exp_ref(&client->exp->common); 744 rc = 1; 745 } 746 return rc; 747 } 748 749 750 /* Handle NBD_OPT_STARTTLS. Return NULL to drop connection, or else the 751 * new channel for all further (now-encrypted) communication. */ 752 static QIOChannel *nbd_negotiate_handle_starttls(NBDClient *client, 753 Error **errp) 754 { 755 QIOChannel *ioc; 756 QIOChannelTLS *tioc; 757 struct NBDTLSHandshakeData data = { 0 }; 758 759 assert(client->opt == NBD_OPT_STARTTLS); 760 761 trace_nbd_negotiate_handle_starttls(); 762 ioc = client->ioc; 763 764 if (nbd_negotiate_send_rep(client, NBD_REP_ACK, errp) < 0) { 765 return NULL; 766 } 767 768 tioc = qio_channel_tls_new_server(ioc, 769 client->tlscreds, 770 client->tlsauthz, 771 errp); 772 if (!tioc) { 773 return NULL; 774 } 775 776 qio_channel_set_name(QIO_CHANNEL(tioc), "nbd-server-tls"); 777 trace_nbd_negotiate_handle_starttls_handshake(); 778 data.loop = g_main_loop_new(g_main_context_default(), FALSE); 779 qio_channel_tls_handshake(tioc, 780 nbd_tls_handshake, 781 &data, 782 NULL, 783 NULL); 784 785 if (!data.complete) { 786 g_main_loop_run(data.loop); 787 } 788 g_main_loop_unref(data.loop); 789 if (data.error) { 790 object_unref(OBJECT(tioc)); 791 error_propagate(errp, data.error); 792 return NULL; 793 } 794 795 return QIO_CHANNEL(tioc); 796 } 797 798 /* nbd_negotiate_send_meta_context 799 * 800 * Send one chunk of reply to NBD_OPT_{LIST,SET}_META_CONTEXT 801 * 802 * For NBD_OPT_LIST_META_CONTEXT @context_id is ignored, 0 is used instead. 
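 * The reply payload consists of the 4-byte big-endian context id followed
 * by the context name, without a trailing NUL.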
803 */ 804 static int nbd_negotiate_send_meta_context(NBDClient *client, 805 const char *context, 806 uint32_t context_id, 807 Error **errp) 808 { 809 NBDOptionReplyMetaContext opt; 810 struct iovec iov[] = { 811 {.iov_base = &opt, .iov_len = sizeof(opt)}, 812 {.iov_base = (void *)context, .iov_len = strlen(context)} 813 }; 814 815 assert(iov[1].iov_len <= NBD_MAX_STRING_SIZE); 816 if (client->opt == NBD_OPT_LIST_META_CONTEXT) { 817 context_id = 0; 818 } 819 820 trace_nbd_negotiate_meta_query_reply(context, context_id); 821 set_be_option_rep(&opt.h, client->opt, NBD_REP_META_CONTEXT, 822 sizeof(opt) - sizeof(opt.h) + iov[1].iov_len); 823 stl_be_p(&opt.context_id, context_id); 824 825 return qio_channel_writev_all(client->ioc, iov, 2, errp) < 0 ? -EIO : 0; 826 } 827 828 /* 829 * Return true if @query matches @pattern, or if @query is empty when 830 * the @client is performing _LIST_. 831 */ 832 static bool nbd_meta_empty_or_pattern(NBDClient *client, const char *pattern, 833 const char *query) 834 { 835 if (!*query) { 836 trace_nbd_negotiate_meta_query_parse("empty"); 837 return client->opt == NBD_OPT_LIST_META_CONTEXT; 838 } 839 if (strcmp(query, pattern) == 0) { 840 trace_nbd_negotiate_meta_query_parse(pattern); 841 return true; 842 } 843 trace_nbd_negotiate_meta_query_skip("pattern not matched"); 844 return false; 845 } 846 847 /* 848 * Return true and adjust @str in place if it begins with @prefix. 849 */ 850 static bool nbd_strshift(const char **str, const char *prefix) 851 { 852 size_t len = strlen(prefix); 853 854 if (strncmp(*str, prefix, len) == 0) { 855 *str += len; 856 return true; 857 } 858 return false; 859 } 860 861 /* nbd_meta_base_query 862 * 863 * Handle queries to 'base' namespace. For now, only the base:allocation 864 * context is available. Return true if @query has been handled. 865 */ 866 static bool nbd_meta_base_query(NBDClient *client, NBDMetaContexts *meta, 867 const char *query) 868 { 869 if (!nbd_strshift(&query, "base:")) { 870 return false; 871 } 872 trace_nbd_negotiate_meta_query_parse("base:"); 873 874 if (nbd_meta_empty_or_pattern(client, "allocation", query)) { 875 meta->base_allocation = true; 876 } 877 return true; 878 } 879 880 /* nbd_meta_qemu_query 881 * 882 * Handle queries to 'qemu' namespace. For now, only the qemu:dirty-bitmap: 883 * and qemu:allocation-depth contexts are available. Return true if @query 884 * has been handled. 
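 * For example, a query of "qemu:dirty-bitmap:NAME" selects the exported
 * bitmap called NAME, while an empty "qemu:" query during
 * NBD_OPT_LIST_META_CONTEXT selects every context available in the namespace.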
885 */ 886 static bool nbd_meta_qemu_query(NBDClient *client, NBDMetaContexts *meta, 887 const char *query) 888 { 889 size_t i; 890 891 if (!nbd_strshift(&query, "qemu:")) { 892 return false; 893 } 894 trace_nbd_negotiate_meta_query_parse("qemu:"); 895 896 if (!*query) { 897 if (client->opt == NBD_OPT_LIST_META_CONTEXT) { 898 meta->allocation_depth = meta->exp->allocation_depth; 899 if (meta->exp->nr_export_bitmaps) { 900 memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps); 901 } 902 } 903 trace_nbd_negotiate_meta_query_parse("empty"); 904 return true; 905 } 906 907 if (strcmp(query, "allocation-depth") == 0) { 908 trace_nbd_negotiate_meta_query_parse("allocation-depth"); 909 meta->allocation_depth = meta->exp->allocation_depth; 910 return true; 911 } 912 913 if (nbd_strshift(&query, "dirty-bitmap:")) { 914 trace_nbd_negotiate_meta_query_parse("dirty-bitmap:"); 915 if (!*query) { 916 if (client->opt == NBD_OPT_LIST_META_CONTEXT && 917 meta->exp->nr_export_bitmaps) { 918 memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps); 919 } 920 trace_nbd_negotiate_meta_query_parse("empty"); 921 return true; 922 } 923 924 for (i = 0; i < meta->exp->nr_export_bitmaps; i++) { 925 const char *bm_name; 926 927 bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]); 928 if (strcmp(bm_name, query) == 0) { 929 meta->bitmaps[i] = true; 930 trace_nbd_negotiate_meta_query_parse(query); 931 return true; 932 } 933 } 934 trace_nbd_negotiate_meta_query_skip("no dirty-bitmap match"); 935 return true; 936 } 937 938 trace_nbd_negotiate_meta_query_skip("unknown qemu context"); 939 return true; 940 } 941 942 /* nbd_negotiate_meta_query 943 * 944 * Parse namespace name and call corresponding function to parse body of the 945 * query. 946 * 947 * The only supported namespaces are 'base' and 'qemu'. 948 * 949 * Return -errno on I/O error, 0 if option was completely handled by 950 * sending a reply about inconsistent lengths, or 1 on success. */ 951 static int nbd_negotiate_meta_query(NBDClient *client, 952 NBDMetaContexts *meta, Error **errp) 953 { 954 int ret; 955 g_autofree char *query = NULL; 956 uint32_t len; 957 958 ret = nbd_opt_read(client, &len, sizeof(len), false, errp); 959 if (ret <= 0) { 960 return ret; 961 } 962 len = cpu_to_be32(len); 963 964 if (len > NBD_MAX_STRING_SIZE) { 965 trace_nbd_negotiate_meta_query_skip("length too long"); 966 return nbd_opt_skip(client, len, errp); 967 } 968 969 query = g_malloc(len + 1); 970 ret = nbd_opt_read(client, query, len, true, errp); 971 if (ret <= 0) { 972 return ret; 973 } 974 query[len] = '\0'; 975 976 if (nbd_meta_base_query(client, meta, query)) { 977 return 1; 978 } 979 if (nbd_meta_qemu_query(client, meta, query)) { 980 return 1; 981 } 982 983 trace_nbd_negotiate_meta_query_skip("unknown namespace"); 984 return 1; 985 } 986 987 /* nbd_negotiate_meta_queries 988 * Handle NBD_OPT_LIST_META_CONTEXT and NBD_OPT_SET_META_CONTEXT 989 * 990 * Return -errno on I/O error, or 0 if option was completely handled. 
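 * The option payload holds an export name, a 32-bit query count, and then
 * that many (length, string) queries.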
*/ 991 static int nbd_negotiate_meta_queries(NBDClient *client, Error **errp) 992 { 993 int ret; 994 g_autofree char *export_name = NULL; 995 /* Mark unused to work around https://bugs.llvm.org/show_bug.cgi?id=3888 */ 996 g_autofree G_GNUC_UNUSED bool *bitmaps = NULL; 997 NBDMetaContexts local_meta = {0}; 998 NBDMetaContexts *meta; 999 uint32_t nb_queries; 1000 size_t i; 1001 size_t count = 0; 1002 1003 if (client->opt == NBD_OPT_SET_META_CONTEXT && 1004 client->mode < NBD_MODE_STRUCTURED) { 1005 return nbd_opt_invalid(client, errp, 1006 "request option '%s' when structured reply " 1007 "is not negotiated", 1008 nbd_opt_lookup(client->opt)); 1009 } 1010 1011 if (client->opt == NBD_OPT_LIST_META_CONTEXT) { 1012 /* Only change the caller's meta on SET. */ 1013 meta = &local_meta; 1014 } else { 1015 meta = &client->contexts; 1016 } 1017 1018 g_free(meta->bitmaps); 1019 memset(meta, 0, sizeof(*meta)); 1020 1021 ret = nbd_opt_read_name(client, &export_name, NULL, errp); 1022 if (ret <= 0) { 1023 return ret; 1024 } 1025 1026 meta->exp = nbd_export_find(export_name); 1027 if (meta->exp == NULL) { 1028 g_autofree char *sane_name = nbd_sanitize_name(export_name); 1029 1030 return nbd_opt_drop(client, NBD_REP_ERR_UNKNOWN, errp, 1031 "export '%s' not present", sane_name); 1032 } 1033 meta->bitmaps = g_new0(bool, meta->exp->nr_export_bitmaps); 1034 if (client->opt == NBD_OPT_LIST_META_CONTEXT) { 1035 bitmaps = meta->bitmaps; 1036 } 1037 1038 ret = nbd_opt_read(client, &nb_queries, sizeof(nb_queries), false, errp); 1039 if (ret <= 0) { 1040 return ret; 1041 } 1042 nb_queries = cpu_to_be32(nb_queries); 1043 trace_nbd_negotiate_meta_context(nbd_opt_lookup(client->opt), 1044 export_name, nb_queries); 1045 1046 if (client->opt == NBD_OPT_LIST_META_CONTEXT && !nb_queries) { 1047 /* enable all known contexts */ 1048 meta->base_allocation = true; 1049 meta->allocation_depth = meta->exp->allocation_depth; 1050 if (meta->exp->nr_export_bitmaps) { 1051 memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps); 1052 } 1053 } else { 1054 for (i = 0; i < nb_queries; ++i) { 1055 ret = nbd_negotiate_meta_query(client, meta, errp); 1056 if (ret <= 0) { 1057 return ret; 1058 } 1059 } 1060 } 1061 1062 if (meta->base_allocation) { 1063 ret = nbd_negotiate_send_meta_context(client, "base:allocation", 1064 NBD_META_ID_BASE_ALLOCATION, 1065 errp); 1066 if (ret < 0) { 1067 return ret; 1068 } 1069 count++; 1070 } 1071 1072 if (meta->allocation_depth) { 1073 ret = nbd_negotiate_send_meta_context(client, "qemu:allocation-depth", 1074 NBD_META_ID_ALLOCATION_DEPTH, 1075 errp); 1076 if (ret < 0) { 1077 return ret; 1078 } 1079 count++; 1080 } 1081 1082 for (i = 0; i < meta->exp->nr_export_bitmaps; i++) { 1083 const char *bm_name; 1084 g_autofree char *context = NULL; 1085 1086 if (!meta->bitmaps[i]) { 1087 continue; 1088 } 1089 1090 bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]); 1091 context = g_strdup_printf("qemu:dirty-bitmap:%s", bm_name); 1092 1093 ret = nbd_negotiate_send_meta_context(client, context, 1094 NBD_META_ID_DIRTY_BITMAP + i, 1095 errp); 1096 if (ret < 0) { 1097 return ret; 1098 } 1099 count++; 1100 } 1101 1102 ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp); 1103 if (ret == 0) { 1104 meta->count = count; 1105 } 1106 1107 return ret; 1108 } 1109 1110 /* nbd_negotiate_options 1111 * Process all NBD_OPT_* client option commands, during fixed newstyle 1112 * negotiation. 
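 * The loop ends when the client sends NBD_OPT_EXPORT_NAME, a successful
 * NBD_OPT_GO, or NBD_OPT_ABORT, or when an error occurs.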
1113 * Return: 1114 * -errno on error, errp is set 1115 * 0 on successful negotiation, errp is not set 1116 * 1 if client sent NBD_OPT_ABORT, i.e. on valid disconnect, 1117 * errp is not set 1118 */ 1119 static int nbd_negotiate_options(NBDClient *client, Error **errp) 1120 { 1121 uint32_t flags; 1122 bool fixedNewstyle = false; 1123 bool no_zeroes = false; 1124 1125 /* Client sends: 1126 [ 0 .. 3] client flags 1127 1128 Then we loop until NBD_OPT_EXPORT_NAME or NBD_OPT_GO: 1129 [ 0 .. 7] NBD_OPTS_MAGIC 1130 [ 8 .. 11] NBD option 1131 [12 .. 15] Data length 1132 ... Rest of request 1133 1134 [ 0 .. 7] NBD_OPTS_MAGIC 1135 [ 8 .. 11] Second NBD option 1136 [12 .. 15] Data length 1137 ... Rest of request 1138 */ 1139 1140 if (nbd_read32(client->ioc, &flags, "flags", errp) < 0) { 1141 return -EIO; 1142 } 1143 client->mode = NBD_MODE_EXPORT_NAME; 1144 trace_nbd_negotiate_options_flags(flags); 1145 if (flags & NBD_FLAG_C_FIXED_NEWSTYLE) { 1146 fixedNewstyle = true; 1147 flags &= ~NBD_FLAG_C_FIXED_NEWSTYLE; 1148 client->mode = NBD_MODE_SIMPLE; 1149 } 1150 if (flags & NBD_FLAG_C_NO_ZEROES) { 1151 no_zeroes = true; 1152 flags &= ~NBD_FLAG_C_NO_ZEROES; 1153 } 1154 if (flags != 0) { 1155 error_setg(errp, "Unknown client flags 0x%" PRIx32 " received", flags); 1156 return -EINVAL; 1157 } 1158 1159 while (1) { 1160 int ret; 1161 uint32_t option, length; 1162 uint64_t magic; 1163 1164 if (nbd_read64(client->ioc, &magic, "opts magic", errp) < 0) { 1165 return -EINVAL; 1166 } 1167 trace_nbd_negotiate_options_check_magic(magic); 1168 if (magic != NBD_OPTS_MAGIC) { 1169 error_setg(errp, "Bad magic received"); 1170 return -EINVAL; 1171 } 1172 1173 if (nbd_read32(client->ioc, &option, "option", errp) < 0) { 1174 return -EINVAL; 1175 } 1176 client->opt = option; 1177 1178 if (nbd_read32(client->ioc, &length, "option length", errp) < 0) { 1179 return -EINVAL; 1180 } 1181 assert(!client->optlen); 1182 client->optlen = length; 1183 1184 if (length > NBD_MAX_BUFFER_SIZE) { 1185 error_setg(errp, "len (%" PRIu32 ") is larger than max len (%u)", 1186 length, NBD_MAX_BUFFER_SIZE); 1187 return -EINVAL; 1188 } 1189 1190 trace_nbd_negotiate_options_check_option(option, 1191 nbd_opt_lookup(option)); 1192 if (client->tlscreds && 1193 client->ioc == (QIOChannel *)client->sioc) { 1194 QIOChannel *tioc; 1195 if (!fixedNewstyle) { 1196 error_setg(errp, "Unsupported option 0x%" PRIx32, option); 1197 return -EINVAL; 1198 } 1199 switch (option) { 1200 case NBD_OPT_STARTTLS: 1201 if (length) { 1202 /* Unconditionally drop the connection if the client 1203 * can't start a TLS negotiation correctly */ 1204 return nbd_reject_length(client, true, errp); 1205 } 1206 tioc = nbd_negotiate_handle_starttls(client, errp); 1207 if (!tioc) { 1208 return -EIO; 1209 } 1210 ret = 0; 1211 object_unref(OBJECT(client->ioc)); 1212 client->ioc = tioc; 1213 break; 1214 1215 case NBD_OPT_EXPORT_NAME: 1216 /* No way to return an error to client, so drop connection */ 1217 error_setg(errp, "Option 0x%x not permitted before TLS", 1218 option); 1219 return -EINVAL; 1220 1221 default: 1222 /* Let the client keep trying, unless they asked to 1223 * quit. Always try to give an error back to the 1224 * client; but when replying to OPT_ABORT, be aware 1225 * that the client may hang up before receiving the 1226 * error, in which case we are fine ignoring the 1227 * resulting EPIPE. */ 1228 ret = nbd_opt_drop(client, NBD_REP_ERR_TLS_REQD, 1229 option == NBD_OPT_ABORT ? 
NULL : errp, 1230 "Option 0x%" PRIx32 1231 " not permitted before TLS", option); 1232 if (option == NBD_OPT_ABORT) { 1233 return 1; 1234 } 1235 break; 1236 } 1237 } else if (fixedNewstyle) { 1238 switch (option) { 1239 case NBD_OPT_LIST: 1240 if (length) { 1241 ret = nbd_reject_length(client, false, errp); 1242 } else { 1243 ret = nbd_negotiate_handle_list(client, errp); 1244 } 1245 break; 1246 1247 case NBD_OPT_ABORT: 1248 /* NBD spec says we must try to reply before 1249 * disconnecting, but that we must also tolerate 1250 * guests that don't wait for our reply. */ 1251 nbd_negotiate_send_rep(client, NBD_REP_ACK, NULL); 1252 return 1; 1253 1254 case NBD_OPT_EXPORT_NAME: 1255 return nbd_negotiate_handle_export_name(client, no_zeroes, 1256 errp); 1257 1258 case NBD_OPT_INFO: 1259 case NBD_OPT_GO: 1260 ret = nbd_negotiate_handle_info(client, errp); 1261 if (ret == 1) { 1262 assert(option == NBD_OPT_GO); 1263 return 0; 1264 } 1265 break; 1266 1267 case NBD_OPT_STARTTLS: 1268 if (length) { 1269 ret = nbd_reject_length(client, false, errp); 1270 } else if (client->tlscreds) { 1271 ret = nbd_negotiate_send_rep_err(client, 1272 NBD_REP_ERR_INVALID, errp, 1273 "TLS already enabled"); 1274 } else { 1275 ret = nbd_negotiate_send_rep_err(client, 1276 NBD_REP_ERR_POLICY, errp, 1277 "TLS not configured"); 1278 } 1279 break; 1280 1281 case NBD_OPT_STRUCTURED_REPLY: 1282 if (length) { 1283 ret = nbd_reject_length(client, false, errp); 1284 } else if (client->mode >= NBD_MODE_EXTENDED) { 1285 ret = nbd_negotiate_send_rep_err( 1286 client, NBD_REP_ERR_EXT_HEADER_REQD, errp, 1287 "extended headers already negotiated"); 1288 } else if (client->mode >= NBD_MODE_STRUCTURED) { 1289 ret = nbd_negotiate_send_rep_err( 1290 client, NBD_REP_ERR_INVALID, errp, 1291 "structured reply already negotiated"); 1292 } else { 1293 ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp); 1294 client->mode = NBD_MODE_STRUCTURED; 1295 } 1296 break; 1297 1298 case NBD_OPT_LIST_META_CONTEXT: 1299 case NBD_OPT_SET_META_CONTEXT: 1300 ret = nbd_negotiate_meta_queries(client, errp); 1301 break; 1302 1303 case NBD_OPT_EXTENDED_HEADERS: 1304 if (length) { 1305 ret = nbd_reject_length(client, false, errp); 1306 } else if (client->mode >= NBD_MODE_EXTENDED) { 1307 ret = nbd_negotiate_send_rep_err( 1308 client, NBD_REP_ERR_INVALID, errp, 1309 "extended headers already negotiated"); 1310 } else { 1311 ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp); 1312 client->mode = NBD_MODE_EXTENDED; 1313 } 1314 break; 1315 1316 default: 1317 ret = nbd_opt_drop(client, NBD_REP_ERR_UNSUP, errp, 1318 "Unsupported option %" PRIu32 " (%s)", 1319 option, nbd_opt_lookup(option)); 1320 break; 1321 } 1322 } else { 1323 /* 1324 * If broken new-style we should drop the connection 1325 * for anything except NBD_OPT_EXPORT_NAME 1326 */ 1327 switch (option) { 1328 case NBD_OPT_EXPORT_NAME: 1329 return nbd_negotiate_handle_export_name(client, no_zeroes, 1330 errp); 1331 1332 default: 1333 error_setg(errp, "Unsupported option %" PRIu32 " (%s)", 1334 option, nbd_opt_lookup(option)); 1335 return -EINVAL; 1336 } 1337 } 1338 if (ret < 0) { 1339 return ret; 1340 } 1341 } 1342 } 1343 1344 /* nbd_negotiate 1345 * Return: 1346 * -errno on error, errp is set 1347 * 0 on successful negotiation, errp is not set 1348 * 1 if client sent NBD_OPT_ABORT, i.e. 
on valid disconnect, 1349 * errp is not set 1350 */ 1351 static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp) 1352 { 1353 ERRP_GUARD(); 1354 char buf[NBD_OLDSTYLE_NEGOTIATE_SIZE] = ""; 1355 int ret; 1356 1357 /* Old style negotiation header, no room for options 1358 [ 0 .. 7] passwd ("NBDMAGIC") 1359 [ 8 .. 15] magic (NBD_CLIENT_MAGIC) 1360 [16 .. 23] size 1361 [24 .. 27] export flags (zero-extended) 1362 [28 .. 151] reserved (0) 1363 1364 New style negotiation header, client can send options 1365 [ 0 .. 7] passwd ("NBDMAGIC") 1366 [ 8 .. 15] magic (NBD_OPTS_MAGIC) 1367 [16 .. 17] server flags (0) 1368 ....options sent, ending in NBD_OPT_EXPORT_NAME or NBD_OPT_GO.... 1369 */ 1370 1371 qio_channel_set_blocking(client->ioc, false, NULL); 1372 qio_channel_set_follow_coroutine_ctx(client->ioc, true); 1373 1374 trace_nbd_negotiate_begin(); 1375 memcpy(buf, "NBDMAGIC", 8); 1376 1377 stq_be_p(buf + 8, NBD_OPTS_MAGIC); 1378 stw_be_p(buf + 16, NBD_FLAG_FIXED_NEWSTYLE | NBD_FLAG_NO_ZEROES); 1379 1380 if (nbd_write(client->ioc, buf, 18, errp) < 0) { 1381 error_prepend(errp, "write failed: "); 1382 return -EINVAL; 1383 } 1384 ret = nbd_negotiate_options(client, errp); 1385 if (ret != 0) { 1386 if (ret < 0) { 1387 error_prepend(errp, "option negotiation failed: "); 1388 } 1389 return ret; 1390 } 1391 1392 assert(!client->optlen); 1393 trace_nbd_negotiate_success(); 1394 1395 return 0; 1396 } 1397 1398 /* nbd_read_eof 1399 * Tries to read @size bytes from @ioc. This is a local implementation of 1400 * qio_channel_readv_all_eof. We have it here because we need it to be 1401 * interruptible and to know when the coroutine is yielding. 1402 * Returns 1 on success 1403 * 0 on eof, when no data was read (errp is not set) 1404 * negative errno on failure (errp is set) 1405 */ 1406 static inline int coroutine_fn 1407 nbd_read_eof(NBDClient *client, void *buffer, size_t size, Error **errp) 1408 { 1409 bool partial = false; 1410 1411 assert(size); 1412 while (size > 0) { 1413 struct iovec iov = { .iov_base = buffer, .iov_len = size }; 1414 ssize_t len; 1415 1416 len = qio_channel_readv(client->ioc, &iov, 1, errp); 1417 if (len == QIO_CHANNEL_ERR_BLOCK) { 1418 client->read_yielding = true; 1419 qio_channel_yield(client->ioc, G_IO_IN); 1420 client->read_yielding = false; 1421 if (client->quiescing) { 1422 return -EAGAIN; 1423 } 1424 continue; 1425 } else if (len < 0) { 1426 return -EIO; 1427 } else if (len == 0) { 1428 if (partial) { 1429 error_setg(errp, 1430 "Unexpected end-of-file before all bytes were read"); 1431 return -EIO; 1432 } else { 1433 return 0; 1434 } 1435 } 1436 1437 partial = true; 1438 size -= len; 1439 buffer = (uint8_t *) buffer + len; 1440 } 1441 return 1; 1442 } 1443 1444 static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *request, 1445 Error **errp) 1446 { 1447 uint8_t buf[NBD_EXTENDED_REQUEST_SIZE]; 1448 uint32_t magic, expect; 1449 int ret; 1450 size_t size = client->mode >= NBD_MODE_EXTENDED ? 1451 NBD_EXTENDED_REQUEST_SIZE : NBD_REQUEST_SIZE; 1452 1453 ret = nbd_read_eof(client, buf, size, errp); 1454 if (ret < 0) { 1455 return ret; 1456 } 1457 if (ret == 0) { 1458 return -EIO; 1459 } 1460 1461 /* 1462 * Compact request 1463 * [ 0 .. 3] magic (NBD_REQUEST_MAGIC) 1464 * [ 4 .. 5] flags (NBD_CMD_FLAG_FUA, ...) 1465 * [ 6 .. 7] type (NBD_CMD_READ, ...) 1466 * [ 8 .. 15] cookie 1467 * [16 .. 23] from 1468 * [24 .. 27] len 1469 * Extended request 1470 * [ 0 .. 3] magic (NBD_EXTENDED_REQUEST_MAGIC) 1471 * [ 4 .. 
5] flags (NBD_CMD_FLAG_FUA, NBD_CMD_FLAG_PAYLOAD_LEN, ...) 1472 * [ 6 .. 7] type (NBD_CMD_READ, ...) 1473 * [ 8 .. 15] cookie 1474 * [16 .. 23] from 1475 * [24 .. 31] len 1476 */ 1477 1478 magic = ldl_be_p(buf); 1479 request->flags = lduw_be_p(buf + 4); 1480 request->type = lduw_be_p(buf + 6); 1481 request->cookie = ldq_be_p(buf + 8); 1482 request->from = ldq_be_p(buf + 16); 1483 if (client->mode >= NBD_MODE_EXTENDED) { 1484 request->len = ldq_be_p(buf + 24); 1485 expect = NBD_EXTENDED_REQUEST_MAGIC; 1486 } else { 1487 request->len = (uint32_t)ldl_be_p(buf + 24); /* widen 32 to 64 bits */ 1488 expect = NBD_REQUEST_MAGIC; 1489 } 1490 1491 trace_nbd_receive_request(magic, request->flags, request->type, 1492 request->from, request->len); 1493 1494 if (magic != expect) { 1495 error_setg(errp, "invalid magic (got 0x%" PRIx32 ", expected 0x%" 1496 PRIx32 ")", magic, expect); 1497 return -EINVAL; 1498 } 1499 return 0; 1500 } 1501 1502 #define MAX_NBD_REQUESTS 16 1503 1504 /* Runs in export AioContext and main loop thread */ 1505 void nbd_client_get(NBDClient *client) 1506 { 1507 qatomic_inc(&client->refcount); 1508 } 1509 1510 void nbd_client_put(NBDClient *client) 1511 { 1512 assert(qemu_in_main_thread()); 1513 1514 if (qatomic_fetch_dec(&client->refcount) == 1) { 1515 /* The last reference should be dropped by client->close, 1516 * which is called by client_close. 1517 */ 1518 assert(client->closing); 1519 1520 object_unref(OBJECT(client->sioc)); 1521 object_unref(OBJECT(client->ioc)); 1522 if (client->tlscreds) { 1523 object_unref(OBJECT(client->tlscreds)); 1524 } 1525 g_free(client->tlsauthz); 1526 if (client->exp) { 1527 QTAILQ_REMOVE(&client->exp->clients, client, next); 1528 blk_exp_unref(&client->exp->common); 1529 } 1530 g_free(client->contexts.bitmaps); 1531 g_free(client); 1532 } 1533 } 1534 1535 /* 1536 * Tries to release the reference to @client, but only if other references 1537 * remain. This is an optimization for the common case where we want to avoid 1538 * the expense of scheduling nbd_client_put() in the main loop thread. 1539 * 1540 * Returns true upon success or false if the reference was not released because 1541 * it is the last reference. 1542 */ 1543 static bool nbd_client_put_nonzero(NBDClient *client) 1544 { 1545 int old = qatomic_read(&client->refcount); 1546 int expected; 1547 1548 do { 1549 if (old == 1) { 1550 return false; 1551 } 1552 1553 expected = old; 1554 old = qatomic_cmpxchg(&client->refcount, expected, expected - 1); 1555 } while (old != expected); 1556 1557 return true; 1558 } 1559 1560 static void client_close(NBDClient *client, bool negotiated) 1561 { 1562 assert(qemu_in_main_thread()); 1563 1564 if (client->closing) { 1565 return; 1566 } 1567 1568 client->closing = true; 1569 1570 /* Force requests to finish. They will drop their own references, 1571 * then we'll close the socket and free the NBDClient. 1572 */ 1573 qio_channel_shutdown(client->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, 1574 NULL); 1575 1576 /* Also tell the client, so that they release their reference. 
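 * (through the close_fn callback below, if one was registered).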
*/ 1577 if (client->close_fn) { 1578 client->close_fn(client, negotiated); 1579 } 1580 } 1581 1582 static NBDRequestData *nbd_request_get(NBDClient *client) 1583 { 1584 NBDRequestData *req; 1585 1586 assert(client->nb_requests <= MAX_NBD_REQUESTS - 1); 1587 client->nb_requests++; 1588 1589 req = g_new0(NBDRequestData, 1); 1590 req->client = client; 1591 return req; 1592 } 1593 1594 static void nbd_request_put(NBDRequestData *req) 1595 { 1596 NBDClient *client = req->client; 1597 1598 if (req->data) { 1599 qemu_vfree(req->data); 1600 } 1601 g_free(req); 1602 1603 client->nb_requests--; 1604 1605 if (client->quiescing && client->nb_requests == 0) { 1606 aio_wait_kick(); 1607 } 1608 1609 nbd_client_receive_next_request(client); 1610 } 1611 1612 static void blk_aio_attached(AioContext *ctx, void *opaque) 1613 { 1614 NBDExport *exp = opaque; 1615 NBDClient *client; 1616 1617 trace_nbd_blk_aio_attached(exp->name, ctx); 1618 1619 exp->common.ctx = ctx; 1620 1621 QTAILQ_FOREACH(client, &exp->clients, next) { 1622 assert(client->nb_requests == 0); 1623 assert(client->recv_coroutine == NULL); 1624 assert(client->send_coroutine == NULL); 1625 } 1626 } 1627 1628 static void blk_aio_detach(void *opaque) 1629 { 1630 NBDExport *exp = opaque; 1631 1632 trace_nbd_blk_aio_detach(exp->name, exp->common.ctx); 1633 1634 exp->common.ctx = NULL; 1635 } 1636 1637 static void nbd_drained_begin(void *opaque) 1638 { 1639 NBDExport *exp = opaque; 1640 NBDClient *client; 1641 1642 QTAILQ_FOREACH(client, &exp->clients, next) { 1643 client->quiescing = true; 1644 } 1645 } 1646 1647 static void nbd_drained_end(void *opaque) 1648 { 1649 NBDExport *exp = opaque; 1650 NBDClient *client; 1651 1652 QTAILQ_FOREACH(client, &exp->clients, next) { 1653 client->quiescing = false; 1654 nbd_client_receive_next_request(client); 1655 } 1656 } 1657 1658 static bool nbd_drained_poll(void *opaque) 1659 { 1660 NBDExport *exp = opaque; 1661 NBDClient *client; 1662 1663 QTAILQ_FOREACH(client, &exp->clients, next) { 1664 if (client->nb_requests != 0) { 1665 /* 1666 * If there's a coroutine waiting for a request on nbd_read_eof() 1667 * enter it here so we don't depend on the client to wake it up. 
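 * When woken this way, the coroutine notices client->quiescing and returns
 * -EAGAIN rather than resuming the read.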
1668 */ 1669 if (client->recv_coroutine != NULL && client->read_yielding) { 1670 qio_channel_wake_read(client->ioc); 1671 } 1672 1673 return true; 1674 } 1675 } 1676 1677 return false; 1678 } 1679 1680 static void nbd_eject_notifier(Notifier *n, void *data) 1681 { 1682 NBDExport *exp = container_of(n, NBDExport, eject_notifier); 1683 1684 blk_exp_request_shutdown(&exp->common); 1685 } 1686 1687 void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk) 1688 { 1689 NBDExport *nbd_exp = container_of(exp, NBDExport, common); 1690 assert(exp->drv == &blk_exp_nbd); 1691 assert(nbd_exp->eject_notifier_blk == NULL); 1692 1693 blk_ref(blk); 1694 nbd_exp->eject_notifier_blk = blk; 1695 nbd_exp->eject_notifier.notify = nbd_eject_notifier; 1696 blk_add_remove_bs_notifier(blk, &nbd_exp->eject_notifier); 1697 } 1698 1699 static const BlockDevOps nbd_block_ops = { 1700 .drained_begin = nbd_drained_begin, 1701 .drained_end = nbd_drained_end, 1702 .drained_poll = nbd_drained_poll, 1703 }; 1704 1705 static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args, 1706 Error **errp) 1707 { 1708 NBDExport *exp = container_of(blk_exp, NBDExport, common); 1709 BlockExportOptionsNbd *arg = &exp_args->u.nbd; 1710 const char *name = arg->name ?: exp_args->node_name; 1711 BlockBackend *blk = blk_exp->blk; 1712 int64_t size; 1713 uint64_t perm, shared_perm; 1714 bool readonly = !exp_args->writable; 1715 BlockDirtyBitmapOrStrList *bitmaps; 1716 size_t i; 1717 int ret; 1718 1719 GLOBAL_STATE_CODE(); 1720 assert(exp_args->type == BLOCK_EXPORT_TYPE_NBD); 1721 1722 if (!nbd_server_is_running()) { 1723 error_setg(errp, "NBD server not running"); 1724 return -EINVAL; 1725 } 1726 1727 if (strlen(name) > NBD_MAX_STRING_SIZE) { 1728 error_setg(errp, "export name '%s' too long", name); 1729 return -EINVAL; 1730 } 1731 1732 if (arg->description && strlen(arg->description) > NBD_MAX_STRING_SIZE) { 1733 error_setg(errp, "description '%s' too long", arg->description); 1734 return -EINVAL; 1735 } 1736 1737 if (nbd_export_find(name)) { 1738 error_setg(errp, "NBD server already has export named '%s'", name); 1739 return -EEXIST; 1740 } 1741 1742 size = blk_getlength(blk); 1743 if (size < 0) { 1744 error_setg_errno(errp, -size, 1745 "Failed to determine the NBD export's length"); 1746 return size; 1747 } 1748 1749 /* Don't allow resize while the NBD server is running, otherwise we don't 1750 * care what happens with the node. 
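 * (done by masking BLK_PERM_RESIZE out of the shared permissions below).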
*/ 1751 blk_get_perm(blk, &perm, &shared_perm); 1752 ret = blk_set_perm(blk, perm, shared_perm & ~BLK_PERM_RESIZE, errp); 1753 if (ret < 0) { 1754 return ret; 1755 } 1756 1757 QTAILQ_INIT(&exp->clients); 1758 exp->name = g_strdup(name); 1759 exp->description = g_strdup(arg->description); 1760 exp->nbdflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_FLUSH | 1761 NBD_FLAG_SEND_FUA | NBD_FLAG_SEND_CACHE); 1762 1763 if (nbd_server_max_connections() != 1) { 1764 exp->nbdflags |= NBD_FLAG_CAN_MULTI_CONN; 1765 } 1766 if (readonly) { 1767 exp->nbdflags |= NBD_FLAG_READ_ONLY; 1768 } else { 1769 exp->nbdflags |= (NBD_FLAG_SEND_TRIM | NBD_FLAG_SEND_WRITE_ZEROES | 1770 NBD_FLAG_SEND_FAST_ZERO); 1771 } 1772 exp->size = QEMU_ALIGN_DOWN(size, BDRV_SECTOR_SIZE); 1773 1774 bdrv_graph_rdlock_main_loop(); 1775 1776 for (bitmaps = arg->bitmaps; bitmaps; bitmaps = bitmaps->next) { 1777 exp->nr_export_bitmaps++; 1778 } 1779 exp->export_bitmaps = g_new0(BdrvDirtyBitmap *, exp->nr_export_bitmaps); 1780 for (i = 0, bitmaps = arg->bitmaps; bitmaps; 1781 i++, bitmaps = bitmaps->next) 1782 { 1783 const char *bitmap; 1784 BlockDriverState *bs = blk_bs(blk); 1785 BdrvDirtyBitmap *bm = NULL; 1786 1787 switch (bitmaps->value->type) { 1788 case QTYPE_QSTRING: 1789 bitmap = bitmaps->value->u.local; 1790 while (bs) { 1791 bm = bdrv_find_dirty_bitmap(bs, bitmap); 1792 if (bm != NULL) { 1793 break; 1794 } 1795 1796 bs = bdrv_filter_or_cow_bs(bs); 1797 } 1798 1799 if (bm == NULL) { 1800 ret = -ENOENT; 1801 error_setg(errp, "Bitmap '%s' is not found", 1802 bitmaps->value->u.local); 1803 goto fail; 1804 } 1805 1806 if (readonly && bdrv_is_writable(bs) && 1807 bdrv_dirty_bitmap_enabled(bm)) { 1808 ret = -EINVAL; 1809 error_setg(errp, "Enabled bitmap '%s' incompatible with " 1810 "readonly export", bitmap); 1811 goto fail; 1812 } 1813 break; 1814 case QTYPE_QDICT: 1815 bitmap = bitmaps->value->u.external.name; 1816 bm = block_dirty_bitmap_lookup(bitmaps->value->u.external.node, 1817 bitmap, NULL, errp); 1818 if (!bm) { 1819 ret = -ENOENT; 1820 goto fail; 1821 } 1822 break; 1823 default: 1824 abort(); 1825 } 1826 1827 assert(bm); 1828 1829 if (bdrv_dirty_bitmap_check(bm, BDRV_BITMAP_ALLOW_RO, errp)) { 1830 ret = -EINVAL; 1831 goto fail; 1832 } 1833 1834 exp->export_bitmaps[i] = bm; 1835 assert(strlen(bitmap) <= BDRV_BITMAP_MAX_NAME_SIZE); 1836 } 1837 1838 /* Mark bitmaps busy in a separate loop, to simplify roll-back concerns. */ 1839 for (i = 0; i < exp->nr_export_bitmaps; i++) { 1840 bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], true); 1841 } 1842 1843 exp->allocation_depth = arg->allocation_depth; 1844 1845 /* 1846 * We need to inhibit request queuing in the block layer to ensure we can 1847 * be properly quiesced when entering a drained section, as our coroutines 1848 * servicing pending requests might enter blk_pread(). 
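 * Hence the blk_set_disable_request_queuing() call right below.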
1849 */ 1850 blk_set_disable_request_queuing(blk, true); 1851 1852 blk_add_aio_context_notifier(blk, blk_aio_attached, blk_aio_detach, exp); 1853 1854 blk_set_dev_ops(blk, &nbd_block_ops, exp); 1855 1856 QTAILQ_INSERT_TAIL(&exports, exp, next); 1857 1858 bdrv_graph_rdunlock_main_loop(); 1859 1860 return 0; 1861 1862 fail: 1863 bdrv_graph_rdunlock_main_loop(); 1864 g_free(exp->export_bitmaps); 1865 g_free(exp->name); 1866 g_free(exp->description); 1867 return ret; 1868 } 1869 1870 NBDExport *nbd_export_find(const char *name) 1871 { 1872 NBDExport *exp; 1873 QTAILQ_FOREACH(exp, &exports, next) { 1874 if (strcmp(name, exp->name) == 0) { 1875 return exp; 1876 } 1877 } 1878 1879 return NULL; 1880 } 1881 1882 AioContext * 1883 nbd_export_aio_context(NBDExport *exp) 1884 { 1885 return exp->common.ctx; 1886 } 1887 1888 static void nbd_export_request_shutdown(BlockExport *blk_exp) 1889 { 1890 NBDExport *exp = container_of(blk_exp, NBDExport, common); 1891 NBDClient *client, *next; 1892 1893 blk_exp_ref(&exp->common); 1894 /* 1895 * TODO: Should we expand QMP NbdServerRemoveNode enum to allow a 1896 * close mode that stops advertising the export to new clients but 1897 * still permits existing clients to run to completion? Because of 1898 * that possibility, nbd_export_close() can be called more than 1899 * once on an export. 1900 */ 1901 QTAILQ_FOREACH_SAFE(client, &exp->clients, next, next) { 1902 client_close(client, true); 1903 } 1904 if (exp->name) { 1905 g_free(exp->name); 1906 exp->name = NULL; 1907 QTAILQ_REMOVE(&exports, exp, next); 1908 } 1909 blk_exp_unref(&exp->common); 1910 } 1911 1912 static void nbd_export_delete(BlockExport *blk_exp) 1913 { 1914 size_t i; 1915 NBDExport *exp = container_of(blk_exp, NBDExport, common); 1916 1917 assert(exp->name == NULL); 1918 assert(QTAILQ_EMPTY(&exp->clients)); 1919 1920 g_free(exp->description); 1921 exp->description = NULL; 1922 1923 if (exp->eject_notifier_blk) { 1924 notifier_remove(&exp->eject_notifier); 1925 blk_unref(exp->eject_notifier_blk); 1926 } 1927 blk_remove_aio_context_notifier(exp->common.blk, blk_aio_attached, 1928 blk_aio_detach, exp); 1929 blk_set_disable_request_queuing(exp->common.blk, false); 1930 1931 for (i = 0; i < exp->nr_export_bitmaps; i++) { 1932 bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], false); 1933 } 1934 } 1935 1936 const BlockExportDriver blk_exp_nbd = { 1937 .type = BLOCK_EXPORT_TYPE_NBD, 1938 .instance_size = sizeof(NBDExport), 1939 .create = nbd_export_create, 1940 .delete = nbd_export_delete, 1941 .request_shutdown = nbd_export_request_shutdown, 1942 }; 1943 1944 static int coroutine_fn nbd_co_send_iov(NBDClient *client, struct iovec *iov, 1945 unsigned niov, Error **errp) 1946 { 1947 int ret; 1948 1949 g_assert(qemu_in_coroutine()); 1950 qemu_co_mutex_lock(&client->send_lock); 1951 client->send_coroutine = qemu_coroutine_self(); 1952 1953 ret = qio_channel_writev_all(client->ioc, iov, niov, errp) < 0 ? 
-EIO : 0; 1954 1955 client->send_coroutine = NULL; 1956 qemu_co_mutex_unlock(&client->send_lock); 1957 1958 return ret; 1959 } 1960 1961 static inline void set_be_simple_reply(NBDSimpleReply *reply, uint64_t error, 1962 uint64_t cookie) 1963 { 1964 stl_be_p(&reply->magic, NBD_SIMPLE_REPLY_MAGIC); 1965 stl_be_p(&reply->error, error); 1966 stq_be_p(&reply->cookie, cookie); 1967 } 1968 1969 static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client, 1970 NBDRequest *request, 1971 uint32_t error, 1972 void *data, 1973 uint64_t len, 1974 Error **errp) 1975 { 1976 NBDSimpleReply reply; 1977 int nbd_err = system_errno_to_nbd_errno(error); 1978 struct iovec iov[] = { 1979 {.iov_base = &reply, .iov_len = sizeof(reply)}, 1980 {.iov_base = data, .iov_len = len} 1981 }; 1982 1983 assert(!len || !nbd_err); 1984 assert(len <= NBD_MAX_BUFFER_SIZE); 1985 assert(client->mode < NBD_MODE_STRUCTURED || 1986 (client->mode == NBD_MODE_STRUCTURED && 1987 request->type != NBD_CMD_READ)); 1988 trace_nbd_co_send_simple_reply(request->cookie, nbd_err, 1989 nbd_err_lookup(nbd_err), len); 1990 set_be_simple_reply(&reply, nbd_err, request->cookie); 1991 1992 return nbd_co_send_iov(client, iov, 2, errp); 1993 } 1994 1995 /* 1996 * Prepare the header of a reply chunk for network transmission. 1997 * 1998 * On input, @iov is partially initialized: iov[0].iov_base must point 1999 * to an uninitialized NBDReply, while the remaining @niov elements 2000 * (if any) must be ready for transmission. This function then 2001 * populates iov[0] for transmission. 2002 */ 2003 static inline void set_be_chunk(NBDClient *client, struct iovec *iov, 2004 size_t niov, uint16_t flags, uint16_t type, 2005 NBDRequest *request) 2006 { 2007 size_t i, length = 0; 2008 2009 for (i = 1; i < niov; i++) { 2010 length += iov[i].iov_len; 2011 } 2012 assert(length <= NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData)); 2013 2014 if (client->mode >= NBD_MODE_EXTENDED) { 2015 NBDExtendedReplyChunk *chunk = iov->iov_base; 2016 2017 iov[0].iov_len = sizeof(*chunk); 2018 stl_be_p(&chunk->magic, NBD_EXTENDED_REPLY_MAGIC); 2019 stw_be_p(&chunk->flags, flags); 2020 stw_be_p(&chunk->type, type); 2021 stq_be_p(&chunk->cookie, request->cookie); 2022 stq_be_p(&chunk->offset, request->from); 2023 stq_be_p(&chunk->length, length); 2024 } else { 2025 NBDStructuredReplyChunk *chunk = iov->iov_base; 2026 2027 iov[0].iov_len = sizeof(*chunk); 2028 stl_be_p(&chunk->magic, NBD_STRUCTURED_REPLY_MAGIC); 2029 stw_be_p(&chunk->flags, flags); 2030 stw_be_p(&chunk->type, type); 2031 stq_be_p(&chunk->cookie, request->cookie); 2032 stl_be_p(&chunk->length, length); 2033 } 2034 } 2035 2036 static int coroutine_fn nbd_co_send_chunk_done(NBDClient *client, 2037 NBDRequest *request, 2038 Error **errp) 2039 { 2040 NBDReply hdr; 2041 struct iovec iov[] = { 2042 {.iov_base = &hdr}, 2043 }; 2044 2045 trace_nbd_co_send_chunk_done(request->cookie); 2046 set_be_chunk(client, iov, 1, NBD_REPLY_FLAG_DONE, 2047 NBD_REPLY_TYPE_NONE, request); 2048 return nbd_co_send_iov(client, iov, 1, errp); 2049 } 2050 2051 static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client, 2052 NBDRequest *request, 2053 uint64_t offset, 2054 void *data, 2055 uint64_t size, 2056 bool final, 2057 Error **errp) 2058 { 2059 NBDReply hdr; 2060 NBDStructuredReadData chunk; 2061 struct iovec iov[] = { 2062 {.iov_base = &hdr}, 2063 {.iov_base = &chunk, .iov_len = sizeof(chunk)}, 2064 {.iov_base = data, .iov_len = size} 2065 }; 2066 2067 assert(size && size <= NBD_MAX_BUFFER_SIZE); 2068 
static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client,
                                               NBDRequest *request,
                                               uint64_t offset,
                                               void *data,
                                               uint64_t size,
                                               bool final,
                                               Error **errp)
{
    NBDReply hdr;
    NBDStructuredReadData chunk;
    struct iovec iov[] = {
        {.iov_base = &hdr},
        {.iov_base = &chunk, .iov_len = sizeof(chunk)},
        {.iov_base = data, .iov_len = size}
    };

    assert(size && size <= NBD_MAX_BUFFER_SIZE);
    trace_nbd_co_send_chunk_read(request->cookie, offset, data, size);
    set_be_chunk(client, iov, 3, final ? NBD_REPLY_FLAG_DONE : 0,
                 NBD_REPLY_TYPE_OFFSET_DATA, request);
    stq_be_p(&chunk.offset, offset);

    return nbd_co_send_iov(client, iov, 3, errp);
}

static int coroutine_fn nbd_co_send_chunk_error(NBDClient *client,
                                                NBDRequest *request,
                                                uint32_t error,
                                                const char *msg,
                                                Error **errp)
{
    NBDReply hdr;
    NBDStructuredError chunk;
    int nbd_err = system_errno_to_nbd_errno(error);
    struct iovec iov[] = {
        {.iov_base = &hdr},
        {.iov_base = &chunk, .iov_len = sizeof(chunk)},
        {.iov_base = (char *)msg, .iov_len = msg ? strlen(msg) : 0},
    };

    assert(nbd_err);
    trace_nbd_co_send_chunk_error(request->cookie, nbd_err,
                                  nbd_err_lookup(nbd_err), msg ? msg : "");
    set_be_chunk(client, iov, 3, NBD_REPLY_FLAG_DONE,
                 NBD_REPLY_TYPE_ERROR, request);
    stl_be_p(&chunk.error, nbd_err);
    stw_be_p(&chunk.message_length, iov[2].iov_len);

    return nbd_co_send_iov(client, iov, 3, errp);
}

/* Do a sparse read and send the structured reply to the client.
 * Returns -errno if sending fails. blk_co_block_status_above() failure is
 * reported to the client, at which point this function succeeds.
 */
static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
                                                NBDRequest *request,
                                                uint64_t offset,
                                                uint8_t *data,
                                                uint64_t size,
                                                Error **errp)
{
    int ret = 0;
    NBDExport *exp = client->exp;
    size_t progress = 0;

    assert(size <= NBD_MAX_BUFFER_SIZE);
    while (progress < size) {
        int64_t pnum;
        int status = blk_co_block_status_above(exp->common.blk, NULL,
                                               offset + progress,
                                               size - progress, &pnum, NULL,
                                               NULL);
        bool final;

        if (status < 0) {
            char *msg = g_strdup_printf("unable to check for holes: %s",
                                        strerror(-status));

            ret = nbd_co_send_chunk_error(client, request, -status, msg, errp);
            g_free(msg);
            return ret;
        }
        assert(pnum && pnum <= size - progress);
        final = progress + pnum == size;
        if (status & BDRV_BLOCK_ZERO) {
            NBDReply hdr;
            NBDStructuredReadHole chunk;
            struct iovec iov[] = {
                {.iov_base = &hdr},
                {.iov_base = &chunk, .iov_len = sizeof(chunk)},
            };

            trace_nbd_co_send_chunk_read_hole(request->cookie,
                                              offset + progress, pnum);
            set_be_chunk(client, iov, 2,
                         final ? NBD_REPLY_FLAG_DONE : 0,
                         NBD_REPLY_TYPE_OFFSET_HOLE, request);
            stq_be_p(&chunk.offset, offset + progress);
            stl_be_p(&chunk.length, pnum);
            ret = nbd_co_send_iov(client, iov, 2, errp);
        } else {
            ret = blk_co_pread(exp->common.blk, offset + progress, pnum,
                               data + progress, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "reading from file failed");
                break;
            }
            ret = nbd_co_send_chunk_read(client, request, offset + progress,
                                         data + progress, pnum, final, errp);
        }

        if (ret < 0) {
            break;
        }
        progress += pnum;
    }
    return ret;
}

typedef struct NBDExtentArray {
    NBDExtent64 *extents;
    unsigned int nb_alloc;
    unsigned int count;
    uint64_t total_length;
    bool extended;
    bool can_add;
    bool converted_to_be;
} NBDExtentArray;

static NBDExtentArray *nbd_extent_array_new(unsigned int nb_alloc,
                                            NBDMode mode)
{
    NBDExtentArray *ea = g_new0(NBDExtentArray, 1);

    assert(mode >= NBD_MODE_STRUCTURED);
    ea->nb_alloc = nb_alloc;
    ea->extents = g_new(NBDExtent64, nb_alloc);
    ea->extended = mode >= NBD_MODE_EXTENDED;
    ea->can_add = true;

    return ea;
}

static void nbd_extent_array_free(NBDExtentArray *ea)
{
    g_free(ea->extents);
    g_free(ea);
}
G_DEFINE_AUTOPTR_CLEANUP_FUNC(NBDExtentArray, nbd_extent_array_free)

/* Further modifications of the array after conversion are abandoned */
static void nbd_extent_array_convert_to_be(NBDExtentArray *ea)
{
    int i;

    assert(!ea->converted_to_be);
    assert(ea->extended);
    ea->can_add = false;
    ea->converted_to_be = true;

    for (i = 0; i < ea->count; i++) {
        ea->extents[i].length = cpu_to_be64(ea->extents[i].length);
        ea->extents[i].flags = cpu_to_be64(ea->extents[i].flags);
    }
}

/* Further modifications of the array after conversion are abandoned */
static NBDExtent32 *nbd_extent_array_convert_to_narrow(NBDExtentArray *ea)
{
    int i;
    NBDExtent32 *extents = g_new(NBDExtent32, ea->count);

    assert(!ea->converted_to_be);
    assert(!ea->extended);
    ea->can_add = false;
    ea->converted_to_be = true;

    for (i = 0; i < ea->count; i++) {
        assert((ea->extents[i].length | ea->extents[i].flags) <= UINT32_MAX);
        extents[i].length = cpu_to_be32(ea->extents[i].length);
        extents[i].flags = cpu_to_be32(ea->extents[i].flags);
    }

    return extents;
}

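/*
 * Worked example (illustrative only): starting from an empty array,
 * adding length/flags pairs (65536, 0), (65536, 0), (4096, NBD_STATE_ZERO)
 * leaves two entries, {131072, 0} and {4096, NBD_STATE_ZERO}, because
 * nbd_extent_array_add() below merges neighbouring extents whose flags
 * are identical.
 */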
/*
 * Add extent to NBDExtentArray. If extent can't be added (no available space),
 * return -1.
 * For safety, when returning -1 for the first time, .can_add is set to false,
 * and further calls to nbd_extent_array_add() will crash.
 * (this avoids the situation where a caller ignores failure to add one extent,
 * where adding another extent that would squash into the last array entry
 * would result in an incorrect range reported to the client)
 */
static int nbd_extent_array_add(NBDExtentArray *ea,
                                uint64_t length, uint32_t flags)
{
    assert(ea->can_add);

    if (!length) {
        return 0;
    }
    if (!ea->extended) {
        assert(length <= UINT32_MAX);
    }

    /* Extend previous extent if flags are the same */
    if (ea->count > 0 && flags == ea->extents[ea->count - 1].flags) {
        uint64_t sum = length + ea->extents[ea->count - 1].length;

        /*
         * sum cannot overflow: the block layer bounds image size at
         * 2^63, and ea->extents[].length comes from the block layer.
         */
        assert(sum >= length);
        if (sum <= UINT32_MAX || ea->extended) {
            ea->extents[ea->count - 1].length = sum;
            ea->total_length += length;
            return 0;
        }
    }

    if (ea->count >= ea->nb_alloc) {
        ea->can_add = false;
        return -1;
    }

    ea->total_length += length;
    ea->extents[ea->count] = (NBDExtent64) {.length = length, .flags = flags};
    ea->count++;

    return 0;
}

static int coroutine_fn blockstatus_to_extents(BlockBackend *blk,
                                               uint64_t offset, uint64_t bytes,
                                               NBDExtentArray *ea)
{
    while (bytes) {
        uint32_t flags;
        int64_t num;
        int ret = blk_co_block_status_above(blk, NULL, offset, bytes, &num,
                                            NULL, NULL);

        if (ret < 0) {
            return ret;
        }

        flags = (ret & BDRV_BLOCK_DATA ? 0 : NBD_STATE_HOLE) |
                (ret & BDRV_BLOCK_ZERO ? NBD_STATE_ZERO : 0);

        if (nbd_extent_array_add(ea, num, flags) < 0) {
            return 0;
        }

        offset += num;
        bytes -= num;
    }

    return 0;
}

static int coroutine_fn blockalloc_to_extents(BlockBackend *blk,
                                              uint64_t offset, uint64_t bytes,
                                              NBDExtentArray *ea)
{
    while (bytes) {
        int64_t num;
        int ret = blk_co_is_allocated_above(blk, NULL, false, offset, bytes,
                                            &num);

        if (ret < 0) {
            return ret;
        }

        if (nbd_extent_array_add(ea, num, ret) < 0) {
            return 0;
        }

        offset += num;
        bytes -= num;
    }

    return 0;
}

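/*
 * The helpers above accumulate 64-bit extents; when the reply is
 * emitted, clients negotiated at NBD_MODE_EXTENDED receive them as-is
 * (NBD_REPLY_TYPE_BLOCK_STATUS_EXT), while structured-reply clients
 * receive the narrowed 32-bit form (NBD_REPLY_TYPE_BLOCK_STATUS), as
 * selected in nbd_co_send_extents() below.
 */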
/*
 * nbd_co_send_extents
 *
 * @ea is converted to BE by the function
 * @last controls whether NBD_REPLY_FLAG_DONE is sent.
 */
static int coroutine_fn
nbd_co_send_extents(NBDClient *client, NBDRequest *request, NBDExtentArray *ea,
                    bool last, uint32_t context_id, Error **errp)
{
    NBDReply hdr;
    NBDStructuredMeta meta;
    NBDExtendedMeta meta_ext;
    g_autofree NBDExtent32 *extents = NULL;
    uint16_t type;
    struct iovec iov[] = { {.iov_base = &hdr}, {0}, {0} };

    if (client->mode >= NBD_MODE_EXTENDED) {
        type = NBD_REPLY_TYPE_BLOCK_STATUS_EXT;

        iov[1].iov_base = &meta_ext;
        iov[1].iov_len = sizeof(meta_ext);
        stl_be_p(&meta_ext.context_id, context_id);
        stl_be_p(&meta_ext.count, ea->count);

        nbd_extent_array_convert_to_be(ea);
        iov[2].iov_base = ea->extents;
        iov[2].iov_len = ea->count * sizeof(ea->extents[0]);
    } else {
        type = NBD_REPLY_TYPE_BLOCK_STATUS;

        iov[1].iov_base = &meta;
        iov[1].iov_len = sizeof(meta);
        stl_be_p(&meta.context_id, context_id);

        extents = nbd_extent_array_convert_to_narrow(ea);
        iov[2].iov_base = extents;
        iov[2].iov_len = ea->count * sizeof(extents[0]);
    }

    trace_nbd_co_send_extents(request->cookie, ea->count, context_id,
                              ea->total_length, last);
    set_be_chunk(client, iov, 3, last ? NBD_REPLY_FLAG_DONE : 0, type,
                 request);

    return nbd_co_send_iov(client, iov, 3, errp);
}

/* Get block status from the exported device and send it to the client */
static int
coroutine_fn nbd_co_send_block_status(NBDClient *client, NBDRequest *request,
                                      BlockBackend *blk, uint64_t offset,
                                      uint64_t length, bool dont_fragment,
                                      bool last, uint32_t context_id,
                                      Error **errp)
{
    int ret;
    unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS;
    g_autoptr(NBDExtentArray) ea =
        nbd_extent_array_new(nb_extents, client->mode);

    if (context_id == NBD_META_ID_BASE_ALLOCATION) {
        ret = blockstatus_to_extents(blk, offset, length, ea);
    } else {
        ret = blockalloc_to_extents(blk, offset, length, ea);
    }
    if (ret < 0) {
        return nbd_co_send_chunk_error(client, request, -ret,
                                       "can't get block status", errp);
    }

    return nbd_co_send_extents(client, request, ea, last, context_id, errp);
}

/* Populate @ea from a dirty bitmap. */
static void bitmap_to_extents(BdrvDirtyBitmap *bitmap,
                              uint64_t offset, uint64_t length,
                              NBDExtentArray *es)
{
    int64_t start, dirty_start, dirty_count;
    int64_t end = offset + length;
    bool full = false;
    int64_t bound = es->extended ? INT64_MAX : INT32_MAX;

    bdrv_dirty_bitmap_lock(bitmap);

    for (start = offset;
         bdrv_dirty_bitmap_next_dirty_area(bitmap, start, end, bound,
                                           &dirty_start, &dirty_count);
         start = dirty_start + dirty_count)
    {
        if ((nbd_extent_array_add(es, dirty_start - start, 0) < 0) ||
            (nbd_extent_array_add(es, dirty_count, NBD_STATE_DIRTY) < 0))
        {
            full = true;
            break;
        }
    }

    if (!full) {
        /* last non dirty extent, nothing to do if array is now full */
        (void) nbd_extent_array_add(es, end - start, 0);
    }

    bdrv_dirty_bitmap_unlock(bitmap);
}

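/*
 * Example of bitmap_to_extents() output (illustrative numbers): for a
 * 1 MiB request in which only the range [256 KiB, 320 KiB) is dirty,
 * the array receives {262144, 0}, {65536, NBD_STATE_DIRTY},
 * {720896, 0}: alternating clean and dirty extents that exactly cover
 * the requested region.
 */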
static int coroutine_fn nbd_co_send_bitmap(NBDClient *client,
                                           NBDRequest *request,
                                           BdrvDirtyBitmap *bitmap,
                                           uint64_t offset,
                                           uint64_t length, bool dont_fragment,
                                           bool last, uint32_t context_id,
                                           Error **errp)
{
    unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS;
    g_autoptr(NBDExtentArray) ea =
        nbd_extent_array_new(nb_extents, client->mode);

    bitmap_to_extents(bitmap, offset, length, ea);

    return nbd_co_send_extents(client, request, ea, last, context_id, errp);
}

/*
 * nbd_co_block_status_payload_read
 * Called when a client wants a subset of negotiated contexts via a
 * BLOCK_STATUS payload. Check the payload for valid length and
 * contents. On success, return 0 with request updated to effective
 * length. If request was invalid but all payload consumed, return 0
 * with request->len and request->contexts->count set to 0 (which will
 * trigger an appropriate NBD_EINVAL response later on). Return
 * negative errno if the payload was not fully consumed.
 */
static int
nbd_co_block_status_payload_read(NBDClient *client, NBDRequest *request,
                                 Error **errp)
{
    uint64_t payload_len = request->len;
    g_autofree char *buf = NULL;
    size_t count, i, nr_bitmaps;
    uint32_t id;

    if (payload_len > NBD_MAX_BUFFER_SIZE) {
        error_setg(errp, "len (%" PRIu64 ") is larger than max len (%u)",
                   request->len, NBD_MAX_BUFFER_SIZE);
        return -EINVAL;
    }

    assert(client->contexts.exp == client->exp);
    nr_bitmaps = client->exp->nr_export_bitmaps;
    request->contexts = g_new0(NBDMetaContexts, 1);
    request->contexts->exp = client->exp;

    if (payload_len % sizeof(uint32_t) ||
        payload_len < sizeof(NBDBlockStatusPayload) ||
        payload_len > (sizeof(NBDBlockStatusPayload) +
                       sizeof(id) * client->contexts.count)) {
        goto skip;
    }

    buf = g_malloc(payload_len);
    if (nbd_read(client->ioc, buf, payload_len,
                 "CMD_BLOCK_STATUS data", errp) < 0) {
        return -EIO;
    }
    trace_nbd_co_receive_request_payload_received(request->cookie,
                                                  payload_len);
    request->contexts->bitmaps = g_new0(bool, nr_bitmaps);
    count = (payload_len - sizeof(NBDBlockStatusPayload)) / sizeof(id);
    payload_len = 0;

    for (i = 0; i < count; i++) {
        id = ldl_be_p(buf + sizeof(NBDBlockStatusPayload) + sizeof(id) * i);
        if (id == NBD_META_ID_BASE_ALLOCATION) {
            if (!client->contexts.base_allocation ||
                request->contexts->base_allocation) {
                goto skip;
            }
            request->contexts->base_allocation = true;
        } else if (id == NBD_META_ID_ALLOCATION_DEPTH) {
            if (!client->contexts.allocation_depth ||
                request->contexts->allocation_depth) {
                goto skip;
            }
            request->contexts->allocation_depth = true;
        } else {
            unsigned idx = id - NBD_META_ID_DIRTY_BITMAP;

            if (idx >= nr_bitmaps || !client->contexts.bitmaps[idx] ||
                request->contexts->bitmaps[idx]) {
                goto skip;
            }
            request->contexts->bitmaps[idx] = true;
        }
    }

    request->len = ldq_be_p(buf);
    request->contexts->count = count;
    return 0;

 skip:
    trace_nbd_co_receive_block_status_payload_compliance(request->from,
                                                         request->len);
    request->len = request->contexts->count = 0;
    return nbd_drop(client->ioc, payload_len, errp);
}

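/*
 * For reference, the payload parsed by nbd_co_block_status_payload_read()
 * above is laid out as (all fields big-endian):
 *   8 bytes   effective length of the block status request
 *   4 bytes   requested metadata context id, repeated per context
 * so a client asking for base:allocation plus one dirty bitmap sends a
 * 16-byte payload.
 */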
/* nbd_co_receive_request
 * Collect a client request. Return 0 if request looks valid, -EIO to drop
 * connection right away, -EAGAIN to indicate we were interrupted and the
 * channel should be quiesced, and any other negative value to report an error
 * to the client (although the caller may still need to disconnect after
 * reporting the error).
 */
static int coroutine_fn nbd_co_receive_request(NBDRequestData *req,
                                               NBDRequest *request,
                                               Error **errp)
{
    NBDClient *client = req->client;
    bool extended_with_payload;
    bool check_length = false;
    bool check_rofs = false;
    bool allocate_buffer = false;
    bool payload_okay = false;
    uint64_t payload_len = 0;
    int valid_flags = NBD_CMD_FLAG_FUA;
    int ret;

    g_assert(qemu_in_coroutine());
    assert(client->recv_coroutine == qemu_coroutine_self());
    ret = nbd_receive_request(client, request, errp);
    if (ret < 0) {
        return ret;
    }

    trace_nbd_co_receive_request_decode_type(request->cookie, request->type,
                                             nbd_cmd_lookup(request->type));
    extended_with_payload = client->mode >= NBD_MODE_EXTENDED &&
        request->flags & NBD_CMD_FLAG_PAYLOAD_LEN;
    if (extended_with_payload) {
        payload_len = request->len;
        check_length = true;
    }

    switch (request->type) {
    case NBD_CMD_DISC:
        /* Special case: we're going to disconnect without a reply,
         * whether or not flags, from, or len are bogus */
        req->complete = true;
        return -EIO;

    case NBD_CMD_READ:
        if (client->mode >= NBD_MODE_STRUCTURED) {
            valid_flags |= NBD_CMD_FLAG_DF;
        }
        check_length = true;
        allocate_buffer = true;
        break;

    case NBD_CMD_WRITE:
        if (client->mode >= NBD_MODE_EXTENDED) {
            if (!extended_with_payload) {
                /* The client is noncompliant. Trace it, but proceed. */
                trace_nbd_co_receive_ext_payload_compliance(request->from,
                                                            request->len);
            }
            valid_flags |= NBD_CMD_FLAG_PAYLOAD_LEN;
        }
        payload_okay = true;
        payload_len = request->len;
        check_length = true;
        allocate_buffer = true;
        check_rofs = true;
        break;

    case NBD_CMD_FLUSH:
        break;

    case NBD_CMD_TRIM:
        check_rofs = true;
        break;

    case NBD_CMD_CACHE:
        check_length = true;
        break;

    case NBD_CMD_WRITE_ZEROES:
        valid_flags |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO;
        check_rofs = true;
        break;

    case NBD_CMD_BLOCK_STATUS:
        if (extended_with_payload) {
            ret = nbd_co_block_status_payload_read(client, request, errp);
            if (ret < 0) {
                return ret;
            }
            /* payload now consumed */
            check_length = false;
            payload_len = 0;
            valid_flags |= NBD_CMD_FLAG_PAYLOAD_LEN;
        } else {
            request->contexts = &client->contexts;
        }
        valid_flags |= NBD_CMD_FLAG_REQ_ONE;
        break;

    default:
        /* Unrecognized, will fail later */
        ;
    }

    /* Payload and buffer handling. */
    if (!payload_len) {
        req->complete = true;
    }
    if (check_length && request->len > NBD_MAX_BUFFER_SIZE) {
        /* READ, WRITE, CACHE */
        error_setg(errp, "len (%" PRIu64 ") is larger than max len (%u)",
                   request->len, NBD_MAX_BUFFER_SIZE);
        return -EINVAL;
    }
    if (payload_len && !payload_okay) {
        /*
         * For now, we don't support payloads on other commands; but
         * we can keep the connection alive by ignoring the payload.
         * We will fail the command later with NBD_EINVAL for the use
         * of an unsupported flag (and not for access beyond bounds).
         */
        assert(request->type != NBD_CMD_WRITE);
        request->len = 0;
    }
    if (allocate_buffer) {
        /* READ, WRITE */
        req->data = blk_try_blockalign(client->exp->common.blk,
                                       request->len);
        if (req->data == NULL) {
            error_setg(errp, "No memory");
            return -ENOMEM;
        }
    }
    if (payload_len) {
        if (payload_okay) {
            /* WRITE */
            assert(req->data);
            ret = nbd_read(client->ioc, req->data, payload_len,
                           "CMD_WRITE data", errp);
        } else {
            ret = nbd_drop(client->ioc, payload_len, errp);
        }
        if (ret < 0) {
            return -EIO;
        }
        req->complete = true;
        trace_nbd_co_receive_request_payload_received(request->cookie,
                                                      payload_len);
    }

    /* Sanity checks. */
    if (client->exp->nbdflags & NBD_FLAG_READ_ONLY && check_rofs) {
        /* WRITE, TRIM, WRITE_ZEROES */
        error_setg(errp, "Export is read-only");
        return -EROFS;
    }
    if (request->from > client->exp->size ||
        request->len > client->exp->size - request->from) {
        error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu64
                   ", Size: %" PRIu64, request->from, request->len,
                   client->exp->size);
        return (request->type == NBD_CMD_WRITE ||
                request->type == NBD_CMD_WRITE_ZEROES) ? -ENOSPC : -EINVAL;
    }
    if (client->check_align && !QEMU_IS_ALIGNED(request->from | request->len,
                                                client->check_align)) {
        /*
         * The block layer gracefully handles unaligned requests, but
         * it's still worth tracing client non-compliance
         */
        trace_nbd_co_receive_align_compliance(nbd_cmd_lookup(request->type),
                                              request->from,
                                              request->len,
                                              client->check_align);
    }
    if (request->flags & ~valid_flags) {
        error_setg(errp, "unsupported flags for command %s (got 0x%x)",
                   nbd_cmd_lookup(request->type), request->flags);
        return -EINVAL;
    }

    return 0;
}

/* Send simple reply without a payload, or a structured error
 * @error_msg is ignored if @ret >= 0
 * Returns 0 if connection is still live, -errno on failure to talk to client
 */
static coroutine_fn int nbd_send_generic_reply(NBDClient *client,
                                               NBDRequest *request,
                                               int ret,
                                               const char *error_msg,
                                               Error **errp)
{
    if (client->mode >= NBD_MODE_STRUCTURED && ret < 0) {
        return nbd_co_send_chunk_error(client, request, -ret, error_msg, errp);
    } else if (client->mode >= NBD_MODE_EXTENDED) {
        return nbd_co_send_chunk_done(client, request, errp);
    } else {
        return nbd_co_send_simple_reply(client, request, ret < 0 ? -ret : 0,
                                        NULL, 0, errp);
    }
}

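/*
 * Summary of the dispatch in nbd_send_generic_reply() above: an error on
 * a structured or extended connection becomes an NBD_REPLY_TYPE_ERROR
 * chunk, success on an extended connection becomes a bare
 * NBD_REPLY_TYPE_NONE chunk, and every other combination falls back to
 * the old-style simple reply.
 */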
/* Handle NBD_CMD_READ request.
 * Return -errno if sending fails. Other errors are reported directly to the
 * client as an error reply.
 */
static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
                                        uint8_t *data, Error **errp)
{
    int ret;
    NBDExport *exp = client->exp;

    assert(request->type == NBD_CMD_READ);
    assert(request->len <= NBD_MAX_BUFFER_SIZE);

    /* XXX: NBD Protocol only documents use of FUA with WRITE */
    if (request->flags & NBD_CMD_FLAG_FUA) {
        ret = blk_co_flush(exp->common.blk);
        if (ret < 0) {
            return nbd_send_generic_reply(client, request, ret,
                                          "flush failed", errp);
        }
    }

    if (client->mode >= NBD_MODE_STRUCTURED &&
        !(request->flags & NBD_CMD_FLAG_DF) && request->len)
    {
        return nbd_co_send_sparse_read(client, request, request->from,
                                       data, request->len, errp);
    }

    ret = blk_co_pread(exp->common.blk, request->from, request->len, data, 0);
    if (ret < 0) {
        return nbd_send_generic_reply(client, request, ret,
                                      "reading from file failed", errp);
    }

    if (client->mode >= NBD_MODE_STRUCTURED) {
        if (request->len) {
            return nbd_co_send_chunk_read(client, request, request->from, data,
                                          request->len, true, errp);
        } else {
            return nbd_co_send_chunk_done(client, request, errp);
        }
    } else {
        return nbd_co_send_simple_reply(client, request, 0,
                                        data, request->len, errp);
    }
}

/*
 * nbd_do_cmd_cache
 *
 * Handle NBD_CMD_CACHE request.
 * Return -errno if sending fails. Other errors are reported directly to the
 * client as an error reply.
 */
static coroutine_fn int nbd_do_cmd_cache(NBDClient *client, NBDRequest *request,
                                         Error **errp)
{
    int ret;
    NBDExport *exp = client->exp;

    assert(request->type == NBD_CMD_CACHE);
    assert(request->len <= NBD_MAX_BUFFER_SIZE);

    ret = blk_co_preadv(exp->common.blk, request->from, request->len,
                        NULL, BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);

    return nbd_send_generic_reply(client, request, ret,
                                  "caching data failed", errp);
}

/* Handle NBD request.
 * Return -errno if sending fails. Other errors are reported directly to the
 * client as an error reply.
 */
static coroutine_fn int nbd_handle_request(NBDClient *client,
                                           NBDRequest *request,
                                           uint8_t *data, Error **errp)
{
    int ret;
    int flags;
    NBDExport *exp = client->exp;
    char *msg;
    size_t i;

    switch (request->type) {
    case NBD_CMD_CACHE:
        return nbd_do_cmd_cache(client, request, errp);

    case NBD_CMD_READ:
        return nbd_do_cmd_read(client, request, data, errp);

    case NBD_CMD_WRITE:
        flags = 0;
        if (request->flags & NBD_CMD_FLAG_FUA) {
            flags |= BDRV_REQ_FUA;
        }
        assert(request->len <= NBD_MAX_BUFFER_SIZE);
        ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data,
                            flags);
        return nbd_send_generic_reply(client, request, ret,
                                      "writing to file failed", errp);

    case NBD_CMD_WRITE_ZEROES:
        flags = 0;
        if (request->flags & NBD_CMD_FLAG_FUA) {
            flags |= BDRV_REQ_FUA;
        }
        if (!(request->flags & NBD_CMD_FLAG_NO_HOLE)) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
        if (request->flags & NBD_CMD_FLAG_FAST_ZERO) {
            flags |= BDRV_REQ_NO_FALLBACK;
        }
        ret = blk_co_pwrite_zeroes(exp->common.blk, request->from, request->len,
                                   flags);
        return nbd_send_generic_reply(client, request, ret,
                                      "writing to file failed", errp);

    case NBD_CMD_DISC:
        /* unreachable, thanks to special case in nbd_co_receive_request() */
        abort();

    case NBD_CMD_FLUSH:
        ret = blk_co_flush(exp->common.blk);
        return nbd_send_generic_reply(client, request, ret,
                                      "flush failed", errp);

    case NBD_CMD_TRIM:
        ret = blk_co_pdiscard(exp->common.blk, request->from, request->len);
        if (ret >= 0 && request->flags & NBD_CMD_FLAG_FUA) {
            ret = blk_co_flush(exp->common.blk);
        }
        return nbd_send_generic_reply(client, request, ret,
                                      "discard failed", errp);

    case NBD_CMD_BLOCK_STATUS:
        assert(request->contexts);
        assert(client->mode >= NBD_MODE_EXTENDED ||
               request->len <= UINT32_MAX);
        if (request->contexts->count) {
            bool dont_fragment = request->flags & NBD_CMD_FLAG_REQ_ONE;
            int contexts_remaining = request->contexts->count;

            if (!request->len) {
                return nbd_send_generic_reply(client, request, -EINVAL,
                                              "need non-zero length", errp);
            }
            if (request->contexts->base_allocation) {
                ret = nbd_co_send_block_status(client, request,
                                               exp->common.blk,
                                               request->from,
                                               request->len, dont_fragment,
                                               !--contexts_remaining,
                                               NBD_META_ID_BASE_ALLOCATION,
                                               errp);
                if (ret < 0) {
                    return ret;
                }
            }

            if (request->contexts->allocation_depth) {
                ret = nbd_co_send_block_status(client, request,
                                               exp->common.blk,
                                               request->from, request->len,
                                               dont_fragment,
                                               !--contexts_remaining,
                                               NBD_META_ID_ALLOCATION_DEPTH,
                                               errp);
                if (ret < 0) {
                    return ret;
                }
            }

            assert(request->contexts->exp == client->exp);
            for (i = 0; i < client->exp->nr_export_bitmaps; i++) {
                if (!request->contexts->bitmaps[i]) {
                    continue;
                }
                ret = nbd_co_send_bitmap(client, request,
                                         client->exp->export_bitmaps[i],
                                         request->from, request->len,
                                         dont_fragment, !--contexts_remaining,
                                         NBD_META_ID_DIRTY_BITMAP + i, errp);
                if (ret < 0) {
                    return ret;
                }
            }

            assert(!contexts_remaining);

            return 0;
        } else if (client->contexts.count) {
            return nbd_send_generic_reply(client, request, -EINVAL,
                                          "CMD_BLOCK_STATUS payload not valid",
                                          errp);
        } else {
            return nbd_send_generic_reply(client, request, -EINVAL,
                                          "CMD_BLOCK_STATUS not negotiated",
                                          errp);
        }

    default:
        msg = g_strdup_printf("invalid request type (%" PRIu32 ") received",
                              request->type);
        ret = nbd_send_generic_reply(client, request, -EINVAL, msg,
                                     errp);
        g_free(msg);
        return ret;
    }
}

/* Owns a reference to the NBDClient passed as opaque. */
static coroutine_fn void nbd_trip(void *opaque)
{
    NBDClient *client = opaque;
    NBDRequestData *req = NULL;
    NBDRequest request = { 0 };    /* GCC thinks it can be used uninitialized */
    int ret;
    Error *local_err = NULL;

    /*
     * Note that nbd_client_put() and client_close() must be called from the
     * main loop thread. Use aio_co_reschedule_self() to switch AioContext
     * before calling these functions.
     */

    trace_nbd_trip();
    if (client->closing) {
        goto done;
    }

    if (client->quiescing) {
        /*
         * We're switching between AIO contexts. Don't attempt to receive a new
         * request and kick the main context which may be waiting for us.
         */
        client->recv_coroutine = NULL;
        aio_wait_kick();
        goto done;
    }

    req = nbd_request_get(client);
    ret = nbd_co_receive_request(req, &request, &local_err);
    client->recv_coroutine = NULL;

    if (client->closing) {
        /*
         * The client may be closed when we are blocked in
         * nbd_co_receive_request()
         */
        goto done;
    }

    if (ret == -EAGAIN) {
        assert(client->quiescing);
        goto done;
    }

    nbd_client_receive_next_request(client);
    if (ret == -EIO) {
        goto disconnect;
    }

    qio_channel_set_cork(client->ioc, true);

    if (ret < 0) {
        /* It wasn't -EIO, so, according to nbd_co_receive_request()
         * semantics, we should return the error to the client. */
        Error *export_err = local_err;

        local_err = NULL;
        ret = nbd_send_generic_reply(client, &request, -EINVAL,
                                     error_get_pretty(export_err), &local_err);
        error_free(export_err);
    } else {
        ret = nbd_handle_request(client, &request, req->data, &local_err);
    }
    if (request.contexts && request.contexts != &client->contexts) {
        assert(request.type == NBD_CMD_BLOCK_STATUS);
        g_free(request.contexts->bitmaps);
        g_free(request.contexts);
    }
    if (ret < 0) {
        error_prepend(&local_err, "Failed to send reply: ");
        goto disconnect;
    }

    /*
     * We must disconnect after NBD_CMD_WRITE or BLOCK_STATUS with
     * payload if we did not read the payload.
     */
    if (!req->complete) {
        error_setg(&local_err, "Request handling failed in intermediate state");
        goto disconnect;
    }

    qio_channel_set_cork(client->ioc, false);
done:
    if (req) {
        nbd_request_put(req);
    }
    if (!nbd_client_put_nonzero(client)) {
        aio_co_reschedule_self(qemu_get_aio_context());
        nbd_client_put(client);
    }
    return;

disconnect:
    if (local_err) {
        error_reportf_err(local_err, "Disconnect client, due to: ");
    }
    nbd_request_put(req);

    aio_co_reschedule_self(qemu_get_aio_context());
    client_close(client, true);
    nbd_client_put(client);
}

static void nbd_client_receive_next_request(NBDClient *client)
{
    if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS &&
        !client->quiescing) {
        nbd_client_get(client);
        client->recv_coroutine = qemu_coroutine_create(nbd_trip, client);
        aio_co_schedule(client->exp->common.ctx, client->recv_coroutine);
    }
}

static coroutine_fn void nbd_co_client_start(void *opaque)
{
    NBDClient *client = opaque;
    Error *local_err = NULL;

    qemu_co_mutex_init(&client->send_lock);

    if (nbd_negotiate(client, &local_err)) {
        if (local_err) {
            error_report_err(local_err);
        }
        client_close(client, false);
        return;
    }

    nbd_client_receive_next_request(client);
}

/*
 * Create a new client listener using the given channel @sioc.
 * Begin servicing it in a coroutine. When the connection closes, call
 * @close_fn with an indication of whether the client completed negotiation.
 */
void nbd_client_new(QIOChannelSocket *sioc,
                    QCryptoTLSCreds *tlscreds,
                    const char *tlsauthz,
                    void (*close_fn)(NBDClient *, bool))
{
    NBDClient *client;
    Coroutine *co;

    client = g_new0(NBDClient, 1);
    client->refcount = 1;
    client->tlscreds = tlscreds;
    if (tlscreds) {
        object_ref(OBJECT(client->tlscreds));
    }
    client->tlsauthz = g_strdup(tlsauthz);
    client->sioc = sioc;
    qio_channel_set_delay(QIO_CHANNEL(sioc), false);
    object_ref(OBJECT(client->sioc));
    client->ioc = QIO_CHANNEL(sioc);
    object_ref(OBJECT(client->ioc));
    client->close_fn = close_fn;

    co = qemu_coroutine_create(nbd_co_client_start, client);
    qemu_coroutine_enter(co);
}