/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    NotifierWithReturn migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};
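/*
 * Illustrative sketch (not part of the build): the vhost core walks a
 * feature-bit table like the one above until it hits
 * VHOST_INVALID_FEATURE_BIT, masking out every bit the backend does not
 * support, roughly:
 *
 *   uint64_t acked = 0;
 *   for (const int *bit = vdpa_feature_bits;
 *        *bit != VHOST_INVALID_FEATURE_BIT; bit++) {
 *       if (device_features & (1ULL << *bit)) {
 *           acked |= 1ULL << *bit;
 *       }
 *   }
 *
 * See vhost_get_features() for the actual implementation.
 */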
/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}
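/*
 * Worked example (assuming QEMU's usual definitions, MAC_TABLE_ENTRIES = 64
 * and ETH_ALEN = 6): the command length above is
 *
 *   sizeof(ctrl_hdr) + 2 * sizeof(ctrl_mac) + 64 * 6 = 2 + 8 + 384 = 394
 *
 * bytes, so on a typical 4 KiB-page host both shadow buffers round up to a
 * single page each.
 */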
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        /* device_id is not valid if the ioctl failed */
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}
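/*
 * Release the per-client vhost_net and the CVQ shadow buffers. The device fd
 * and the VhostVDPAShared state are owned by the queue pair 0 client, so
 * only that client (index == 0) closes and frees them below.
 */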
static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_free(s->vhost_vdpa.shared);
}

/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;

    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active and configure SVQ accordingly */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}
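/*
 * Migration notifier: when a precopy migration is being set up, switch the
 * data virtqueues to SVQ so VHOST_F_LOG_ALL-style dirty tracking can be
 * emulated; if the migration fails, switch back to passthrough. For example,
 * a successful setup runs MIG_EVENT_PRECOPY_SETUP ->
 * vhost_vdpa_net_log_global_enable(s, true), which restarts vhost-net with
 * shadow virtqueues enabled.
 */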
static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
    return 0;
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
                        vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_data_start,
        .load = vhost_vdpa_net_data_load,
        .stop = vhost_vdpa_net_client_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
        .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}
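/*
 * A vdpa device groups its virtqueues, and each group can be attached to a
 * different address space (ASID). For example, a device that keeps its data
 * vqs in group 0 and the control vq in group 1 lets QEMU map guest memory in
 * ASID 0 while the CVQ shadow buffers live alone in VHOST_VDPA_NET_CVQ_ASID,
 * by issuing VHOST_VDPA_SET_GROUP_ASID with
 * { .index = 1, .num = VHOST_VDPA_NET_CVQ_ASID } as done below.
 */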
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->shared->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    /*
     * If another vhost_vdpa device already has an iova_tree, reuse it for
     * simplicity, whether CVQ shares ASID with guest or not, because:
     * - The memory listener needs access to guest's memory addresses
     *   allocated in the IOVA tree.
     * - There should be plenty of IOVA address space for both ASIDs not to
     *   worry about collisions between them. Guest's translations are still
     *   validated with virtio virtqueue_pop so there is no risk for the guest
     *   to access memory that it shouldn't.
     *
     * Allocating an iova tree per ASID is doable but it complicates the code
     * and it is not worth it for the moment.
     */
    if (!v->shared->iova_tree) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}
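/*
 * The helpers below implement the usual CVQ round trip in this file
 * (illustrative):
 *
 *   r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);   // expose the buffers
 *   ...
 *   dev_written = vhost_vdpa_net_svq_poll(s, 1);      // wait for the ack
 *   if (*s->status != VIRTIO_NET_OK) { ... }          // check device status
 */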
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                      const struct iovec *out_sg,
                                      size_t out_num,
                                      const struct iovec *in_sg,
                                      size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * Caller should hold the BQL when invoking this function, and should take
 * the answer before releasing the BQL, since SVQ may poll by itself
 * afterwards.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * Caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);

    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}
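/*
 * Example: after batching three commands with vhost_vdpa_net_load_cmd(),
 * in_cursor.iov_base points at &s->status[3], so the callers below compute
 * len = in_cursor.iov_base - (void *)s->status = 3, and the flush above
 * polls for three used descriptors and checks s->status[0..2].
 */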
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or the control commands shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}
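/*
 * Shadow buffer layout while batching (illustrative). Each queued command
 * appends a header plus its payload to the out page and reserves one ack
 * byte in the status page:
 *
 *   cvq_cmd_out_buffer: | hdr0 | data0 | hdr1 | data1 | ...  <- out_cursor
 *   status:             | ack0 | ack1 | ...                  <- in_cursor
 *
 * The cursors advance after each vhost_vdpa_net_load_cmd() and are rewound
 * by vhost_vdpa_net_load_cursor_reset() after every flush.
 */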
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
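/*
 * On the wire, the MAC_TABLE_SET payload built above looks like this (per
 * the virtio spec): a unicast section followed by a multicast one, each a
 * little-endian entry count plus that many 6-byte MACs:
 *
 *   | le32 uni.entries | uni.entries * 6 bytes | le32 mul.entries | ... |
 */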
static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to VirtIO standard, "Field reserved MUST contain zeroes.
         * It is defined to make the structure to match the layout of
         * virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which corresponds to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * Considering that virtio_net_handle_rss() currently does not restore
     * the hash key length parsed from the CVQ command sent from the guest
     * into n->rss_data and uses the maximum key length in other code, we
     * also employ the maximum key length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        }
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
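/*
 * Each rx-mode command below is tiny: a two-byte header plus a single
 * on/off byte. For example, enabling all-multicast mode is, on the wire:
 *
 *   | class = VIRTIO_NET_CTRL_RX | cmd = VIRTIO_NET_CTRL_RX_ALLMULTI | 1 |
 */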
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which turns promiscuous mode off, different from the device's
     * defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which turns all-multicast mode on, different from the device's
     * defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}
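/*
 * n->vlans is a bitmap of MAX_VLAN bits stored as 32-bit words: VLAN id
 * `vid` lives at bit (vid & 0x1f) of word (vid >> 5). The loader below walks
 * that bitmap and replays one VIRTIO_NET_CTRL_VLAN_ADD per set bit; e.g.
 * VLAN 100 is bit 4 of word 3, replayed as vid = (3 << 5) + 4.
 */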
static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}
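/*
 * Replay the virtio-net device state through CVQ when the device is started.
 * Ordering matters: the control virtqueue is made ready first so the state
 * (MAC, MQ, offloads, rx mode, VLANs) can be loaded, all pending commands
 * are flushed, and only then are the data virtqueues enabled.
 */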
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all the device's pending used buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base -
                                     (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};
/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode.
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device used buffer
     * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
     */
    return sizeof(*s->status);
}
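/*
 * Worked example of the fake command size above (assuming
 * MAC_TABLE_ENTRIES = 64 and ETH_ALEN = 6): fake_cvq_size =
 * 2 + 4 + 65 * 6 + 4 + 65 * 6 = 790 bytes, comfortably below the
 * page-sized shadow buffer, so the assert never fires in practice.
 */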
/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec model_in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    /* in buffer used for the vdpa device */
    const struct iovec vdpa_in = {
        .iov_base = s->status,
        .iov_len = sizeof(*s->status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa
         * device, which is determined by vhost_vdpa_net_cvq_cmd_page_len(),
         * excessive MAC addresses set by the driver for the filter table can
         * cause truncation of the CVQ command in QEMU. As a result, the vdpa
         * device rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out,
                                                                  &vdpa_in);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        ssize_t r;
        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
        if (unlikely(r < 0)) {
            dev_written = r;
            goto out;
        }

        /*
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        dev_written = vhost_vdpa_net_svq_poll(s, 1);
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};
/**
 * Probe if CVQ is isolated
 *
 * @device_fd: The vdpa device fd
 * @features: Features offered by the device.
 * @cvq_index: The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}
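/*
 * Create one vdpa net client. Data-path clients use net_vhost_vdpa_info;
 * the control virtqueue client additionally probes CVQ isolation (see
 * vhost_vdpa_probe_cvq_isolation() above) and allocates the page-sized
 * shadow buffers for the CVQ commands and the one-byte ack. The queue pair 0
 * client allocates the VhostVDPAShared state that every other client reuses.
 */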
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           VhostVDPAShared *shared,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
        s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
        s->vhost_vdpa.shared->iova_range = iova_range;
        s->vhost_vdpa.shared->shadow_data = svq;
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    if (queue_pair_index != 0) {
        s->vhost_vdpa.shared = shared;
    }

    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }

    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);

    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}
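/*
 * The helper below reads max_virtqueue_pairs out of the device's virtio-net
 * config space through VHOST_VDPA_GET_CONFIG. The request is a
 * struct vhost_vdpa_config header followed by a payload buffer, roughly:
 *
 *   { .off = offsetof(struct virtio_net_config, max_virtqueue_pairs),
 *     .len = sizeof(__virtio16) }
 *
 * and the little-endian result lands in config->buf.
 */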
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            /* return the negative error so the caller takes the error path */
            return ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}
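/*
 * Entry point for -netdev vhost-vdpa. A typical invocation (illustrative;
 * the device path depends on the host) pairs it with a virtio-net frontend:
 *
 *   -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0
 *   -device virtio-net-pci,netdev=vdpa0
 *
 * One net client is created per data queue pair, plus one control-queue
 * client when the device offers VIRTIO_NET_F_CTRL_VQ.
 */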
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        VhostVDPAShared *shared = NULL;

        if (i) {
            shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
        }
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, shared, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
        VhostVDPAShared *shared = s0->vhost_vdpa.shared;

        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, shared,
                                 errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}