/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
#include "trace.h"

/* TODO: we need to add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    NotifierWithReturn migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it always fits here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}
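/*
 * For reference (assuming the usual values of MAC_TABLE_ENTRIES = 64 and
 * ETH_ALEN = 6), the longest command works out to:
 *     2 (ctrl hdr) + 2 * 4 (two entry counters) + 64 * 6 (MACs) = 394 bytes,
 * which vhost_vdpa_net_cvq_cmd_page_len() then rounds up to one host page.
 */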
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}
static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_free(s->vhost_vdpa.shared);
}

/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the i-th queue pair */
static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc_i = qemu_get_peer(nic->ncs, i);

    return DO_UPCAST(VhostVDPAState, nc, nc_i);
}

static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    return vhost_vdpa_net_get_nc_vdpa(s, 0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to configure or not SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
    return 0;
}
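/*
 * Sketch of the flow driven by the notifier above during a normal precopy
 * migration:
 *
 *   MIG_EVENT_PRECOPY_SETUP  -> vhost_vdpa_net_log_global_enable(s, true)
 *       vhost_net_stop()     suspend, get_base and reset the device
 *       vhost_net_start()    restart with shadow_vqs_enabled = true, so
 *                            SVQ can expose VHOST_F_LOG_ALL for dirty
 *                            page tracking
 *   MIG_EVENT_PRECOPY_FAILED -> vhost_vdpa_net_log_global_enable(s, false)
 *       the same stop/start cycle, back to passthrough virtqueues
 */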
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
                        vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_data_start,
        .load = vhost_vdpa_net_data_load,
        .stop = vhost_vdpa_net_client_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
        .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num);

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}
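/*
 * Note on address space ids: data virtqueues are left in
 * VHOST_VDPA_GUEST_PA_ASID, which carries the guest memory mappings, while
 * an isolated CVQ is switched to VHOST_VDPA_NET_CVQ_ASID so that only the
 * two shadow control buffers are reachable from it (see
 * vhost_vdpa_net_cvq_start() below).
 */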
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->shared->iova_tree, map);
    return r;
}
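/*
 * The two helpers above are strictly paired: vhost_vdpa_cvq_map_buf() first
 * allocates an IOVA for the buffer with vhost_iova_tree_map_alloc() and then
 * pins it with vhost_vdpa_dma_map(); vhost_vdpa_cvq_unmap_buf() undoes both
 * steps, locating the mapping again by its QEMU virtual address
 * (translated_addr).
 */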
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we early return in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    /*
     * If another vhost_vdpa already has an iova_tree, reuse it for
     * simplicity, whether CVQ shares ASID with the guest or not, because:
     * - The memory listener needs access to the guest's memory addresses
     *   allocated in the IOVA tree.
     * - There should be plenty of IOVA address space for both ASIDs not to
     *   worry about collisions between them. The guest's translations are
     *   still validated with virtio virtqueue_pop so there is no risk for
     *   the guest to access memory that it shouldn't.
     *
     * Allocating an IOVA tree per ASID is doable but it complicates the code
     * and it is not worth it for the moment.
     */
    if (!v->shared->iova_tree) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}
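/*
 * Note the asymmetric permissions in the two mappings above:
 * cvq_cmd_out_buffer is mapped with write == false (the device only reads
 * commands from it), while the status buffer is mapped with write == true
 * (the device writes the acks into it).
 */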
static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                      const struct iovec *out_sg,
                                      size_t out_num,
                                      const struct iovec *in_sg,
                                      size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * The caller should hold the BQL when invoking this function, and should
 * consume the answer before SVQ polls it by itself once the BQL is released.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * The caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);

    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or the control commands shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command command-specific-data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}
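/*
 * A note on the cursor protocol implemented by vhost_vdpa_net_load_cmd()
 * above: out_cursor/in_cursor start at the beginning of the shadow buffers
 * (vhost_vdpa_net_load_cursor_reset()), and every queued command consumes
 * cmd_size bytes of out_cursor plus one status byte of in_cursor. When the
 * SVQ ring or the buffers run out of room, the pending batch is flushed and
 * the cursors are reset, so the number of commands in flight is always
 * in_cursor->iov_base - (void *)s->status.
 */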
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
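/*
 * Resulting VIRTIO_NET_CTRL_MAC_TABLE_SET payload (sketch), which is also
 * the worst case vhost_vdpa_net_cvq_cmd_len() is sized for:
 *
 *   le32 entries = first_multi,          then the unicast MACs
 *   le32 entries = in_use - first_multi, then the multicast MACs
 */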
static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to the VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to the VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to the VirtIO standard, "Field reserved MUST contain
         * zeroes. It is defined to make the structure to match the layout
         * of virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which corresponds to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * Considering that virtio_net_handle_rss() currently does not restore
     * the hash key length parsed from the CVQ command sent from the guest
     * into n->rss_data and uses the maximum key length in other code, we
     * also employ the maximum key length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        }
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
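/*
 * Layout of the command payload assembled above for the RSS case, matching
 * struct virtio_net_rss_config:
 *
 *   le32 hash_types; le16 indirection_table_mask; le16 unclassified_queue;
 *   le16 indirection_table[indirection_table_mask + 1];
 *   le16 max_tx_vq; u8 hash_key_length; u8 hash_key_data[hash_key_length];
 *
 * For VIRTIO_NET_CTRL_MQ_HASH_CONFIG the same iovec array is reused, with
 * the indirection table collapsed to a single zeroed entry that stands in
 * for the `reserved` field of struct virtio_net_hash_config.
 */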
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should stay enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which turns promiscuous mode off, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which turns all-multicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}
static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all of the device's pending used
         * buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base -
                                     (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}
static NetClientInfo net_vhost_vdpa_cvq_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_cvq_start,
        .load = vhost_vdpa_net_cvq_load,
        .stop = vhost_vdpa_net_cvq_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
        .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to the
 * vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, receiving
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses should
 * have the same effect on the device model. The same applies to
 * multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device's used buffer for the
     * VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
     */
    return sizeof(*s->status);
}
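/*
 * Shape of the fake command built above, as the device model will parse it
 * (illustrative):
 *
 *   struct virtio_net_ctrl_hdr { .class = VIRTIO_NET_CTRL_MAC,
 *                                .cmd   = VIRTIO_NET_CTRL_MAC_TABLE_SET }
 *   le32 entries = MAC_TABLE_ENTRIES + 1, then that many unicast MACs
 *   le32 entries = MAC_TABLE_ENTRIES + 1, then that many multicast MACs
 *
 * Only the two entry counters are initialized: virtio_net_handle_mac()
 * sees both tables overflow, marks uni_overflow/multi_overflow, and never
 * stores the address bytes, so they can stay uninitialized here.
 */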
/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec model_in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    /* in buffer used for the vdpa device */
    const struct iovec vdpa_in = {
        .iov_base = s->status,
        .iov_len = sizeof(*s->status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by QEMU, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa
         * device, which is determined by vhost_vdpa_net_cvq_cmd_page_len(),
         * excessive MAC addresses set by the driver for the filter table
         * can cause truncation of the CVQ command in QEMU. As a result,
         * the vdpa device rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out,
                                                                  &vdpa_in);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        ssize_t r;
        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
        if (unlikely(r < 0)) {
            dev_written = r;
            goto out;
        }

        /*
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        dev_written = vhost_vdpa_net_svq_poll(s, 1);
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free the `elem` it owns.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};
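/*
 * Note on the copy above: forwarding elem->out_sg directly would let the
 * guest rewrite the command between the moment the vdpa device executes it
 * and the moment virtio_net_handle_ctrl_iov() updates the device model.
 * Copying into s->cvq_cmd_out_buffer first guarantees both parse the same
 * bytes, which is the TOCTOU concern the function comment refers to.
 */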
/**
 * Probe if CVQ is isolated
 *
 * @device_fd The vdpa device fd
 * @features Features offered by the device.
 * @cvq_index The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}
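/*
 * The probe above runs the generic part of a virtio handshake
 * (ACKNOWLEDGE/DRIVER, SET_FEATURES, FEATURES_OK) just far enough to be
 * able to query vring groups, then always resets the device on the way
 * out, so it leaves no state behind regardless of the result.
 */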
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           VhostVDPAShared *shared,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
        s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
        s->vhost_vdpa.shared->iova_range = iova_range;
        s->vhost_vdpa.shared->shadow_data = svq;
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    if (queue_pair_index != 0) {
        s->vhost_vdpa.shared = shared;
    }

    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }

    return nc;
}
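/*
 * Ownership note: the queue pair 0 client allocates the VhostVDPAShared
 * state (device fd, iova range, iova tree) and every later client, data or
 * CVQ, just borrows a pointer to it; vhost_vdpa_cleanup() frees it from the
 * index 0 client only.
 */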
static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        VhostVDPAShared *shared = NULL;

        if (i) {
            shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
        }
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, shared, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
        VhostVDPAShared *shared = s0->vhost_vdpa.shared;

        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, shared,
                                 errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}