/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
#include "trace.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    NotifierWithReturn migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it always fits here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}
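
/*
 * Illustrative sketch (comment only, assumed worst case): the
 * MAC_TABLE_SET payload that the size above accounts for is laid out as
 *
 *     struct virtio_net_ctrl_hdr hdr;        // class/cmd
 *     struct virtio_net_ctrl_mac uni;        // uni.entries = N
 *     uint8_t uni_macs[N * ETH_ALEN];
 *     struct virtio_net_ctrl_mac mul;        // mul.entries = M
 *     uint8_t mul_macs[M * ETH_ALEN];
 *
 * with N + M capped at MAC_TABLE_ENTRIES, matching the sum computed in
 * vhost_vdpa_net_cvq_cmd_len() above.
 */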

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}
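
/*
 * Hypothetical example of the filter above: a device offering only
 * VIRTIO_NET_F_MQ | VIRTIO_F_VERSION_1 passes this device-feature check
 * (VERSION_1 falls in the accepted transport range, MQ is in
 * vdpa_svq_device_features), while one offering
 * VIRTIO_NET_F_GUEST_ANNOUNCE would be rejected because that bit is not
 * in vdpa_svq_device_features. The final verdict still comes from
 * vhost_svq_valid_features().
 */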

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_free(s->vhost_vdpa.shared);
}

/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the i-th queue pair */
static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc_i = qemu_get_peer(nic->ncs, i);

    return DO_UPCAST(VhostVDPAState, nc, nc_i);
}

static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    return vhost_vdpa_net_get_nc_vdpa(s, 0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vqs and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /*
     * vhost_net_start() checks migration setup_or_active to decide whether
     * to configure SVQ.
     */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
    return 0;
}
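
/*
 * Flow sketch for the notifier above: entering precopy SETUP restarts
 * vhost-net on the first data vq client with shadow_vqs_enabled = true,
 * so SVQ can emulate VHOST_F_LOG_ALL during migration; a precopy
 * failure restarts it once more with SVQ turned back off.
 */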

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
                        vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_data_start,
        .load = vhost_vdpa_net_data_load,
        .stop = vhost_vdpa_net_client_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
        .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num);

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}
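
/*
 * Illustrative (hypothetical device) example of combining the two
 * helpers above: if VHOST_VDPA_GET_VRING_GROUP reports the CVQ vring in
 * group 1 while the data vrings sit in group 0, then
 * vhost_vdpa_set_address_space_id(v, 1, VHOST_VDPA_NET_CVQ_ASID) moves
 * CVQ translations into their own address space, keeping shadow CVQ
 * buffers out of the guest's IOVA mappings.
 */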

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->shared->iova_tree, map);
    return r;
}
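
/*
 * Note on the pairing above (sketch): vhost_vdpa_cvq_map_buf() first
 * reserves an IOVA range in the shared tree, then pins it through
 * vhost_vdpa_dma_map(); vhost_vdpa_cvq_unmap_buf() reverses both steps,
 * looking the range up by its translated (process) address.
 */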

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    /*
     * If other vhost_vdpa already have an iova_tree, reuse it for simplicity,
     * whether CVQ shares ASID with guest or not, because:
     * - Memory listener needs access to guest's memory addresses allocated
     *   in the IOVA tree.
     * - There should be plenty of IOVA address space for both ASIDs, so
     *   collisions between them are not a concern. Guest's translations are
     *   still validated with virtio virtqueue_pop so there is no risk for
     *   the guest to access memory that it shouldn't.
     *
     * Allocating an IOVA tree per ASID is doable but it complicates the code
     * and it is not worth it for the moment.
     */
    if (!v->shared->iova_tree) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                      const struct iovec *out_sg,
                                      size_t out_num,
                                      const struct iovec *in_sg,
                                      size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * Caller should hold the BQL when invoking this function, and should take
 * the answer before SVQ pulls by itself when BQL is released.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * Caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}
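
/*
 * Sketch of how callers derive the flush length: each queued command
 * consumes exactly one byte of the status shadow buffer, so after k
 * commands in_cursor->iov_base points at s->status + k, and
 * in_cursor->iov_base - (void *)s->status is the number of acks to poll
 * for (see vhost_vdpa_net_load_cmd() and vhost_vdpa_net_cvq_load()).
 */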

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    trace_vhost_vdpa_net_load_cmd(s, class, cmd, data_num, data_size);
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or the control command shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        trace_vhost_vdpa_net_load_cmd_retval(s, class, cmd, r);
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}
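
/*
 * Example (illustrative values only) of one queued command in the
 * shadow buffers: for VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET with 4 queue
 * pairs, the bytes written at out_cursor are
 *
 *     { VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, 0x04, 0x00 }
 *
 * (header plus little-endian u16 payload), and one byte of in_cursor is
 * reserved for the device's VIRTIO_NET_OK / VIRTIO_NET_ERR ack.
 */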

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to VirtIO standard, "Field reserved MUST contain zeroes.
         * It is defined to make the structure to match the layout of
         * virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which corresponds to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * Since virtio_net_handle_rss() currently does not restore the hash
     * key length parsed from the CVQ command sent from the guest into
     * n->rss_data and uses the maximum key length in other code, we also
     * employ the maximum key length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        }
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs);

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which sets promiscuous mode off, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns all-multicast mode
     * off by default.
     *
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}
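
/*
 * Worked example of the bitmap walk above: VLAN id 100 lives at
 * i = 100 >> 5 = 3, j = 100 & 0x1f = 4, so it is replayed to the device
 * when n->vlans[3] has bit 4 set, and (i << 5) + j reconstructs 100.
 */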

static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all the device's pending used buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base -
                                     (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate that QEMU polled a used buffer from the vdpa device
     * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}
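
/*
 * Net effect of the function above (illustrative scenario): if the
 * guest programs, say, 300 unicast MACs, the real filter table cannot
 * be forwarded, so the vdpa device is switched to promiscuous mode, and
 * the out buffer is rewritten into a fake oversized MAC_TABLE_SET that
 * the caller then feeds to the device model, making it mark
 * uni_overflow/multi_overflow and accept all packets too.
 */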

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for device model */
    const struct iovec model_in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    /* in buffer used for vdpa device */
    const struct iovec vdpa_in = {
        .iov_base = s->status,
        .iov_len = sizeof(*s->status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa
         * device, which is determined by vhost_vdpa_net_cvq_cmd_page_len(),
         * excessive MAC addresses set by the driver for the filter table
         * can cause truncation of the CVQ command in QEMU. As a result,
         * the vdpa device rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out,
                                                                  &vdpa_in);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        ssize_t r;
        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
        if (unlikely(r < 0)) {
            dev_written = r;
            goto out;
        }

        /*
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        dev_written = vhost_vdpa_net_svq_poll(s, 1);
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free the `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd The vdpa device fd
 * @features Features offered by the device.
 * @cvq_index The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           VhostVDPAShared *shared,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
        s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
        s->vhost_vdpa.shared->iova_range = iova_range;
        s->vhost_vdpa.shared->shadow_data = svq;
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    if (queue_pair_index != 0) {
        s->vhost_vdpa.shared = shared;
    }

    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }

    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}
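
/*
 * For reference (sketch of the helper above): with a device offering
 * VIRTIO_NET_F_MQ and a config space whose max_virtqueue_pairs reads 8,
 * vhost_vdpa_get_max_queue_pairs() returns 8, so the init code below
 * creates 8 data net clients plus one extra control client when
 * VIRTIO_NET_F_CTRL_VQ is also offered.
 */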

int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        VhostVDPAShared *shared = NULL;

        if (i) {
            shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
        }
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, shared, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
        VhostVDPAShared *shared = s0->vhost_vdpa.shared;

        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, shared,
                                 errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}
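
/*
 * Typical usage (illustrative command line; the device node name is an
 * assumption and varies per host):
 *
 *   qemu-system-x86_64 ... \
 *     -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
 *     -device virtio-net-pci,netdev=vdpa0
 *
 * This reaches net_init_vhost_vdpa() with opts->vhostdev set; passing a
 * pre-opened fd through vhostfd= is the fd-passing alternative.
 */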