/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <virglrenderer.h>
#include "virgl.h"

#include <epoxy/gl.h>

void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}

static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    args.handle = c2d.resource_id;
    args.target = 2;            /* PIPE_TEXTURE_2D */
    args.format = c2d.format;
    args.bind = (1 << 1);       /* PIPE_BIND_RENDER_TARGET */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(unref);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    g_free(res_iovs);

    virgl_renderer_resource_unref(unref.resource_id);
}

/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif

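/*
 * Only two capability sets are exposed: VIRGL at index 0 and VIRGL2 at
 * index 1.  Any other index yields an empty capset_info response.
 */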
static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    resp = g_malloc0(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
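/*
 * Scanout setup is dmabuf-based: the renderer exports an fd for the
 * resource's texture and it is forwarded to the front-end with a
 * VHOST_USER_GPU_DMABUF_SCANOUT message.  A zero resource_id (or an
 * empty rect) disables the scanout instead.
 */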
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);
    if (ret != 0) {
        g_free(res_iovs);
    }
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    g_free(res_iovs);
}

static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        assert(fd >= 0);
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };
        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}

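/*
 * Flushing sends a VHOST_USER_GPU_DMABUF_UPDATE for every scanout that
 * currently displays the resource, then waits for the front-end to ack
 * each update.
 */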
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    glFlush();
    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        vg_wait_ok(g);
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->state != VG_CMD_STATE_NEW) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRId64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id,
                                cmd->cmd_hdr.type);
}

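/*
 * Fence completion callback from virglrenderer: retire every queued
 * command whose fence_id is not newer than the signalled fence.
 */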
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        free(cmd);
        g->inflight--;
    }
}

#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version = 1,
#endif
    .write_fence = virgl_write_fence,
};

static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}

bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        g->renderer_source =
            vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}