/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <virglrenderer.h>
#include "virgl.h"

#include <epoxy/gl.h>

void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    /* virglrenderer hands back malloc()ed data, so plain free(), not g_free() */
    free(cursor);
}

static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    args.handle = c2d.resource_id;
    args.target = 2;      /* 2 == PIPE_TEXTURE_2D in gallium terms */
    args.format = c2d.format;
    args.bind = (1 << 1); /* (1 << 1) == VIRGL_BIND_RENDER_TARGET */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(unref);

    /* Detach and free any backing iovecs before dropping the resource */
    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    g_free(res_iovs);

    virgl_renderer_resource_unref(unref.resource_id);
}
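/*
 * Capset negotiation happens in two steps: the guest enumerates capsets
 * by index with GET_CAPSET_INFO (learning each capset id and its maximum
 * version/size), then fetches the actual capability blob with GET_CAPSET.
 * This device exposes VIRTIO_GPU_CAPSET_VIRGL and, when virglrenderer
 * reports a non-zero max version for it, VIRTIO_GPU_CAPSET_VIRGL2.
 */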
/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif

static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        /* An unknown capset id yields max_size == 0: reject it */
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    resp = g_malloc0(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zu/%u)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    /* cs.size is in bytes, virglrenderer expects the length in dwords */
    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}
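/*
 * Transfer direction note: the "write" variants above copy from guest
 * backing pages into the host resource, while the "read" variant below
 * copies back out.  The (struct virgl_box *) casts rely on
 * struct virtio_gpu_box and struct virgl_box having the same layout
 * (six 32-bit fields: x, y, z, w, h, d).
 */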
static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);
    if (ret != 0) {
        /* On failure the iovecs were not adopted by virglrenderer */
        g_free(res_iovs);
    }
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    g_free(res_iovs);
}

static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %u",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            g_critical("%s: illegal resource specified %u\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        assert(fd >= 0);
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };
        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}
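/*
 * Scanout display path: SET_SCANOUT above hands the frontend a dmabuf fd
 * for the resource's texture via VHOST_USER_GPU_DMABUF_SCANOUT; the
 * RESOURCE_FLUSH handler below then only needs to send damage rectangles
 * (VHOST_USER_GPU_DMABUF_UPDATE) rather than pixel data, and waits for
 * the frontend to acknowledge each update before continuing.
 */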
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    glFlush();
    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        vg_wait_ok(g);
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
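/*
 * Command dispatch and fencing: each control-queue command is decoded
 * below and answered immediately with VIRTIO_GPU_RESP_OK_NODATA unless
 * it carries VIRTIO_GPU_FLAG_FENCE, in which case the response is
 * deferred and a virglrenderer fence is created instead; the reply is
 * sent later from the virgl_write_fence() callback once the GPU work
 * has completed.
 */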
void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->state != VG_CMD_STATE_NEW) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRIu64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * The guest can end up emitting fences out of order,
         * so we should check all fenced cmds, not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        /* elements popped from the virtqueue are malloc()ed, so free() */
        free(cmd);
        g->inflight--;
    }
}

#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version = 1,
#endif
    .write_fence = virgl_write_fence,
};

static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}

bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        g->renderer_source =
            vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}
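/*
 * Typical wiring from the vhost-user-gpu backend (an illustrative sketch,
 * not copied from vhost-user-gpu.c; "use_virgl" and "g" are assumed names
 * for the backend's virgl option and its VuGpu instance):
 *
 *     if (use_virgl && !vg_virgl_init(&g)) {
 *         g_printerr("virgl renderer init failed\n");
 *         exit(EXIT_FAILURE);
 *     }
 *     ...
 *     vg_virgl_process_cmd(&g, cmd);  // per popped control-queue command
 */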