/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <virglrenderer.h>
#include "virgl.h"

#include <epoxy/gl.h>

void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    /* virtio-gpu cursors are always 64x64, 32 bits per pixel */
    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}

/*
 * VUGPU_FILL_CMD() (see vugpu.h) copies the guest request out of the
 * command's iovecs into the local struct and returns early on a short
 * read.
 */
static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    args.handle = c2d.resource_id;
    args.target = 2;        /* PIPE_TEXTURE_2D */
    args.format = c2d.format;
    args.bind = (1 << 1);   /* PIPE_BIND_RENDER_TARGET */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(unref);

    /*
     * Detach and free any backing iovecs before dropping the resource,
     * otherwise they are leaked.
     */
    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    g_free(res_iovs);

    virgl_renderer_resource_unref(unref.resource_id);
}
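/*
 * Capset negotiation: the guest first asks how many capability sets the
 * device exposes (vg_virgl_get_num_capsets), then queries each set by
 * index (GET_CAPSET_INFO), and finally fetches the blob by id and
 * version (GET_CAPSET). Index 0 maps to VIRGL, index 1 to VIRGL2 when
 * the renderer supports it.
 */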
/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif

static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;

    /* Probe whether the renderer knows about VIRGL2. */
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    /*
     * An unknown capset id yields max_size == 0: reject it instead of
     * replying with an empty capset.
     */
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    resp = g_malloc0(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zu/%u)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    /* The submitted size is in bytes; virgl expects a dword count. */
    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
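/*
 * Transfers copy data between the guest's backing pages and the host
 * resource. The 2D path below always targets context 0, mip level 0
 * and lets the renderer infer strides; the 3D paths forward the
 * guest-supplied context, level and strides. Casting virtio_gpu_box to
 * virgl_box relies on the two structs sharing the same layout (six
 * consecutive uint32_t fields).
 */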
static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);
    if (ret != 0) {
        /* The renderer did not take ownership: free the mapping. */
        g_free(res_iovs);
    }
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    g_free(res_iovs);
}

static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        assert(fd >= 0);
        /* Pass the dmabuf fd along with the scanout geometry. */
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };
        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        /* A zero resource id (or an empty rect) disables the scanout. */
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}
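/*
 * Scanout and flush use the vhost-user-gpu protocol rather than virgl:
 * the resource's texture is exported as a dmabuf and handed to the
 * front-end with DMABUF_SCANOUT, and each subsequent RESOURCE_FLUSH
 * becomes a DMABUF_UPDATE that the front-end must acknowledge.
 */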
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    glFlush();
    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        /* Wait for the front-end ack before the buffer is reused. */
        vg_wait_ok(g);
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    /* A handler may already have sent a response (or deferred it). */
    if (cmd->state != VG_CMD_STATE_NEW) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRIu64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
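/*
 * Fenced commands are not answered right away: vg_virgl_process_cmd()
 * only creates a renderer fence, and the caller keeps the command on
 * g->fenceq until virgl_write_fence() below retires it.
 */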
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * The guest can end up emitting fences out of order, so we
         * should check all fenced commands, not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        free(cmd);
        g->inflight--;
    }
}

#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version     = 1,
#endif
    .write_fence = virgl_write_fence,
};

static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}

bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    /* Watch the renderer's poll fd, if it provides one. */
    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        g->renderer_source =
            vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}
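/*
 * Typical call sequence, for reference: the device calls vg_virgl_init()
 * once at startup, feeds each control-queue request through
 * vg_virgl_process_cmd(), and vg_virgl_poll() drives virgl_renderer_poll()
 * so the renderer can retire fences via virgl_write_fence().
 */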