/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "sysemu/cpus.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_reset_bh(void *opaque);

/* Set while blob resources exist; blocks migration (see migration/blocker.h). */
static Error *blob_mig_blocker;

/*
 * Copy the pixel data of @resource_id into the scanout's current cursor
 * image.  Silently ignores the request (no guest-visible error) when the
 * resource does not exist or its dimensions do not match the cursor.
 */
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    /* require_backing=false: a blob-only resource has no pixman image. */
    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        /* Blob path: make sure the blob can hold width*height 32bpp pixels. */
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        /* 2D path: the backing image must match the cursor size exactly. */
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR / VIRTIO_GPU_CMD_MOVE_CURSOR.
 * A "move" only repositions the pointer; an "update" also (re)defines the
 * cursor image from the given resource.
 */
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    /* Guest-supplied scanout id: bounds-check before indexing. */
    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            /* Cursor images are a fixed 64x64 here. */
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            /* Virtual method: subclasses (e.g. virgl) override this. */
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    /* Last arg: cursor visible iff a resource is attached. */
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ?
                  1 : 0);
}

/* Linear search of the resource list by id; returns NULL if not found. */
struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

/*
 * Look up @resource_id and optionally require that it has backing storage
 * (an iov plus either a pixman image or a blob).  On failure, logs a guest
 * error and stores a response code into *@error when @error is non-NULL.
 */
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

/*
 * Push a response for @cmd into its virtqueue and notify the guest.
 * Propagates fence info from the request when VIRTIO_GPU_FLAG_FENCE is set,
 * byte-swaps the header for the guest, and marks the command finished.
 */
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

/* Convenience wrapper: respond with a bare header of the given @type. */
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

/* VIRTIO_GPU_CMD_GET_DISPLAY_INFO: report per-scanout geometry. */
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

/* VIRTIO_GPU_CMD_GET_EDID: generate an EDID block for one scanout. */
void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    /* Guest-supplied scanout index: validate before use. */
    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

/*
 * Estimate host memory used by a width x height image of @pformat,
 * mirroring pixman's stride rounding (32-bit word alignment).
 */
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflow.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

#ifdef WIN32
/* pixman destroy callback: release the win32 file-mapping backing the bits. */
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    HANDLE handle = data;

    qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
}
#endif

/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for a
 * guest resource.  Rejects id 0, duplicate ids, unknown formats, and
 * allocations that would exceed the configured hostmem budget.
 */
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    /* Only allocate if we stay under the per-device hostmem budget;
     * otherwise res->image stays NULL and we fail with OUT_OF_MEMORY below. */
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        /* On win32 the bits live in a shared mapping so the UI can use them. */
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            c2d.width,
            c2d.height,
            bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: create a resource backed directly
 * by guest memory pages (mapped via the mem entries that follow the
 * command), then wrap them in a udmabuf when available.
 */
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    /* Only guest-memory blobs with the shareable flag are supported here. */
    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

/*
 * Detach any resource from @scanout_id and blank it.  Safe to call on an
 * already-disabled scanout (resource_id == 0 is a no-op).
 */
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    /* NULL surface tells the console to show a placeholder. */
    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

/*
 * Free a resource: detach it from every scanout still showing it, release
 * the pixman image and guest mappings, and drop it from the list and the
 * hostmem accounting.
 */
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

/* VIRTIO_GPU_CMD_RESOURCE_UNREF: destroy the named resource. */
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void
virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                               struct virtio_gpu_ctrl_command *cmd)
{
    /*
     * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy a rectangle of guest backing
     * memory (res->iov) into the host pixman image.  Blob resources are
     * skipped — their guest memory *is* the backing store.
     */
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    /* Reject rectangles that don't fit inside the resource.  The x/width
     * and y/height pre-checks keep the subsequent sums from wrapping for
     * any plausible resource size. */
    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        /* Partial-width rectangle: copy row by row. */
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        /* Full-width rectangle: one contiguous copy of whole rows. */
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}

/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: propagate a dirty rectangle of a resource
 * to every scanout currently showing it.
 */
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        /* Blob path: find scanouts whose rect intersects the flush rect and
         * push a GL update.  If any GL update was submitted we are done. */
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    /* Non-blob (or non-GL fallback): rect must lie within the resource. */
    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    /* Clip the flush rect against each scanout showing this resource and
     * update only the intersection, translated to console coordinates. */
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

/* pixman destroy callback: drop the resource image ref taken for a surface. */
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

/*
 * Record that @res is now shown on @scanout_id with rect @r, clearing the
 * bit on whatever resource was shown there before.
 */
static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

/*
 * Common back end for SET_SCANOUT and SET_SCANOUT_BLOB: validate the rect
 * against the framebuffer, then either hand a dmabuf to the GL console
 * (blob + GL) or (re)create a display surface over the pixel data.
 * On failure *@error is set and the scanout is left unchanged.
 */
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    /* Rect must fit in the fb; 16x16 is the minimum usable scanout size. */
    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        /* Non-GL console: fall through and scan out the blob bytes. */
        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            /* Keep the backing image alive as long as the surface exists. */
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id,
res, r); 707 } 708 709 static void virtio_gpu_set_scanout(VirtIOGPU *g, 710 struct virtio_gpu_ctrl_command *cmd) 711 { 712 struct virtio_gpu_simple_resource *res; 713 struct virtio_gpu_framebuffer fb = { 0 }; 714 struct virtio_gpu_set_scanout ss; 715 716 VIRTIO_GPU_FILL_CMD(ss); 717 virtio_gpu_bswap_32(&ss, sizeof(ss)); 718 trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, 719 ss.r.width, ss.r.height, ss.r.x, ss.r.y); 720 721 if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { 722 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", 723 __func__, ss.scanout_id); 724 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; 725 return; 726 } 727 728 if (ss.resource_id == 0) { 729 virtio_gpu_disable_scanout(g, ss.scanout_id); 730 return; 731 } 732 733 res = virtio_gpu_find_check_resource(g, ss.resource_id, true, 734 __func__, &cmd->error); 735 if (!res) { 736 return; 737 } 738 739 fb.format = pixman_image_get_format(res->image); 740 fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8); 741 fb.width = pixman_image_get_width(res->image); 742 fb.height = pixman_image_get_height(res->image); 743 fb.stride = pixman_image_get_stride(res->image); 744 fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride; 745 746 virtio_gpu_do_set_scanout(g, ss.scanout_id, 747 &fb, res, &ss.r, &cmd->error); 748 } 749 750 static void virtio_gpu_set_scanout_blob(VirtIOGPU *g, 751 struct virtio_gpu_ctrl_command *cmd) 752 { 753 struct virtio_gpu_simple_resource *res; 754 struct virtio_gpu_framebuffer fb = { 0 }; 755 struct virtio_gpu_set_scanout_blob ss; 756 uint64_t fbend; 757 758 VIRTIO_GPU_FILL_CMD(ss); 759 virtio_gpu_scanout_blob_bswap(&ss); 760 trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id, 761 ss.r.width, ss.r.height, ss.r.x, 762 ss.r.y); 763 764 if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { 765 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", 766 __func__, ss.scanout_id); 767 cmd->error = 
VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; 768 return; 769 } 770 771 if (ss.resource_id == 0) { 772 virtio_gpu_disable_scanout(g, ss.scanout_id); 773 return; 774 } 775 776 res = virtio_gpu_find_check_resource(g, ss.resource_id, true, 777 __func__, &cmd->error); 778 if (!res) { 779 return; 780 } 781 782 fb.format = virtio_gpu_get_pixman_format(ss.format); 783 if (!fb.format) { 784 qemu_log_mask(LOG_GUEST_ERROR, 785 "%s: host couldn't handle guest format %d\n", 786 __func__, ss.format); 787 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 788 return; 789 } 790 791 fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8); 792 fb.width = ss.width; 793 fb.height = ss.height; 794 fb.stride = ss.strides[0]; 795 fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride; 796 797 fbend = fb.offset; 798 fbend += fb.stride * (ss.r.height - 1); 799 fbend += fb.bytes_pp * ss.r.width; 800 if (fbend > res->blob_size) { 801 qemu_log_mask(LOG_GUEST_ERROR, 802 "%s: fb end out of range\n", 803 __func__); 804 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 805 return; 806 } 807 808 virtio_gpu_do_set_scanout(g, ss.scanout_id, 809 &fb, res, &ss.r, &cmd->error); 810 } 811 812 int virtio_gpu_create_mapping_iov(VirtIOGPU *g, 813 uint32_t nr_entries, uint32_t offset, 814 struct virtio_gpu_ctrl_command *cmd, 815 uint64_t **addr, struct iovec **iov, 816 uint32_t *niov) 817 { 818 struct virtio_gpu_mem_entry *ents; 819 size_t esize, s; 820 int e, v; 821 822 if (nr_entries > 16384) { 823 qemu_log_mask(LOG_GUEST_ERROR, 824 "%s: nr_entries is too big (%d > 16384)\n", 825 __func__, nr_entries); 826 return -1; 827 } 828 829 esize = sizeof(*ents) * nr_entries; 830 ents = g_malloc(esize); 831 s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 832 offset, ents, esize); 833 if (s != esize) { 834 qemu_log_mask(LOG_GUEST_ERROR, 835 "%s: command data size incorrect %zu vs %zu\n", 836 __func__, s, esize); 837 g_free(ents); 838 return -1; 839 } 840 841 *iov = NULL; 842 if (addr) { 843 
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        /* dma_memory_map may shorten len; loop until the whole entry is
         * covered, emitting one iovec per mapped fragment. */
        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                /* Unwind everything mapped so far before bailing out. */
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            /* Grow the output arrays in chunks of 16 entries. */
            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

/* Unmap @count iovec entries created by virtio_gpu_create_mapping_iov and
 * free the array itself. */
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

/* Release a resource's guest-memory backing (iov, addrs, udmabuf). */
void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

/*
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: attach guest pages (mem entries
 * following the command) as a resource's backing store.  Fails if backing
 * is already attached.
 */
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    /* Refuse double-attach: backing already present. */
    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: drop a resource's guest backing. */
static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

/*
 * Default process_cmd implementation (2D mode): decode the control header
 * and dispatch to the per-command handler.  Blob commands are rejected
 * unless the blob feature is enabled.
 */
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    /* Handlers that responded already set cmd->finished; for the rest,
     * send OK/error now unless the renderer has blocked responses. */
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

/* Control-queue notify: defer processing to the ctrl bottom half. */
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

/* Cursor-queue notify: defer processing to the cursor bottom half. */
static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

/*
 * Drain the queued control commands.  Re-entrancy is guarded by
 * processing_cmdq; processing stops early while the renderer is blocked
 * and resumes from virtio_gpu_handle_gl_flushed().  Unfinished (fenced)
 * commands are parked on fenceq until their fence completes.
 */
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

/* Respond to and free every command waiting on a completed fence. */
static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

/* Called when the GL renderer unblocks: flush fences, resume the cmd queue. */
static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

/*
 * Default handle_ctrl implementation: pop every available element off the
 * control queue onto cmdq, then process the queue.
 */
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

/* Bottom half body for the control queue; dispatches via the class vtable. */
static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}

/*
 * Drain the cursor queue: each element carries one update_cursor struct;
 * short reads are logged and skipped, and every element is pushed back
 * (used_len 0) and the guest notified.
 */
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct
virtio_gpu_update_cursor cursor_info; 1143 1144 if (!virtio_queue_ready(vq)) { 1145 return; 1146 } 1147 for (;;) { 1148 elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); 1149 if (!elem) { 1150 break; 1151 } 1152 1153 s = iov_to_buf(elem->out_sg, elem->out_num, 0, 1154 &cursor_info, sizeof(cursor_info)); 1155 if (s != sizeof(cursor_info)) { 1156 qemu_log_mask(LOG_GUEST_ERROR, 1157 "%s: cursor size incorrect %zu vs %zu\n", 1158 __func__, s, sizeof(cursor_info)); 1159 } else { 1160 virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info)); 1161 update_cursor(g, &cursor_info); 1162 } 1163 virtqueue_push(vq, elem, 0); 1164 virtio_notify(vdev, vq); 1165 g_free(elem); 1166 } 1167 } 1168 1169 static void virtio_gpu_cursor_bh(void *opaque) 1170 { 1171 VirtIOGPU *g = opaque; 1172 virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq); 1173 } 1174 1175 static const VMStateDescription vmstate_virtio_gpu_scanout = { 1176 .name = "virtio-gpu-one-scanout", 1177 .version_id = 1, 1178 .fields = (VMStateField[]) { 1179 VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout), 1180 VMSTATE_UINT32(width, struct virtio_gpu_scanout), 1181 VMSTATE_UINT32(height, struct virtio_gpu_scanout), 1182 VMSTATE_INT32(x, struct virtio_gpu_scanout), 1183 VMSTATE_INT32(y, struct virtio_gpu_scanout), 1184 VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout), 1185 VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout), 1186 VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout), 1187 VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout), 1188 VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout), 1189 VMSTATE_END_OF_LIST() 1190 }, 1191 }; 1192 1193 static const VMStateDescription vmstate_virtio_gpu_scanouts = { 1194 .name = "virtio-gpu-scanouts", 1195 .version_id = 1, 1196 .fields = (VMStateField[]) { 1197 VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU), 1198 VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs, 1199 struct VirtIOGPU, NULL), 1200 
VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU, 1201 parent_obj.conf.max_outputs, 1, 1202 vmstate_virtio_gpu_scanout, 1203 struct virtio_gpu_scanout), 1204 VMSTATE_END_OF_LIST() 1205 }, 1206 }; 1207 1208 static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size, 1209 const VMStateField *field, JSONWriter *vmdesc) 1210 { 1211 VirtIOGPU *g = opaque; 1212 struct virtio_gpu_simple_resource *res; 1213 int i; 1214 1215 /* in 2d mode we should never find unprocessed commands here */ 1216 assert(QTAILQ_EMPTY(&g->cmdq)); 1217 1218 QTAILQ_FOREACH(res, &g->reslist, next) { 1219 qemu_put_be32(f, res->resource_id); 1220 qemu_put_be32(f, res->width); 1221 qemu_put_be32(f, res->height); 1222 qemu_put_be32(f, res->format); 1223 qemu_put_be32(f, res->iov_cnt); 1224 for (i = 0; i < res->iov_cnt; i++) { 1225 qemu_put_be64(f, res->addrs[i]); 1226 qemu_put_be32(f, res->iov[i].iov_len); 1227 } 1228 qemu_put_buffer(f, (void *)pixman_image_get_data(res->image), 1229 pixman_image_get_stride(res->image) * res->height); 1230 } 1231 qemu_put_be32(f, 0); /* end of list */ 1232 1233 return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL); 1234 } 1235 1236 static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g, 1237 struct virtio_gpu_simple_resource *res) 1238 { 1239 int i; 1240 1241 for (i = 0; i < res->iov_cnt; i++) { 1242 hwaddr len = res->iov[i].iov_len; 1243 res->iov[i].iov_base = 1244 dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len, 1245 DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED); 1246 1247 if (!res->iov[i].iov_base || len != res->iov[i].iov_len) { 1248 /* Clean up the half-a-mapping we just created... 
*/ 1249 if (res->iov[i].iov_base) { 1250 dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base, 1251 len, DMA_DIRECTION_TO_DEVICE, 0); 1252 } 1253 /* ...and the mappings for previous loop iterations */ 1254 res->iov_cnt = i; 1255 virtio_gpu_cleanup_mapping(g, res); 1256 return false; 1257 } 1258 } 1259 1260 QTAILQ_INSERT_HEAD(&g->reslist, res, next); 1261 g->hostmem += res->hostmem; 1262 return true; 1263 } 1264 1265 static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, 1266 const VMStateField *field) 1267 { 1268 VirtIOGPU *g = opaque; 1269 struct virtio_gpu_simple_resource *res; 1270 uint32_t resource_id, pformat; 1271 void *bits = NULL; 1272 int i; 1273 1274 g->hostmem = 0; 1275 1276 resource_id = qemu_get_be32(f); 1277 while (resource_id != 0) { 1278 res = virtio_gpu_find_resource(g, resource_id); 1279 if (res) { 1280 return -EINVAL; 1281 } 1282 1283 res = g_new0(struct virtio_gpu_simple_resource, 1); 1284 res->resource_id = resource_id; 1285 res->width = qemu_get_be32(f); 1286 res->height = qemu_get_be32(f); 1287 res->format = qemu_get_be32(f); 1288 res->iov_cnt = qemu_get_be32(f); 1289 1290 /* allocate */ 1291 pformat = virtio_gpu_get_pixman_format(res->format); 1292 if (!pformat) { 1293 g_free(res); 1294 return -EINVAL; 1295 } 1296 1297 res->hostmem = calc_image_hostmem(pformat, res->width, res->height); 1298 #ifdef WIN32 1299 bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn); 1300 if (!bits) { 1301 g_free(res); 1302 return -EINVAL; 1303 } 1304 #endif 1305 res->image = pixman_image_create_bits( 1306 pformat, 1307 res->width, res->height, 1308 bits, res->height ? 
res->hostmem / res->height : 0); 1309 if (!res->image) { 1310 g_free(res); 1311 return -EINVAL; 1312 } 1313 #ifdef WIN32 1314 pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle); 1315 #endif 1316 1317 res->addrs = g_new(uint64_t, res->iov_cnt); 1318 res->iov = g_new(struct iovec, res->iov_cnt); 1319 1320 /* read data */ 1321 for (i = 0; i < res->iov_cnt; i++) { 1322 res->addrs[i] = qemu_get_be64(f); 1323 res->iov[i].iov_len = qemu_get_be32(f); 1324 } 1325 qemu_get_buffer(f, (void *)pixman_image_get_data(res->image), 1326 pixman_image_get_stride(res->image) * res->height); 1327 1328 if (!virtio_gpu_load_restore_mapping(g, res)) { 1329 pixman_image_unref(res->image); 1330 g_free(res); 1331 return -EINVAL; 1332 } 1333 1334 resource_id = qemu_get_be32(f); 1335 } 1336 1337 /* load & apply scanout state */ 1338 vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); 1339 1340 return 0; 1341 } 1342 1343 static int virtio_gpu_post_load(void *opaque, int version_id) 1344 { 1345 VirtIOGPU *g = opaque; 1346 struct virtio_gpu_scanout *scanout; 1347 struct virtio_gpu_simple_resource *res; 1348 int i; 1349 1350 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { 1351 /* FIXME: should take scanout.r.{x,y} into account */ 1352 scanout = &g->parent_obj.scanout[i]; 1353 if (!scanout->resource_id) { 1354 continue; 1355 } 1356 res = virtio_gpu_find_resource(g, scanout->resource_id); 1357 if (!res) { 1358 return -EINVAL; 1359 } 1360 scanout->ds = qemu_create_displaysurface_pixman(res->image); 1361 if (!scanout->ds) { 1362 return -EINVAL; 1363 } 1364 #ifdef WIN32 1365 qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0); 1366 #endif 1367 1368 dpy_gfx_replace_surface(scanout->con, scanout->ds); 1369 dpy_gfx_update_full(scanout->con); 1370 if (scanout->cursor.resource_id) { 1371 update_cursor(g, &scanout->cursor); 1372 } 1373 res->scanout_bitmask |= (1 << i); 1374 } 1375 1376 return 0; 1377 } 1378 1379 void 
virtio_gpu_device_realize(DeviceState *qdev, Error **errp) 1380 { 1381 VirtIODevice *vdev = VIRTIO_DEVICE(qdev); 1382 VirtIOGPU *g = VIRTIO_GPU(qdev); 1383 1384 if (virtio_gpu_blob_enabled(g->parent_obj.conf)) { 1385 if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) && 1386 !virtio_gpu_have_udmabuf()) { 1387 error_setg(errp, "need rutabaga or udmabuf for blob resources"); 1388 return; 1389 } 1390 1391 if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) { 1392 error_setg(errp, "blobs and virgl are not compatible (yet)"); 1393 return; 1394 } 1395 1396 if (!blob_mig_blocker) { 1397 error_setg(&blob_mig_blocker, 1398 "virtio-gpu blob VMs are currently not migratable."); 1399 } 1400 if (migrate_add_blocker(blob_mig_blocker, errp)) { 1401 return; 1402 } 1403 } 1404 1405 if (!virtio_gpu_base_device_realize(qdev, 1406 virtio_gpu_handle_ctrl_cb, 1407 virtio_gpu_handle_cursor_cb, 1408 errp)) { 1409 return; 1410 } 1411 1412 g->ctrl_vq = virtio_get_queue(vdev, 0); 1413 g->cursor_vq = virtio_get_queue(vdev, 1); 1414 g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g, 1415 &qdev->mem_reentrancy_guard); 1416 g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g, 1417 &qdev->mem_reentrancy_guard); 1418 g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g); 1419 qemu_cond_init(&g->reset_cond); 1420 QTAILQ_INIT(&g->reslist); 1421 QTAILQ_INIT(&g->cmdq); 1422 QTAILQ_INIT(&g->fenceq); 1423 } 1424 1425 static void virtio_gpu_device_unrealize(DeviceState *qdev) 1426 { 1427 VirtIOGPU *g = VIRTIO_GPU(qdev); 1428 1429 if (virtio_gpu_blob_enabled(g->parent_obj.conf)) { 1430 migrate_del_blocker(blob_mig_blocker); 1431 } 1432 g_clear_pointer(&g->ctrl_bh, qemu_bh_delete); 1433 g_clear_pointer(&g->cursor_bh, qemu_bh_delete); 1434 g_clear_pointer(&g->reset_bh, qemu_bh_delete); 1435 qemu_cond_destroy(&g->reset_cond); 1436 virtio_gpu_base_device_unrealize(qdev); 1437 } 1438 1439 static void virtio_gpu_reset_bh(void *opaque) 1440 { 1441 VirtIOGPU *g = VIRTIO_GPU(opaque); 1442 struct 
virtio_gpu_simple_resource *res, *tmp; 1443 int i = 0; 1444 1445 QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) { 1446 virtio_gpu_resource_destroy(g, res); 1447 } 1448 1449 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { 1450 dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL); 1451 } 1452 1453 g->reset_finished = true; 1454 qemu_cond_signal(&g->reset_cond); 1455 } 1456 1457 void virtio_gpu_reset(VirtIODevice *vdev) 1458 { 1459 VirtIOGPU *g = VIRTIO_GPU(vdev); 1460 struct virtio_gpu_ctrl_command *cmd; 1461 1462 if (qemu_in_vcpu_thread()) { 1463 g->reset_finished = false; 1464 qemu_bh_schedule(g->reset_bh); 1465 while (!g->reset_finished) { 1466 qemu_cond_wait_iothread(&g->reset_cond); 1467 } 1468 } else { 1469 virtio_gpu_reset_bh(g); 1470 } 1471 1472 while (!QTAILQ_EMPTY(&g->cmdq)) { 1473 cmd = QTAILQ_FIRST(&g->cmdq); 1474 QTAILQ_REMOVE(&g->cmdq, cmd, next); 1475 g_free(cmd); 1476 } 1477 1478 while (!QTAILQ_EMPTY(&g->fenceq)) { 1479 cmd = QTAILQ_FIRST(&g->fenceq); 1480 QTAILQ_REMOVE(&g->fenceq, cmd, next); 1481 g->inflight--; 1482 g_free(cmd); 1483 } 1484 1485 virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev)); 1486 } 1487 1488 static void 1489 virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config) 1490 { 1491 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); 1492 1493 memcpy(config, &g->virtio_config, sizeof(g->virtio_config)); 1494 } 1495 1496 static void 1497 virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config) 1498 { 1499 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); 1500 const struct virtio_gpu_config *vgconfig = 1501 (const struct virtio_gpu_config *)config; 1502 1503 if (vgconfig->events_clear) { 1504 g->virtio_config.events_read &= ~vgconfig->events_clear; 1505 } 1506 } 1507 1508 /* 1509 * For historical reasons virtio_gpu does not adhere to virtio migration 1510 * scheme as described in doc/virtio-migration.txt, in a sense that no 1511 * save/load callback are provided to the core. 
Instead the device data 1512 * is saved/loaded after the core data. 1513 * 1514 * Because of this we need a special vmsd. 1515 */ 1516 static const VMStateDescription vmstate_virtio_gpu = { 1517 .name = "virtio-gpu", 1518 .minimum_version_id = VIRTIO_GPU_VM_VERSION, 1519 .version_id = VIRTIO_GPU_VM_VERSION, 1520 .fields = (VMStateField[]) { 1521 VMSTATE_VIRTIO_DEVICE /* core */, 1522 { 1523 .name = "virtio-gpu", 1524 .info = &(const VMStateInfo) { 1525 .name = "virtio-gpu", 1526 .get = virtio_gpu_load, 1527 .put = virtio_gpu_save, 1528 }, 1529 .flags = VMS_SINGLE, 1530 } /* device */, 1531 VMSTATE_END_OF_LIST() 1532 }, 1533 .post_load = virtio_gpu_post_load, 1534 }; 1535 1536 static Property virtio_gpu_properties[] = { 1537 VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf), 1538 DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem, 1539 256 * MiB), 1540 DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags, 1541 VIRTIO_GPU_FLAG_BLOB_ENABLED, false), 1542 DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0), 1543 DEFINE_PROP_END_OF_LIST(), 1544 }; 1545 1546 static void virtio_gpu_class_init(ObjectClass *klass, void *data) 1547 { 1548 DeviceClass *dc = DEVICE_CLASS(klass); 1549 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 1550 VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass); 1551 VirtIOGPUBaseClass *vgbc = &vgc->parent; 1552 1553 vgc->handle_ctrl = virtio_gpu_handle_ctrl; 1554 vgc->process_cmd = virtio_gpu_simple_process_cmd; 1555 vgc->update_cursor_data = virtio_gpu_update_cursor_data; 1556 vgbc->gl_flushed = virtio_gpu_handle_gl_flushed; 1557 1558 vdc->realize = virtio_gpu_device_realize; 1559 vdc->unrealize = virtio_gpu_device_unrealize; 1560 vdc->reset = virtio_gpu_reset; 1561 vdc->get_config = virtio_gpu_get_config; 1562 vdc->set_config = virtio_gpu_set_config; 1563 1564 dc->vmsd = &vmstate_virtio_gpu; 1565 device_class_set_props(dc, virtio_gpu_properties); 1566 } 1567 1568 static const TypeInfo virtio_gpu_info = { 1569 .name = 
TYPE_VIRTIO_GPU, 1570 .parent = TYPE_VIRTIO_GPU_BASE, 1571 .instance_size = sizeof(VirtIOGPU), 1572 .class_size = sizeof(VirtIOGPUClass), 1573 .class_init = virtio_gpu_class_init, 1574 }; 1575 module_obj(TYPE_VIRTIO_GPU); 1576 module_kconfig(VIRTIO_GPU); 1577 1578 static void virtio_register_types(void) 1579 { 1580 type_register_static(&virtio_gpu_info); 1581 } 1582 1583 type_init(virtio_register_types) 1584