162232bf4SGerd Hoffmann /* 262232bf4SGerd Hoffmann * Virtio GPU Device 362232bf4SGerd Hoffmann * 462232bf4SGerd Hoffmann * Copyright Red Hat, Inc. 2013-2014 562232bf4SGerd Hoffmann * 662232bf4SGerd Hoffmann * Authors: 762232bf4SGerd Hoffmann * Dave Airlie <airlied@redhat.com> 862232bf4SGerd Hoffmann * Gerd Hoffmann <kraxel@redhat.com> 962232bf4SGerd Hoffmann * 102e252145SGerd Hoffmann * This work is licensed under the terms of the GNU GPL, version 2 or later. 1162232bf4SGerd Hoffmann * See the COPYING file in the top-level directory. 1262232bf4SGerd Hoffmann */ 1362232bf4SGerd Hoffmann 149b8bfe21SPeter Maydell #include "qemu/osdep.h" 15f0353b0dSPhilippe Mathieu-Daudé #include "qemu/units.h" 1662232bf4SGerd Hoffmann #include "qemu/iov.h" 1762232bf4SGerd Hoffmann #include "ui/console.h" 1862232bf4SGerd Hoffmann #include "trace.h" 198da132a5SGerd Hoffmann #include "sysemu/dma.h" 202f780b6aSMarkus Armbruster #include "sysemu/sysemu.h" 2162232bf4SGerd Hoffmann #include "hw/virtio/virtio.h" 22ca77ee28SMarkus Armbruster #include "migration/qemu-file-types.h" 2362232bf4SGerd Hoffmann #include "hw/virtio/virtio-gpu.h" 24ad08e67aSMarc-André Lureau #include "hw/virtio/virtio-gpu-bswap.h" 2583a7d3c0SMarc-André Lureau #include "hw/virtio/virtio-gpu-pixman.h" 2662232bf4SGerd Hoffmann #include "hw/virtio/virtio-bus.h" 271ed2cb32SGerd Hoffmann #include "hw/display/edid.h" 28a27bd6c7SMarkus Armbruster #include "hw/qdev-properties.h" 2903dd024fSPaolo Bonzini #include "qemu/log.h" 300b8fa32fSMarkus Armbruster #include "qemu/module.h" 315e3d741cSMarc-André Lureau #include "qapi/error.h" 3250d8e25eSMarc-André Lureau #include "qemu/error-report.h" 3362232bf4SGerd Hoffmann 340c244e50SGerd Hoffmann #define VIRTIO_GPU_VM_VERSION 1 350c244e50SGerd Hoffmann 3662232bf4SGerd Hoffmann static struct virtio_gpu_simple_resource* 3762232bf4SGerd Hoffmann virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id); 3825c001a4SVivek Kasireddy static struct virtio_gpu_simple_resource * 
3925c001a4SVivek Kasireddy virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id, 4025c001a4SVivek Kasireddy bool require_backing, 4125c001a4SVivek Kasireddy const char *caller, uint32_t *error); 4262232bf4SGerd Hoffmann 433bb68f79SGerd Hoffmann static void virtio_gpu_cleanup_mapping(VirtIOGPU *g, 443bb68f79SGerd Hoffmann struct virtio_gpu_simple_resource *res); 45b8e23926SLi Qiang 462c267d66SGerd Hoffmann void virtio_gpu_update_cursor_data(VirtIOGPU *g, 4762232bf4SGerd Hoffmann struct virtio_gpu_scanout *s, 4862232bf4SGerd Hoffmann uint32_t resource_id) 4962232bf4SGerd Hoffmann { 5062232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 5162232bf4SGerd Hoffmann uint32_t pixels; 5262232bf4SGerd Hoffmann 5325c001a4SVivek Kasireddy res = virtio_gpu_find_check_resource(g, resource_id, false, 5425c001a4SVivek Kasireddy __func__, NULL); 5562232bf4SGerd Hoffmann if (!res) { 5662232bf4SGerd Hoffmann return; 5762232bf4SGerd Hoffmann } 5862232bf4SGerd Hoffmann 5962232bf4SGerd Hoffmann if (pixman_image_get_width(res->image) != s->current_cursor->width || 6062232bf4SGerd Hoffmann pixman_image_get_height(res->image) != s->current_cursor->height) { 6162232bf4SGerd Hoffmann return; 6262232bf4SGerd Hoffmann } 6362232bf4SGerd Hoffmann 6462232bf4SGerd Hoffmann pixels = s->current_cursor->width * s->current_cursor->height; 6562232bf4SGerd Hoffmann memcpy(s->current_cursor->data, 6662232bf4SGerd Hoffmann pixman_image_get_data(res->image), 6762232bf4SGerd Hoffmann pixels * sizeof(uint32_t)); 6862232bf4SGerd Hoffmann } 6962232bf4SGerd Hoffmann 7062232bf4SGerd Hoffmann static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) 7162232bf4SGerd Hoffmann { 7262232bf4SGerd Hoffmann struct virtio_gpu_scanout *s; 732c267d66SGerd Hoffmann VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); 740c244e50SGerd Hoffmann bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR; 7562232bf4SGerd Hoffmann 7650d8e25eSMarc-André Lureau if (cursor->pos.scanout_id >= 
g->parent_obj.conf.max_outputs) { 7762232bf4SGerd Hoffmann return; 7862232bf4SGerd Hoffmann } 7950d8e25eSMarc-André Lureau s = &g->parent_obj.scanout[cursor->pos.scanout_id]; 8062232bf4SGerd Hoffmann 81e9c1b459SGerd Hoffmann trace_virtio_gpu_update_cursor(cursor->pos.scanout_id, 82e9c1b459SGerd Hoffmann cursor->pos.x, 83e9c1b459SGerd Hoffmann cursor->pos.y, 84e9c1b459SGerd Hoffmann move ? "move" : "update", 85e9c1b459SGerd Hoffmann cursor->resource_id); 86e9c1b459SGerd Hoffmann 870c244e50SGerd Hoffmann if (!move) { 8862232bf4SGerd Hoffmann if (!s->current_cursor) { 8962232bf4SGerd Hoffmann s->current_cursor = cursor_alloc(64, 64); 9062232bf4SGerd Hoffmann } 9162232bf4SGerd Hoffmann 9262232bf4SGerd Hoffmann s->current_cursor->hot_x = cursor->hot_x; 9362232bf4SGerd Hoffmann s->current_cursor->hot_y = cursor->hot_y; 9462232bf4SGerd Hoffmann 9562232bf4SGerd Hoffmann if (cursor->resource_id > 0) { 962c267d66SGerd Hoffmann vgc->update_cursor_data(g, s, cursor->resource_id); 9762232bf4SGerd Hoffmann } 9862232bf4SGerd Hoffmann dpy_cursor_define(s->con, s->current_cursor); 990c244e50SGerd Hoffmann 1000c244e50SGerd Hoffmann s->cursor = *cursor; 1010c244e50SGerd Hoffmann } else { 1020c244e50SGerd Hoffmann s->cursor.pos.x = cursor->pos.x; 1030c244e50SGerd Hoffmann s->cursor.pos.y = cursor->pos.y; 10462232bf4SGerd Hoffmann } 10562232bf4SGerd Hoffmann dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, 10662232bf4SGerd Hoffmann cursor->resource_id ? 
1 : 0); 10762232bf4SGerd Hoffmann } 10862232bf4SGerd Hoffmann 10962232bf4SGerd Hoffmann static struct virtio_gpu_simple_resource * 11062232bf4SGerd Hoffmann virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id) 11162232bf4SGerd Hoffmann { 11262232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 11362232bf4SGerd Hoffmann 11462232bf4SGerd Hoffmann QTAILQ_FOREACH(res, &g->reslist, next) { 11562232bf4SGerd Hoffmann if (res->resource_id == resource_id) { 11662232bf4SGerd Hoffmann return res; 11762232bf4SGerd Hoffmann } 11862232bf4SGerd Hoffmann } 11962232bf4SGerd Hoffmann return NULL; 12062232bf4SGerd Hoffmann } 12162232bf4SGerd Hoffmann 12225c001a4SVivek Kasireddy static struct virtio_gpu_simple_resource * 12325c001a4SVivek Kasireddy virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id, 12425c001a4SVivek Kasireddy bool require_backing, 12525c001a4SVivek Kasireddy const char *caller, uint32_t *error) 12625c001a4SVivek Kasireddy { 12725c001a4SVivek Kasireddy struct virtio_gpu_simple_resource *res; 12825c001a4SVivek Kasireddy 12925c001a4SVivek Kasireddy res = virtio_gpu_find_resource(g, resource_id); 13025c001a4SVivek Kasireddy if (!res) { 13125c001a4SVivek Kasireddy qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n", 13225c001a4SVivek Kasireddy caller, resource_id); 13325c001a4SVivek Kasireddy if (error) { 13425c001a4SVivek Kasireddy *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 13525c001a4SVivek Kasireddy } 13625c001a4SVivek Kasireddy return NULL; 13725c001a4SVivek Kasireddy } 13825c001a4SVivek Kasireddy 13925c001a4SVivek Kasireddy if (require_backing) { 140e0933d91SVivek Kasireddy if (!res->iov || (!res->image && !res->blob)) { 14125c001a4SVivek Kasireddy qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n", 14225c001a4SVivek Kasireddy caller, resource_id); 14325c001a4SVivek Kasireddy if (error) { 14425c001a4SVivek Kasireddy *error = VIRTIO_GPU_RESP_ERR_UNSPEC; 14525c001a4SVivek Kasireddy } 14625c001a4SVivek 
Kasireddy return NULL; 14725c001a4SVivek Kasireddy } 14825c001a4SVivek Kasireddy } 14925c001a4SVivek Kasireddy 15025c001a4SVivek Kasireddy return res; 15125c001a4SVivek Kasireddy } 15225c001a4SVivek Kasireddy 15362232bf4SGerd Hoffmann void virtio_gpu_ctrl_response(VirtIOGPU *g, 15462232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd, 15562232bf4SGerd Hoffmann struct virtio_gpu_ctrl_hdr *resp, 15662232bf4SGerd Hoffmann size_t resp_len) 15762232bf4SGerd Hoffmann { 15862232bf4SGerd Hoffmann size_t s; 15962232bf4SGerd Hoffmann 16062232bf4SGerd Hoffmann if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) { 16162232bf4SGerd Hoffmann resp->flags |= VIRTIO_GPU_FLAG_FENCE; 16262232bf4SGerd Hoffmann resp->fence_id = cmd->cmd_hdr.fence_id; 16362232bf4SGerd Hoffmann resp->ctx_id = cmd->cmd_hdr.ctx_id; 16462232bf4SGerd Hoffmann } 1651715d6b5SFarhan Ali virtio_gpu_ctrl_hdr_bswap(resp); 16662232bf4SGerd Hoffmann s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len); 16762232bf4SGerd Hoffmann if (s != resp_len) { 16862232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 16962232bf4SGerd Hoffmann "%s: response size incorrect %zu vs %zu\n", 17062232bf4SGerd Hoffmann __func__, s, resp_len); 17162232bf4SGerd Hoffmann } 17262232bf4SGerd Hoffmann virtqueue_push(cmd->vq, &cmd->elem, s); 17362232bf4SGerd Hoffmann virtio_notify(VIRTIO_DEVICE(g), cmd->vq); 17462232bf4SGerd Hoffmann cmd->finished = true; 17562232bf4SGerd Hoffmann } 17662232bf4SGerd Hoffmann 17762232bf4SGerd Hoffmann void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g, 17862232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd, 17962232bf4SGerd Hoffmann enum virtio_gpu_ctrl_type type) 18062232bf4SGerd Hoffmann { 18162232bf4SGerd Hoffmann struct virtio_gpu_ctrl_hdr resp; 18262232bf4SGerd Hoffmann 18362232bf4SGerd Hoffmann memset(&resp, 0, sizeof(resp)); 18462232bf4SGerd Hoffmann resp.type = type; 18562232bf4SGerd Hoffmann virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp)); 18662232bf4SGerd Hoffmann } 
18762232bf4SGerd Hoffmann 18862232bf4SGerd Hoffmann void virtio_gpu_get_display_info(VirtIOGPU *g, 18962232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 19062232bf4SGerd Hoffmann { 19162232bf4SGerd Hoffmann struct virtio_gpu_resp_display_info display_info; 19262232bf4SGerd Hoffmann 19362232bf4SGerd Hoffmann trace_virtio_gpu_cmd_get_display_info(); 19462232bf4SGerd Hoffmann memset(&display_info, 0, sizeof(display_info)); 19562232bf4SGerd Hoffmann display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; 19650d8e25eSMarc-André Lureau virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info); 19762232bf4SGerd Hoffmann virtio_gpu_ctrl_response(g, cmd, &display_info.hdr, 19862232bf4SGerd Hoffmann sizeof(display_info)); 19962232bf4SGerd Hoffmann } 20062232bf4SGerd Hoffmann 2011ed2cb32SGerd Hoffmann static void 2021ed2cb32SGerd Hoffmann virtio_gpu_generate_edid(VirtIOGPU *g, int scanout, 2031ed2cb32SGerd Hoffmann struct virtio_gpu_resp_edid *edid) 2041ed2cb32SGerd Hoffmann { 20550d8e25eSMarc-André Lureau VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); 2061ed2cb32SGerd Hoffmann qemu_edid_info info = { 2074bf47f36SMarc-André Lureau .width_mm = b->req_state[scanout].width_mm, 2084bf47f36SMarc-André Lureau .height_mm = b->req_state[scanout].height_mm, 20950d8e25eSMarc-André Lureau .prefx = b->req_state[scanout].width, 21050d8e25eSMarc-André Lureau .prefy = b->req_state[scanout].height, 2111ed2cb32SGerd Hoffmann }; 2121ed2cb32SGerd Hoffmann 2131ed2cb32SGerd Hoffmann edid->size = cpu_to_le32(sizeof(edid->edid)); 2141ed2cb32SGerd Hoffmann qemu_edid_generate(edid->edid, sizeof(edid->edid), &info); 2151ed2cb32SGerd Hoffmann } 2161ed2cb32SGerd Hoffmann 2171ed2cb32SGerd Hoffmann void virtio_gpu_get_edid(VirtIOGPU *g, 2181ed2cb32SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 2191ed2cb32SGerd Hoffmann { 2201ed2cb32SGerd Hoffmann struct virtio_gpu_resp_edid edid; 2211ed2cb32SGerd Hoffmann struct virtio_gpu_cmd_get_edid get_edid; 22250d8e25eSMarc-André Lureau 
VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); 2231ed2cb32SGerd Hoffmann 2241ed2cb32SGerd Hoffmann VIRTIO_GPU_FILL_CMD(get_edid); 2251ed2cb32SGerd Hoffmann virtio_gpu_bswap_32(&get_edid, sizeof(get_edid)); 2261ed2cb32SGerd Hoffmann 22750d8e25eSMarc-André Lureau if (get_edid.scanout >= b->conf.max_outputs) { 2281ed2cb32SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 2291ed2cb32SGerd Hoffmann return; 2301ed2cb32SGerd Hoffmann } 2311ed2cb32SGerd Hoffmann 2321ed2cb32SGerd Hoffmann trace_virtio_gpu_cmd_get_edid(get_edid.scanout); 2331ed2cb32SGerd Hoffmann memset(&edid, 0, sizeof(edid)); 2341ed2cb32SGerd Hoffmann edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID; 2351ed2cb32SGerd Hoffmann virtio_gpu_generate_edid(g, get_edid.scanout, &edid); 2361ed2cb32SGerd Hoffmann virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid)); 2371ed2cb32SGerd Hoffmann } 2381ed2cb32SGerd Hoffmann 239c53f5b89STao Wu static uint32_t calc_image_hostmem(pixman_format_code_t pformat, 240c53f5b89STao Wu uint32_t width, uint32_t height) 241c53f5b89STao Wu { 242c53f5b89STao Wu /* Copied from pixman/pixman-bits-image.c, skip integer overflow check. 243c53f5b89STao Wu * pixman_image_create_bits will fail in case it overflow. 
244c53f5b89STao Wu */ 245c53f5b89STao Wu 246c53f5b89STao Wu int bpp = PIXMAN_FORMAT_BPP(pformat); 247c53f5b89STao Wu int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t); 248c53f5b89STao Wu return height * stride; 249c53f5b89STao Wu } 250c53f5b89STao Wu 25162232bf4SGerd Hoffmann static void virtio_gpu_resource_create_2d(VirtIOGPU *g, 25262232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 25362232bf4SGerd Hoffmann { 25462232bf4SGerd Hoffmann pixman_format_code_t pformat; 25562232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 25662232bf4SGerd Hoffmann struct virtio_gpu_resource_create_2d c2d; 25762232bf4SGerd Hoffmann 25862232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(c2d); 2591715d6b5SFarhan Ali virtio_gpu_bswap_32(&c2d, sizeof(c2d)); 26062232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format, 26162232bf4SGerd Hoffmann c2d.width, c2d.height); 26262232bf4SGerd Hoffmann 26362232bf4SGerd Hoffmann if (c2d.resource_id == 0) { 26462232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", 26562232bf4SGerd Hoffmann __func__); 26662232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 26762232bf4SGerd Hoffmann return; 26862232bf4SGerd Hoffmann } 26962232bf4SGerd Hoffmann 27062232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, c2d.resource_id); 27162232bf4SGerd Hoffmann if (res) { 27262232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", 27362232bf4SGerd Hoffmann __func__, c2d.resource_id); 27462232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 27562232bf4SGerd Hoffmann return; 27662232bf4SGerd Hoffmann } 27762232bf4SGerd Hoffmann 27862232bf4SGerd Hoffmann res = g_new0(struct virtio_gpu_simple_resource, 1); 27962232bf4SGerd Hoffmann 28062232bf4SGerd Hoffmann res->width = c2d.width; 28162232bf4SGerd Hoffmann res->height = c2d.height; 28262232bf4SGerd Hoffmann res->format = c2d.format; 28362232bf4SGerd Hoffmann 
res->resource_id = c2d.resource_id; 28462232bf4SGerd Hoffmann 28583a7d3c0SMarc-André Lureau pformat = virtio_gpu_get_pixman_format(c2d.format); 28662232bf4SGerd Hoffmann if (!pformat) { 28762232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 28862232bf4SGerd Hoffmann "%s: host couldn't handle guest format %d\n", 28962232bf4SGerd Hoffmann __func__, c2d.format); 290cb3a0522SLi Qiang g_free(res); 29162232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 29262232bf4SGerd Hoffmann return; 29362232bf4SGerd Hoffmann } 2949b7621bcSGerd Hoffmann 295c53f5b89STao Wu res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height); 29650d8e25eSMarc-André Lureau if (res->hostmem + g->hostmem < g->conf_max_hostmem) { 29762232bf4SGerd Hoffmann res->image = pixman_image_create_bits(pformat, 29862232bf4SGerd Hoffmann c2d.width, 29962232bf4SGerd Hoffmann c2d.height, 30062232bf4SGerd Hoffmann NULL, 0); 3019b7621bcSGerd Hoffmann } 30262232bf4SGerd Hoffmann 30362232bf4SGerd Hoffmann if (!res->image) { 30462232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 30562232bf4SGerd Hoffmann "%s: resource creation failed %d %d %d\n", 30662232bf4SGerd Hoffmann __func__, c2d.resource_id, c2d.width, c2d.height); 30762232bf4SGerd Hoffmann g_free(res); 30862232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; 30962232bf4SGerd Hoffmann return; 31062232bf4SGerd Hoffmann } 31162232bf4SGerd Hoffmann 31262232bf4SGerd Hoffmann QTAILQ_INSERT_HEAD(&g->reslist, res, next); 3139b7621bcSGerd Hoffmann g->hostmem += res->hostmem; 31462232bf4SGerd Hoffmann } 31562232bf4SGerd Hoffmann 316e0933d91SVivek Kasireddy static void virtio_gpu_resource_create_blob(VirtIOGPU *g, 317e0933d91SVivek Kasireddy struct virtio_gpu_ctrl_command *cmd) 318e0933d91SVivek Kasireddy { 319e0933d91SVivek Kasireddy struct virtio_gpu_simple_resource *res; 320e0933d91SVivek Kasireddy struct virtio_gpu_resource_create_blob cblob; 321e0933d91SVivek Kasireddy int ret; 322e0933d91SVivek Kasireddy 
323e0933d91SVivek Kasireddy VIRTIO_GPU_FILL_CMD(cblob); 324e0933d91SVivek Kasireddy virtio_gpu_create_blob_bswap(&cblob); 325e0933d91SVivek Kasireddy trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); 326e0933d91SVivek Kasireddy 327e0933d91SVivek Kasireddy if (cblob.resource_id == 0) { 328e0933d91SVivek Kasireddy qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", 329e0933d91SVivek Kasireddy __func__); 330e0933d91SVivek Kasireddy cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 331e0933d91SVivek Kasireddy return; 332e0933d91SVivek Kasireddy } 333e0933d91SVivek Kasireddy 334e0933d91SVivek Kasireddy res = virtio_gpu_find_resource(g, cblob.resource_id); 335e0933d91SVivek Kasireddy if (res) { 336e0933d91SVivek Kasireddy qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", 337e0933d91SVivek Kasireddy __func__, cblob.resource_id); 338e0933d91SVivek Kasireddy cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 339e0933d91SVivek Kasireddy return; 340e0933d91SVivek Kasireddy } 341e0933d91SVivek Kasireddy 342e0933d91SVivek Kasireddy res = g_new0(struct virtio_gpu_simple_resource, 1); 343e0933d91SVivek Kasireddy res->resource_id = cblob.resource_id; 344e0933d91SVivek Kasireddy res->blob_size = cblob.size; 345e0933d91SVivek Kasireddy 346e0933d91SVivek Kasireddy if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST && 347e0933d91SVivek Kasireddy cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) { 348e0933d91SVivek Kasireddy qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n", 349e0933d91SVivek Kasireddy __func__); 350e0933d91SVivek Kasireddy cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 351e0933d91SVivek Kasireddy g_free(res); 352e0933d91SVivek Kasireddy return; 353e0933d91SVivek Kasireddy } 354e0933d91SVivek Kasireddy 355e0933d91SVivek Kasireddy if (res->iov) { 356e0933d91SVivek Kasireddy cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 357e0933d91SVivek Kasireddy return; 358e0933d91SVivek Kasireddy } 
359e0933d91SVivek Kasireddy 360e0933d91SVivek Kasireddy ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), 361e0933d91SVivek Kasireddy cmd, &res->addrs, &res->iov, 362e0933d91SVivek Kasireddy &res->iov_cnt); 363e0933d91SVivek Kasireddy if (ret != 0) { 364e0933d91SVivek Kasireddy cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 365e0933d91SVivek Kasireddy return; 366e0933d91SVivek Kasireddy } 367e0933d91SVivek Kasireddy 368e0933d91SVivek Kasireddy virtio_gpu_init_udmabuf(res); 369e0933d91SVivek Kasireddy QTAILQ_INSERT_HEAD(&g->reslist, res, next); 370e0933d91SVivek Kasireddy } 371e0933d91SVivek Kasireddy 372da566a18SGerd Hoffmann static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id) 373da566a18SGerd Hoffmann { 37450d8e25eSMarc-André Lureau struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id]; 375da566a18SGerd Hoffmann struct virtio_gpu_simple_resource *res; 376da566a18SGerd Hoffmann 377da566a18SGerd Hoffmann if (scanout->resource_id == 0) { 378da566a18SGerd Hoffmann return; 379da566a18SGerd Hoffmann } 380da566a18SGerd Hoffmann 381da566a18SGerd Hoffmann res = virtio_gpu_find_resource(g, scanout->resource_id); 382da566a18SGerd Hoffmann if (res) { 383da566a18SGerd Hoffmann res->scanout_bitmask &= ~(1 << scanout_id); 384da566a18SGerd Hoffmann } 385da566a18SGerd Hoffmann 386ed8f3fe6SAkihiko Odaki dpy_gfx_replace_surface(scanout->con, NULL); 387da566a18SGerd Hoffmann scanout->resource_id = 0; 388da566a18SGerd Hoffmann scanout->ds = NULL; 389da566a18SGerd Hoffmann scanout->width = 0; 390da566a18SGerd Hoffmann scanout->height = 0; 391da566a18SGerd Hoffmann } 392da566a18SGerd Hoffmann 39362232bf4SGerd Hoffmann static void virtio_gpu_resource_destroy(VirtIOGPU *g, 39462232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res) 39562232bf4SGerd Hoffmann { 3961fccd7c5SGerd Hoffmann int i; 3971fccd7c5SGerd Hoffmann 3981fccd7c5SGerd Hoffmann if (res->scanout_bitmask) { 39950d8e25eSMarc-André Lureau for (i = 0; i < 
g->parent_obj.conf.max_outputs; i++) { 4001fccd7c5SGerd Hoffmann if (res->scanout_bitmask & (1 << i)) { 4011fccd7c5SGerd Hoffmann virtio_gpu_disable_scanout(g, i); 4021fccd7c5SGerd Hoffmann } 4031fccd7c5SGerd Hoffmann } 4041fccd7c5SGerd Hoffmann } 4051fccd7c5SGerd Hoffmann 406*32db3c63SVivek Kasireddy qemu_pixman_image_unref(res->image); 4073bb68f79SGerd Hoffmann virtio_gpu_cleanup_mapping(g, res); 40862232bf4SGerd Hoffmann QTAILQ_REMOVE(&g->reslist, res, next); 4099b7621bcSGerd Hoffmann g->hostmem -= res->hostmem; 41062232bf4SGerd Hoffmann g_free(res); 41162232bf4SGerd Hoffmann } 41262232bf4SGerd Hoffmann 41362232bf4SGerd Hoffmann static void virtio_gpu_resource_unref(VirtIOGPU *g, 41462232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 41562232bf4SGerd Hoffmann { 41662232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 41762232bf4SGerd Hoffmann struct virtio_gpu_resource_unref unref; 41862232bf4SGerd Hoffmann 41962232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(unref); 4201715d6b5SFarhan Ali virtio_gpu_bswap_32(&unref, sizeof(unref)); 42162232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_unref(unref.resource_id); 42262232bf4SGerd Hoffmann 42362232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, unref.resource_id); 42462232bf4SGerd Hoffmann if (!res) { 42562232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 42662232bf4SGerd Hoffmann __func__, unref.resource_id); 42762232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 42862232bf4SGerd Hoffmann return; 42962232bf4SGerd Hoffmann } 43062232bf4SGerd Hoffmann virtio_gpu_resource_destroy(g, res); 43162232bf4SGerd Hoffmann } 43262232bf4SGerd Hoffmann 43362232bf4SGerd Hoffmann static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g, 43462232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 43562232bf4SGerd Hoffmann { 43662232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 43762232bf4SGerd Hoffmann int h; 43862232bf4SGerd Hoffmann 
uint32_t src_offset, dst_offset, stride; 43962232bf4SGerd Hoffmann int bpp; 44062232bf4SGerd Hoffmann pixman_format_code_t format; 44162232bf4SGerd Hoffmann struct virtio_gpu_transfer_to_host_2d t2d; 44262232bf4SGerd Hoffmann 44362232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(t2d); 4441715d6b5SFarhan Ali virtio_gpu_t2d_bswap(&t2d); 44562232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id); 44662232bf4SGerd Hoffmann 44725c001a4SVivek Kasireddy res = virtio_gpu_find_check_resource(g, t2d.resource_id, true, 44825c001a4SVivek Kasireddy __func__, &cmd->error); 449e0933d91SVivek Kasireddy if (!res || res->blob) { 45062232bf4SGerd Hoffmann return; 45162232bf4SGerd Hoffmann } 45262232bf4SGerd Hoffmann 45362232bf4SGerd Hoffmann if (t2d.r.x > res->width || 45462232bf4SGerd Hoffmann t2d.r.y > res->height || 45562232bf4SGerd Hoffmann t2d.r.width > res->width || 45662232bf4SGerd Hoffmann t2d.r.height > res->height || 45762232bf4SGerd Hoffmann t2d.r.x + t2d.r.width > res->width || 45862232bf4SGerd Hoffmann t2d.r.y + t2d.r.height > res->height) { 45962232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource" 46062232bf4SGerd Hoffmann " bounds for resource %d: %d %d %d %d vs %d %d\n", 46162232bf4SGerd Hoffmann __func__, t2d.resource_id, t2d.r.x, t2d.r.y, 46262232bf4SGerd Hoffmann t2d.r.width, t2d.r.height, res->width, res->height); 46362232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 46462232bf4SGerd Hoffmann return; 46562232bf4SGerd Hoffmann } 46662232bf4SGerd Hoffmann 46762232bf4SGerd Hoffmann format = pixman_image_get_format(res->image); 468e5f99037SMarc-André Lureau bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8); 46962232bf4SGerd Hoffmann stride = pixman_image_get_stride(res->image); 47062232bf4SGerd Hoffmann 47162232bf4SGerd Hoffmann if (t2d.offset || t2d.r.x || t2d.r.y || 47262232bf4SGerd Hoffmann t2d.r.width != pixman_image_get_width(res->image)) { 47362232bf4SGerd Hoffmann void *img_data = 
pixman_image_get_data(res->image); 47462232bf4SGerd Hoffmann for (h = 0; h < t2d.r.height; h++) { 47562232bf4SGerd Hoffmann src_offset = t2d.offset + stride * h; 47662232bf4SGerd Hoffmann dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp); 47762232bf4SGerd Hoffmann 47862232bf4SGerd Hoffmann iov_to_buf(res->iov, res->iov_cnt, src_offset, 47962232bf4SGerd Hoffmann (uint8_t *)img_data 48062232bf4SGerd Hoffmann + dst_offset, t2d.r.width * bpp); 48162232bf4SGerd Hoffmann } 48262232bf4SGerd Hoffmann } else { 48362232bf4SGerd Hoffmann iov_to_buf(res->iov, res->iov_cnt, 0, 48462232bf4SGerd Hoffmann pixman_image_get_data(res->image), 48562232bf4SGerd Hoffmann pixman_image_get_stride(res->image) 48662232bf4SGerd Hoffmann * pixman_image_get_height(res->image)); 48762232bf4SGerd Hoffmann } 48862232bf4SGerd Hoffmann } 48962232bf4SGerd Hoffmann 49062232bf4SGerd Hoffmann static void virtio_gpu_resource_flush(VirtIOGPU *g, 49162232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 49262232bf4SGerd Hoffmann { 49362232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 49462232bf4SGerd Hoffmann struct virtio_gpu_resource_flush rf; 495*32db3c63SVivek Kasireddy struct virtio_gpu_scanout *scanout; 49662232bf4SGerd Hoffmann pixman_region16_t flush_region; 49762232bf4SGerd Hoffmann int i; 49862232bf4SGerd Hoffmann 49962232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(rf); 5001715d6b5SFarhan Ali virtio_gpu_bswap_32(&rf, sizeof(rf)); 50162232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_flush(rf.resource_id, 50262232bf4SGerd Hoffmann rf.r.width, rf.r.height, rf.r.x, rf.r.y); 50362232bf4SGerd Hoffmann 50425c001a4SVivek Kasireddy res = virtio_gpu_find_check_resource(g, rf.resource_id, false, 50525c001a4SVivek Kasireddy __func__, &cmd->error); 506*32db3c63SVivek Kasireddy if (!res) { 50762232bf4SGerd Hoffmann return; 50862232bf4SGerd Hoffmann } 50962232bf4SGerd Hoffmann 510*32db3c63SVivek Kasireddy if (res->blob) { 511*32db3c63SVivek Kasireddy for (i = 0; i < g->parent_obj.conf.max_outputs; 
i++) { 512*32db3c63SVivek Kasireddy scanout = &g->parent_obj.scanout[i]; 513*32db3c63SVivek Kasireddy if (scanout->resource_id == res->resource_id && 514*32db3c63SVivek Kasireddy console_has_gl(scanout->con)) { 515*32db3c63SVivek Kasireddy dpy_gl_update(scanout->con, 0, 0, scanout->width, 516*32db3c63SVivek Kasireddy scanout->height); 517*32db3c63SVivek Kasireddy return; 518*32db3c63SVivek Kasireddy } 519*32db3c63SVivek Kasireddy } 520*32db3c63SVivek Kasireddy } 521*32db3c63SVivek Kasireddy 522*32db3c63SVivek Kasireddy if (!res->blob && 523*32db3c63SVivek Kasireddy (rf.r.x > res->width || 52462232bf4SGerd Hoffmann rf.r.y > res->height || 52562232bf4SGerd Hoffmann rf.r.width > res->width || 52662232bf4SGerd Hoffmann rf.r.height > res->height || 52762232bf4SGerd Hoffmann rf.r.x + rf.r.width > res->width || 528*32db3c63SVivek Kasireddy rf.r.y + rf.r.height > res->height)) { 52962232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource" 53062232bf4SGerd Hoffmann " bounds for resource %d: %d %d %d %d vs %d %d\n", 53162232bf4SGerd Hoffmann __func__, rf.resource_id, rf.r.x, rf.r.y, 53262232bf4SGerd Hoffmann rf.r.width, rf.r.height, res->width, res->height); 53362232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 53462232bf4SGerd Hoffmann return; 53562232bf4SGerd Hoffmann } 53662232bf4SGerd Hoffmann 53762232bf4SGerd Hoffmann pixman_region_init_rect(&flush_region, 53862232bf4SGerd Hoffmann rf.r.x, rf.r.y, rf.r.width, rf.r.height); 53950d8e25eSMarc-André Lureau for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { 54062232bf4SGerd Hoffmann pixman_region16_t region, finalregion; 54162232bf4SGerd Hoffmann pixman_box16_t *extents; 54262232bf4SGerd Hoffmann 54362232bf4SGerd Hoffmann if (!(res->scanout_bitmask & (1 << i))) { 54462232bf4SGerd Hoffmann continue; 54562232bf4SGerd Hoffmann } 54650d8e25eSMarc-André Lureau scanout = &g->parent_obj.scanout[i]; 54762232bf4SGerd Hoffmann 54862232bf4SGerd Hoffmann 
pixman_region_init(&finalregion); 54962232bf4SGerd Hoffmann pixman_region_init_rect(®ion, scanout->x, scanout->y, 55062232bf4SGerd Hoffmann scanout->width, scanout->height); 55162232bf4SGerd Hoffmann 55262232bf4SGerd Hoffmann pixman_region_intersect(&finalregion, &flush_region, ®ion); 55362232bf4SGerd Hoffmann pixman_region_translate(&finalregion, -scanout->x, -scanout->y); 55462232bf4SGerd Hoffmann extents = pixman_region_extents(&finalregion); 55562232bf4SGerd Hoffmann /* work out the area we need to update for each console */ 55650d8e25eSMarc-André Lureau dpy_gfx_update(g->parent_obj.scanout[i].con, 55762232bf4SGerd Hoffmann extents->x1, extents->y1, 55862232bf4SGerd Hoffmann extents->x2 - extents->x1, 55962232bf4SGerd Hoffmann extents->y2 - extents->y1); 56062232bf4SGerd Hoffmann 56162232bf4SGerd Hoffmann pixman_region_fini(®ion); 56262232bf4SGerd Hoffmann pixman_region_fini(&finalregion); 56362232bf4SGerd Hoffmann } 56462232bf4SGerd Hoffmann pixman_region_fini(&flush_region); 56562232bf4SGerd Hoffmann } 56662232bf4SGerd Hoffmann 567fa06e5cbSGerd Hoffmann static void virtio_unref_resource(pixman_image_t *image, void *data) 568fa06e5cbSGerd Hoffmann { 569fa06e5cbSGerd Hoffmann pixman_image_unref(data); 570fa06e5cbSGerd Hoffmann } 571fa06e5cbSGerd Hoffmann 57281cd9f71SVivek Kasireddy static void virtio_gpu_update_scanout(VirtIOGPU *g, 57381cd9f71SVivek Kasireddy uint32_t scanout_id, 57481cd9f71SVivek Kasireddy struct virtio_gpu_simple_resource *res, 57581cd9f71SVivek Kasireddy struct virtio_gpu_rect *r) 57681cd9f71SVivek Kasireddy { 57781cd9f71SVivek Kasireddy struct virtio_gpu_simple_resource *ores; 57881cd9f71SVivek Kasireddy struct virtio_gpu_scanout *scanout; 57981cd9f71SVivek Kasireddy 58081cd9f71SVivek Kasireddy scanout = &g->parent_obj.scanout[scanout_id]; 58181cd9f71SVivek Kasireddy ores = virtio_gpu_find_resource(g, scanout->resource_id); 58281cd9f71SVivek Kasireddy if (ores) { 58381cd9f71SVivek Kasireddy ores->scanout_bitmask &= ~(1 << scanout_id); 
58481cd9f71SVivek Kasireddy } 58581cd9f71SVivek Kasireddy 58681cd9f71SVivek Kasireddy res->scanout_bitmask |= (1 << scanout_id); 58781cd9f71SVivek Kasireddy scanout->resource_id = res->resource_id; 58881cd9f71SVivek Kasireddy scanout->x = r->x; 58981cd9f71SVivek Kasireddy scanout->y = r->y; 59081cd9f71SVivek Kasireddy scanout->width = r->width; 59181cd9f71SVivek Kasireddy scanout->height = r->height; 59281cd9f71SVivek Kasireddy } 59381cd9f71SVivek Kasireddy 594e64d4b6aSVivek Kasireddy static void virtio_gpu_do_set_scanout(VirtIOGPU *g, 595e64d4b6aSVivek Kasireddy uint32_t scanout_id, 596e64d4b6aSVivek Kasireddy struct virtio_gpu_framebuffer *fb, 597e64d4b6aSVivek Kasireddy struct virtio_gpu_simple_resource *res, 598e64d4b6aSVivek Kasireddy struct virtio_gpu_rect *r, 599e64d4b6aSVivek Kasireddy uint32_t *error) 600e64d4b6aSVivek Kasireddy { 601e64d4b6aSVivek Kasireddy struct virtio_gpu_scanout *scanout; 602e64d4b6aSVivek Kasireddy uint8_t *data; 603e64d4b6aSVivek Kasireddy 604e64d4b6aSVivek Kasireddy if (scanout_id >= g->parent_obj.conf.max_outputs) { 605e64d4b6aSVivek Kasireddy qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", 606e64d4b6aSVivek Kasireddy __func__, scanout_id); 607e64d4b6aSVivek Kasireddy *error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; 608e64d4b6aSVivek Kasireddy return; 609e64d4b6aSVivek Kasireddy } 610e64d4b6aSVivek Kasireddy scanout = &g->parent_obj.scanout[scanout_id]; 611e64d4b6aSVivek Kasireddy 612e64d4b6aSVivek Kasireddy if (r->x > fb->width || 613e64d4b6aSVivek Kasireddy r->y > fb->height || 614e64d4b6aSVivek Kasireddy r->width < 16 || 615e64d4b6aSVivek Kasireddy r->height < 16 || 616e64d4b6aSVivek Kasireddy r->width > fb->width || 617e64d4b6aSVivek Kasireddy r->height > fb->height || 618e64d4b6aSVivek Kasireddy r->x + r->width > fb->width || 619e64d4b6aSVivek Kasireddy r->y + r->height > fb->height) { 620e64d4b6aSVivek Kasireddy qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for" 621e64d4b6aSVivek 
Kasireddy " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n", 622e64d4b6aSVivek Kasireddy __func__, scanout_id, res->resource_id, 623e64d4b6aSVivek Kasireddy r->x, r->y, r->width, r->height, 624e64d4b6aSVivek Kasireddy fb->width, fb->height); 625e64d4b6aSVivek Kasireddy *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 626e64d4b6aSVivek Kasireddy return; 627e64d4b6aSVivek Kasireddy } 628e64d4b6aSVivek Kasireddy 629e64d4b6aSVivek Kasireddy g->parent_obj.enable = 1; 630*32db3c63SVivek Kasireddy 631*32db3c63SVivek Kasireddy if (res->blob) { 632*32db3c63SVivek Kasireddy if (console_has_gl(scanout->con)) { 633*32db3c63SVivek Kasireddy if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb)) { 634*32db3c63SVivek Kasireddy virtio_gpu_update_scanout(g, scanout_id, res, r); 635*32db3c63SVivek Kasireddy return; 636*32db3c63SVivek Kasireddy } 637*32db3c63SVivek Kasireddy } 638*32db3c63SVivek Kasireddy 639*32db3c63SVivek Kasireddy data = res->blob; 640*32db3c63SVivek Kasireddy } else { 641e64d4b6aSVivek Kasireddy data = (uint8_t *)pixman_image_get_data(res->image); 642*32db3c63SVivek Kasireddy } 643e64d4b6aSVivek Kasireddy 644e64d4b6aSVivek Kasireddy /* create a surface for this scanout */ 645*32db3c63SVivek Kasireddy if ((res->blob && !console_has_gl(scanout->con)) || 646*32db3c63SVivek Kasireddy !scanout->ds || 647e64d4b6aSVivek Kasireddy surface_data(scanout->ds) != data + fb->offset || 648e64d4b6aSVivek Kasireddy scanout->width != r->width || 649e64d4b6aSVivek Kasireddy scanout->height != r->height) { 650e64d4b6aSVivek Kasireddy pixman_image_t *rect; 651e64d4b6aSVivek Kasireddy void *ptr = data + fb->offset; 652e64d4b6aSVivek Kasireddy rect = pixman_image_create_bits(fb->format, r->width, r->height, 653e64d4b6aSVivek Kasireddy ptr, fb->stride); 654e64d4b6aSVivek Kasireddy 655e64d4b6aSVivek Kasireddy if (res->image) { 656e64d4b6aSVivek Kasireddy pixman_image_ref(res->image); 657e64d4b6aSVivek Kasireddy pixman_image_set_destroy_function(rect, virtio_unref_resource, 658e64d4b6aSVivek 
Kasireddy res->image); 659e64d4b6aSVivek Kasireddy } 660e64d4b6aSVivek Kasireddy 661e64d4b6aSVivek Kasireddy /* realloc the surface ptr */ 662e64d4b6aSVivek Kasireddy scanout->ds = qemu_create_displaysurface_pixman(rect); 663e64d4b6aSVivek Kasireddy if (!scanout->ds) { 664e64d4b6aSVivek Kasireddy *error = VIRTIO_GPU_RESP_ERR_UNSPEC; 665e64d4b6aSVivek Kasireddy return; 666e64d4b6aSVivek Kasireddy } 667e64d4b6aSVivek Kasireddy 668e64d4b6aSVivek Kasireddy pixman_image_unref(rect); 669e64d4b6aSVivek Kasireddy dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con, 670e64d4b6aSVivek Kasireddy scanout->ds); 671e64d4b6aSVivek Kasireddy } 672e64d4b6aSVivek Kasireddy 67381cd9f71SVivek Kasireddy virtio_gpu_update_scanout(g, scanout_id, res, r); 674e64d4b6aSVivek Kasireddy } 675e64d4b6aSVivek Kasireddy 67662232bf4SGerd Hoffmann static void virtio_gpu_set_scanout(VirtIOGPU *g, 67762232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 67862232bf4SGerd Hoffmann { 679e64d4b6aSVivek Kasireddy struct virtio_gpu_simple_resource *res; 680e64d4b6aSVivek Kasireddy struct virtio_gpu_framebuffer fb = { 0 }; 68162232bf4SGerd Hoffmann struct virtio_gpu_set_scanout ss; 68262232bf4SGerd Hoffmann 68362232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(ss); 6841715d6b5SFarhan Ali virtio_gpu_bswap_32(&ss, sizeof(ss)); 68562232bf4SGerd Hoffmann trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, 68662232bf4SGerd Hoffmann ss.r.width, ss.r.height, ss.r.x, ss.r.y); 68762232bf4SGerd Hoffmann 68862232bf4SGerd Hoffmann if (ss.resource_id == 0) { 689da566a18SGerd Hoffmann virtio_gpu_disable_scanout(g, ss.scanout_id); 69062232bf4SGerd Hoffmann return; 69162232bf4SGerd Hoffmann } 69262232bf4SGerd Hoffmann 69325c001a4SVivek Kasireddy res = virtio_gpu_find_check_resource(g, ss.resource_id, true, 69425c001a4SVivek Kasireddy __func__, &cmd->error); 69562232bf4SGerd Hoffmann if (!res) { 69662232bf4SGerd Hoffmann return; 69762232bf4SGerd Hoffmann } 69862232bf4SGerd Hoffmann 699e64d4b6aSVivek 
Kasireddy fb.format = pixman_image_get_format(res->image); 700e64d4b6aSVivek Kasireddy fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8); 701e64d4b6aSVivek Kasireddy fb.width = pixman_image_get_width(res->image); 702e64d4b6aSVivek Kasireddy fb.height = pixman_image_get_height(res->image); 703e64d4b6aSVivek Kasireddy fb.stride = pixman_image_get_stride(res->image); 704e64d4b6aSVivek Kasireddy fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride; 70562232bf4SGerd Hoffmann 706e64d4b6aSVivek Kasireddy virtio_gpu_do_set_scanout(g, ss.scanout_id, 707e64d4b6aSVivek Kasireddy &fb, res, &ss.r, &cmd->error); 70862232bf4SGerd Hoffmann } 70962232bf4SGerd Hoffmann 710*32db3c63SVivek Kasireddy static void virtio_gpu_set_scanout_blob(VirtIOGPU *g, 711*32db3c63SVivek Kasireddy struct virtio_gpu_ctrl_command *cmd) 712*32db3c63SVivek Kasireddy { 713*32db3c63SVivek Kasireddy struct virtio_gpu_simple_resource *res; 714*32db3c63SVivek Kasireddy struct virtio_gpu_framebuffer fb = { 0 }; 715*32db3c63SVivek Kasireddy struct virtio_gpu_set_scanout_blob ss; 716*32db3c63SVivek Kasireddy uint64_t fbend; 717*32db3c63SVivek Kasireddy 718*32db3c63SVivek Kasireddy VIRTIO_GPU_FILL_CMD(ss); 719*32db3c63SVivek Kasireddy virtio_gpu_scanout_blob_bswap(&ss); 720*32db3c63SVivek Kasireddy trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id, 721*32db3c63SVivek Kasireddy ss.r.width, ss.r.height, ss.r.x, 722*32db3c63SVivek Kasireddy ss.r.y); 723*32db3c63SVivek Kasireddy 724*32db3c63SVivek Kasireddy if (ss.resource_id == 0) { 725*32db3c63SVivek Kasireddy virtio_gpu_disable_scanout(g, ss.scanout_id); 726*32db3c63SVivek Kasireddy return; 727*32db3c63SVivek Kasireddy } 728*32db3c63SVivek Kasireddy 729*32db3c63SVivek Kasireddy res = virtio_gpu_find_check_resource(g, ss.resource_id, true, 730*32db3c63SVivek Kasireddy __func__, &cmd->error); 731*32db3c63SVivek Kasireddy if (!res) { 732*32db3c63SVivek Kasireddy return; 733*32db3c63SVivek Kasireddy } 734*32db3c63SVivek Kasireddy 
735*32db3c63SVivek Kasireddy fb.format = virtio_gpu_get_pixman_format(ss.format); 736*32db3c63SVivek Kasireddy if (!fb.format) { 737*32db3c63SVivek Kasireddy qemu_log_mask(LOG_GUEST_ERROR, 738*32db3c63SVivek Kasireddy "%s: host couldn't handle guest format %d\n", 739*32db3c63SVivek Kasireddy __func__, ss.format); 740*32db3c63SVivek Kasireddy cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 741*32db3c63SVivek Kasireddy return; 742*32db3c63SVivek Kasireddy } 743*32db3c63SVivek Kasireddy 744*32db3c63SVivek Kasireddy fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8); 745*32db3c63SVivek Kasireddy fb.width = ss.width; 746*32db3c63SVivek Kasireddy fb.height = ss.height; 747*32db3c63SVivek Kasireddy fb.stride = ss.strides[0]; 748*32db3c63SVivek Kasireddy fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride; 749*32db3c63SVivek Kasireddy 750*32db3c63SVivek Kasireddy fbend = fb.offset; 751*32db3c63SVivek Kasireddy fbend += fb.stride * (ss.r.height - 1); 752*32db3c63SVivek Kasireddy fbend += fb.bytes_pp * ss.r.width; 753*32db3c63SVivek Kasireddy if (fbend > res->blob_size) { 754*32db3c63SVivek Kasireddy qemu_log_mask(LOG_GUEST_ERROR, 755*32db3c63SVivek Kasireddy "%s: fb end out of range\n", 756*32db3c63SVivek Kasireddy __func__); 757*32db3c63SVivek Kasireddy cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 758*32db3c63SVivek Kasireddy return; 759*32db3c63SVivek Kasireddy } 760*32db3c63SVivek Kasireddy 761*32db3c63SVivek Kasireddy virtio_gpu_do_set_scanout(g, ss.scanout_id, 762*32db3c63SVivek Kasireddy &fb, res, &ss.r, &cmd->error); 763*32db3c63SVivek Kasireddy } 764*32db3c63SVivek Kasireddy 7653bb68f79SGerd Hoffmann int virtio_gpu_create_mapping_iov(VirtIOGPU *g, 76670d37662SVivek Kasireddy uint32_t nr_entries, uint32_t offset, 76762232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd, 7689049f8bcSGerd Hoffmann uint64_t **addr, struct iovec **iov, 7699049f8bcSGerd Hoffmann uint32_t *niov) 77062232bf4SGerd Hoffmann { 77162232bf4SGerd 
Hoffmann struct virtio_gpu_mem_entry *ents; 77262232bf4SGerd Hoffmann size_t esize, s; 7739049f8bcSGerd Hoffmann int e, v; 77462232bf4SGerd Hoffmann 77570d37662SVivek Kasireddy if (nr_entries > 16384) { 77662232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 7772c84167bSGerd Hoffmann "%s: nr_entries is too big (%d > 16384)\n", 77870d37662SVivek Kasireddy __func__, nr_entries); 77962232bf4SGerd Hoffmann return -1; 78062232bf4SGerd Hoffmann } 78162232bf4SGerd Hoffmann 78270d37662SVivek Kasireddy esize = sizeof(*ents) * nr_entries; 78362232bf4SGerd Hoffmann ents = g_malloc(esize); 78462232bf4SGerd Hoffmann s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 78570d37662SVivek Kasireddy offset, ents, esize); 78662232bf4SGerd Hoffmann if (s != esize) { 78762232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 78862232bf4SGerd Hoffmann "%s: command data size incorrect %zu vs %zu\n", 78962232bf4SGerd Hoffmann __func__, s, esize); 79062232bf4SGerd Hoffmann g_free(ents); 79162232bf4SGerd Hoffmann return -1; 79262232bf4SGerd Hoffmann } 79362232bf4SGerd Hoffmann 7949049f8bcSGerd Hoffmann *iov = NULL; 7950c244e50SGerd Hoffmann if (addr) { 7969049f8bcSGerd Hoffmann *addr = NULL; 7970c244e50SGerd Hoffmann } 79870d37662SVivek Kasireddy for (e = 0, v = 0; e < nr_entries; e++) { 7999049f8bcSGerd Hoffmann uint64_t a = le64_to_cpu(ents[e].addr); 8009049f8bcSGerd Hoffmann uint32_t l = le32_to_cpu(ents[e].length); 8019049f8bcSGerd Hoffmann hwaddr len; 8029049f8bcSGerd Hoffmann void *map; 8039049f8bcSGerd Hoffmann 8049049f8bcSGerd Hoffmann do { 8059049f8bcSGerd Hoffmann len = l; 8069049f8bcSGerd Hoffmann map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, 8078da132a5SGerd Hoffmann a, &len, DMA_DIRECTION_TO_DEVICE); 8089049f8bcSGerd Hoffmann if (!map) { 80962232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for" 81070d37662SVivek Kasireddy " element %d\n", __func__, e); 8119049f8bcSGerd Hoffmann virtio_gpu_cleanup_mapping_iov(g, *iov, v); 81262232bf4SGerd 
Hoffmann g_free(ents); 81362232bf4SGerd Hoffmann *iov = NULL; 8140c244e50SGerd Hoffmann if (addr) { 8150c244e50SGerd Hoffmann g_free(*addr); 8160c244e50SGerd Hoffmann *addr = NULL; 8170c244e50SGerd Hoffmann } 81862232bf4SGerd Hoffmann return -1; 81962232bf4SGerd Hoffmann } 8209049f8bcSGerd Hoffmann 8219049f8bcSGerd Hoffmann if (!(v % 16)) { 8229049f8bcSGerd Hoffmann *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16)); 8239049f8bcSGerd Hoffmann if (addr) { 8249049f8bcSGerd Hoffmann *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16)); 82562232bf4SGerd Hoffmann } 8269049f8bcSGerd Hoffmann } 8279049f8bcSGerd Hoffmann (*iov)[v].iov_base = map; 8289049f8bcSGerd Hoffmann (*iov)[v].iov_len = len; 8299049f8bcSGerd Hoffmann if (addr) { 8309049f8bcSGerd Hoffmann (*addr)[v] = a; 8319049f8bcSGerd Hoffmann } 8329049f8bcSGerd Hoffmann 8339049f8bcSGerd Hoffmann a += len; 8349049f8bcSGerd Hoffmann l -= len; 8359049f8bcSGerd Hoffmann v += 1; 8369049f8bcSGerd Hoffmann } while (l > 0); 8379049f8bcSGerd Hoffmann } 8389049f8bcSGerd Hoffmann *niov = v; 8399049f8bcSGerd Hoffmann 84062232bf4SGerd Hoffmann g_free(ents); 84162232bf4SGerd Hoffmann return 0; 84262232bf4SGerd Hoffmann } 84362232bf4SGerd Hoffmann 8443bb68f79SGerd Hoffmann void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g, 8453bb68f79SGerd Hoffmann struct iovec *iov, uint32_t count) 84662232bf4SGerd Hoffmann { 84762232bf4SGerd Hoffmann int i; 84862232bf4SGerd Hoffmann 84962232bf4SGerd Hoffmann for (i = 0; i < count; i++) { 8508da132a5SGerd Hoffmann dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, 8518da132a5SGerd Hoffmann iov[i].iov_base, iov[i].iov_len, 8528da132a5SGerd Hoffmann DMA_DIRECTION_TO_DEVICE, 85362232bf4SGerd Hoffmann iov[i].iov_len); 85462232bf4SGerd Hoffmann } 8557f3be0f2SGerd Hoffmann g_free(iov); 85662232bf4SGerd Hoffmann } 85762232bf4SGerd Hoffmann 8583bb68f79SGerd Hoffmann static void virtio_gpu_cleanup_mapping(VirtIOGPU *g, 8593bb68f79SGerd Hoffmann struct virtio_gpu_simple_resource *res) 86062232bf4SGerd 
Hoffmann { 8613bb68f79SGerd Hoffmann virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); 86262232bf4SGerd Hoffmann res->iov = NULL; 86362232bf4SGerd Hoffmann res->iov_cnt = 0; 8640c244e50SGerd Hoffmann g_free(res->addrs); 8650c244e50SGerd Hoffmann res->addrs = NULL; 866e0933d91SVivek Kasireddy 867e0933d91SVivek Kasireddy if (res->blob) { 868e0933d91SVivek Kasireddy virtio_gpu_fini_udmabuf(res); 869e0933d91SVivek Kasireddy } 87062232bf4SGerd Hoffmann } 87162232bf4SGerd Hoffmann 87262232bf4SGerd Hoffmann static void 87362232bf4SGerd Hoffmann virtio_gpu_resource_attach_backing(VirtIOGPU *g, 87462232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 87562232bf4SGerd Hoffmann { 87662232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 87762232bf4SGerd Hoffmann struct virtio_gpu_resource_attach_backing ab; 87862232bf4SGerd Hoffmann int ret; 87962232bf4SGerd Hoffmann 88062232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(ab); 8811715d6b5SFarhan Ali virtio_gpu_bswap_32(&ab, sizeof(ab)); 88262232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_back_attach(ab.resource_id); 88362232bf4SGerd Hoffmann 88462232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, ab.resource_id); 88562232bf4SGerd Hoffmann if (!res) { 88662232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 88762232bf4SGerd Hoffmann __func__, ab.resource_id); 88862232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 88962232bf4SGerd Hoffmann return; 89062232bf4SGerd Hoffmann } 89162232bf4SGerd Hoffmann 892204f01b3SLi Qiang if (res->iov) { 893204f01b3SLi Qiang cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 894204f01b3SLi Qiang return; 895204f01b3SLi Qiang } 896204f01b3SLi Qiang 89770d37662SVivek Kasireddy ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd, 89870d37662SVivek Kasireddy &res->addrs, &res->iov, &res->iov_cnt); 89962232bf4SGerd Hoffmann if (ret != 0) { 90062232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 
90162232bf4SGerd Hoffmann return; 90262232bf4SGerd Hoffmann } 90362232bf4SGerd Hoffmann } 90462232bf4SGerd Hoffmann 90562232bf4SGerd Hoffmann static void 90662232bf4SGerd Hoffmann virtio_gpu_resource_detach_backing(VirtIOGPU *g, 90762232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 90862232bf4SGerd Hoffmann { 90962232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 91062232bf4SGerd Hoffmann struct virtio_gpu_resource_detach_backing detach; 91162232bf4SGerd Hoffmann 91262232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(detach); 9131715d6b5SFarhan Ali virtio_gpu_bswap_32(&detach, sizeof(detach)); 91462232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_back_detach(detach.resource_id); 91562232bf4SGerd Hoffmann 91625c001a4SVivek Kasireddy res = virtio_gpu_find_check_resource(g, detach.resource_id, true, 91725c001a4SVivek Kasireddy __func__, &cmd->error); 91825c001a4SVivek Kasireddy if (!res) { 91962232bf4SGerd Hoffmann return; 92062232bf4SGerd Hoffmann } 9213bb68f79SGerd Hoffmann virtio_gpu_cleanup_mapping(g, res); 92262232bf4SGerd Hoffmann } 92362232bf4SGerd Hoffmann 9242f47691aSGerd Hoffmann void virtio_gpu_simple_process_cmd(VirtIOGPU *g, 92562232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 92662232bf4SGerd Hoffmann { 92762232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr); 9281715d6b5SFarhan Ali virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr); 92962232bf4SGerd Hoffmann 93062232bf4SGerd Hoffmann switch (cmd->cmd_hdr.type) { 93162232bf4SGerd Hoffmann case VIRTIO_GPU_CMD_GET_DISPLAY_INFO: 93262232bf4SGerd Hoffmann virtio_gpu_get_display_info(g, cmd); 93362232bf4SGerd Hoffmann break; 9341ed2cb32SGerd Hoffmann case VIRTIO_GPU_CMD_GET_EDID: 9351ed2cb32SGerd Hoffmann virtio_gpu_get_edid(g, cmd); 9361ed2cb32SGerd Hoffmann break; 93762232bf4SGerd Hoffmann case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: 93862232bf4SGerd Hoffmann virtio_gpu_resource_create_2d(g, cmd); 93962232bf4SGerd Hoffmann break; 940e0933d91SVivek Kasireddy case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: 
941e0933d91SVivek Kasireddy if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) { 942e0933d91SVivek Kasireddy cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 943e0933d91SVivek Kasireddy break; 944e0933d91SVivek Kasireddy } 945e0933d91SVivek Kasireddy virtio_gpu_resource_create_blob(g, cmd); 946e0933d91SVivek Kasireddy break; 94762232bf4SGerd Hoffmann case VIRTIO_GPU_CMD_RESOURCE_UNREF: 94862232bf4SGerd Hoffmann virtio_gpu_resource_unref(g, cmd); 94962232bf4SGerd Hoffmann break; 95062232bf4SGerd Hoffmann case VIRTIO_GPU_CMD_RESOURCE_FLUSH: 95162232bf4SGerd Hoffmann virtio_gpu_resource_flush(g, cmd); 95262232bf4SGerd Hoffmann break; 95362232bf4SGerd Hoffmann case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: 95462232bf4SGerd Hoffmann virtio_gpu_transfer_to_host_2d(g, cmd); 95562232bf4SGerd Hoffmann break; 95662232bf4SGerd Hoffmann case VIRTIO_GPU_CMD_SET_SCANOUT: 95762232bf4SGerd Hoffmann virtio_gpu_set_scanout(g, cmd); 95862232bf4SGerd Hoffmann break; 959*32db3c63SVivek Kasireddy case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: 960*32db3c63SVivek Kasireddy if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) { 961*32db3c63SVivek Kasireddy cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 962*32db3c63SVivek Kasireddy break; 963*32db3c63SVivek Kasireddy } 964*32db3c63SVivek Kasireddy virtio_gpu_set_scanout_blob(g, cmd); 965*32db3c63SVivek Kasireddy break; 96662232bf4SGerd Hoffmann case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: 96762232bf4SGerd Hoffmann virtio_gpu_resource_attach_backing(g, cmd); 96862232bf4SGerd Hoffmann break; 96962232bf4SGerd Hoffmann case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: 97062232bf4SGerd Hoffmann virtio_gpu_resource_detach_backing(g, cmd); 97162232bf4SGerd Hoffmann break; 97262232bf4SGerd Hoffmann default: 97362232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 97462232bf4SGerd Hoffmann break; 97562232bf4SGerd Hoffmann } 97662232bf4SGerd Hoffmann if (!cmd->finished) { 97762232bf4SGerd Hoffmann virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? 
cmd->error : 97862232bf4SGerd Hoffmann VIRTIO_GPU_RESP_OK_NODATA); 97962232bf4SGerd Hoffmann } 98062232bf4SGerd Hoffmann } 98162232bf4SGerd Hoffmann 98262232bf4SGerd Hoffmann static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq) 98362232bf4SGerd Hoffmann { 98462232bf4SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 98562232bf4SGerd Hoffmann qemu_bh_schedule(g->ctrl_bh); 98662232bf4SGerd Hoffmann } 98762232bf4SGerd Hoffmann 98862232bf4SGerd Hoffmann static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq) 98962232bf4SGerd Hoffmann { 99062232bf4SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 99162232bf4SGerd Hoffmann qemu_bh_schedule(g->cursor_bh); 99262232bf4SGerd Hoffmann } 99362232bf4SGerd Hoffmann 9940c55a1cfSGerd Hoffmann void virtio_gpu_process_cmdq(VirtIOGPU *g) 9953eb769fdSGerd Hoffmann { 9963eb769fdSGerd Hoffmann struct virtio_gpu_ctrl_command *cmd; 9972f47691aSGerd Hoffmann VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); 9983eb769fdSGerd Hoffmann 999f8f3c271SMarc-André Lureau if (g->processing_cmdq) { 1000f8f3c271SMarc-André Lureau return; 1001f8f3c271SMarc-André Lureau } 1002f8f3c271SMarc-André Lureau g->processing_cmdq = true; 10033eb769fdSGerd Hoffmann while (!QTAILQ_EMPTY(&g->cmdq)) { 10043eb769fdSGerd Hoffmann cmd = QTAILQ_FIRST(&g->cmdq); 10053eb769fdSGerd Hoffmann 100650d8e25eSMarc-André Lureau if (g->parent_obj.renderer_blocked) { 10070c55a1cfSGerd Hoffmann break; 10080c55a1cfSGerd Hoffmann } 1009ad341aacSMarc-André Lureau 1010ad341aacSMarc-André Lureau /* process command */ 10112f47691aSGerd Hoffmann vgc->process_cmd(g, cmd); 1012ad341aacSMarc-André Lureau 10133eb769fdSGerd Hoffmann QTAILQ_REMOVE(&g->cmdq, cmd, next); 101450d8e25eSMarc-André Lureau if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { 10153eb769fdSGerd Hoffmann g->stats.requests++; 10163eb769fdSGerd Hoffmann } 10173eb769fdSGerd Hoffmann 10183eb769fdSGerd Hoffmann if (!cmd->finished) { 10193eb769fdSGerd Hoffmann QTAILQ_INSERT_TAIL(&g->fenceq, cmd, 
next); 10203eb769fdSGerd Hoffmann g->inflight++; 102150d8e25eSMarc-André Lureau if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { 10223eb769fdSGerd Hoffmann if (g->stats.max_inflight < g->inflight) { 10233eb769fdSGerd Hoffmann g->stats.max_inflight = g->inflight; 10243eb769fdSGerd Hoffmann } 10253eb769fdSGerd Hoffmann fprintf(stderr, "inflight: %3d (+)\r", g->inflight); 10263eb769fdSGerd Hoffmann } 10273eb769fdSGerd Hoffmann } else { 10283eb769fdSGerd Hoffmann g_free(cmd); 10293eb769fdSGerd Hoffmann } 10303eb769fdSGerd Hoffmann } 1031f8f3c271SMarc-André Lureau g->processing_cmdq = false; 10323eb769fdSGerd Hoffmann } 10333eb769fdSGerd Hoffmann 103462232bf4SGerd Hoffmann static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) 103562232bf4SGerd Hoffmann { 103662232bf4SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 103762232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd; 103862232bf4SGerd Hoffmann 103962232bf4SGerd Hoffmann if (!virtio_queue_ready(vq)) { 104062232bf4SGerd Hoffmann return; 104162232bf4SGerd Hoffmann } 104262232bf4SGerd Hoffmann 104351b19ebeSPaolo Bonzini cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); 104451b19ebeSPaolo Bonzini while (cmd) { 104562232bf4SGerd Hoffmann cmd->vq = vq; 104662232bf4SGerd Hoffmann cmd->error = 0; 104762232bf4SGerd Hoffmann cmd->finished = false; 10483eb769fdSGerd Hoffmann QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next); 104951b19ebeSPaolo Bonzini cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); 105062232bf4SGerd Hoffmann } 10519d9e1521SGerd Hoffmann 10523eb769fdSGerd Hoffmann virtio_gpu_process_cmdq(g); 105362232bf4SGerd Hoffmann } 105462232bf4SGerd Hoffmann 105562232bf4SGerd Hoffmann static void virtio_gpu_ctrl_bh(void *opaque) 105662232bf4SGerd Hoffmann { 105762232bf4SGerd Hoffmann VirtIOGPU *g = opaque; 1058cabbe8e5SGerd Hoffmann VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); 1059cabbe8e5SGerd Hoffmann 1060cabbe8e5SGerd Hoffmann vgc->handle_ctrl(&g->parent_obj.parent_obj, 
g->ctrl_vq); 106162232bf4SGerd Hoffmann } 106262232bf4SGerd Hoffmann 106362232bf4SGerd Hoffmann static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq) 106462232bf4SGerd Hoffmann { 106562232bf4SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 106651b19ebeSPaolo Bonzini VirtQueueElement *elem; 106762232bf4SGerd Hoffmann size_t s; 106862232bf4SGerd Hoffmann struct virtio_gpu_update_cursor cursor_info; 106962232bf4SGerd Hoffmann 107062232bf4SGerd Hoffmann if (!virtio_queue_ready(vq)) { 107162232bf4SGerd Hoffmann return; 107262232bf4SGerd Hoffmann } 107351b19ebeSPaolo Bonzini for (;;) { 107451b19ebeSPaolo Bonzini elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); 107551b19ebeSPaolo Bonzini if (!elem) { 107651b19ebeSPaolo Bonzini break; 107751b19ebeSPaolo Bonzini } 107851b19ebeSPaolo Bonzini 107951b19ebeSPaolo Bonzini s = iov_to_buf(elem->out_sg, elem->out_num, 0, 108062232bf4SGerd Hoffmann &cursor_info, sizeof(cursor_info)); 108162232bf4SGerd Hoffmann if (s != sizeof(cursor_info)) { 108262232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 108362232bf4SGerd Hoffmann "%s: cursor size incorrect %zu vs %zu\n", 108462232bf4SGerd Hoffmann __func__, s, sizeof(cursor_info)); 108562232bf4SGerd Hoffmann } else { 10861715d6b5SFarhan Ali virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info)); 108762232bf4SGerd Hoffmann update_cursor(g, &cursor_info); 108862232bf4SGerd Hoffmann } 108951b19ebeSPaolo Bonzini virtqueue_push(vq, elem, 0); 109062232bf4SGerd Hoffmann virtio_notify(vdev, vq); 109151b19ebeSPaolo Bonzini g_free(elem); 109262232bf4SGerd Hoffmann } 109362232bf4SGerd Hoffmann } 109462232bf4SGerd Hoffmann 109562232bf4SGerd Hoffmann static void virtio_gpu_cursor_bh(void *opaque) 109662232bf4SGerd Hoffmann { 109762232bf4SGerd Hoffmann VirtIOGPU *g = opaque; 109850d8e25eSMarc-André Lureau virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq); 109962232bf4SGerd Hoffmann } 110062232bf4SGerd Hoffmann 11010c244e50SGerd Hoffmann static const 
VMStateDescription vmstate_virtio_gpu_scanout = { 11020c244e50SGerd Hoffmann .name = "virtio-gpu-one-scanout", 11030c244e50SGerd Hoffmann .version_id = 1, 11040c244e50SGerd Hoffmann .fields = (VMStateField[]) { 11050c244e50SGerd Hoffmann VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout), 11060c244e50SGerd Hoffmann VMSTATE_UINT32(width, struct virtio_gpu_scanout), 11070c244e50SGerd Hoffmann VMSTATE_UINT32(height, struct virtio_gpu_scanout), 11080c244e50SGerd Hoffmann VMSTATE_INT32(x, struct virtio_gpu_scanout), 11090c244e50SGerd Hoffmann VMSTATE_INT32(y, struct virtio_gpu_scanout), 11100c244e50SGerd Hoffmann VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout), 11110c244e50SGerd Hoffmann VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout), 11120c244e50SGerd Hoffmann VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout), 11130c244e50SGerd Hoffmann VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout), 11140c244e50SGerd Hoffmann VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout), 11150c244e50SGerd Hoffmann VMSTATE_END_OF_LIST() 11160c244e50SGerd Hoffmann }, 11170c244e50SGerd Hoffmann }; 11180c244e50SGerd Hoffmann 11190c244e50SGerd Hoffmann static const VMStateDescription vmstate_virtio_gpu_scanouts = { 11200c244e50SGerd Hoffmann .name = "virtio-gpu-scanouts", 11210c244e50SGerd Hoffmann .version_id = 1, 11220c244e50SGerd Hoffmann .fields = (VMStateField[]) { 112350d8e25eSMarc-André Lureau VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU), 112450d8e25eSMarc-André Lureau VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs, 112550d8e25eSMarc-André Lureau struct VirtIOGPU, NULL), 112650d8e25eSMarc-André Lureau VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU, 112750d8e25eSMarc-André Lureau parent_obj.conf.max_outputs, 1, 11280c244e50SGerd Hoffmann vmstate_virtio_gpu_scanout, 11290c244e50SGerd Hoffmann struct virtio_gpu_scanout), 11300c244e50SGerd Hoffmann VMSTATE_END_OF_LIST() 11310c244e50SGerd Hoffmann }, 11320c244e50SGerd 
Hoffmann }; 11330c244e50SGerd Hoffmann 11342c21ee76SJianjun Duan static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size, 11353ddba9a9SMarkus Armbruster const VMStateField *field, JSONWriter *vmdesc) 11360c244e50SGerd Hoffmann { 11370c244e50SGerd Hoffmann VirtIOGPU *g = opaque; 11380c244e50SGerd Hoffmann struct virtio_gpu_simple_resource *res; 11390c244e50SGerd Hoffmann int i; 11400c244e50SGerd Hoffmann 11410c244e50SGerd Hoffmann /* in 2d mode we should never find unprocessed commands here */ 11420c244e50SGerd Hoffmann assert(QTAILQ_EMPTY(&g->cmdq)); 11430c244e50SGerd Hoffmann 11440c244e50SGerd Hoffmann QTAILQ_FOREACH(res, &g->reslist, next) { 11450c244e50SGerd Hoffmann qemu_put_be32(f, res->resource_id); 11460c244e50SGerd Hoffmann qemu_put_be32(f, res->width); 11470c244e50SGerd Hoffmann qemu_put_be32(f, res->height); 11480c244e50SGerd Hoffmann qemu_put_be32(f, res->format); 11490c244e50SGerd Hoffmann qemu_put_be32(f, res->iov_cnt); 11500c244e50SGerd Hoffmann for (i = 0; i < res->iov_cnt; i++) { 11510c244e50SGerd Hoffmann qemu_put_be64(f, res->addrs[i]); 11520c244e50SGerd Hoffmann qemu_put_be32(f, res->iov[i].iov_len); 11530c244e50SGerd Hoffmann } 11540c244e50SGerd Hoffmann qemu_put_buffer(f, (void *)pixman_image_get_data(res->image), 11550c244e50SGerd Hoffmann pixman_image_get_stride(res->image) * res->height); 11560c244e50SGerd Hoffmann } 11570c244e50SGerd Hoffmann qemu_put_be32(f, 0); /* end of list */ 11580c244e50SGerd Hoffmann 11592f168d07SDr. 
David Alan Gilbert return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL); 11600c244e50SGerd Hoffmann } 11610c244e50SGerd Hoffmann 11622c21ee76SJianjun Duan static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, 116303fee66fSMarc-André Lureau const VMStateField *field) 11640c244e50SGerd Hoffmann { 11650c244e50SGerd Hoffmann VirtIOGPU *g = opaque; 11660c244e50SGerd Hoffmann struct virtio_gpu_simple_resource *res; 11670c244e50SGerd Hoffmann struct virtio_gpu_scanout *scanout; 11680c244e50SGerd Hoffmann uint32_t resource_id, pformat; 11698a502efdSHalil Pasic int i; 11700c244e50SGerd Hoffmann 1171039aa5dbSPeter Maydell g->hostmem = 0; 1172039aa5dbSPeter Maydell 11730c244e50SGerd Hoffmann resource_id = qemu_get_be32(f); 11740c244e50SGerd Hoffmann while (resource_id != 0) { 1175b0ee78ffSLi Qiang res = virtio_gpu_find_resource(g, resource_id); 1176b0ee78ffSLi Qiang if (res) { 1177b0ee78ffSLi Qiang return -EINVAL; 1178b0ee78ffSLi Qiang } 1179b0ee78ffSLi Qiang 11800c244e50SGerd Hoffmann res = g_new0(struct virtio_gpu_simple_resource, 1); 11810c244e50SGerd Hoffmann res->resource_id = resource_id; 11820c244e50SGerd Hoffmann res->width = qemu_get_be32(f); 11830c244e50SGerd Hoffmann res->height = qemu_get_be32(f); 11840c244e50SGerd Hoffmann res->format = qemu_get_be32(f); 11850c244e50SGerd Hoffmann res->iov_cnt = qemu_get_be32(f); 11860c244e50SGerd Hoffmann 11870c244e50SGerd Hoffmann /* allocate */ 118883a7d3c0SMarc-André Lureau pformat = virtio_gpu_get_pixman_format(res->format); 11890c244e50SGerd Hoffmann if (!pformat) { 1190c84f0f25SPeter Maydell g_free(res); 11910c244e50SGerd Hoffmann return -EINVAL; 11920c244e50SGerd Hoffmann } 11930c244e50SGerd Hoffmann res->image = pixman_image_create_bits(pformat, 11940c244e50SGerd Hoffmann res->width, res->height, 11950c244e50SGerd Hoffmann NULL, 0); 11960c244e50SGerd Hoffmann if (!res->image) { 1197c84f0f25SPeter Maydell g_free(res); 11980c244e50SGerd Hoffmann return -EINVAL; 11990c244e50SGerd Hoffmann } 
12000c244e50SGerd Hoffmann 1201c53f5b89STao Wu res->hostmem = calc_image_hostmem(pformat, res->width, res->height); 1202039aa5dbSPeter Maydell 12030c244e50SGerd Hoffmann res->addrs = g_new(uint64_t, res->iov_cnt); 12040c244e50SGerd Hoffmann res->iov = g_new(struct iovec, res->iov_cnt); 12050c244e50SGerd Hoffmann 12060c244e50SGerd Hoffmann /* read data */ 12070c244e50SGerd Hoffmann for (i = 0; i < res->iov_cnt; i++) { 12080c244e50SGerd Hoffmann res->addrs[i] = qemu_get_be64(f); 12090c244e50SGerd Hoffmann res->iov[i].iov_len = qemu_get_be32(f); 12100c244e50SGerd Hoffmann } 12110c244e50SGerd Hoffmann qemu_get_buffer(f, (void *)pixman_image_get_data(res->image), 12120c244e50SGerd Hoffmann pixman_image_get_stride(res->image) * res->height); 12130c244e50SGerd Hoffmann 12140c244e50SGerd Hoffmann /* restore mapping */ 12150c244e50SGerd Hoffmann for (i = 0; i < res->iov_cnt; i++) { 12160c244e50SGerd Hoffmann hwaddr len = res->iov[i].iov_len; 12170c244e50SGerd Hoffmann res->iov[i].iov_base = 12188da132a5SGerd Hoffmann dma_memory_map(VIRTIO_DEVICE(g)->dma_as, 12198da132a5SGerd Hoffmann res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE); 12203bb68f79SGerd Hoffmann 12210c244e50SGerd Hoffmann if (!res->iov[i].iov_base || len != res->iov[i].iov_len) { 1222c84f0f25SPeter Maydell /* Clean up the half-a-mapping we just created... 
*/ 1223c84f0f25SPeter Maydell if (res->iov[i].iov_base) { 12248da132a5SGerd Hoffmann dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, 12258da132a5SGerd Hoffmann res->iov[i].iov_base, 1226a7f85e03SGerd Hoffmann len, 12278da132a5SGerd Hoffmann DMA_DIRECTION_TO_DEVICE, 1228a7f85e03SGerd Hoffmann 0); 1229c84f0f25SPeter Maydell } 1230c84f0f25SPeter Maydell /* ...and the mappings for previous loop iterations */ 1231c84f0f25SPeter Maydell res->iov_cnt = i; 12323bb68f79SGerd Hoffmann virtio_gpu_cleanup_mapping(g, res); 1233c84f0f25SPeter Maydell pixman_image_unref(res->image); 1234c84f0f25SPeter Maydell g_free(res); 12350c244e50SGerd Hoffmann return -EINVAL; 12360c244e50SGerd Hoffmann } 12370c244e50SGerd Hoffmann } 12380c244e50SGerd Hoffmann 12390c244e50SGerd Hoffmann QTAILQ_INSERT_HEAD(&g->reslist, res, next); 1240039aa5dbSPeter Maydell g->hostmem += res->hostmem; 12410c244e50SGerd Hoffmann 12420c244e50SGerd Hoffmann resource_id = qemu_get_be32(f); 12430c244e50SGerd Hoffmann } 12440c244e50SGerd Hoffmann 12450c244e50SGerd Hoffmann /* load & apply scanout state */ 12460c244e50SGerd Hoffmann vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); 124750d8e25eSMarc-André Lureau for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { 124850d8e25eSMarc-André Lureau scanout = &g->parent_obj.scanout[i]; 12490c244e50SGerd Hoffmann if (!scanout->resource_id) { 12500c244e50SGerd Hoffmann continue; 12510c244e50SGerd Hoffmann } 12520c244e50SGerd Hoffmann res = virtio_gpu_find_resource(g, scanout->resource_id); 12530c244e50SGerd Hoffmann if (!res) { 12540c244e50SGerd Hoffmann return -EINVAL; 12550c244e50SGerd Hoffmann } 12560c244e50SGerd Hoffmann scanout->ds = qemu_create_displaysurface_pixman(res->image); 12570c244e50SGerd Hoffmann if (!scanout->ds) { 12580c244e50SGerd Hoffmann return -EINVAL; 12590c244e50SGerd Hoffmann } 12600c244e50SGerd Hoffmann 12610c244e50SGerd Hoffmann dpy_gfx_replace_surface(scanout->con, scanout->ds); 126291155f8bSGerd Hoffmann dpy_gfx_update_full(scanout->con); 
        /* Restore the hardware cursor for this scanout, if one was set. */
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        /* Mark the resource as being scanned out on output i. */
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

/*
 * Realize the (non-virgl) virtio-gpu device: validate the configured
 * feature flags, realize the base device with this device's control and
 * cursor queue callbacks, then set up the virtqueues, bottom halves and
 * the resource/command lists.
 */
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        /* Blob resources are backed by udmabuf; refuse to start without it. */
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        /* Blob resources and virgl cannot currently be enabled together. */
        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    /* Queue 0 carries control commands, queue 1 cursor updates. */
    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

/*
 * Device reset: destroy all guest resources, drop every queued and
 * in-flight control command, then reset the common base-device state.
 */
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    /* Commands that were queued but not yet processed. */
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    /* Fenced commands still awaiting completion; keep the counter in sync. */
    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

/* Expose the current virtio-gpu config space to the guest. */
static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

/*
 * Config-space write from the guest.  Only events_clear is honoured:
 * the bits the guest writes there are cleared from events_read.
 */
static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to virtio migration
 * scheme as described in doc/virtio-migration.txt, in a sense that no
 * save/load callback are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            /* Device state is funnelled through virtio_gpu_save/load. */
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
David Alan Gilbert 137862232bf4SGerd Hoffmann static Property virtio_gpu_properties[] = { 137950d8e25eSMarc-André Lureau VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf), 138050d8e25eSMarc-André Lureau DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem, 138150d8e25eSMarc-André Lureau 256 * MiB), 1382cce386e1SVivek Kasireddy DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags, 1383cce386e1SVivek Kasireddy VIRTIO_GPU_FLAG_BLOB_ENABLED, false), 138462232bf4SGerd Hoffmann DEFINE_PROP_END_OF_LIST(), 138562232bf4SGerd Hoffmann }; 138662232bf4SGerd Hoffmann 138762232bf4SGerd Hoffmann static void virtio_gpu_class_init(ObjectClass *klass, void *data) 138862232bf4SGerd Hoffmann { 138962232bf4SGerd Hoffmann DeviceClass *dc = DEVICE_CLASS(klass); 139062232bf4SGerd Hoffmann VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 1391cabbe8e5SGerd Hoffmann VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass); 139262232bf4SGerd Hoffmann 1393cabbe8e5SGerd Hoffmann vgc->handle_ctrl = virtio_gpu_handle_ctrl; 13942f47691aSGerd Hoffmann vgc->process_cmd = virtio_gpu_simple_process_cmd; 13952c267d66SGerd Hoffmann vgc->update_cursor_data = virtio_gpu_update_cursor_data; 1396cabbe8e5SGerd Hoffmann 139762232bf4SGerd Hoffmann vdc->realize = virtio_gpu_device_realize; 139850d8e25eSMarc-André Lureau vdc->reset = virtio_gpu_reset; 139962232bf4SGerd Hoffmann vdc->get_config = virtio_gpu_get_config; 140062232bf4SGerd Hoffmann vdc->set_config = virtio_gpu_set_config; 140162232bf4SGerd Hoffmann 14020fc07498SDr. 
David Alan Gilbert dc->vmsd = &vmstate_virtio_gpu; 14034f67d30bSMarc-André Lureau device_class_set_props(dc, virtio_gpu_properties); 140462232bf4SGerd Hoffmann } 140562232bf4SGerd Hoffmann 140662232bf4SGerd Hoffmann static const TypeInfo virtio_gpu_info = { 140762232bf4SGerd Hoffmann .name = TYPE_VIRTIO_GPU, 140850d8e25eSMarc-André Lureau .parent = TYPE_VIRTIO_GPU_BASE, 140962232bf4SGerd Hoffmann .instance_size = sizeof(VirtIOGPU), 1410cabbe8e5SGerd Hoffmann .class_size = sizeof(VirtIOGPUClass), 141162232bf4SGerd Hoffmann .class_init = virtio_gpu_class_init, 141262232bf4SGerd Hoffmann }; 141362232bf4SGerd Hoffmann 141462232bf4SGerd Hoffmann static void virtio_register_types(void) 141562232bf4SGerd Hoffmann { 141662232bf4SGerd Hoffmann type_register_static(&virtio_gpu_info); 141762232bf4SGerd Hoffmann } 141862232bf4SGerd Hoffmann 141962232bf4SGerd Hoffmann type_init(virtio_register_types) 1420