162232bf4SGerd Hoffmann /* 262232bf4SGerd Hoffmann * Virtio GPU Device 362232bf4SGerd Hoffmann * 462232bf4SGerd Hoffmann * Copyright Red Hat, Inc. 2013-2014 562232bf4SGerd Hoffmann * 662232bf4SGerd Hoffmann * Authors: 762232bf4SGerd Hoffmann * Dave Airlie <airlied@redhat.com> 862232bf4SGerd Hoffmann * Gerd Hoffmann <kraxel@redhat.com> 962232bf4SGerd Hoffmann * 102e252145SGerd Hoffmann * This work is licensed under the terms of the GNU GPL, version 2 or later. 1162232bf4SGerd Hoffmann * See the COPYING file in the top-level directory. 1262232bf4SGerd Hoffmann */ 1362232bf4SGerd Hoffmann 149b8bfe21SPeter Maydell #include "qemu/osdep.h" 1562232bf4SGerd Hoffmann #include "qemu-common.h" 1662232bf4SGerd Hoffmann #include "qemu/iov.h" 1762232bf4SGerd Hoffmann #include "ui/console.h" 1862232bf4SGerd Hoffmann #include "trace.h" 1962232bf4SGerd Hoffmann #include "hw/virtio/virtio.h" 2062232bf4SGerd Hoffmann #include "hw/virtio/virtio-gpu.h" 2162232bf4SGerd Hoffmann #include "hw/virtio/virtio-bus.h" 22de889221SDr. David Alan Gilbert #include "migration/migration.h" 2303dd024fSPaolo Bonzini #include "qemu/log.h" 245e3d741cSMarc-André Lureau #include "qapi/error.h" 2562232bf4SGerd Hoffmann 260c244e50SGerd Hoffmann #define VIRTIO_GPU_VM_VERSION 1 270c244e50SGerd Hoffmann 2862232bf4SGerd Hoffmann static struct virtio_gpu_simple_resource* 2962232bf4SGerd Hoffmann virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id); 3062232bf4SGerd Hoffmann 319d9e1521SGerd Hoffmann #ifdef CONFIG_VIRGL 32a9c94277SMarkus Armbruster #include <virglrenderer.h> 339d9e1521SGerd Hoffmann #define VIRGL(_g, _virgl, _simple, ...) 
\ 349d9e1521SGerd Hoffmann do { \ 359d9e1521SGerd Hoffmann if (_g->use_virgl_renderer) { \ 369d9e1521SGerd Hoffmann _virgl(__VA_ARGS__); \ 379d9e1521SGerd Hoffmann } else { \ 389d9e1521SGerd Hoffmann _simple(__VA_ARGS__); \ 399d9e1521SGerd Hoffmann } \ 409d9e1521SGerd Hoffmann } while (0) 419d9e1521SGerd Hoffmann #else 429d9e1521SGerd Hoffmann #define VIRGL(_g, _virgl, _simple, ...) \ 439d9e1521SGerd Hoffmann do { \ 449d9e1521SGerd Hoffmann _simple(__VA_ARGS__); \ 459d9e1521SGerd Hoffmann } while (0) 469d9e1521SGerd Hoffmann #endif 479d9e1521SGerd Hoffmann 4862232bf4SGerd Hoffmann static void update_cursor_data_simple(VirtIOGPU *g, 4962232bf4SGerd Hoffmann struct virtio_gpu_scanout *s, 5062232bf4SGerd Hoffmann uint32_t resource_id) 5162232bf4SGerd Hoffmann { 5262232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 5362232bf4SGerd Hoffmann uint32_t pixels; 5462232bf4SGerd Hoffmann 5562232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, resource_id); 5662232bf4SGerd Hoffmann if (!res) { 5762232bf4SGerd Hoffmann return; 5862232bf4SGerd Hoffmann } 5962232bf4SGerd Hoffmann 6062232bf4SGerd Hoffmann if (pixman_image_get_width(res->image) != s->current_cursor->width || 6162232bf4SGerd Hoffmann pixman_image_get_height(res->image) != s->current_cursor->height) { 6262232bf4SGerd Hoffmann return; 6362232bf4SGerd Hoffmann } 6462232bf4SGerd Hoffmann 6562232bf4SGerd Hoffmann pixels = s->current_cursor->width * s->current_cursor->height; 6662232bf4SGerd Hoffmann memcpy(s->current_cursor->data, 6762232bf4SGerd Hoffmann pixman_image_get_data(res->image), 6862232bf4SGerd Hoffmann pixels * sizeof(uint32_t)); 6962232bf4SGerd Hoffmann } 7062232bf4SGerd Hoffmann 719d9e1521SGerd Hoffmann #ifdef CONFIG_VIRGL 729d9e1521SGerd Hoffmann 739d9e1521SGerd Hoffmann static void update_cursor_data_virgl(VirtIOGPU *g, 749d9e1521SGerd Hoffmann struct virtio_gpu_scanout *s, 759d9e1521SGerd Hoffmann uint32_t resource_id) 769d9e1521SGerd Hoffmann { 779d9e1521SGerd Hoffmann uint32_t width, 
height; 789d9e1521SGerd Hoffmann uint32_t pixels, *data; 799d9e1521SGerd Hoffmann 809d9e1521SGerd Hoffmann data = virgl_renderer_get_cursor_data(resource_id, &width, &height); 819d9e1521SGerd Hoffmann if (!data) { 829d9e1521SGerd Hoffmann return; 839d9e1521SGerd Hoffmann } 849d9e1521SGerd Hoffmann 859d9e1521SGerd Hoffmann if (width != s->current_cursor->width || 869d9e1521SGerd Hoffmann height != s->current_cursor->height) { 879d9e1521SGerd Hoffmann return; 889d9e1521SGerd Hoffmann } 899d9e1521SGerd Hoffmann 909d9e1521SGerd Hoffmann pixels = s->current_cursor->width * s->current_cursor->height; 919d9e1521SGerd Hoffmann memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t)); 929d9e1521SGerd Hoffmann free(data); 939d9e1521SGerd Hoffmann } 949d9e1521SGerd Hoffmann 959d9e1521SGerd Hoffmann #endif 969d9e1521SGerd Hoffmann 9762232bf4SGerd Hoffmann static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) 9862232bf4SGerd Hoffmann { 9962232bf4SGerd Hoffmann struct virtio_gpu_scanout *s; 1000c244e50SGerd Hoffmann bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR; 10162232bf4SGerd Hoffmann 10262232bf4SGerd Hoffmann if (cursor->pos.scanout_id >= g->conf.max_outputs) { 10362232bf4SGerd Hoffmann return; 10462232bf4SGerd Hoffmann } 10562232bf4SGerd Hoffmann s = &g->scanout[cursor->pos.scanout_id]; 10662232bf4SGerd Hoffmann 107e9c1b459SGerd Hoffmann trace_virtio_gpu_update_cursor(cursor->pos.scanout_id, 108e9c1b459SGerd Hoffmann cursor->pos.x, 109e9c1b459SGerd Hoffmann cursor->pos.y, 110e9c1b459SGerd Hoffmann move ? 
"move" : "update", 111e9c1b459SGerd Hoffmann cursor->resource_id); 112e9c1b459SGerd Hoffmann 1130c244e50SGerd Hoffmann if (!move) { 11462232bf4SGerd Hoffmann if (!s->current_cursor) { 11562232bf4SGerd Hoffmann s->current_cursor = cursor_alloc(64, 64); 11662232bf4SGerd Hoffmann } 11762232bf4SGerd Hoffmann 11862232bf4SGerd Hoffmann s->current_cursor->hot_x = cursor->hot_x; 11962232bf4SGerd Hoffmann s->current_cursor->hot_y = cursor->hot_y; 12062232bf4SGerd Hoffmann 12162232bf4SGerd Hoffmann if (cursor->resource_id > 0) { 1229d9e1521SGerd Hoffmann VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple, 1239d9e1521SGerd Hoffmann g, s, cursor->resource_id); 12462232bf4SGerd Hoffmann } 12562232bf4SGerd Hoffmann dpy_cursor_define(s->con, s->current_cursor); 1260c244e50SGerd Hoffmann 1270c244e50SGerd Hoffmann s->cursor = *cursor; 1280c244e50SGerd Hoffmann } else { 1290c244e50SGerd Hoffmann s->cursor.pos.x = cursor->pos.x; 1300c244e50SGerd Hoffmann s->cursor.pos.y = cursor->pos.y; 13162232bf4SGerd Hoffmann } 13262232bf4SGerd Hoffmann dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, 13362232bf4SGerd Hoffmann cursor->resource_id ? 
1 : 0); 13462232bf4SGerd Hoffmann } 13562232bf4SGerd Hoffmann 13662232bf4SGerd Hoffmann static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config) 13762232bf4SGerd Hoffmann { 13862232bf4SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 13962232bf4SGerd Hoffmann memcpy(config, &g->virtio_config, sizeof(g->virtio_config)); 14062232bf4SGerd Hoffmann } 14162232bf4SGerd Hoffmann 14262232bf4SGerd Hoffmann static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config) 14362232bf4SGerd Hoffmann { 14462232bf4SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 14562232bf4SGerd Hoffmann struct virtio_gpu_config vgconfig; 14662232bf4SGerd Hoffmann 14762232bf4SGerd Hoffmann memcpy(&vgconfig, config, sizeof(g->virtio_config)); 14862232bf4SGerd Hoffmann 14962232bf4SGerd Hoffmann if (vgconfig.events_clear) { 15062232bf4SGerd Hoffmann g->virtio_config.events_read &= ~vgconfig.events_clear; 15162232bf4SGerd Hoffmann } 15262232bf4SGerd Hoffmann } 15362232bf4SGerd Hoffmann 1549d5b731dSJason Wang static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features, 1559d5b731dSJason Wang Error **errp) 15662232bf4SGerd Hoffmann { 1579d9e1521SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 1589d9e1521SGerd Hoffmann 1599d9e1521SGerd Hoffmann if (virtio_gpu_virgl_enabled(g->conf)) { 160fff02bc0SPaolo Bonzini features |= (1 << VIRTIO_GPU_F_VIRGL); 1619d9e1521SGerd Hoffmann } 16262232bf4SGerd Hoffmann return features; 16362232bf4SGerd Hoffmann } 16462232bf4SGerd Hoffmann 1659d9e1521SGerd Hoffmann static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features) 1669d9e1521SGerd Hoffmann { 167fff02bc0SPaolo Bonzini static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL); 1689d9e1521SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 1699d9e1521SGerd Hoffmann 1709d9e1521SGerd Hoffmann g->use_virgl_renderer = ((features & virgl) == virgl); 1719d9e1521SGerd Hoffmann trace_virtio_gpu_features(g->use_virgl_renderer); 1729d9e1521SGerd Hoffmann } 1739d9e1521SGerd 
Hoffmann 17462232bf4SGerd Hoffmann static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type) 17562232bf4SGerd Hoffmann { 17662232bf4SGerd Hoffmann g->virtio_config.events_read |= event_type; 17762232bf4SGerd Hoffmann virtio_notify_config(&g->parent_obj); 17862232bf4SGerd Hoffmann } 17962232bf4SGerd Hoffmann 18062232bf4SGerd Hoffmann static struct virtio_gpu_simple_resource * 18162232bf4SGerd Hoffmann virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id) 18262232bf4SGerd Hoffmann { 18362232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 18462232bf4SGerd Hoffmann 18562232bf4SGerd Hoffmann QTAILQ_FOREACH(res, &g->reslist, next) { 18662232bf4SGerd Hoffmann if (res->resource_id == resource_id) { 18762232bf4SGerd Hoffmann return res; 18862232bf4SGerd Hoffmann } 18962232bf4SGerd Hoffmann } 19062232bf4SGerd Hoffmann return NULL; 19162232bf4SGerd Hoffmann } 19262232bf4SGerd Hoffmann 19362232bf4SGerd Hoffmann void virtio_gpu_ctrl_response(VirtIOGPU *g, 19462232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd, 19562232bf4SGerd Hoffmann struct virtio_gpu_ctrl_hdr *resp, 19662232bf4SGerd Hoffmann size_t resp_len) 19762232bf4SGerd Hoffmann { 19862232bf4SGerd Hoffmann size_t s; 19962232bf4SGerd Hoffmann 20062232bf4SGerd Hoffmann if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) { 20162232bf4SGerd Hoffmann resp->flags |= VIRTIO_GPU_FLAG_FENCE; 20262232bf4SGerd Hoffmann resp->fence_id = cmd->cmd_hdr.fence_id; 20362232bf4SGerd Hoffmann resp->ctx_id = cmd->cmd_hdr.ctx_id; 20462232bf4SGerd Hoffmann } 20562232bf4SGerd Hoffmann s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len); 20662232bf4SGerd Hoffmann if (s != resp_len) { 20762232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 20862232bf4SGerd Hoffmann "%s: response size incorrect %zu vs %zu\n", 20962232bf4SGerd Hoffmann __func__, s, resp_len); 21062232bf4SGerd Hoffmann } 21162232bf4SGerd Hoffmann virtqueue_push(cmd->vq, &cmd->elem, s); 21262232bf4SGerd Hoffmann 
virtio_notify(VIRTIO_DEVICE(g), cmd->vq); 21362232bf4SGerd Hoffmann cmd->finished = true; 21462232bf4SGerd Hoffmann } 21562232bf4SGerd Hoffmann 21662232bf4SGerd Hoffmann void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g, 21762232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd, 21862232bf4SGerd Hoffmann enum virtio_gpu_ctrl_type type) 21962232bf4SGerd Hoffmann { 22062232bf4SGerd Hoffmann struct virtio_gpu_ctrl_hdr resp; 22162232bf4SGerd Hoffmann 22262232bf4SGerd Hoffmann memset(&resp, 0, sizeof(resp)); 22362232bf4SGerd Hoffmann resp.type = type; 22462232bf4SGerd Hoffmann virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp)); 22562232bf4SGerd Hoffmann } 22662232bf4SGerd Hoffmann 22762232bf4SGerd Hoffmann static void 22862232bf4SGerd Hoffmann virtio_gpu_fill_display_info(VirtIOGPU *g, 22962232bf4SGerd Hoffmann struct virtio_gpu_resp_display_info *dpy_info) 23062232bf4SGerd Hoffmann { 23162232bf4SGerd Hoffmann int i; 23262232bf4SGerd Hoffmann 23362232bf4SGerd Hoffmann for (i = 0; i < g->conf.max_outputs; i++) { 23462232bf4SGerd Hoffmann if (g->enabled_output_bitmask & (1 << i)) { 23562232bf4SGerd Hoffmann dpy_info->pmodes[i].enabled = 1; 23662232bf4SGerd Hoffmann dpy_info->pmodes[i].r.width = g->req_state[i].width; 23762232bf4SGerd Hoffmann dpy_info->pmodes[i].r.height = g->req_state[i].height; 23862232bf4SGerd Hoffmann } 23962232bf4SGerd Hoffmann } 24062232bf4SGerd Hoffmann } 24162232bf4SGerd Hoffmann 24262232bf4SGerd Hoffmann void virtio_gpu_get_display_info(VirtIOGPU *g, 24362232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 24462232bf4SGerd Hoffmann { 24562232bf4SGerd Hoffmann struct virtio_gpu_resp_display_info display_info; 24662232bf4SGerd Hoffmann 24762232bf4SGerd Hoffmann trace_virtio_gpu_cmd_get_display_info(); 24862232bf4SGerd Hoffmann memset(&display_info, 0, sizeof(display_info)); 24962232bf4SGerd Hoffmann display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; 25062232bf4SGerd Hoffmann virtio_gpu_fill_display_info(g, &display_info); 
25162232bf4SGerd Hoffmann virtio_gpu_ctrl_response(g, cmd, &display_info.hdr, 25262232bf4SGerd Hoffmann sizeof(display_info)); 25362232bf4SGerd Hoffmann } 25462232bf4SGerd Hoffmann 25562232bf4SGerd Hoffmann static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format) 25662232bf4SGerd Hoffmann { 25762232bf4SGerd Hoffmann switch (virtio_gpu_format) { 25862232bf4SGerd Hoffmann #ifdef HOST_WORDS_BIGENDIAN 25962232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: 26062232bf4SGerd Hoffmann return PIXMAN_b8g8r8x8; 26162232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: 26262232bf4SGerd Hoffmann return PIXMAN_b8g8r8a8; 26362232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: 26462232bf4SGerd Hoffmann return PIXMAN_x8r8g8b8; 26562232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: 26662232bf4SGerd Hoffmann return PIXMAN_a8r8g8b8; 26762232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: 26862232bf4SGerd Hoffmann return PIXMAN_r8g8b8x8; 26962232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: 27062232bf4SGerd Hoffmann return PIXMAN_r8g8b8a8; 27162232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: 27262232bf4SGerd Hoffmann return PIXMAN_x8b8g8r8; 27362232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: 27462232bf4SGerd Hoffmann return PIXMAN_a8b8g8r8; 27562232bf4SGerd Hoffmann #else 27662232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: 27762232bf4SGerd Hoffmann return PIXMAN_x8r8g8b8; 27862232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: 27962232bf4SGerd Hoffmann return PIXMAN_a8r8g8b8; 28062232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: 28162232bf4SGerd Hoffmann return PIXMAN_b8g8r8x8; 28262232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: 28362232bf4SGerd Hoffmann return PIXMAN_b8g8r8a8; 28462232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: 28562232bf4SGerd Hoffmann return PIXMAN_x8b8g8r8; 28662232bf4SGerd Hoffmann case 
VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: 28762232bf4SGerd Hoffmann return PIXMAN_a8b8g8r8; 28862232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: 28962232bf4SGerd Hoffmann return PIXMAN_r8g8b8x8; 29062232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: 29162232bf4SGerd Hoffmann return PIXMAN_r8g8b8a8; 29262232bf4SGerd Hoffmann #endif 29362232bf4SGerd Hoffmann default: 29462232bf4SGerd Hoffmann return 0; 29562232bf4SGerd Hoffmann } 29662232bf4SGerd Hoffmann } 29762232bf4SGerd Hoffmann 29862232bf4SGerd Hoffmann static void virtio_gpu_resource_create_2d(VirtIOGPU *g, 29962232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 30062232bf4SGerd Hoffmann { 30162232bf4SGerd Hoffmann pixman_format_code_t pformat; 30262232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 30362232bf4SGerd Hoffmann struct virtio_gpu_resource_create_2d c2d; 30462232bf4SGerd Hoffmann 30562232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(c2d); 30662232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format, 30762232bf4SGerd Hoffmann c2d.width, c2d.height); 30862232bf4SGerd Hoffmann 30962232bf4SGerd Hoffmann if (c2d.resource_id == 0) { 31062232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", 31162232bf4SGerd Hoffmann __func__); 31262232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 31362232bf4SGerd Hoffmann return; 31462232bf4SGerd Hoffmann } 31562232bf4SGerd Hoffmann 31662232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, c2d.resource_id); 31762232bf4SGerd Hoffmann if (res) { 31862232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", 31962232bf4SGerd Hoffmann __func__, c2d.resource_id); 32062232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 32162232bf4SGerd Hoffmann return; 32262232bf4SGerd Hoffmann } 32362232bf4SGerd Hoffmann 32462232bf4SGerd Hoffmann res = g_new0(struct virtio_gpu_simple_resource, 1); 32562232bf4SGerd Hoffmann 
32662232bf4SGerd Hoffmann res->width = c2d.width; 32762232bf4SGerd Hoffmann res->height = c2d.height; 32862232bf4SGerd Hoffmann res->format = c2d.format; 32962232bf4SGerd Hoffmann res->resource_id = c2d.resource_id; 33062232bf4SGerd Hoffmann 33162232bf4SGerd Hoffmann pformat = get_pixman_format(c2d.format); 33262232bf4SGerd Hoffmann if (!pformat) { 33362232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 33462232bf4SGerd Hoffmann "%s: host couldn't handle guest format %d\n", 33562232bf4SGerd Hoffmann __func__, c2d.format); 33662232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 33762232bf4SGerd Hoffmann return; 33862232bf4SGerd Hoffmann } 33962232bf4SGerd Hoffmann res->image = pixman_image_create_bits(pformat, 34062232bf4SGerd Hoffmann c2d.width, 34162232bf4SGerd Hoffmann c2d.height, 34262232bf4SGerd Hoffmann NULL, 0); 34362232bf4SGerd Hoffmann 34462232bf4SGerd Hoffmann if (!res->image) { 34562232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 34662232bf4SGerd Hoffmann "%s: resource creation failed %d %d %d\n", 34762232bf4SGerd Hoffmann __func__, c2d.resource_id, c2d.width, c2d.height); 34862232bf4SGerd Hoffmann g_free(res); 34962232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; 35062232bf4SGerd Hoffmann return; 35162232bf4SGerd Hoffmann } 35262232bf4SGerd Hoffmann 35362232bf4SGerd Hoffmann QTAILQ_INSERT_HEAD(&g->reslist, res, next); 35462232bf4SGerd Hoffmann } 35562232bf4SGerd Hoffmann 35662232bf4SGerd Hoffmann static void virtio_gpu_resource_destroy(VirtIOGPU *g, 35762232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res) 35862232bf4SGerd Hoffmann { 35962232bf4SGerd Hoffmann pixman_image_unref(res->image); 36062232bf4SGerd Hoffmann QTAILQ_REMOVE(&g->reslist, res, next); 36162232bf4SGerd Hoffmann g_free(res); 36262232bf4SGerd Hoffmann } 36362232bf4SGerd Hoffmann 36462232bf4SGerd Hoffmann static void virtio_gpu_resource_unref(VirtIOGPU *g, 36562232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 36662232bf4SGerd 
Hoffmann { 36762232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 36862232bf4SGerd Hoffmann struct virtio_gpu_resource_unref unref; 36962232bf4SGerd Hoffmann 37062232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(unref); 37162232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_unref(unref.resource_id); 37262232bf4SGerd Hoffmann 37362232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, unref.resource_id); 37462232bf4SGerd Hoffmann if (!res) { 37562232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 37662232bf4SGerd Hoffmann __func__, unref.resource_id); 37762232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 37862232bf4SGerd Hoffmann return; 37962232bf4SGerd Hoffmann } 38062232bf4SGerd Hoffmann virtio_gpu_resource_destroy(g, res); 38162232bf4SGerd Hoffmann } 38262232bf4SGerd Hoffmann 38362232bf4SGerd Hoffmann static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g, 38462232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 38562232bf4SGerd Hoffmann { 38662232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 38762232bf4SGerd Hoffmann int h; 38862232bf4SGerd Hoffmann uint32_t src_offset, dst_offset, stride; 38962232bf4SGerd Hoffmann int bpp; 39062232bf4SGerd Hoffmann pixman_format_code_t format; 39162232bf4SGerd Hoffmann struct virtio_gpu_transfer_to_host_2d t2d; 39262232bf4SGerd Hoffmann 39362232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(t2d); 39462232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id); 39562232bf4SGerd Hoffmann 39662232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, t2d.resource_id); 39762232bf4SGerd Hoffmann if (!res || !res->iov) { 39862232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 39962232bf4SGerd Hoffmann __func__, t2d.resource_id); 40062232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 40162232bf4SGerd Hoffmann return; 40262232bf4SGerd Hoffmann } 40362232bf4SGerd Hoffmann 
40462232bf4SGerd Hoffmann if (t2d.r.x > res->width || 40562232bf4SGerd Hoffmann t2d.r.y > res->height || 40662232bf4SGerd Hoffmann t2d.r.width > res->width || 40762232bf4SGerd Hoffmann t2d.r.height > res->height || 40862232bf4SGerd Hoffmann t2d.r.x + t2d.r.width > res->width || 40962232bf4SGerd Hoffmann t2d.r.y + t2d.r.height > res->height) { 41062232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource" 41162232bf4SGerd Hoffmann " bounds for resource %d: %d %d %d %d vs %d %d\n", 41262232bf4SGerd Hoffmann __func__, t2d.resource_id, t2d.r.x, t2d.r.y, 41362232bf4SGerd Hoffmann t2d.r.width, t2d.r.height, res->width, res->height); 41462232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 41562232bf4SGerd Hoffmann return; 41662232bf4SGerd Hoffmann } 41762232bf4SGerd Hoffmann 41862232bf4SGerd Hoffmann format = pixman_image_get_format(res->image); 41962232bf4SGerd Hoffmann bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8; 42062232bf4SGerd Hoffmann stride = pixman_image_get_stride(res->image); 42162232bf4SGerd Hoffmann 42262232bf4SGerd Hoffmann if (t2d.offset || t2d.r.x || t2d.r.y || 42362232bf4SGerd Hoffmann t2d.r.width != pixman_image_get_width(res->image)) { 42462232bf4SGerd Hoffmann void *img_data = pixman_image_get_data(res->image); 42562232bf4SGerd Hoffmann for (h = 0; h < t2d.r.height; h++) { 42662232bf4SGerd Hoffmann src_offset = t2d.offset + stride * h; 42762232bf4SGerd Hoffmann dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp); 42862232bf4SGerd Hoffmann 42962232bf4SGerd Hoffmann iov_to_buf(res->iov, res->iov_cnt, src_offset, 43062232bf4SGerd Hoffmann (uint8_t *)img_data 43162232bf4SGerd Hoffmann + dst_offset, t2d.r.width * bpp); 43262232bf4SGerd Hoffmann } 43362232bf4SGerd Hoffmann } else { 43462232bf4SGerd Hoffmann iov_to_buf(res->iov, res->iov_cnt, 0, 43562232bf4SGerd Hoffmann pixman_image_get_data(res->image), 43662232bf4SGerd Hoffmann pixman_image_get_stride(res->image) 43762232bf4SGerd Hoffmann * 
pixman_image_get_height(res->image)); 43862232bf4SGerd Hoffmann } 43962232bf4SGerd Hoffmann } 44062232bf4SGerd Hoffmann 44162232bf4SGerd Hoffmann static void virtio_gpu_resource_flush(VirtIOGPU *g, 44262232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 44362232bf4SGerd Hoffmann { 44462232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 44562232bf4SGerd Hoffmann struct virtio_gpu_resource_flush rf; 44662232bf4SGerd Hoffmann pixman_region16_t flush_region; 44762232bf4SGerd Hoffmann int i; 44862232bf4SGerd Hoffmann 44962232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(rf); 45062232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_flush(rf.resource_id, 45162232bf4SGerd Hoffmann rf.r.width, rf.r.height, rf.r.x, rf.r.y); 45262232bf4SGerd Hoffmann 45362232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, rf.resource_id); 45462232bf4SGerd Hoffmann if (!res) { 45562232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 45662232bf4SGerd Hoffmann __func__, rf.resource_id); 45762232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 45862232bf4SGerd Hoffmann return; 45962232bf4SGerd Hoffmann } 46062232bf4SGerd Hoffmann 46162232bf4SGerd Hoffmann if (rf.r.x > res->width || 46262232bf4SGerd Hoffmann rf.r.y > res->height || 46362232bf4SGerd Hoffmann rf.r.width > res->width || 46462232bf4SGerd Hoffmann rf.r.height > res->height || 46562232bf4SGerd Hoffmann rf.r.x + rf.r.width > res->width || 46662232bf4SGerd Hoffmann rf.r.y + rf.r.height > res->height) { 46762232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource" 46862232bf4SGerd Hoffmann " bounds for resource %d: %d %d %d %d vs %d %d\n", 46962232bf4SGerd Hoffmann __func__, rf.resource_id, rf.r.x, rf.r.y, 47062232bf4SGerd Hoffmann rf.r.width, rf.r.height, res->width, res->height); 47162232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 47262232bf4SGerd Hoffmann return; 47362232bf4SGerd Hoffmann } 47462232bf4SGerd 
Hoffmann 47562232bf4SGerd Hoffmann pixman_region_init_rect(&flush_region, 47662232bf4SGerd Hoffmann rf.r.x, rf.r.y, rf.r.width, rf.r.height); 4772fe76055SMarc-André Lureau for (i = 0; i < g->conf.max_outputs; i++) { 47862232bf4SGerd Hoffmann struct virtio_gpu_scanout *scanout; 47962232bf4SGerd Hoffmann pixman_region16_t region, finalregion; 48062232bf4SGerd Hoffmann pixman_box16_t *extents; 48162232bf4SGerd Hoffmann 48262232bf4SGerd Hoffmann if (!(res->scanout_bitmask & (1 << i))) { 48362232bf4SGerd Hoffmann continue; 48462232bf4SGerd Hoffmann } 48562232bf4SGerd Hoffmann scanout = &g->scanout[i]; 48662232bf4SGerd Hoffmann 48762232bf4SGerd Hoffmann pixman_region_init(&finalregion); 48862232bf4SGerd Hoffmann pixman_region_init_rect(®ion, scanout->x, scanout->y, 48962232bf4SGerd Hoffmann scanout->width, scanout->height); 49062232bf4SGerd Hoffmann 49162232bf4SGerd Hoffmann pixman_region_intersect(&finalregion, &flush_region, ®ion); 49262232bf4SGerd Hoffmann pixman_region_translate(&finalregion, -scanout->x, -scanout->y); 49362232bf4SGerd Hoffmann extents = pixman_region_extents(&finalregion); 49462232bf4SGerd Hoffmann /* work out the area we need to update for each console */ 49562232bf4SGerd Hoffmann dpy_gfx_update(g->scanout[i].con, 49662232bf4SGerd Hoffmann extents->x1, extents->y1, 49762232bf4SGerd Hoffmann extents->x2 - extents->x1, 49862232bf4SGerd Hoffmann extents->y2 - extents->y1); 49962232bf4SGerd Hoffmann 50062232bf4SGerd Hoffmann pixman_region_fini(®ion); 50162232bf4SGerd Hoffmann pixman_region_fini(&finalregion); 50262232bf4SGerd Hoffmann } 50362232bf4SGerd Hoffmann pixman_region_fini(&flush_region); 50462232bf4SGerd Hoffmann } 50562232bf4SGerd Hoffmann 506fa06e5cbSGerd Hoffmann static void virtio_unref_resource(pixman_image_t *image, void *data) 507fa06e5cbSGerd Hoffmann { 508fa06e5cbSGerd Hoffmann pixman_image_unref(data); 509fa06e5cbSGerd Hoffmann } 510fa06e5cbSGerd Hoffmann 51162232bf4SGerd Hoffmann static void virtio_gpu_set_scanout(VirtIOGPU *g, 
51262232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 51362232bf4SGerd Hoffmann { 51462232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 51562232bf4SGerd Hoffmann struct virtio_gpu_scanout *scanout; 51662232bf4SGerd Hoffmann pixman_format_code_t format; 51762232bf4SGerd Hoffmann uint32_t offset; 51862232bf4SGerd Hoffmann int bpp; 51962232bf4SGerd Hoffmann struct virtio_gpu_set_scanout ss; 52062232bf4SGerd Hoffmann 52162232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(ss); 52262232bf4SGerd Hoffmann trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, 52362232bf4SGerd Hoffmann ss.r.width, ss.r.height, ss.r.x, ss.r.y); 52462232bf4SGerd Hoffmann 5252fe76055SMarc-André Lureau if (ss.scanout_id >= g->conf.max_outputs) { 526fe89fdebSMarc-André Lureau qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", 527fe89fdebSMarc-André Lureau __func__, ss.scanout_id); 528fe89fdebSMarc-André Lureau cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; 529fe89fdebSMarc-André Lureau return; 530fe89fdebSMarc-André Lureau } 531fe89fdebSMarc-André Lureau 53262232bf4SGerd Hoffmann g->enable = 1; 53362232bf4SGerd Hoffmann if (ss.resource_id == 0) { 53462232bf4SGerd Hoffmann scanout = &g->scanout[ss.scanout_id]; 53562232bf4SGerd Hoffmann if (scanout->resource_id) { 53662232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, scanout->resource_id); 53762232bf4SGerd Hoffmann if (res) { 53862232bf4SGerd Hoffmann res->scanout_bitmask &= ~(1 << ss.scanout_id); 53962232bf4SGerd Hoffmann } 54062232bf4SGerd Hoffmann } 541fe89fdebSMarc-André Lureau if (ss.scanout_id == 0) { 54262232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 54362232bf4SGerd Hoffmann "%s: illegal scanout id specified %d", 54462232bf4SGerd Hoffmann __func__, ss.scanout_id); 54562232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; 54662232bf4SGerd Hoffmann return; 54762232bf4SGerd Hoffmann } 54862232bf4SGerd Hoffmann dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, 
NULL); /* end of a dpy_gfx_replace_surface() call started in the previous hunk:
        * resource_id == 0 disables this scanout, surface is replaced by NULL */
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    /* All rectangle fields are guest-controlled: reject anything that does
     * not fit inside the backing resource before computing pixel offsets. */
    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    /* Byte offset of the scanout rectangle inside the resource image. */
    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    /* Only (re)create the display surface when the backing pixels or the
     * rectangle geometry actually changed. */
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        /* Create a sub-image that shares the resource's pixel storage, and
         * take a reference on the resource image so it outlives the surface
         * (released via the destroy function when 'rect' goes away). */
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    /* Record the new scanout -> resource binding. */
    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

/*
 * Parse the guest-supplied virtio_gpu_mem_entry table that follows an
 * ATTACH_BACKING request and map every entry into host memory.
 *
 * On success returns 0 and fills *iov (host mappings) and, when 'addr' is
 * non-NULL, *addr (the raw guest physical addresses, kept for migration).
 * On failure returns -1 with *iov/*addr freed and NULLed; partial mappings
 * are undone via virtio_gpu_cleanup_mapping_iov().
 */
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    /* Cap the guest-controlled entry count before sizing allocations. */
    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    /* Copy the entry table out of the request, skipping the fixed header. */
    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (addr) {
            (*addr)[i] = ents[i].addr;
        }
        /* A short mapping (len changed) counts as failure too. */
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            /* Unmap the i entries mapped so far, then free everything. */
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

/* Unmap 'count' previously mapped entries and free the iovec array itself. */
void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

/* Release a resource's backing mappings and address table. */
static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

/*
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: attach guest pages as the backing
 * store of an existing resource.
 *
 * NOTE(review): there is no check that the resource does not already have
 * backing attached; a second ATTACH_BACKING would overwrite res->addrs and
 * res->iov and presumably leak the previous mappings — worth confirming
 * against the callers/spec.
 */
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: drop a resource's backing store.
 * Rejects resources that have no backing attached (res->iov == NULL). */
static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

/* Dispatch one control-queue command in 2D (non-virgl) mode.
 * (Body continues in the next hunk.) */
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    /* (continuation of virtio_gpu_simple_process_cmd) route the request to
     * its 2D handler; unknown types get VIRTIO_GPU_RESP_ERR_UNSPEC. */
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    /* Handlers that already sent a response set cmd->finished; otherwise
     * send a plain OK/error nodata response here. */
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

/* Control-queue notification: defer processing to the ctrl bottom half. */
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

/* Cursor-queue notification: defer processing to the cursor bottom half. */
static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

/*
 * Drain the queued control commands in order.  Stops early when a command
 * reports 'waiting' (it stays at the head and is retried on the next call,
 * e.g. from virtio_gpu_gl_block when the renderer unblocks).  Commands not
 * finished synchronously are moved to fenceq and counted as in flight.
 */
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            /* Response comes later (fence); track it as in flight. */
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                /* Live stats ticker on stderr ('\r' keeps it on one line);
                 * only active with the "stats" property enabled. */
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

/*
 * Bottom-half body for the control queue: pop every available request,
 * queue it on cmdq, then process the queue.  In virgl mode the renderer is
 * lazily initialized on first use and fences are polled afterwards.
 */
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

/* Control-queue bottom half; scheduled by virtio_gpu_handle_ctrl_cb. */
static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

/*
 * Cursor queue handler: each element carries a virtio_gpu_update_cursor
 * request, applied synchronously via update_cursor().  Undersized requests
 * are logged and skipped; the element is always pushed back and the guest
 * notified.
 */
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

/* Cursor-queue bottom half; scheduled by virtio_gpu_handle_cursor_cb. */
static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

/* Console callbacks: the guest drives all rendering explicitly, so
 * invalidate/update/text-update are intentionally empty. */
static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

/*
 * UI geometry change for output 'idx': record the requested state, update
 * the enabled-output bitmask (0x0 size disables the output), and raise a
 * DISPLAY event so the guest re-queries display info.
 */
static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

/* GL block/unblock from the UI, counted so nested blocks balance out.
 * (Body continues in the next hunk.) */
static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    /* (continuation of virtio_gpu_gl_block) unblock count must never go
     * negative; when the last block is released, resume command processing
     * that virtio_gpu_process_cmdq left pending. */
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}

/* Console hooks for this device; registered via graphic_console_init(). */
const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

/* Migration format of one scanout (geometry + cursor state). */
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

/* Migration format of the scanout array; max_outputs must match on the
 * destination (VMSTATE_UINT32_EQUAL). */
static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

/*
 * Save device state for migration (2D mode only; virgl mode is blocked by
 * the migration blocker set in realize).  Stream layout: generic virtio
 * state, then for each resource: id, geometry, format, the backing-entry
 * table (guest address + length pairs) and the raw pixel data; a zero
 * resource id terminates the list, followed by the scanout vmstate.
 */
static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(g);
    struct virtio_gpu_simple_resource *res;
    int i;

    virtio_save(vdev, f);

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

/*
 * Load counterpart of virtio_gpu_save(): rebuild every resource, its pixel
 * contents and its guest-memory mappings, then re-apply the scanout state.
 *
 * NOTE(review): the -EINVAL error paths below return without freeing the
 * partially constructed 'res' (and its image/addrs/iov), leaking them; the
 * incoming iov_len/iov_cnt values are also taken from the stream without
 * cross-checking against the resource size — confirm against later upstream
 * hardening before reusing this code.
 * (Body continues in the next hunk.)
 */
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(g);
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i, ret;

    ret = virtio_load(vdev, f, VIRTIO_GPU_VM_VERSION);
    if (ret) {
        return ret;
    }

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            return -EINVAL;
10530c244e50SGerd Hoffmann } 10540c244e50SGerd Hoffmann 10550c244e50SGerd Hoffmann res->addrs = g_new(uint64_t, res->iov_cnt); 10560c244e50SGerd Hoffmann res->iov = g_new(struct iovec, res->iov_cnt); 10570c244e50SGerd Hoffmann 10580c244e50SGerd Hoffmann /* read data */ 10590c244e50SGerd Hoffmann for (i = 0; i < res->iov_cnt; i++) { 10600c244e50SGerd Hoffmann res->addrs[i] = qemu_get_be64(f); 10610c244e50SGerd Hoffmann res->iov[i].iov_len = qemu_get_be32(f); 10620c244e50SGerd Hoffmann } 10630c244e50SGerd Hoffmann qemu_get_buffer(f, (void *)pixman_image_get_data(res->image), 10640c244e50SGerd Hoffmann pixman_image_get_stride(res->image) * res->height); 10650c244e50SGerd Hoffmann 10660c244e50SGerd Hoffmann /* restore mapping */ 10670c244e50SGerd Hoffmann for (i = 0; i < res->iov_cnt; i++) { 10680c244e50SGerd Hoffmann hwaddr len = res->iov[i].iov_len; 10690c244e50SGerd Hoffmann res->iov[i].iov_base = 10700c244e50SGerd Hoffmann cpu_physical_memory_map(res->addrs[i], &len, 1); 10710c244e50SGerd Hoffmann if (!res->iov[i].iov_base || len != res->iov[i].iov_len) { 10720c244e50SGerd Hoffmann return -EINVAL; 10730c244e50SGerd Hoffmann } 10740c244e50SGerd Hoffmann } 10750c244e50SGerd Hoffmann 10760c244e50SGerd Hoffmann QTAILQ_INSERT_HEAD(&g->reslist, res, next); 10770c244e50SGerd Hoffmann 10780c244e50SGerd Hoffmann resource_id = qemu_get_be32(f); 10790c244e50SGerd Hoffmann } 10800c244e50SGerd Hoffmann 10810c244e50SGerd Hoffmann /* load & apply scanout state */ 10820c244e50SGerd Hoffmann vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); 10830c244e50SGerd Hoffmann for (i = 0; i < g->conf.max_outputs; i++) { 10840c244e50SGerd Hoffmann scanout = &g->scanout[i]; 10850c244e50SGerd Hoffmann if (!scanout->resource_id) { 10860c244e50SGerd Hoffmann continue; 10870c244e50SGerd Hoffmann } 10880c244e50SGerd Hoffmann res = virtio_gpu_find_resource(g, scanout->resource_id); 10890c244e50SGerd Hoffmann if (!res) { 10900c244e50SGerd Hoffmann return -EINVAL; 10910c244e50SGerd Hoffmann } 
10920c244e50SGerd Hoffmann scanout->ds = qemu_create_displaysurface_pixman(res->image); 10930c244e50SGerd Hoffmann if (!scanout->ds) { 10940c244e50SGerd Hoffmann return -EINVAL; 10950c244e50SGerd Hoffmann } 10960c244e50SGerd Hoffmann 10970c244e50SGerd Hoffmann dpy_gfx_replace_surface(scanout->con, scanout->ds); 10980c244e50SGerd Hoffmann dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height); 10990c244e50SGerd Hoffmann update_cursor(g, &scanout->cursor); 11000c244e50SGerd Hoffmann res->scanout_bitmask |= (1 << i); 11010c244e50SGerd Hoffmann } 11020c244e50SGerd Hoffmann 11030c244e50SGerd Hoffmann return 0; 11040c244e50SGerd Hoffmann } 11050c244e50SGerd Hoffmann 110662232bf4SGerd Hoffmann static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) 110762232bf4SGerd Hoffmann { 110862232bf4SGerd Hoffmann VirtIODevice *vdev = VIRTIO_DEVICE(qdev); 110962232bf4SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(qdev); 11109d9e1521SGerd Hoffmann bool have_virgl; 111162232bf4SGerd Hoffmann int i; 111262232bf4SGerd Hoffmann 1113acfc4846SMarc-André Lureau if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) { 1114acfc4846SMarc-André Lureau error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS); 11155e3d741cSMarc-André Lureau return; 11165e3d741cSMarc-André Lureau } 11175e3d741cSMarc-André Lureau 111862232bf4SGerd Hoffmann g->config_size = sizeof(struct virtio_gpu_config); 111962232bf4SGerd Hoffmann g->virtio_config.num_scanouts = g->conf.max_outputs; 112062232bf4SGerd Hoffmann virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU, 112162232bf4SGerd Hoffmann g->config_size); 112262232bf4SGerd Hoffmann 112362232bf4SGerd Hoffmann g->req_state[0].width = 1024; 112462232bf4SGerd Hoffmann g->req_state[0].height = 768; 112562232bf4SGerd Hoffmann 11269d9e1521SGerd Hoffmann g->use_virgl_renderer = false; 11279d9e1521SGerd Hoffmann #if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN) 11289d9e1521SGerd Hoffmann have_virgl = false; 11299d9e1521SGerd 
Hoffmann #else 11309d9e1521SGerd Hoffmann have_virgl = display_opengl; 11319d9e1521SGerd Hoffmann #endif 11329d9e1521SGerd Hoffmann if (!have_virgl) { 11339d9e1521SGerd Hoffmann g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED); 11349d9e1521SGerd Hoffmann } 11359d9e1521SGerd Hoffmann 11369d9e1521SGerd Hoffmann if (virtio_gpu_virgl_enabled(g->conf)) { 11379d9e1521SGerd Hoffmann /* use larger control queue in 3d mode */ 11389d9e1521SGerd Hoffmann g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb); 11399d9e1521SGerd Hoffmann g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); 11409d9e1521SGerd Hoffmann g->virtio_config.num_capsets = 1; 11419d9e1521SGerd Hoffmann } else { 114262232bf4SGerd Hoffmann g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb); 114362232bf4SGerd Hoffmann g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); 11449d9e1521SGerd Hoffmann } 114562232bf4SGerd Hoffmann 114662232bf4SGerd Hoffmann g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g); 114762232bf4SGerd Hoffmann g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g); 114862232bf4SGerd Hoffmann QTAILQ_INIT(&g->reslist); 11493eb769fdSGerd Hoffmann QTAILQ_INIT(&g->cmdq); 115062232bf4SGerd Hoffmann QTAILQ_INIT(&g->fenceq); 115162232bf4SGerd Hoffmann 115262232bf4SGerd Hoffmann g->enabled_output_bitmask = 1; 115362232bf4SGerd Hoffmann g->qdev = qdev; 115462232bf4SGerd Hoffmann 115562232bf4SGerd Hoffmann for (i = 0; i < g->conf.max_outputs; i++) { 115662232bf4SGerd Hoffmann g->scanout[i].con = 115762232bf4SGerd Hoffmann graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g); 115862232bf4SGerd Hoffmann if (i > 0) { 115962232bf4SGerd Hoffmann dpy_gfx_replace_surface(g->scanout[i].con, NULL); 116062232bf4SGerd Hoffmann } 116162232bf4SGerd Hoffmann } 1162fa49e465SGerd Hoffmann 11630c244e50SGerd Hoffmann if (virtio_gpu_virgl_enabled(g->conf)) { 1164de889221SDr. 
        /* (continuation of virtio_gpu_device_realize) */
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker);
    }
}

/* qdev unrealize: drop the virgl migration blocker if one was registered. */
static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

/*
 * Device reset: destroy all resources, clear every scanout binding and, in
 * virgl mode, reset the renderer and fall back to the 2D path until the
 * guest re-enables virgl.
 */
static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        /* Disabled: req_state is deliberately preserved across reset. */
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/* Defines vmstate_virtio_gpu, wired to the save/load handlers above. */
VMSTATE_VIRTIO_DEVICE(gpu, VIRTIO_GPU_VM_VERSION, virtio_gpu_load,
                      virtio_gpu_save);

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

/* QOM class init: hook up the virtio device callbacks and the vmstate. */
static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

/* Compile-time checks that the command structs match the virtio-gpu wire
 * format sizes (continued in the next hunk). */
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
127562232bf4SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56); 127662232bf4SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16); 127762232bf4SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32); 127862232bf4SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32); 127962232bf4SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408); 12809d9e1521SGerd Hoffmann 12819d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72); 12829d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72); 12839d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96); 12849d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24); 12859d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32); 12869d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32); 12879d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32); 12889d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40); 12899d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32); 12909d9e1521SGerd Hoffmann QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24); 1291