162232bf4SGerd Hoffmann /* 262232bf4SGerd Hoffmann * Virtio GPU Device 362232bf4SGerd Hoffmann * 462232bf4SGerd Hoffmann * Copyright Red Hat, Inc. 2013-2014 562232bf4SGerd Hoffmann * 662232bf4SGerd Hoffmann * Authors: 762232bf4SGerd Hoffmann * Dave Airlie <airlied@redhat.com> 862232bf4SGerd Hoffmann * Gerd Hoffmann <kraxel@redhat.com> 962232bf4SGerd Hoffmann * 102e252145SGerd Hoffmann * This work is licensed under the terms of the GNU GPL, version 2 or later. 1162232bf4SGerd Hoffmann * See the COPYING file in the top-level directory. 1262232bf4SGerd Hoffmann */ 1362232bf4SGerd Hoffmann 149b8bfe21SPeter Maydell #include "qemu/osdep.h" 1562232bf4SGerd Hoffmann #include "qemu-common.h" 1662232bf4SGerd Hoffmann #include "qemu/iov.h" 1762232bf4SGerd Hoffmann #include "ui/console.h" 1862232bf4SGerd Hoffmann #include "trace.h" 1962232bf4SGerd Hoffmann #include "hw/virtio/virtio.h" 2062232bf4SGerd Hoffmann #include "hw/virtio/virtio-gpu.h" 2162232bf4SGerd Hoffmann #include "hw/virtio/virtio-bus.h" 22*03dd024fSPaolo Bonzini #include "qemu/log.h" 2362232bf4SGerd Hoffmann 2462232bf4SGerd Hoffmann static struct virtio_gpu_simple_resource* 2562232bf4SGerd Hoffmann virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id); 2662232bf4SGerd Hoffmann 279d9e1521SGerd Hoffmann #ifdef CONFIG_VIRGL 289d9e1521SGerd Hoffmann #include "virglrenderer.h" 299d9e1521SGerd Hoffmann #define VIRGL(_g, _virgl, _simple, ...) \ 309d9e1521SGerd Hoffmann do { \ 319d9e1521SGerd Hoffmann if (_g->use_virgl_renderer) { \ 329d9e1521SGerd Hoffmann _virgl(__VA_ARGS__); \ 339d9e1521SGerd Hoffmann } else { \ 349d9e1521SGerd Hoffmann _simple(__VA_ARGS__); \ 359d9e1521SGerd Hoffmann } \ 369d9e1521SGerd Hoffmann } while (0) 379d9e1521SGerd Hoffmann #else 389d9e1521SGerd Hoffmann #define VIRGL(_g, _virgl, _simple, ...) 
\ 399d9e1521SGerd Hoffmann do { \ 409d9e1521SGerd Hoffmann _simple(__VA_ARGS__); \ 419d9e1521SGerd Hoffmann } while (0) 429d9e1521SGerd Hoffmann #endif 439d9e1521SGerd Hoffmann 4462232bf4SGerd Hoffmann static void update_cursor_data_simple(VirtIOGPU *g, 4562232bf4SGerd Hoffmann struct virtio_gpu_scanout *s, 4662232bf4SGerd Hoffmann uint32_t resource_id) 4762232bf4SGerd Hoffmann { 4862232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 4962232bf4SGerd Hoffmann uint32_t pixels; 5062232bf4SGerd Hoffmann 5162232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, resource_id); 5262232bf4SGerd Hoffmann if (!res) { 5362232bf4SGerd Hoffmann return; 5462232bf4SGerd Hoffmann } 5562232bf4SGerd Hoffmann 5662232bf4SGerd Hoffmann if (pixman_image_get_width(res->image) != s->current_cursor->width || 5762232bf4SGerd Hoffmann pixman_image_get_height(res->image) != s->current_cursor->height) { 5862232bf4SGerd Hoffmann return; 5962232bf4SGerd Hoffmann } 6062232bf4SGerd Hoffmann 6162232bf4SGerd Hoffmann pixels = s->current_cursor->width * s->current_cursor->height; 6262232bf4SGerd Hoffmann memcpy(s->current_cursor->data, 6362232bf4SGerd Hoffmann pixman_image_get_data(res->image), 6462232bf4SGerd Hoffmann pixels * sizeof(uint32_t)); 6562232bf4SGerd Hoffmann } 6662232bf4SGerd Hoffmann 679d9e1521SGerd Hoffmann #ifdef CONFIG_VIRGL 689d9e1521SGerd Hoffmann 699d9e1521SGerd Hoffmann static void update_cursor_data_virgl(VirtIOGPU *g, 709d9e1521SGerd Hoffmann struct virtio_gpu_scanout *s, 719d9e1521SGerd Hoffmann uint32_t resource_id) 729d9e1521SGerd Hoffmann { 739d9e1521SGerd Hoffmann uint32_t width, height; 749d9e1521SGerd Hoffmann uint32_t pixels, *data; 759d9e1521SGerd Hoffmann 769d9e1521SGerd Hoffmann data = virgl_renderer_get_cursor_data(resource_id, &width, &height); 779d9e1521SGerd Hoffmann if (!data) { 789d9e1521SGerd Hoffmann return; 799d9e1521SGerd Hoffmann } 809d9e1521SGerd Hoffmann 819d9e1521SGerd Hoffmann if (width != s->current_cursor->width || 829d9e1521SGerd Hoffmann 
height != s->current_cursor->height) { 839d9e1521SGerd Hoffmann return; 849d9e1521SGerd Hoffmann } 859d9e1521SGerd Hoffmann 869d9e1521SGerd Hoffmann pixels = s->current_cursor->width * s->current_cursor->height; 879d9e1521SGerd Hoffmann memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t)); 889d9e1521SGerd Hoffmann free(data); 899d9e1521SGerd Hoffmann } 909d9e1521SGerd Hoffmann 919d9e1521SGerd Hoffmann #endif 929d9e1521SGerd Hoffmann 9362232bf4SGerd Hoffmann static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) 9462232bf4SGerd Hoffmann { 9562232bf4SGerd Hoffmann struct virtio_gpu_scanout *s; 96e9c1b459SGerd Hoffmann bool move = cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR; 9762232bf4SGerd Hoffmann 9862232bf4SGerd Hoffmann if (cursor->pos.scanout_id >= g->conf.max_outputs) { 9962232bf4SGerd Hoffmann return; 10062232bf4SGerd Hoffmann } 10162232bf4SGerd Hoffmann s = &g->scanout[cursor->pos.scanout_id]; 10262232bf4SGerd Hoffmann 103e9c1b459SGerd Hoffmann trace_virtio_gpu_update_cursor(cursor->pos.scanout_id, 104e9c1b459SGerd Hoffmann cursor->pos.x, 105e9c1b459SGerd Hoffmann cursor->pos.y, 106e9c1b459SGerd Hoffmann move ? 
"move" : "update", 107e9c1b459SGerd Hoffmann cursor->resource_id); 108e9c1b459SGerd Hoffmann 109e9c1b459SGerd Hoffmann if (move) { 11062232bf4SGerd Hoffmann if (!s->current_cursor) { 11162232bf4SGerd Hoffmann s->current_cursor = cursor_alloc(64, 64); 11262232bf4SGerd Hoffmann } 11362232bf4SGerd Hoffmann 11462232bf4SGerd Hoffmann s->current_cursor->hot_x = cursor->hot_x; 11562232bf4SGerd Hoffmann s->current_cursor->hot_y = cursor->hot_y; 11662232bf4SGerd Hoffmann 11762232bf4SGerd Hoffmann if (cursor->resource_id > 0) { 1189d9e1521SGerd Hoffmann VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple, 1199d9e1521SGerd Hoffmann g, s, cursor->resource_id); 12062232bf4SGerd Hoffmann } 12162232bf4SGerd Hoffmann dpy_cursor_define(s->con, s->current_cursor); 12262232bf4SGerd Hoffmann } 12362232bf4SGerd Hoffmann dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, 12462232bf4SGerd Hoffmann cursor->resource_id ? 1 : 0); 12562232bf4SGerd Hoffmann } 12662232bf4SGerd Hoffmann 12762232bf4SGerd Hoffmann static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config) 12862232bf4SGerd Hoffmann { 12962232bf4SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 13062232bf4SGerd Hoffmann memcpy(config, &g->virtio_config, sizeof(g->virtio_config)); 13162232bf4SGerd Hoffmann } 13262232bf4SGerd Hoffmann 13362232bf4SGerd Hoffmann static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config) 13462232bf4SGerd Hoffmann { 13562232bf4SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 13662232bf4SGerd Hoffmann struct virtio_gpu_config vgconfig; 13762232bf4SGerd Hoffmann 13862232bf4SGerd Hoffmann memcpy(&vgconfig, config, sizeof(g->virtio_config)); 13962232bf4SGerd Hoffmann 14062232bf4SGerd Hoffmann if (vgconfig.events_clear) { 14162232bf4SGerd Hoffmann g->virtio_config.events_read &= ~vgconfig.events_clear; 14262232bf4SGerd Hoffmann } 14362232bf4SGerd Hoffmann } 14462232bf4SGerd Hoffmann 1459d5b731dSJason Wang static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t 
features, 1469d5b731dSJason Wang Error **errp) 14762232bf4SGerd Hoffmann { 1489d9e1521SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 1499d9e1521SGerd Hoffmann 1509d9e1521SGerd Hoffmann if (virtio_gpu_virgl_enabled(g->conf)) { 151fff02bc0SPaolo Bonzini features |= (1 << VIRTIO_GPU_F_VIRGL); 1529d9e1521SGerd Hoffmann } 15362232bf4SGerd Hoffmann return features; 15462232bf4SGerd Hoffmann } 15562232bf4SGerd Hoffmann 1569d9e1521SGerd Hoffmann static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features) 1579d9e1521SGerd Hoffmann { 158fff02bc0SPaolo Bonzini static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL); 1599d9e1521SGerd Hoffmann VirtIOGPU *g = VIRTIO_GPU(vdev); 1609d9e1521SGerd Hoffmann 1619d9e1521SGerd Hoffmann g->use_virgl_renderer = ((features & virgl) == virgl); 1629d9e1521SGerd Hoffmann trace_virtio_gpu_features(g->use_virgl_renderer); 1639d9e1521SGerd Hoffmann } 1649d9e1521SGerd Hoffmann 16562232bf4SGerd Hoffmann static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type) 16662232bf4SGerd Hoffmann { 16762232bf4SGerd Hoffmann g->virtio_config.events_read |= event_type; 16862232bf4SGerd Hoffmann virtio_notify_config(&g->parent_obj); 16962232bf4SGerd Hoffmann } 17062232bf4SGerd Hoffmann 17162232bf4SGerd Hoffmann static struct virtio_gpu_simple_resource * 17262232bf4SGerd Hoffmann virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id) 17362232bf4SGerd Hoffmann { 17462232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 17562232bf4SGerd Hoffmann 17662232bf4SGerd Hoffmann QTAILQ_FOREACH(res, &g->reslist, next) { 17762232bf4SGerd Hoffmann if (res->resource_id == resource_id) { 17862232bf4SGerd Hoffmann return res; 17962232bf4SGerd Hoffmann } 18062232bf4SGerd Hoffmann } 18162232bf4SGerd Hoffmann return NULL; 18262232bf4SGerd Hoffmann } 18362232bf4SGerd Hoffmann 18462232bf4SGerd Hoffmann void virtio_gpu_ctrl_response(VirtIOGPU *g, 18562232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd, 18662232bf4SGerd Hoffmann struct 
virtio_gpu_ctrl_hdr *resp, 18762232bf4SGerd Hoffmann size_t resp_len) 18862232bf4SGerd Hoffmann { 18962232bf4SGerd Hoffmann size_t s; 19062232bf4SGerd Hoffmann 19162232bf4SGerd Hoffmann if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) { 19262232bf4SGerd Hoffmann resp->flags |= VIRTIO_GPU_FLAG_FENCE; 19362232bf4SGerd Hoffmann resp->fence_id = cmd->cmd_hdr.fence_id; 19462232bf4SGerd Hoffmann resp->ctx_id = cmd->cmd_hdr.ctx_id; 19562232bf4SGerd Hoffmann } 19662232bf4SGerd Hoffmann s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len); 19762232bf4SGerd Hoffmann if (s != resp_len) { 19862232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 19962232bf4SGerd Hoffmann "%s: response size incorrect %zu vs %zu\n", 20062232bf4SGerd Hoffmann __func__, s, resp_len); 20162232bf4SGerd Hoffmann } 20262232bf4SGerd Hoffmann virtqueue_push(cmd->vq, &cmd->elem, s); 20362232bf4SGerd Hoffmann virtio_notify(VIRTIO_DEVICE(g), cmd->vq); 20462232bf4SGerd Hoffmann cmd->finished = true; 20562232bf4SGerd Hoffmann } 20662232bf4SGerd Hoffmann 20762232bf4SGerd Hoffmann void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g, 20862232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd, 20962232bf4SGerd Hoffmann enum virtio_gpu_ctrl_type type) 21062232bf4SGerd Hoffmann { 21162232bf4SGerd Hoffmann struct virtio_gpu_ctrl_hdr resp; 21262232bf4SGerd Hoffmann 21362232bf4SGerd Hoffmann memset(&resp, 0, sizeof(resp)); 21462232bf4SGerd Hoffmann resp.type = type; 21562232bf4SGerd Hoffmann virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp)); 21662232bf4SGerd Hoffmann } 21762232bf4SGerd Hoffmann 21862232bf4SGerd Hoffmann static void 21962232bf4SGerd Hoffmann virtio_gpu_fill_display_info(VirtIOGPU *g, 22062232bf4SGerd Hoffmann struct virtio_gpu_resp_display_info *dpy_info) 22162232bf4SGerd Hoffmann { 22262232bf4SGerd Hoffmann int i; 22362232bf4SGerd Hoffmann 22462232bf4SGerd Hoffmann for (i = 0; i < g->conf.max_outputs; i++) { 22562232bf4SGerd Hoffmann if (g->enabled_output_bitmask & (1 << i)) { 
22662232bf4SGerd Hoffmann dpy_info->pmodes[i].enabled = 1; 22762232bf4SGerd Hoffmann dpy_info->pmodes[i].r.width = g->req_state[i].width; 22862232bf4SGerd Hoffmann dpy_info->pmodes[i].r.height = g->req_state[i].height; 22962232bf4SGerd Hoffmann } 23062232bf4SGerd Hoffmann } 23162232bf4SGerd Hoffmann } 23262232bf4SGerd Hoffmann 23362232bf4SGerd Hoffmann void virtio_gpu_get_display_info(VirtIOGPU *g, 23462232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 23562232bf4SGerd Hoffmann { 23662232bf4SGerd Hoffmann struct virtio_gpu_resp_display_info display_info; 23762232bf4SGerd Hoffmann 23862232bf4SGerd Hoffmann trace_virtio_gpu_cmd_get_display_info(); 23962232bf4SGerd Hoffmann memset(&display_info, 0, sizeof(display_info)); 24062232bf4SGerd Hoffmann display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; 24162232bf4SGerd Hoffmann virtio_gpu_fill_display_info(g, &display_info); 24262232bf4SGerd Hoffmann virtio_gpu_ctrl_response(g, cmd, &display_info.hdr, 24362232bf4SGerd Hoffmann sizeof(display_info)); 24462232bf4SGerd Hoffmann } 24562232bf4SGerd Hoffmann 24662232bf4SGerd Hoffmann static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format) 24762232bf4SGerd Hoffmann { 24862232bf4SGerd Hoffmann switch (virtio_gpu_format) { 24962232bf4SGerd Hoffmann #ifdef HOST_WORDS_BIGENDIAN 25062232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: 25162232bf4SGerd Hoffmann return PIXMAN_b8g8r8x8; 25262232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: 25362232bf4SGerd Hoffmann return PIXMAN_b8g8r8a8; 25462232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: 25562232bf4SGerd Hoffmann return PIXMAN_x8r8g8b8; 25662232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: 25762232bf4SGerd Hoffmann return PIXMAN_a8r8g8b8; 25862232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: 25962232bf4SGerd Hoffmann return PIXMAN_r8g8b8x8; 26062232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: 26162232bf4SGerd Hoffmann return 
PIXMAN_r8g8b8a8; 26262232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: 26362232bf4SGerd Hoffmann return PIXMAN_x8b8g8r8; 26462232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: 26562232bf4SGerd Hoffmann return PIXMAN_a8b8g8r8; 26662232bf4SGerd Hoffmann #else 26762232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: 26862232bf4SGerd Hoffmann return PIXMAN_x8r8g8b8; 26962232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: 27062232bf4SGerd Hoffmann return PIXMAN_a8r8g8b8; 27162232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: 27262232bf4SGerd Hoffmann return PIXMAN_b8g8r8x8; 27362232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: 27462232bf4SGerd Hoffmann return PIXMAN_b8g8r8a8; 27562232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: 27662232bf4SGerd Hoffmann return PIXMAN_x8b8g8r8; 27762232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: 27862232bf4SGerd Hoffmann return PIXMAN_a8b8g8r8; 27962232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: 28062232bf4SGerd Hoffmann return PIXMAN_r8g8b8x8; 28162232bf4SGerd Hoffmann case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: 28262232bf4SGerd Hoffmann return PIXMAN_r8g8b8a8; 28362232bf4SGerd Hoffmann #endif 28462232bf4SGerd Hoffmann default: 28562232bf4SGerd Hoffmann return 0; 28662232bf4SGerd Hoffmann } 28762232bf4SGerd Hoffmann } 28862232bf4SGerd Hoffmann 28962232bf4SGerd Hoffmann static void virtio_gpu_resource_create_2d(VirtIOGPU *g, 29062232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 29162232bf4SGerd Hoffmann { 29262232bf4SGerd Hoffmann pixman_format_code_t pformat; 29362232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 29462232bf4SGerd Hoffmann struct virtio_gpu_resource_create_2d c2d; 29562232bf4SGerd Hoffmann 29662232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(c2d); 29762232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format, 29862232bf4SGerd Hoffmann c2d.width, c2d.height); 29962232bf4SGerd Hoffmann 
30062232bf4SGerd Hoffmann if (c2d.resource_id == 0) { 30162232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", 30262232bf4SGerd Hoffmann __func__); 30362232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 30462232bf4SGerd Hoffmann return; 30562232bf4SGerd Hoffmann } 30662232bf4SGerd Hoffmann 30762232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, c2d.resource_id); 30862232bf4SGerd Hoffmann if (res) { 30962232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", 31062232bf4SGerd Hoffmann __func__, c2d.resource_id); 31162232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 31262232bf4SGerd Hoffmann return; 31362232bf4SGerd Hoffmann } 31462232bf4SGerd Hoffmann 31562232bf4SGerd Hoffmann res = g_new0(struct virtio_gpu_simple_resource, 1); 31662232bf4SGerd Hoffmann 31762232bf4SGerd Hoffmann res->width = c2d.width; 31862232bf4SGerd Hoffmann res->height = c2d.height; 31962232bf4SGerd Hoffmann res->format = c2d.format; 32062232bf4SGerd Hoffmann res->resource_id = c2d.resource_id; 32162232bf4SGerd Hoffmann 32262232bf4SGerd Hoffmann pformat = get_pixman_format(c2d.format); 32362232bf4SGerd Hoffmann if (!pformat) { 32462232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 32562232bf4SGerd Hoffmann "%s: host couldn't handle guest format %d\n", 32662232bf4SGerd Hoffmann __func__, c2d.format); 32762232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 32862232bf4SGerd Hoffmann return; 32962232bf4SGerd Hoffmann } 33062232bf4SGerd Hoffmann res->image = pixman_image_create_bits(pformat, 33162232bf4SGerd Hoffmann c2d.width, 33262232bf4SGerd Hoffmann c2d.height, 33362232bf4SGerd Hoffmann NULL, 0); 33462232bf4SGerd Hoffmann 33562232bf4SGerd Hoffmann if (!res->image) { 33662232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 33762232bf4SGerd Hoffmann "%s: resource creation failed %d %d %d\n", 33862232bf4SGerd Hoffmann __func__, c2d.resource_id, 
c2d.width, c2d.height); 33962232bf4SGerd Hoffmann g_free(res); 34062232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; 34162232bf4SGerd Hoffmann return; 34262232bf4SGerd Hoffmann } 34362232bf4SGerd Hoffmann 34462232bf4SGerd Hoffmann QTAILQ_INSERT_HEAD(&g->reslist, res, next); 34562232bf4SGerd Hoffmann } 34662232bf4SGerd Hoffmann 34762232bf4SGerd Hoffmann static void virtio_gpu_resource_destroy(VirtIOGPU *g, 34862232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res) 34962232bf4SGerd Hoffmann { 35062232bf4SGerd Hoffmann pixman_image_unref(res->image); 35162232bf4SGerd Hoffmann QTAILQ_REMOVE(&g->reslist, res, next); 35262232bf4SGerd Hoffmann g_free(res); 35362232bf4SGerd Hoffmann } 35462232bf4SGerd Hoffmann 35562232bf4SGerd Hoffmann static void virtio_gpu_resource_unref(VirtIOGPU *g, 35662232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 35762232bf4SGerd Hoffmann { 35862232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 35962232bf4SGerd Hoffmann struct virtio_gpu_resource_unref unref; 36062232bf4SGerd Hoffmann 36162232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(unref); 36262232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_unref(unref.resource_id); 36362232bf4SGerd Hoffmann 36462232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, unref.resource_id); 36562232bf4SGerd Hoffmann if (!res) { 36662232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 36762232bf4SGerd Hoffmann __func__, unref.resource_id); 36862232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 36962232bf4SGerd Hoffmann return; 37062232bf4SGerd Hoffmann } 37162232bf4SGerd Hoffmann virtio_gpu_resource_destroy(g, res); 37262232bf4SGerd Hoffmann } 37362232bf4SGerd Hoffmann 37462232bf4SGerd Hoffmann static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g, 37562232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 37662232bf4SGerd Hoffmann { 37762232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 
37862232bf4SGerd Hoffmann int h; 37962232bf4SGerd Hoffmann uint32_t src_offset, dst_offset, stride; 38062232bf4SGerd Hoffmann int bpp; 38162232bf4SGerd Hoffmann pixman_format_code_t format; 38262232bf4SGerd Hoffmann struct virtio_gpu_transfer_to_host_2d t2d; 38362232bf4SGerd Hoffmann 38462232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(t2d); 38562232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id); 38662232bf4SGerd Hoffmann 38762232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, t2d.resource_id); 38862232bf4SGerd Hoffmann if (!res || !res->iov) { 38962232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 39062232bf4SGerd Hoffmann __func__, t2d.resource_id); 39162232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 39262232bf4SGerd Hoffmann return; 39362232bf4SGerd Hoffmann } 39462232bf4SGerd Hoffmann 39562232bf4SGerd Hoffmann if (t2d.r.x > res->width || 39662232bf4SGerd Hoffmann t2d.r.y > res->height || 39762232bf4SGerd Hoffmann t2d.r.width > res->width || 39862232bf4SGerd Hoffmann t2d.r.height > res->height || 39962232bf4SGerd Hoffmann t2d.r.x + t2d.r.width > res->width || 40062232bf4SGerd Hoffmann t2d.r.y + t2d.r.height > res->height) { 40162232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource" 40262232bf4SGerd Hoffmann " bounds for resource %d: %d %d %d %d vs %d %d\n", 40362232bf4SGerd Hoffmann __func__, t2d.resource_id, t2d.r.x, t2d.r.y, 40462232bf4SGerd Hoffmann t2d.r.width, t2d.r.height, res->width, res->height); 40562232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 40662232bf4SGerd Hoffmann return; 40762232bf4SGerd Hoffmann } 40862232bf4SGerd Hoffmann 40962232bf4SGerd Hoffmann format = pixman_image_get_format(res->image); 41062232bf4SGerd Hoffmann bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8; 41162232bf4SGerd Hoffmann stride = pixman_image_get_stride(res->image); 41262232bf4SGerd Hoffmann 41362232bf4SGerd Hoffmann if 
(t2d.offset || t2d.r.x || t2d.r.y || 41462232bf4SGerd Hoffmann t2d.r.width != pixman_image_get_width(res->image)) { 41562232bf4SGerd Hoffmann void *img_data = pixman_image_get_data(res->image); 41662232bf4SGerd Hoffmann for (h = 0; h < t2d.r.height; h++) { 41762232bf4SGerd Hoffmann src_offset = t2d.offset + stride * h; 41862232bf4SGerd Hoffmann dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp); 41962232bf4SGerd Hoffmann 42062232bf4SGerd Hoffmann iov_to_buf(res->iov, res->iov_cnt, src_offset, 42162232bf4SGerd Hoffmann (uint8_t *)img_data 42262232bf4SGerd Hoffmann + dst_offset, t2d.r.width * bpp); 42362232bf4SGerd Hoffmann } 42462232bf4SGerd Hoffmann } else { 42562232bf4SGerd Hoffmann iov_to_buf(res->iov, res->iov_cnt, 0, 42662232bf4SGerd Hoffmann pixman_image_get_data(res->image), 42762232bf4SGerd Hoffmann pixman_image_get_stride(res->image) 42862232bf4SGerd Hoffmann * pixman_image_get_height(res->image)); 42962232bf4SGerd Hoffmann } 43062232bf4SGerd Hoffmann } 43162232bf4SGerd Hoffmann 43262232bf4SGerd Hoffmann static void virtio_gpu_resource_flush(VirtIOGPU *g, 43362232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 43462232bf4SGerd Hoffmann { 43562232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 43662232bf4SGerd Hoffmann struct virtio_gpu_resource_flush rf; 43762232bf4SGerd Hoffmann pixman_region16_t flush_region; 43862232bf4SGerd Hoffmann int i; 43962232bf4SGerd Hoffmann 44062232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(rf); 44162232bf4SGerd Hoffmann trace_virtio_gpu_cmd_res_flush(rf.resource_id, 44262232bf4SGerd Hoffmann rf.r.width, rf.r.height, rf.r.x, rf.r.y); 44362232bf4SGerd Hoffmann 44462232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, rf.resource_id); 44562232bf4SGerd Hoffmann if (!res) { 44662232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 44762232bf4SGerd Hoffmann __func__, rf.resource_id); 44862232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 44962232bf4SGerd 
Hoffmann return; 45062232bf4SGerd Hoffmann } 45162232bf4SGerd Hoffmann 45262232bf4SGerd Hoffmann if (rf.r.x > res->width || 45362232bf4SGerd Hoffmann rf.r.y > res->height || 45462232bf4SGerd Hoffmann rf.r.width > res->width || 45562232bf4SGerd Hoffmann rf.r.height > res->height || 45662232bf4SGerd Hoffmann rf.r.x + rf.r.width > res->width || 45762232bf4SGerd Hoffmann rf.r.y + rf.r.height > res->height) { 45862232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource" 45962232bf4SGerd Hoffmann " bounds for resource %d: %d %d %d %d vs %d %d\n", 46062232bf4SGerd Hoffmann __func__, rf.resource_id, rf.r.x, rf.r.y, 46162232bf4SGerd Hoffmann rf.r.width, rf.r.height, res->width, res->height); 46262232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 46362232bf4SGerd Hoffmann return; 46462232bf4SGerd Hoffmann } 46562232bf4SGerd Hoffmann 46662232bf4SGerd Hoffmann pixman_region_init_rect(&flush_region, 46762232bf4SGerd Hoffmann rf.r.x, rf.r.y, rf.r.width, rf.r.height); 46862232bf4SGerd Hoffmann for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) { 46962232bf4SGerd Hoffmann struct virtio_gpu_scanout *scanout; 47062232bf4SGerd Hoffmann pixman_region16_t region, finalregion; 47162232bf4SGerd Hoffmann pixman_box16_t *extents; 47262232bf4SGerd Hoffmann 47362232bf4SGerd Hoffmann if (!(res->scanout_bitmask & (1 << i))) { 47462232bf4SGerd Hoffmann continue; 47562232bf4SGerd Hoffmann } 47662232bf4SGerd Hoffmann scanout = &g->scanout[i]; 47762232bf4SGerd Hoffmann 47862232bf4SGerd Hoffmann pixman_region_init(&finalregion); 47962232bf4SGerd Hoffmann pixman_region_init_rect(®ion, scanout->x, scanout->y, 48062232bf4SGerd Hoffmann scanout->width, scanout->height); 48162232bf4SGerd Hoffmann 48262232bf4SGerd Hoffmann pixman_region_intersect(&finalregion, &flush_region, ®ion); 48362232bf4SGerd Hoffmann pixman_region_translate(&finalregion, -scanout->x, -scanout->y); 48462232bf4SGerd Hoffmann extents = pixman_region_extents(&finalregion); 
48562232bf4SGerd Hoffmann /* work out the area we need to update for each console */ 48662232bf4SGerd Hoffmann dpy_gfx_update(g->scanout[i].con, 48762232bf4SGerd Hoffmann extents->x1, extents->y1, 48862232bf4SGerd Hoffmann extents->x2 - extents->x1, 48962232bf4SGerd Hoffmann extents->y2 - extents->y1); 49062232bf4SGerd Hoffmann 49162232bf4SGerd Hoffmann pixman_region_fini(®ion); 49262232bf4SGerd Hoffmann pixman_region_fini(&finalregion); 49362232bf4SGerd Hoffmann } 49462232bf4SGerd Hoffmann pixman_region_fini(&flush_region); 49562232bf4SGerd Hoffmann } 49662232bf4SGerd Hoffmann 49762232bf4SGerd Hoffmann static void virtio_gpu_set_scanout(VirtIOGPU *g, 49862232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd) 49962232bf4SGerd Hoffmann { 50062232bf4SGerd Hoffmann struct virtio_gpu_simple_resource *res; 50162232bf4SGerd Hoffmann struct virtio_gpu_scanout *scanout; 50262232bf4SGerd Hoffmann pixman_format_code_t format; 50362232bf4SGerd Hoffmann uint32_t offset; 50462232bf4SGerd Hoffmann int bpp; 50562232bf4SGerd Hoffmann struct virtio_gpu_set_scanout ss; 50662232bf4SGerd Hoffmann 50762232bf4SGerd Hoffmann VIRTIO_GPU_FILL_CMD(ss); 50862232bf4SGerd Hoffmann trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, 50962232bf4SGerd Hoffmann ss.r.width, ss.r.height, ss.r.x, ss.r.y); 51062232bf4SGerd Hoffmann 51162232bf4SGerd Hoffmann g->enable = 1; 51262232bf4SGerd Hoffmann if (ss.resource_id == 0) { 51362232bf4SGerd Hoffmann scanout = &g->scanout[ss.scanout_id]; 51462232bf4SGerd Hoffmann if (scanout->resource_id) { 51562232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, scanout->resource_id); 51662232bf4SGerd Hoffmann if (res) { 51762232bf4SGerd Hoffmann res->scanout_bitmask &= ~(1 << ss.scanout_id); 51862232bf4SGerd Hoffmann } 51962232bf4SGerd Hoffmann } 52062232bf4SGerd Hoffmann if (ss.scanout_id == 0 || 52162232bf4SGerd Hoffmann ss.scanout_id >= g->conf.max_outputs) { 52262232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 52362232bf4SGerd Hoffmann "%s: 
illegal scanout id specified %d", 52462232bf4SGerd Hoffmann __func__, ss.scanout_id); 52562232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; 52662232bf4SGerd Hoffmann return; 52762232bf4SGerd Hoffmann } 52862232bf4SGerd Hoffmann dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL); 52962232bf4SGerd Hoffmann scanout->ds = NULL; 53062232bf4SGerd Hoffmann scanout->width = 0; 53162232bf4SGerd Hoffmann scanout->height = 0; 53262232bf4SGerd Hoffmann return; 53362232bf4SGerd Hoffmann } 53462232bf4SGerd Hoffmann 53562232bf4SGerd Hoffmann /* create a surface for this scanout */ 53662232bf4SGerd Hoffmann if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT || 53762232bf4SGerd Hoffmann ss.scanout_id >= g->conf.max_outputs) { 53862232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", 53962232bf4SGerd Hoffmann __func__, ss.scanout_id); 54062232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; 54162232bf4SGerd Hoffmann return; 54262232bf4SGerd Hoffmann } 54362232bf4SGerd Hoffmann 54462232bf4SGerd Hoffmann res = virtio_gpu_find_resource(g, ss.resource_id); 54562232bf4SGerd Hoffmann if (!res) { 54662232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 54762232bf4SGerd Hoffmann __func__, ss.resource_id); 54862232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 54962232bf4SGerd Hoffmann return; 55062232bf4SGerd Hoffmann } 55162232bf4SGerd Hoffmann 55262232bf4SGerd Hoffmann if (ss.r.x > res->width || 55362232bf4SGerd Hoffmann ss.r.y > res->height || 55462232bf4SGerd Hoffmann ss.r.width > res->width || 55562232bf4SGerd Hoffmann ss.r.height > res->height || 55662232bf4SGerd Hoffmann ss.r.x + ss.r.width > res->width || 55762232bf4SGerd Hoffmann ss.r.y + ss.r.height > res->height) { 55862232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for" 55962232bf4SGerd Hoffmann " resource %d, (%d,%d)+%d,%d vs %d %d\n", 
56062232bf4SGerd Hoffmann __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y, 56162232bf4SGerd Hoffmann ss.r.width, ss.r.height, res->width, res->height); 56262232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 56362232bf4SGerd Hoffmann return; 56462232bf4SGerd Hoffmann } 56562232bf4SGerd Hoffmann 56662232bf4SGerd Hoffmann scanout = &g->scanout[ss.scanout_id]; 56762232bf4SGerd Hoffmann 56862232bf4SGerd Hoffmann format = pixman_image_get_format(res->image); 56962232bf4SGerd Hoffmann bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8; 57062232bf4SGerd Hoffmann offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image); 57162232bf4SGerd Hoffmann if (!scanout->ds || surface_data(scanout->ds) 57262232bf4SGerd Hoffmann != ((uint8_t *)pixman_image_get_data(res->image) + offset) || 57362232bf4SGerd Hoffmann scanout->width != ss.r.width || 57462232bf4SGerd Hoffmann scanout->height != ss.r.height) { 57562232bf4SGerd Hoffmann /* realloc the surface ptr */ 576ca58b45fSGerd Hoffmann scanout->ds = qemu_create_displaysurface_pixman(res->image); 57762232bf4SGerd Hoffmann if (!scanout->ds) { 57862232bf4SGerd Hoffmann cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 57962232bf4SGerd Hoffmann return; 58062232bf4SGerd Hoffmann } 58162232bf4SGerd Hoffmann dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds); 58262232bf4SGerd Hoffmann } 58362232bf4SGerd Hoffmann 58462232bf4SGerd Hoffmann res->scanout_bitmask |= (1 << ss.scanout_id); 58562232bf4SGerd Hoffmann scanout->resource_id = ss.resource_id; 58662232bf4SGerd Hoffmann scanout->x = ss.r.x; 58762232bf4SGerd Hoffmann scanout->y = ss.r.y; 58862232bf4SGerd Hoffmann scanout->width = ss.r.width; 58962232bf4SGerd Hoffmann scanout->height = ss.r.height; 59062232bf4SGerd Hoffmann } 59162232bf4SGerd Hoffmann 59262232bf4SGerd Hoffmann int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab, 59362232bf4SGerd Hoffmann struct virtio_gpu_ctrl_command *cmd, 59462232bf4SGerd Hoffmann struct 
iovec **iov) 59562232bf4SGerd Hoffmann { 59662232bf4SGerd Hoffmann struct virtio_gpu_mem_entry *ents; 59762232bf4SGerd Hoffmann size_t esize, s; 59862232bf4SGerd Hoffmann int i; 59962232bf4SGerd Hoffmann 60062232bf4SGerd Hoffmann if (ab->nr_entries > 16384) { 60162232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 6022c84167bSGerd Hoffmann "%s: nr_entries is too big (%d > 16384)\n", 60362232bf4SGerd Hoffmann __func__, ab->nr_entries); 60462232bf4SGerd Hoffmann return -1; 60562232bf4SGerd Hoffmann } 60662232bf4SGerd Hoffmann 60762232bf4SGerd Hoffmann esize = sizeof(*ents) * ab->nr_entries; 60862232bf4SGerd Hoffmann ents = g_malloc(esize); 60962232bf4SGerd Hoffmann s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 61062232bf4SGerd Hoffmann sizeof(*ab), ents, esize); 61162232bf4SGerd Hoffmann if (s != esize) { 61262232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, 61362232bf4SGerd Hoffmann "%s: command data size incorrect %zu vs %zu\n", 61462232bf4SGerd Hoffmann __func__, s, esize); 61562232bf4SGerd Hoffmann g_free(ents); 61662232bf4SGerd Hoffmann return -1; 61762232bf4SGerd Hoffmann } 61862232bf4SGerd Hoffmann 61962232bf4SGerd Hoffmann *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries); 62062232bf4SGerd Hoffmann for (i = 0; i < ab->nr_entries; i++) { 62162232bf4SGerd Hoffmann hwaddr len = ents[i].length; 62262232bf4SGerd Hoffmann (*iov)[i].iov_len = ents[i].length; 62362232bf4SGerd Hoffmann (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1); 62462232bf4SGerd Hoffmann if (!(*iov)[i].iov_base || len != ents[i].length) { 62562232bf4SGerd Hoffmann qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for" 62662232bf4SGerd Hoffmann " resource %d element %d\n", 62762232bf4SGerd Hoffmann __func__, ab->resource_id, i); 62862232bf4SGerd Hoffmann virtio_gpu_cleanup_mapping_iov(*iov, i); 62962232bf4SGerd Hoffmann g_free(ents); 63062232bf4SGerd Hoffmann *iov = NULL; 63162232bf4SGerd Hoffmann return -1; 63262232bf4SGerd Hoffmann } 
    }
    g_free(ents);
    return 0;
}

/*
 * Unmap and free an iovec array previously built by
 * virtio_gpu_create_mapping_iov().  Each element was mapped writable,
 * so the unmap reports the full length as dirty.
 */
void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

/* Release a resource's guest backing mappings and reset its iov state. */
static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

/*
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: attach guest memory pages as
 * the backing store of an existing resource.
 *
 * NOTE(review): res->iov is overwritten without checking whether a
 * backing store is already attached; presumably a guest issuing a
 * second attach would leak the first mapping -- verify.
 */
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

/*
 * VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: unmap and release the guest
 * backing store of a resource that currently has one attached.
 */
static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

/*
 * Dispatch one control-queue command in 2D (non-virgl) mode.  Commands
 * whose handler did not push its own response get a nodata reply from
 * the caller-side check at the bottom of this function.
 */
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case
         VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        /* Unknown / unsupported command type in 2D mode. */
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    /* Handlers that completed synchronously have not sent a response
     * yet; reply OK or the recorded error now. */
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

/* Control queue kick: defer the work to a bottom half. */
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

/* Cursor queue kick: defer the work to a bottom half. */
static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

/*
 * Drain the queued control commands.  Processing stops early when a
 * command reports 'waiting' (it stays at the head of the queue);
 * commands that are not yet finished move to the fence queue and are
 * counted as in flight.
 */
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            /* Completion will arrive later (fence); track it. */
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            /* Command fully handled and answered: release it. */
            g_free(cmd);
        }
    }
}

/*
 * Control queue handler.  Lazily initializes the virgl renderer on the
 * first kick (3D mode only), pops every available request into the
 * command queue, processes the queue, then polls virgl fences.
 */
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    /* Queue all available requests before processing any of them. */
    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

/* Bottom half servicing the control queue. */
static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

/*
 * Cursor queue handler: copy each update_cursor request out of the
 * ring and apply it.  Malformed (short) requests are logged and
 * dropped, but their buffers are still returned to the guest.
 */
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        /* Return the buffer to the guest regardless of outcome. */
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

/* Bottom half servicing the cursor queue. */
static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

/* Display invalidate hook: nothing to do, the guest drives all updates. */
static void virtio_gpu_invalidate_display(void *opaque)
Hoffmann { 86962232bf4SGerd Hoffmann } 87062232bf4SGerd Hoffmann 87162232bf4SGerd Hoffmann static void virtio_gpu_update_display(void *opaque) 87262232bf4SGerd Hoffmann { 87362232bf4SGerd Hoffmann } 87462232bf4SGerd Hoffmann 87562232bf4SGerd Hoffmann static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata) 87662232bf4SGerd Hoffmann { 87762232bf4SGerd Hoffmann } 87862232bf4SGerd Hoffmann 87962232bf4SGerd Hoffmann static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) 88062232bf4SGerd Hoffmann { 88162232bf4SGerd Hoffmann VirtIOGPU *g = opaque; 88262232bf4SGerd Hoffmann 88362232bf4SGerd Hoffmann if (idx > g->conf.max_outputs) { 88462232bf4SGerd Hoffmann return -1; 88562232bf4SGerd Hoffmann } 88662232bf4SGerd Hoffmann 88762232bf4SGerd Hoffmann g->req_state[idx].x = info->xoff; 88862232bf4SGerd Hoffmann g->req_state[idx].y = info->yoff; 88962232bf4SGerd Hoffmann g->req_state[idx].width = info->width; 89062232bf4SGerd Hoffmann g->req_state[idx].height = info->height; 89162232bf4SGerd Hoffmann 89262232bf4SGerd Hoffmann if (info->width && info->height) { 89362232bf4SGerd Hoffmann g->enabled_output_bitmask |= (1 << idx); 89462232bf4SGerd Hoffmann } else { 89562232bf4SGerd Hoffmann g->enabled_output_bitmask &= ~(1 << idx); 89662232bf4SGerd Hoffmann } 89762232bf4SGerd Hoffmann 89862232bf4SGerd Hoffmann /* send event to guest */ 89962232bf4SGerd Hoffmann virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY); 90062232bf4SGerd Hoffmann return 0; 90162232bf4SGerd Hoffmann } 90262232bf4SGerd Hoffmann 903321c9adbSGerd Hoffmann static void virtio_gpu_gl_block(void *opaque, bool block) 904321c9adbSGerd Hoffmann { 905321c9adbSGerd Hoffmann VirtIOGPU *g = opaque; 906321c9adbSGerd Hoffmann 907321c9adbSGerd Hoffmann g->renderer_blocked = block; 908321c9adbSGerd Hoffmann if (!block) { 909321c9adbSGerd Hoffmann virtio_gpu_process_cmdq(g); 910321c9adbSGerd Hoffmann } 911321c9adbSGerd Hoffmann } 912321c9adbSGerd Hoffmann 91362232bf4SGerd Hoffmann const 
GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

/* Migration is not implemented for this device yet. */
static const VMStateDescription vmstate_virtio_gpu_unmigratable = {
    .name = "virtio-gpu",
    .unmigratable = 1,
};

/*
 * Realize: set up virtio config space, the control/cursor virtqueues
 * (larger control queue in 3D mode), bottom halves, internal command
 * queues and one graphic console per configured output.
 */
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    /* Default initial resolution for the primary output. */
    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    /* Virgl (3D) needs both build-time support and an OpenGL-capable
     * display; otherwise strip the flag from the configuration. */
    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    /* Only output 0 starts enabled. */
    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        /* Secondary consoles start without a surface. */
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
}

static void virtio_gpu_instance_init(Object *obj)
{
}

/*
 * Device reset: destroy every resource, clear all scanout state and
 * drop back to the 2D renderer.
 */
static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct
virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    /* Destroy all resources; _SAFE iteration since destroy unlinks. */
    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

/* Wire up the virtio device class callbacks and qdev properties. */
static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

/* Compile-time checks that the wire structures keep the exact layout
 * mandated by the virtio-gpu specification. */
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);