/* xref: /linux/drivers/gpu/drm/virtio/virtgpu_vq.c (revision 52338415) */
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

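/*
 * Vbuffer layout: the struct virtio_gpu_vbuffer is followed by an inline
 * command buffer (at most MAX_INLINE_CMD_SIZE bytes) and an inline
 * response buffer (at most MAX_INLINE_RESP_SIZE bytes), so the common
 * case needs only a single allocation from the vbufs slab.  Larger
 * responses use a caller-provided buffer instead.
 */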
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

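/*
 * Virtqueue callbacks: just schedule the dequeue work, the actual buffer
 * reclaim happens in process context in virtio_gpu_dequeue_ctrl_func()
 * and virtio_gpu_dequeue_cursor_func().
 */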
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

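/*
 * Allocate a vbuffer from the slab and carve the inline command and
 * (optionally) response buffers out of it.  A response larger than
 * MAX_INLINE_RESP_SIZE must be passed in via @resp_buf.
 */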
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

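/*
 * Typical usage by the command builders below:
 *
 *	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 *	memset(cmd_p, 0, sizeof(*cmd_p));
 *	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_...);
 *	...
 *	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 *
 * The returned pointer is the inline command buffer of the vbuffer; the
 * default response is a plain struct virtio_gpu_ctrl_hdr.
 */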
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

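/*
 * Pull all completed buffers off the virtqueue and collect them on
 * @reclaim_list so they can be processed and freed outside the queue
 * lock.
 */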
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

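/*
 * Work item for the control queue: drain completed buffers, log error
 * responses, track the highest fence id seen and run per-command
 * response callbacks, then signal any fences up to that id.
 */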
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

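/*
 * Add a vbuffer to the control virtqueue: the command buffer is always
 * the first out-sg, optionally followed by a data out-sg and a response
 * in-sg.  On -ENOSPC the queue lock is dropped and we wait for the
 * dequeue work to free up descriptors before retrying.
 */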
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

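/*
 * Queue a command together with an (optional) fence.  Emitting the fence
 * and adding the buffer to the virtqueue must happen under the same lock
 * so fence ids hit the ring in order; three free descriptors (command,
 * data, response) are the worst case required below.
 */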
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we do.
	 *
	 * Without this check virtio_gpu_queue_ctrl_buffer_locked() might
	 * have to wait for free space after the fence has already been
	 * emitted, which can result in fence ids being submitted
	 * out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

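/*
 * The cursor queue carries only small, self-contained commands: a single
 * out-sg with the inline command buffer and no response.
 */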
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* Just create gem objects for userspace and long-lived objects,
 * and just use dma_alloc'ed pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

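/*
 * Note: @ents is handed over to the vbuffer as data_buf and kfree'd in
 * free_vbuf() once the host has consumed the command.
 */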
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

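/*
 * Response callback for VIRTIO_GPU_CMD_GET_DISPLAY_INFO: copy the
 * per-scanout modes into vgdev->outputs[] and wake up anyone waiting for
 * the display info, then generate a hotplug event.
 */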
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

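	/*
	 * drm_helper_hpd_irq_event() only generates a hotplug event when
	 * it actually detects a connector status change; if it did not,
	 * send the event explicitly so userspace still gets notified of
	 * the updated display info.
	 */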
	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

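/*
 * get_edid_block() callback for drm_do_get_edid(): copy one EDID block
 * out of the response buffer, bounds-checked against the size the host
 * reported.
 */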
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

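/*
 * The response buffers allocated below are larger than
 * MAX_INLINE_RESP_SIZE, so they are owned by the vbuffer and kfree'd in
 * free_vbuf() after the response callback has run.
 */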
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

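/*
 * Request a capset from the host.  A new cache entry is created
 * speculatively and only added to vgdev->cap_cache if no other task
 * raced us to it; the entry is marked valid by virtio_gpu_cmd_capset_cb()
 * once the response data has been copied.
 */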
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

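/*
 * Note: @data is handed over to the vbuffer as data_buf and kfree'd in
 * free_vbuf() once the command has been processed.
 */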
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

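/*
 * Attach backing pages to a host resource: build a mem entry array from
 * the object's sg table (using DMA addresses when the DMA API is in use,
 * physical addresses otherwise) and send it with RESOURCE_ATTACH_BACKING.
 */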
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

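/*
 * Detach backing pages from a host resource.  When the DMA API is in use
 * we must wait for the host to stop using the pages before the DMA
 * mappings can be torn down, hence the fence.
 */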
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}