xref: /openbsd/sys/dev/pv/viogpu.c (revision 9e6efb0a)
1 /*	$OpenBSD: viogpu.c,v 1.6 2024/05/24 10:05:55 jsg Exp $ */
2 
3 /*
4  * Copyright (c) 2021-2023 joshua stein <jcs@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/timeout.h>
23 
24 #include <uvm/uvm_extern.h>
25 
26 #include <dev/pv/virtioreg.h>
27 #include <dev/pv/virtiovar.h>
28 #include <dev/pv/viogpu.h>
29 
30 #include <dev/wscons/wsconsio.h>
31 #include <dev/wscons/wsdisplayvar.h>
32 #include <dev/rasops/rasops.h>
33 
34 #if VIRTIO_DEBUG
35 #define DPRINTF(x...) printf(x)
36 #else
37 #define DPRINTF(x...)
38 #endif
39 
40 struct viogpu_softc;
41 
42 int	viogpu_match(struct device *, void *, void *);
43 void	viogpu_attach(struct device *, struct device *, void *);
44 int	viogpu_send_cmd(struct viogpu_softc *, void *, size_t, void *, size_t);
45 int	viogpu_vq_wait(struct virtqueue *vq);
46 void	viogpu_rx_soft(void *arg);
47 
48 int	viogpu_get_display_info(struct viogpu_softc *);
49 int	viogpu_create_2d(struct viogpu_softc *, int, int, int);
50 int	viogpu_set_scanout(struct viogpu_softc *, int, int, int, int);
51 int	viogpu_attach_backing(struct viogpu_softc *, int, bus_dmamap_t);
52 int	viogpu_transfer_to_host_2d(struct viogpu_softc *sc, int, uint32_t,
53 	    uint32_t);
54 int	viogpu_flush_resource(struct viogpu_softc *, int, uint32_t, uint32_t);
55 
56 void	viogpu_repaint(void *);
57 
58 int	viogpu_wsioctl(void *, u_long, caddr_t, int, struct proc *);
59 paddr_t	viogpu_wsmmap(void *, off_t, int);
60 int	viogpu_alloc_screen(void *, const struct wsscreen_descr *, void **,
61 	    int *, int *, uint32_t *);
62 
63 #define VIOGPU_HEIGHT		160
64 #define VIOGPU_WIDTH		160
65 
/*
 * Per-device driver state.
 */
struct viogpu_softc {
	struct device		sc_dev;
	struct virtio_softc	*sc_virtio;	/* parent virtio transport */
#define	VQCTRL	0
#define	VQCURS	1
	struct virtqueue	sc_vqs[2];	/* control and cursor queues */

	/* single shared DMA page used to stage commands and responses */
	bus_dma_segment_t	sc_dma_seg;
	bus_dmamap_t		sc_dma_map;
	size_t			sc_dma_size;
	void			*sc_cmd;	/* KVA of the command page */
	int			sc_fence_id;	/* id of last fenced command */

	/* framebuffer memory shared with the host */
	int			sc_fb_width;
	int			sc_fb_height;
	bus_dma_segment_t	sc_fb_dma_seg;
	bus_dmamap_t		sc_fb_dma_map;
	size_t			sc_fb_dma_size;
	caddr_t			sc_fb_dma_kva;

	/* wsdisplay/rasops glue */
	struct rasops_info	sc_ri;
	struct wsscreen_descr	sc_wsd;
	struct wsscreen_list	sc_wsl;
	struct wsscreen_descr	*sc_scrlist[1];
	int			console;
	int			primary;

	struct timeout		sc_timo;	/* periodic repaint timer */
};
95 
/* feature names printed during negotiation (debug builds only) */
struct virtio_feature_name viogpu_feature_names[] = {
#if VIRTIO_DEBUG
	{ VIRTIO_GPU_F_VIRGL,		"VirGL" },
	{ VIRTIO_GPU_F_EDID,		"EDID" },
#endif
	{ 0, 				NULL },
};
103 
/* default (and only) screen layout; geometry is filled in at attach */
struct wsscreen_descr viogpu_stdscreen = { "std" };

const struct wsscreen_descr *viogpu_scrlist[] = {
	&viogpu_stdscreen,
};

struct wsscreen_list viogpu_screenlist = {
	nitems(viogpu_scrlist), viogpu_scrlist
};

/* wsdisplay entry points; text operations are delegated to rasops */
struct wsdisplay_accessops viogpu_accessops = {
	.ioctl = viogpu_wsioctl,
	.mmap = viogpu_wsmmap,
	.alloc_screen = viogpu_alloc_screen,
	.free_screen = rasops_free_screen,
	.show_screen = rasops_show_screen,
	.getchar = rasops_getchar,
	.load_font = rasops_load_font,
	.list_font = rasops_list_font,
	.scrollback = rasops_scrollback,
};

/* autoconf glue */
const struct cfattach viogpu_ca = {
	sizeof(struct viogpu_softc),
	viogpu_match,
	viogpu_attach,
	NULL
};

struct cfdriver viogpu_cd = {
	NULL, "viogpu", DV_DULL
};
136 
137 int
138 viogpu_match(struct device *parent, void *match, void *aux)
139 {
140 	struct virtio_softc *va = aux;
141 
142 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_GPU)
143 		return 1;
144 
145 	return 0;
146 }
147 
148 void
149 viogpu_attach(struct device *parent, struct device *self, void *aux)
150 {
151 	struct viogpu_softc *sc = (struct viogpu_softc *)self;
152 	struct virtio_softc *vsc = (struct virtio_softc *)parent;
153 	struct wsemuldisplaydev_attach_args waa;
154 	struct rasops_info *ri = &sc->sc_ri;
155 	uint32_t defattr;
156 	int nsegs;
157 
158 	if (vsc->sc_child != NULL) {
159 		printf(": child already attached for %s\n", parent->dv_xname);
160 		return;
161 	}
162 	vsc->sc_child = self;
163 
164 	virtio_negotiate_features(vsc, viogpu_feature_names);
165 	if (!vsc->sc_version_1) {
166 		printf(": requires virtio version 1\n");
167 		return;
168 	}
169 
170 	vsc->sc_ipl = IPL_TTY;
171 	softintr_establish(IPL_TTY, viogpu_rx_soft, vsc);
172 	sc->sc_virtio = vsc;
173 
174 	/* allocate command and cursor virtqueues */
175 	vsc->sc_vqs = sc->sc_vqs;
176 	if (virtio_alloc_vq(vsc, &sc->sc_vqs[VQCTRL], VQCTRL, NBPG, 1,
177 	    "control")) {
178 		printf(": alloc_vq failed\n");
179 		return;
180 	}
181 	sc->sc_vqs[VQCTRL].vq_done = viogpu_vq_wait;
182 
183 	if (virtio_alloc_vq(vsc, &sc->sc_vqs[VQCURS], VQCURS, NBPG, 1,
184 	    "cursor")) {
185 		printf(": alloc_vq failed\n");
186 		return;
187 	}
188 	vsc->sc_nvqs = nitems(sc->sc_vqs);
189 
190 	/* setup DMA space for sending commands */
191 	sc->sc_dma_size = NBPG;
192 	if (bus_dmamap_create(vsc->sc_dmat, sc->sc_dma_size, 1,
193 	    sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
194 	    &sc->sc_dma_map) != 0) {
195 		printf(": create failed");
196 		goto err;
197 	}
198 	if (bus_dmamem_alloc(vsc->sc_dmat, sc->sc_dma_size, 16, 0,
199 	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
200 		printf(": alloc failed");
201 		goto destroy;
202 	}
203 	if (bus_dmamem_map(vsc->sc_dmat, &sc->sc_dma_seg, nsegs,
204 	    sc->sc_dma_size, (caddr_t *)&sc->sc_cmd, BUS_DMA_NOWAIT) != 0) {
205 		printf(": map failed");
206 		goto free;
207 	}
208 	if (bus_dmamap_load(vsc->sc_dmat, sc->sc_dma_map, sc->sc_cmd,
209 	    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0) {
210 		printf(": load failed");
211 		goto unmap;
212 	}
213 
214 	if (viogpu_get_display_info(sc) != 0)
215 		goto unmap;
216 
217 	/* setup DMA space for actual framebuffer */
218 	sc->sc_fb_dma_size = sc->sc_fb_width * sc->sc_fb_height * 4;
219 	if (bus_dmamap_create(vsc->sc_dmat, sc->sc_fb_dma_size, 1,
220 	    sc->sc_fb_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
221 	    &sc->sc_fb_dma_map) != 0)
222 		goto unmap;
223 	if (bus_dmamem_alloc(vsc->sc_dmat, sc->sc_fb_dma_size, 1024, 0,
224 	    &sc->sc_fb_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
225 		goto fb_destroy;
226 	if (bus_dmamem_map(vsc->sc_dmat, &sc->sc_fb_dma_seg, nsegs,
227 	    sc->sc_fb_dma_size, &sc->sc_fb_dma_kva, BUS_DMA_NOWAIT) != 0)
228 		goto fb_free;
229 	if (bus_dmamap_load(vsc->sc_dmat, sc->sc_fb_dma_map,
230 	    sc->sc_fb_dma_kva, sc->sc_fb_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
231 		goto fb_unmap;
232 
233 	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
234 
235 	if (viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height) != 0)
236 		goto fb_unmap;
237 
238 	if (viogpu_attach_backing(sc, 1, sc->sc_fb_dma_map) != 0)
239 		goto fb_unmap;
240 
241 	if (viogpu_set_scanout(sc, 0, 1, sc->sc_fb_width,
242 	    sc->sc_fb_height) != 0)
243 		goto fb_unmap;
244 
245 	sc->console = 1;
246 
247 	ri->ri_hw = sc;
248 	ri->ri_bits = sc->sc_fb_dma_kva;
249 	ri->ri_flg = RI_VCONS | RI_CENTER | RI_CLEAR | RI_WRONLY;
250 	ri->ri_depth = 32;
251 	ri->ri_width = sc->sc_fb_width;
252 	ri->ri_height = sc->sc_fb_height;
253 	ri->ri_stride = ri->ri_width * ri->ri_depth / 8;
254 	ri->ri_bpos = 0;	/* B8G8R8X8 */
255 	ri->ri_bnum = 8;
256 	ri->ri_gpos = 8;
257 	ri->ri_gnum = 8;
258 	ri->ri_rpos = 16;
259 	ri->ri_rnum = 8;
260 	rasops_init(ri, VIOGPU_HEIGHT, VIOGPU_WIDTH);
261 
262 	strlcpy(sc->sc_wsd.name, "std", sizeof(sc->sc_wsd.name));
263 	sc->sc_wsd.capabilities = ri->ri_caps;
264 	sc->sc_wsd.nrows = ri->ri_rows;
265 	sc->sc_wsd.ncols = ri->ri_cols;
266 	sc->sc_wsd.textops = &ri->ri_ops;
267 	sc->sc_wsd.fontwidth = ri->ri_font->fontwidth;
268 	sc->sc_wsd.fontheight = ri->ri_font->fontheight;
269 
270 	sc->sc_scrlist[0] = &sc->sc_wsd;
271 	sc->sc_wsl.nscreens = 1;
272 	sc->sc_wsl.screens = (const struct wsscreen_descr **)sc->sc_scrlist;
273 
274 	printf(": %dx%d, %dbpp\n", ri->ri_width, ri->ri_height, ri->ri_depth);
275 
276 	timeout_set(&sc->sc_timo, viogpu_repaint, sc);
277 	viogpu_repaint(sc);
278 
279 	if (sc->console) {
280 		ri->ri_ops.pack_attr(ri->ri_active, 0, 0, 0, &defattr);
281 		wsdisplay_cnattach(&sc->sc_wsd, ri->ri_active, 0, 0, defattr);
282 	}
283 
284 	memset(&waa, 0, sizeof(waa));
285 	waa.scrdata = &sc->sc_wsl;
286 	waa.accessops = &viogpu_accessops;
287 	waa.accesscookie = ri;
288 	waa.console = sc->console;
289 
290 	config_found_sm(self, &waa, wsemuldisplaydevprint,
291 	    wsemuldisplaydevsubmatch);
292 	return;
293 
294 fb_unmap:
295 	bus_dmamem_unmap(vsc->sc_dmat, (caddr_t)&sc->sc_fb_dma_kva,
296 	    sc->sc_fb_dma_size);
297 fb_free:
298 	bus_dmamem_free(vsc->sc_dmat, &sc->sc_fb_dma_seg, 1);
299 fb_destroy:
300 	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_fb_dma_map);
301 unmap:
302 	bus_dmamem_unmap(vsc->sc_dmat, (caddr_t)&sc->sc_cmd, sc->sc_dma_size);
303 free:
304 	bus_dmamem_free(vsc->sc_dmat, &sc->sc_dma_seg, 1);
305 destroy:
306 	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dma_map);
307 err:
308 	printf(": DMA setup failed\n");
309 	return;
310 }
311 
312 void
313 viogpu_repaint(void *arg)
314 {
315 	struct viogpu_softc *sc = (struct viogpu_softc *)arg;
316 	int s;
317 
318 	s = spltty();
319 
320 	viogpu_transfer_to_host_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height);
321 	viogpu_flush_resource(sc, 1, sc->sc_fb_width, sc->sc_fb_height);
322 
323 	timeout_add_msec(&sc->sc_timo, 10);
324 	splx(s);
325 }
326 
/*
 * Busy-wait until the device has completed a request on vq.
 *
 * Spins until virtio_dequeue() yields a used slot, syncs the whole
 * command DMA area for reading, then releases the slot.  Installed as
 * the control queue's vq_done callback and also called directly from
 * viogpu_send_cmd().  Always returns 1 (handled).
 */
int
viogpu_vq_wait(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viogpu_softc *sc = (struct viogpu_softc *)vsc->sc_child;
	int slot, len;

	/* poll until the device places the request on the used ring */
	while (virtio_dequeue(vsc, vq, &slot, &len) != 0)
		;

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD);

	virtio_dequeue_commit(vq, slot);

	return 1;
}
344 
/*
 * Soft interrupt handler for the control virtqueue: drain every
 * completed request and release its descriptor slot.
 */
void
viogpu_rx_soft(void *arg)
{
	struct virtio_softc *vsc = (struct virtio_softc *)arg;
	struct viogpu_softc *sc = (struct viogpu_softc *)vsc->sc_child;
	struct virtqueue *vq = &sc->sc_vqs[VQCTRL];
	int slot, len;

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		/*
		 * NOTE(review): `slot' is a descriptor index, not a byte
		 * offset into the DMA map; elsewhere the map is synced
		 * from offset 0 -- confirm the intended sync range here.
		 */
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map,
		    slot, len, BUS_DMASYNC_POSTREAD);
		virtio_dequeue_commit(vq, slot);
	}
}
359 
/*
 * Synchronously submit a command on the control queue.
 *
 * The command is copied into the shared DMA page with a zeroed
 * response area right behind it, tagged with a fresh fence id, and
 * enqueued as a device-readable segment followed by a device-writable
 * one.  viogpu_vq_wait() then polls until the host is done, after
 * which the response is copied back into `ret'.  Always returns 0;
 * callers inspect the response header for command-level errors.
 */
int
viogpu_send_cmd(struct viogpu_softc *sc, void *cmd, size_t cmd_size, void *ret,
    size_t ret_size)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &vsc->sc_vqs[VQCTRL];
	struct virtio_gpu_ctrl_hdr *hdr =
	    (struct virtio_gpu_ctrl_hdr *)sc->sc_cmd;
	struct virtio_gpu_ctrl_hdr *ret_hdr = (struct virtio_gpu_ctrl_hdr *)ret;
	int slot, r;

	/* stage the command, then a zeroed area for the response */
	memcpy(sc->sc_cmd, cmd, cmd_size);
	memset(sc->sc_cmd + cmd_size, 0, ret_size);

#if VIRTIO_DEBUG
	printf("%s: [%ld -> %ld]: ", __func__, cmd_size, ret_size);
	for (int i = 0; i < cmd_size; i++) {
		printf(" %02x", ((unsigned char *)sc->sc_cmd)[i]);
	}
	printf("\n");
#endif

	/* fence the command so the response can be matched to it */
	hdr->flags |= VIRTIO_GPU_FLAG_FENCE;
	hdr->fence_id = ++sc->sc_fence_id;

	r = virtio_enqueue_prep(vq, &slot);
	if (r != 0)
		panic("%s: control vq busy", sc->sc_dev.dv_xname);

	/*
	 * NOTE(review): sc_dma_map was already loaded at attach time and
	 * is re-loaded here on every command without an intervening
	 * bus_dmamap_unload() -- confirm this is valid for this bus_dma
	 * implementation.
	 */
	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_dma_map, sc->sc_cmd,
	    cmd_size + ret_size, NULL, BUS_DMA_NOWAIT);
	if (r != 0)
		panic("%s: dmamap load failed", sc->sc_dev.dv_xname);

	r = virtio_enqueue_reserve(vq, slot, sc->sc_dma_map->dm_nsegs + 1);
	if (r != 0)
		panic("%s: control vq busy", sc->sc_dev.dv_xname);

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, cmd_size,
	    BUS_DMASYNC_PREWRITE);

	/* command segment is device-readable, response is device-writable */
	virtio_enqueue_p(vq, slot, sc->sc_dma_map, 0, cmd_size, 1);
	virtio_enqueue_p(vq, slot, sc->sc_dma_map, cmd_size, ret_size, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	/* busy-wait for completion */
	viogpu_vq_wait(vq);

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, cmd_size,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, cmd_size, ret_size,
	    BUS_DMASYNC_POSTREAD);

	memcpy(ret, sc->sc_cmd + cmd_size, ret_size);

	if (ret_hdr->fence_id != sc->sc_fence_id)
		printf("%s: return fence id not right (0x%llx != 0x%x)\n",
		    __func__, ret_hdr->fence_id, sc->sc_fence_id);

	return 0;
}
420 
421 int
422 viogpu_get_display_info(struct viogpu_softc *sc)
423 {
424 	struct virtio_gpu_ctrl_hdr hdr = { 0 };
425 	struct virtio_gpu_resp_display_info info = { 0 };
426 
427 	hdr.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;
428 
429 	viogpu_send_cmd(sc, &hdr, sizeof(hdr), &info, sizeof(info));
430 
431 	if (info.hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO) {
432 		printf("%s: failed getting display info\n",
433 		    sc->sc_dev.dv_xname);
434 		return 1;
435 	}
436 
437 	if (!info.pmodes[0].enabled) {
438 		printf("%s: pmodes[0] is not enabled\n", sc->sc_dev.dv_xname);
439 		return 1;
440 	}
441 
442 	sc->sc_fb_width = info.pmodes[0].r.width;
443 	sc->sc_fb_height = info.pmodes[0].r.height;
444 
445 	return 0;
446 }
447 
448 int
449 viogpu_create_2d(struct viogpu_softc *sc, int resource_id, int width,
450     int height)
451 {
452 	struct virtio_gpu_resource_create_2d res = { 0 };
453 	struct virtio_gpu_ctrl_hdr resp = { 0 };
454 
455 	res.hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
456 	res.resource_id = resource_id;
457 	res.format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
458 	res.width = width;
459 	res.height = height;
460 
461 	viogpu_send_cmd(sc, &res, sizeof(res), &resp, sizeof(resp));
462 
463 	if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
464 		printf("%s: failed CREATE_2D: %d\n", sc->sc_dev.dv_xname,
465 		    resp.type);
466 		return 1;
467 	}
468 
469 	return 0;
470 }
471 
472 int
473 viogpu_set_scanout(struct viogpu_softc *sc, int scanout_id, int resource_id,
474     int width, int height)
475 {
476 	struct virtio_gpu_set_scanout ss = { 0 };
477 	struct virtio_gpu_ctrl_hdr resp = { 0 };
478 
479 	ss.hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT;
480 	ss.scanout_id = scanout_id;
481 	ss.resource_id = resource_id;
482 	ss.r.width = width;
483 	ss.r.height = height;
484 
485 	viogpu_send_cmd(sc, &ss, sizeof(ss), &resp, sizeof(resp));
486 
487 	if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
488 		printf("%s: failed SET_SCANOUT: %d\n", sc->sc_dev.dv_xname,
489 		    resp.type);
490 		return 1;
491 	}
492 
493 	return 0;
494 }
495 
496 int
497 viogpu_attach_backing(struct viogpu_softc *sc, int resource_id,
498     bus_dmamap_t dmamap)
499 {
500 	struct virtio_gpu_resource_attach_backing_entries {
501 		struct virtio_gpu_ctrl_hdr hdr;
502 		__le32 resource_id;
503 		__le32 nr_entries;
504 		struct virtio_gpu_mem_entry entries[1];
505 	} __packed backing = { 0 };
506 	struct virtio_gpu_ctrl_hdr resp = { 0 };
507 
508 	backing.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
509 	backing.resource_id = resource_id;
510 	backing.nr_entries = nitems(backing.entries);
511 	backing.entries[0].addr = dmamap->dm_segs[0].ds_addr;
512 	backing.entries[0].length = dmamap->dm_segs[0].ds_len;
513 
514 	if (dmamap->dm_nsegs > 1)
515 		printf("%s: TODO: send all %d segs\n", __func__,
516 		    dmamap->dm_nsegs);
517 
518 #if VIRTIO_DEBUG
519 	printf("%s: backing addr 0x%llx length %d\n", __func__,
520 		backing.entries[0].addr, backing.entries[0].length);
521 #endif
522 
523 	viogpu_send_cmd(sc, &backing, sizeof(backing), &resp, sizeof(resp));
524 
525 	if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
526 		printf("%s: failed ATTACH_BACKING: %d\n", sc->sc_dev.dv_xname,
527 		    resp.type);
528 		return 1;
529 	}
530 
531 	return 0;
532 }
533 
534 int
535 viogpu_transfer_to_host_2d(struct viogpu_softc *sc, int resource_id,
536     uint32_t width, uint32_t height)
537 {
538 	struct virtio_gpu_transfer_to_host_2d tth = { 0 };
539 	struct virtio_gpu_ctrl_hdr resp = { 0 };
540 
541 	tth.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
542 	tth.resource_id = resource_id;
543 	tth.r.width = width;
544 	tth.r.height = height;
545 
546 	viogpu_send_cmd(sc, &tth, sizeof(tth), &resp, sizeof(resp));
547 
548 	if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
549 		printf("%s: failed TRANSFER_TO_HOST: %d\n", sc->sc_dev.dv_xname,
550 		    resp.type);
551 		return 1;
552 	}
553 
554 	return 0;
555 }
556 
557 int
558 viogpu_flush_resource(struct viogpu_softc *sc, int resource_id, uint32_t width,
559     uint32_t height)
560 {
561 	struct virtio_gpu_resource_flush flush = { 0 };
562 	struct virtio_gpu_ctrl_hdr resp = { 0 };
563 
564 	flush.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
565 	flush.resource_id = resource_id;
566 	flush.r.width = width;
567 	flush.r.height = height;
568 
569 	viogpu_send_cmd(sc, &flush, sizeof(flush), &resp, sizeof(resp));
570 
571 	if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
572 		printf("%s: failed RESOURCE_FLUSH: %d\n", sc->sc_dev.dv_xname,
573 		    resp.type);
574 		return 1;
575 	}
576 
577 	return 0;
578 }
579 
/*
 * wsdisplay ioctl handler.  Returns 0 when the ioctl was handled and
 * -1 to tell the wsdisplay layer the ioctl is unsupported.
 */
int
viogpu_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct rasops_info *ri = v;
	struct wsdisplay_param *dp = (struct wsdisplay_param *)data;
	struct wsdisplay_fbinfo *wdf;

	switch (cmd) {
	case WSDISPLAYIO_GETPARAM:
		/* forward parameter queries to the global handler, if any */
		if (ws_get_param)
			return ws_get_param(dp);
		return -1;
	case WSDISPLAYIO_SETPARAM:
		if (ws_set_param)
			return ws_set_param(dp);
		return -1;
	case WSDISPLAYIO_GTYPE:
		*(u_int *)data = WSDISPLAY_TYPE_VIOGPU;
		break;
	case WSDISPLAYIO_GINFO:
		/* report framebuffer geometry; no colormap */
		wdf = (struct wsdisplay_fbinfo *)data;
		wdf->width = ri->ri_width;
		wdf->height = ri->ri_height;
		wdf->depth = ri->ri_depth;
		wdf->stride = ri->ri_stride;
		wdf->cmsize = 0;
		wdf->offset = 0;
		break;
	case WSDISPLAYIO_LINEBYTES:
		*(u_int *)data = ri->ri_stride;
		break;
	case WSDISPLAYIO_SMODE:
		/* accepted but nothing to do; repaint runs regardless */
		break;
	case WSDISPLAYIO_GETSUPPORTEDDEPTH:
		*(u_int *)data = WSDISPLAYIO_DEPTH_24_32;
		break;
	case WSDISPLAYIO_GVIDEO:
	case WSDISPLAYIO_SVIDEO:
		break;
	default:
		return -1;
	}

	return 0;
}
625 
/*
 * Map the framebuffer for userland.
 *
 * NOTE(review): this returns the kernel virtual address of the DMA
 * buffer cast to paddr_t; mmap handlers conventionally return a
 * physical address (e.g. derived from sc_fb_dma_seg.ds_addr) --
 * confirm this is correct for the platforms this driver targets.
 */
paddr_t
viogpu_wsmmap(void *v, off_t off, int prot)
{
	struct rasops_info *ri = v;
	struct viogpu_softc *sc = ri->ri_hw;
	size_t size = sc->sc_fb_dma_size;

	/* off is signed; reject negative and out-of-range offsets */
	if (off < 0 || off >= size)
		return -1;

	return (((paddr_t)sc->sc_fb_dma_kva + off) | PMAP_NOCACHE);
}
638 
/*
 * wsdisplay alloc_screen hook: delegate to rasops; the screen type
 * argument is ignored since only one screen layout exists.
 */
int
viogpu_alloc_screen(void *v, const struct wsscreen_descr *type,
    void **cookiep, int *curxp, int *curyp, uint32_t *attrp)
{
	return rasops_alloc_screen(v, cookiep, curxp, curyp, attrp);
}
645 
#if 0
/*
 * Disabled experimental DRM-based framebuffer probe.  Compiled out:
 * it references DRM symbols and softc fields (e.g. sc->ro,
 * viogpu_framebuffer) that do not exist in this driver.  Kept for
 * reference only.
 */
int
viogpu_fb_probe(struct drm_fb_helper *helper,
    struct drm_fb_helper_surface_size *sizes)
{
	struct viogpu_softc *sc = helper->dev->dev_private;
	struct drm_device *ddev = helper->dev;
	struct viogpu_framebuffer *sfb = to_viogpu_framebuffer(helper->fb);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = helper->fb;
	struct wsemuldisplaydev_attach_args aa;
	struct rasops_info *ri = &sc->ro;
	struct viogpufb_attach_args sfa;
	unsigned int bytes_per_pixel;
	struct fb_info *info;
	size_t size;
	int error;

	if (viogpu_get_display_info(sc) != 0)
		return -1;

	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);

	mode_cmd.width = sc->sc_fb_width;
	mode_cmd.height = sc->sc_fb_height;
	mode_cmd.pitches[0] = sc->sc_fb_width * bytes_per_pixel;
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
	    sizes->surface_depth);

	size = roundup(mode_cmd.pitches[0] * mode_cmd.height, PAGE_SIZE);

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	drm_helper_mode_fill_fb_struct(ddev, fb, &mode_cmd);
	fb->format = drm_format_info(DRM_FORMAT_ARGB8888);
	fb->obj[0] = &sfb->obj->base;
	error = drm_framebuffer_init(ddev, fb, &viogpu_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		DRM_ERROR("Failed to allocate fb_info\n");
		return PTR_ERR(info);
	}
	info->par = helper;

	error = viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height);
	if (error)
		return error;

	error = viogpu_attach_backing(sc, 1, sfb->obj->dmamap);
	if (error)
		return error;

	error = viogpu_set_scanout(sc, 0, 1, sc->sc_fb_width, sc->sc_fb_height);
	if (error)
		return error;

	return 0;
}
#endif
714