1 /* $OpenBSD: viogpu.c,v 1.12 2025/01/16 10:33:27 sf Exp $ */
2
3 /*
4 * Copyright (c) 2021-2023 joshua stein <jcs@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/timeout.h>
23
24 #include <uvm/uvm_extern.h>
25
26 #include <dev/pv/virtioreg.h>
27 #include <dev/pv/virtiovar.h>
28 #include <dev/pv/viogpu.h>
29
30 #include <dev/wscons/wsconsio.h>
31 #include <dev/wscons/wsdisplayvar.h>
32 #include <dev/rasops/rasops.h>
33
34 #if VIRTIO_DEBUG
35 #define DPRINTF(x...) printf(x)
36 #else
37 #define DPRINTF(x...)
38 #endif
39
struct viogpu_softc;

/* autoconf interface */
int viogpu_match(struct device *, void *, void *);
void viogpu_attach(struct device *, struct device *, void *);
/* synchronous command submission and virtqueue completion handling */
int viogpu_send_cmd(struct viogpu_softc *, void *, size_t, void *, size_t);
int viogpu_vq_done(struct virtqueue *vq);
void viogpu_rx_soft(void *arg);

/* wrappers around the virtio-gpu 2D control commands */
int viogpu_get_display_info(struct viogpu_softc *);
int viogpu_create_2d(struct viogpu_softc *, int, int, int);
int viogpu_set_scanout(struct viogpu_softc *, int, int, int, int);
int viogpu_attach_backing(struct viogpu_softc *, int, bus_dmamap_t);
int viogpu_transfer_to_host_2d(struct viogpu_softc *sc, int, uint32_t,
    uint32_t);
int viogpu_flush_resource(struct viogpu_softc *, int, uint32_t, uint32_t);

/* periodic framebuffer push to the host (timeout callback) */
void viogpu_repaint(void *);

/* wsdisplay accessops */
int viogpu_wsioctl(void *, u_long, caddr_t, int, struct proc *);
paddr_t viogpu_wsmmap(void *, off_t, int);
int viogpu_alloc_screen(void *, const struct wsscreen_descr *, void **,
    int *, int *, uint32_t *);

/* upper bounds on text rows/cols handed to rasops_init() */
#define VIOGPU_HEIGHT 160
#define VIOGPU_WIDTH 160
65
/*
 * Per-device driver state.
 */
struct viogpu_softc {
	struct device sc_dev;
	struct virtio_softc *sc_virtio;	/* parent virtio transport */
#define VQCTRL 0
#define VQCURS 1
	struct virtqueue sc_vqs[2];	/* control and cursor virtqueues */

	/* single-page DMA buffer used for commands and their responses */
	bus_dma_segment_t sc_dma_seg;
	bus_dmamap_t sc_dma_map;
	size_t sc_dma_size;
	void *sc_cmd;			/* kva of the command buffer */
	int sc_fence_id;		/* fence id of the last command sent */

	/* framebuffer geometry (from the host) and its backing DMA memory */
	int sc_fb_width;
	int sc_fb_height;
	bus_dma_segment_t sc_fb_dma_seg;
	bus_dmamap_t sc_fb_dma_map;
	size_t sc_fb_dma_size;
	caddr_t sc_fb_dma_kva;		/* kva of the framebuffer */

	/* rasops/wscons glue built during attach */
	struct rasops_info sc_ri;
	struct wsscreen_descr sc_wsd;
	struct wsscreen_list sc_wsl;
	struct wsscreen_descr *sc_scrlist[1];
	int console;			/* nonzero when attached as console */
	int primary;

	struct timeout sc_timo;		/* periodic repaint timeout */
};
95
/* Feature names for virtio_negotiate_features(); only spelled out when
 * VIRTIO_DEBUG is set. */
static const struct virtio_feature_name viogpu_feature_names[] = {
#if VIRTIO_DEBUG
	{ VIRTIO_GPU_F_VIRGL, "VirGL" },
	{ VIRTIO_GPU_F_EDID, "EDID" },
#endif
	{ 0, NULL },
};

/*
 * NOTE(review): viogpu_attach() builds per-device sc_wsd/sc_wsl instead of
 * using these globals; nothing in this file references them -- confirm they
 * are needed before removing.
 */
struct wsscreen_descr viogpu_stdscreen = { "std" };

const struct wsscreen_descr *viogpu_scrlist[] = {
	&viogpu_stdscreen,
};

struct wsscreen_list viogpu_screenlist = {
	nitems(viogpu_scrlist), viogpu_scrlist
};

/* wsdisplay entry points; text operations are serviced by rasops */
struct wsdisplay_accessops viogpu_accessops = {
	.ioctl = viogpu_wsioctl,
	.mmap = viogpu_wsmmap,
	.alloc_screen = viogpu_alloc_screen,
	.free_screen = rasops_free_screen,
	.show_screen = rasops_show_screen,
	.getchar = rasops_getchar,
	.load_font = rasops_load_font,
	.list_font = rasops_list_font,
	.scrollback = rasops_scrollback,
};

/* autoconf glue */
const struct cfattach viogpu_ca = {
	sizeof(struct viogpu_softc),
	viogpu_match,
	viogpu_attach,
	NULL
};

struct cfdriver viogpu_cd = {
	NULL, "viogpu", DV_DULL
};
136
137 int
viogpu_match(struct device * parent,void * match,void * aux)138 viogpu_match(struct device *parent, void *match, void *aux)
139 {
140 struct virtio_attach_args *va = aux;
141
142 if (va->va_devid == PCI_PRODUCT_VIRTIO_GPU)
143 return 1;
144
145 return 0;
146 }
147
148 void
viogpu_attach(struct device * parent,struct device * self,void * aux)149 viogpu_attach(struct device *parent, struct device *self, void *aux)
150 {
151 struct viogpu_softc *sc = (struct viogpu_softc *)self;
152 struct virtio_softc *vsc = (struct virtio_softc *)parent;
153 struct virtio_attach_args *va = aux;
154 struct wsemuldisplaydev_attach_args waa;
155 struct rasops_info *ri = &sc->sc_ri;
156 uint32_t defattr;
157 int nsegs;
158
159 if (vsc->sc_child != NULL) {
160 printf(": child already attached for %s\n", parent->dv_xname);
161 return;
162 }
163 vsc->sc_child = self;
164
165 if (virtio_negotiate_features(vsc, viogpu_feature_names) != 0)
166 goto err;
167 if (!vsc->sc_version_1) {
168 printf(": requires virtio version 1\n");
169 goto err;
170 }
171
172 vsc->sc_ipl = IPL_TTY;
173 softintr_establish(IPL_TTY, viogpu_rx_soft, vsc);
174 sc->sc_virtio = vsc;
175
176 /* allocate command and cursor virtqueues */
177 vsc->sc_vqs = sc->sc_vqs;
178 if (virtio_alloc_vq(vsc, &sc->sc_vqs[VQCTRL], VQCTRL, 1, "control")) {
179 printf(": alloc_vq failed\n");
180 goto err;
181 }
182 sc->sc_vqs[VQCTRL].vq_done = viogpu_vq_done;
183
184 if (virtio_alloc_vq(vsc, &sc->sc_vqs[VQCURS], VQCURS, 1, "cursor")) {
185 printf(": alloc_vq failed\n");
186 goto err;
187 }
188 vsc->sc_nvqs = nitems(sc->sc_vqs);
189
190 /* setup DMA space for sending commands */
191 sc->sc_dma_size = NBPG;
192 if (bus_dmamap_create(vsc->sc_dmat, sc->sc_dma_size, 1,
193 sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
194 &sc->sc_dma_map) != 0) {
195 printf(": create failed");
196 goto errdma;
197 }
198 if (bus_dmamem_alloc(vsc->sc_dmat, sc->sc_dma_size, 16, 0,
199 &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
200 printf(": alloc failed");
201 goto destroy;
202 }
203 if (bus_dmamem_map(vsc->sc_dmat, &sc->sc_dma_seg, nsegs,
204 sc->sc_dma_size, (caddr_t *)&sc->sc_cmd, BUS_DMA_NOWAIT) != 0) {
205 printf(": map failed");
206 goto free;
207 }
208 if (bus_dmamap_load(vsc->sc_dmat, sc->sc_dma_map, sc->sc_cmd,
209 sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0) {
210 printf(": load failed");
211 goto unmap;
212 }
213
214 if (virtio_attach_finish(vsc, va) != 0)
215 goto unmap;
216
217 if (viogpu_get_display_info(sc) != 0)
218 goto unmap;
219
220 /* setup DMA space for actual framebuffer */
221 sc->sc_fb_dma_size = sc->sc_fb_width * sc->sc_fb_height * 4;
222 if (bus_dmamap_create(vsc->sc_dmat, sc->sc_fb_dma_size, 1,
223 sc->sc_fb_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
224 &sc->sc_fb_dma_map) != 0)
225 goto unmap;
226 if (bus_dmamem_alloc(vsc->sc_dmat, sc->sc_fb_dma_size, 1024, 0,
227 &sc->sc_fb_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
228 goto fb_destroy;
229 if (bus_dmamem_map(vsc->sc_dmat, &sc->sc_fb_dma_seg, nsegs,
230 sc->sc_fb_dma_size, &sc->sc_fb_dma_kva, BUS_DMA_NOWAIT) != 0)
231 goto fb_free;
232 if (bus_dmamap_load(vsc->sc_dmat, sc->sc_fb_dma_map,
233 sc->sc_fb_dma_kva, sc->sc_fb_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
234 goto fb_unmap;
235
236 if (viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height) != 0)
237 goto fb_unmap;
238
239 if (viogpu_attach_backing(sc, 1, sc->sc_fb_dma_map) != 0)
240 goto fb_unmap;
241
242 if (viogpu_set_scanout(sc, 0, 1, sc->sc_fb_width,
243 sc->sc_fb_height) != 0)
244 goto fb_unmap;
245
246 sc->console = 1;
247
248 ri->ri_hw = sc;
249 ri->ri_bits = sc->sc_fb_dma_kva;
250 ri->ri_flg = RI_VCONS | RI_CENTER | RI_CLEAR | RI_WRONLY;
251 ri->ri_depth = 32;
252 ri->ri_width = sc->sc_fb_width;
253 ri->ri_height = sc->sc_fb_height;
254 ri->ri_stride = ri->ri_width * ri->ri_depth / 8;
255 ri->ri_bpos = 0; /* B8G8R8X8 */
256 ri->ri_bnum = 8;
257 ri->ri_gpos = 8;
258 ri->ri_gnum = 8;
259 ri->ri_rpos = 16;
260 ri->ri_rnum = 8;
261 rasops_init(ri, VIOGPU_HEIGHT, VIOGPU_WIDTH);
262
263 strlcpy(sc->sc_wsd.name, "std", sizeof(sc->sc_wsd.name));
264 sc->sc_wsd.capabilities = ri->ri_caps;
265 sc->sc_wsd.nrows = ri->ri_rows;
266 sc->sc_wsd.ncols = ri->ri_cols;
267 sc->sc_wsd.textops = &ri->ri_ops;
268 sc->sc_wsd.fontwidth = ri->ri_font->fontwidth;
269 sc->sc_wsd.fontheight = ri->ri_font->fontheight;
270
271 sc->sc_scrlist[0] = &sc->sc_wsd;
272 sc->sc_wsl.nscreens = 1;
273 sc->sc_wsl.screens = (const struct wsscreen_descr **)sc->sc_scrlist;
274
275 printf(": %dx%d, %dbpp\n", ri->ri_width, ri->ri_height, ri->ri_depth);
276
277 timeout_set(&sc->sc_timo, viogpu_repaint, sc);
278 viogpu_repaint(sc);
279
280 if (sc->console) {
281 ri->ri_ops.pack_attr(ri->ri_active, 0, 0, 0, &defattr);
282 wsdisplay_cnattach(&sc->sc_wsd, ri->ri_active, 0, 0, defattr);
283 }
284
285 memset(&waa, 0, sizeof(waa));
286 waa.scrdata = &sc->sc_wsl;
287 waa.accessops = &viogpu_accessops;
288 waa.accesscookie = ri;
289 waa.console = sc->console;
290
291 config_found_sm(self, &waa, wsemuldisplaydevprint,
292 wsemuldisplaydevsubmatch);
293 return;
294
295 fb_unmap:
296 bus_dmamem_unmap(vsc->sc_dmat, (caddr_t)&sc->sc_fb_dma_kva,
297 sc->sc_fb_dma_size);
298 fb_free:
299 bus_dmamem_free(vsc->sc_dmat, &sc->sc_fb_dma_seg, 1);
300 fb_destroy:
301 bus_dmamap_destroy(vsc->sc_dmat, sc->sc_fb_dma_map);
302 unmap:
303 bus_dmamem_unmap(vsc->sc_dmat, (caddr_t)&sc->sc_cmd, sc->sc_dma_size);
304 free:
305 bus_dmamem_free(vsc->sc_dmat, &sc->sc_dma_seg, 1);
306 destroy:
307 bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dma_map);
308 errdma:
309 printf(": DMA setup failed\n");
310 err:
311 vsc->sc_child = VIRTIO_CHILD_ERROR;
312 return;
313 }
314
/*
 * Timeout callback: push the entire framebuffer to the host and ask it to
 * flush resource 1 to the scanout, then re-arm the timeout.  Runs at
 * spltty() since the underlying command path shares the control virtqueue.
 */
void
viogpu_repaint(void *arg)
{
	struct viogpu_softc *sc = (struct viogpu_softc *)arg;
	int s;

	s = spltty();

	viogpu_transfer_to_host_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height);
	viogpu_flush_resource(sc, 1, sc->sc_fb_width, sc->sc_fb_height);

	/* repaint again in 10ms */
	timeout_add_msec(&sc->sc_timo, 10);
	splx(s);
}
329
/*
 * Control virtqueue completion callback (invoked via virtio_check_vq()).
 * Dequeues at most one used buffer and syncs the whole command buffer so
 * the CPU sees the device's response.  Returns 1 if a buffer was handled,
 * 0 if the queue was empty.
 */
int
viogpu_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viogpu_softc *sc = (struct viogpu_softc *)vsc->sc_child;
	int slot, len;

	if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
		return 0;

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD);

	virtio_dequeue_commit(vq, slot);

	return 1;
}
347
/*
 * Soft interrupt handler: drain all completed buffers from the control
 * virtqueue.  Registered in viogpu_attach() but not scheduled anywhere in
 * this file (command completion is polled in viogpu_send_cmd()).
 */
void
viogpu_rx_soft(void *arg)
{
	struct virtio_softc *vsc = (struct virtio_softc *)arg;
	struct viogpu_softc *sc = (struct viogpu_softc *)vsc->sc_child;
	struct virtqueue *vq = &sc->sc_vqs[VQCTRL];
	int slot, len;

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		/*
		 * NOTE(review): "slot" is a descriptor index but is passed
		 * as the byte offset of this sync; viogpu_vq_done() syncs
		 * offset 0 for the full buffer instead -- confirm intended.
		 */
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map,
		    slot, len, BUS_DMASYNC_POSTREAD);
		virtio_dequeue_commit(vq, slot);
	}
}
362
/*
 * Synchronously submit one command to the control virtqueue and copy the
 * device's response into "ret".  The single command DMA buffer is laid out
 * as [command][response], so only one command can be in flight at a time.
 * Completion is detected by busy-polling virtio_check_vq(), which runs
 * viogpu_vq_done().  Always returns 0; callers inspect the response header.
 */
int
viogpu_send_cmd(struct viogpu_softc *sc, void *cmd, size_t cmd_size, void *ret,
    size_t ret_size)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &vsc->sc_vqs[VQCTRL];
	struct virtio_gpu_ctrl_hdr *hdr =
	    (struct virtio_gpu_ctrl_hdr *)sc->sc_cmd;
	struct virtio_gpu_ctrl_hdr *ret_hdr = (struct virtio_gpu_ctrl_hdr *)ret;
	int slot, r;

	/* command first, then a zeroed area for the device's response */
	memcpy(sc->sc_cmd, cmd, cmd_size);
	memset(sc->sc_cmd + cmd_size, 0, ret_size);

#if VIRTIO_DEBUG >= 3
	printf("%s: [%ld -> %ld]: ", __func__, cmd_size, ret_size);
	for (int i = 0; i < cmd_size; i++) {
		printf(" %02x", ((unsigned char *)sc->sc_cmd)[i]);
	}
	printf("\n");
#endif

	/* fence every command so the response can be matched below */
	hdr->flags |= VIRTIO_GPU_FLAG_FENCE;
	hdr->fence_id = ++sc->sc_fence_id;

	r = virtio_enqueue_prep(vq, &slot);
	if (r != 0)
		panic("%s: control vq busy", sc->sc_dev.dv_xname);

	/*
	 * NOTE(review): sc_dma_map was already loaded at attach time and is
	 * re-loaded here on every command without an intervening unload --
	 * confirm this is acceptable for this bus_dma implementation.
	 */
	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_dma_map, sc->sc_cmd,
	    cmd_size + ret_size, NULL, BUS_DMA_NOWAIT);
	if (r != 0)
		panic("%s: dmamap load failed", sc->sc_dev.dv_xname);

	r = virtio_enqueue_reserve(vq, slot, sc->sc_dma_map->dm_nsegs + 1);
	if (r != 0)
		panic("%s: control vq busy", sc->sc_dev.dv_xname);

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, cmd_size,
	    BUS_DMASYNC_PREWRITE);

	/* device reads the command part (1), writes the response part (0) */
	virtio_enqueue_p(vq, slot, sc->sc_dma_map, 0, cmd_size, 1);
	virtio_enqueue_p(vq, slot, sc->sc_dma_map, cmd_size, ret_size, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	/* busy-wait until the host has consumed the buffer */
	while (virtio_check_vq(vsc, vq) == 0)
		;

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, cmd_size,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, cmd_size, ret_size,
	    BUS_DMASYNC_POSTREAD);

	memcpy(ret, sc->sc_cmd + cmd_size, ret_size);

	/* a mismatched fence means we read a response for another command */
	if (ret_hdr->fence_id != sc->sc_fence_id)
		printf("%s: return fence id not right (0x%llx != 0x%x)\n",
		    __func__, ret_hdr->fence_id, sc->sc_fence_id);

	return 0;
}
424
425 int
viogpu_get_display_info(struct viogpu_softc * sc)426 viogpu_get_display_info(struct viogpu_softc *sc)
427 {
428 struct virtio_gpu_ctrl_hdr hdr = { 0 };
429 struct virtio_gpu_resp_display_info info = { 0 };
430
431 hdr.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;
432
433 viogpu_send_cmd(sc, &hdr, sizeof(hdr), &info, sizeof(info));
434
435 if (info.hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO) {
436 printf("%s: failed getting display info\n",
437 sc->sc_dev.dv_xname);
438 return 1;
439 }
440
441 if (!info.pmodes[0].enabled) {
442 printf("%s: pmodes[0] is not enabled\n", sc->sc_dev.dv_xname);
443 return 1;
444 }
445
446 sc->sc_fb_width = info.pmodes[0].r.width;
447 sc->sc_fb_height = info.pmodes[0].r.height;
448
449 return 0;
450 }
451
452 int
viogpu_create_2d(struct viogpu_softc * sc,int resource_id,int width,int height)453 viogpu_create_2d(struct viogpu_softc *sc, int resource_id, int width,
454 int height)
455 {
456 struct virtio_gpu_resource_create_2d res = { 0 };
457 struct virtio_gpu_ctrl_hdr resp = { 0 };
458
459 res.hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
460 res.resource_id = resource_id;
461 res.format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
462 res.width = width;
463 res.height = height;
464
465 viogpu_send_cmd(sc, &res, sizeof(res), &resp, sizeof(resp));
466
467 if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
468 printf("%s: failed CREATE_2D: %d\n", sc->sc_dev.dv_xname,
469 resp.type);
470 return 1;
471 }
472
473 return 0;
474 }
475
476 int
viogpu_set_scanout(struct viogpu_softc * sc,int scanout_id,int resource_id,int width,int height)477 viogpu_set_scanout(struct viogpu_softc *sc, int scanout_id, int resource_id,
478 int width, int height)
479 {
480 struct virtio_gpu_set_scanout ss = { 0 };
481 struct virtio_gpu_ctrl_hdr resp = { 0 };
482
483 ss.hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT;
484 ss.scanout_id = scanout_id;
485 ss.resource_id = resource_id;
486 ss.r.width = width;
487 ss.r.height = height;
488
489 viogpu_send_cmd(sc, &ss, sizeof(ss), &resp, sizeof(resp));
490
491 if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
492 printf("%s: failed SET_SCANOUT: %d\n", sc->sc_dev.dv_xname,
493 resp.type);
494 return 1;
495 }
496
497 return 0;
498 }
499
500 int
viogpu_attach_backing(struct viogpu_softc * sc,int resource_id,bus_dmamap_t dmamap)501 viogpu_attach_backing(struct viogpu_softc *sc, int resource_id,
502 bus_dmamap_t dmamap)
503 {
504 struct virtio_gpu_resource_attach_backing_entries {
505 struct virtio_gpu_ctrl_hdr hdr;
506 __le32 resource_id;
507 __le32 nr_entries;
508 struct virtio_gpu_mem_entry entries[1];
509 } __packed backing = { 0 };
510 struct virtio_gpu_ctrl_hdr resp = { 0 };
511
512 backing.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
513 backing.resource_id = resource_id;
514 backing.nr_entries = nitems(backing.entries);
515 backing.entries[0].addr = dmamap->dm_segs[0].ds_addr;
516 backing.entries[0].length = dmamap->dm_segs[0].ds_len;
517
518 if (dmamap->dm_nsegs > 1)
519 printf("%s: TODO: send all %d segs\n", __func__,
520 dmamap->dm_nsegs);
521
522 #if VIRTIO_DEBUG
523 printf("%s: backing addr 0x%llx length %d\n", __func__,
524 backing.entries[0].addr, backing.entries[0].length);
525 #endif
526
527 viogpu_send_cmd(sc, &backing, sizeof(backing), &resp, sizeof(resp));
528
529 if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
530 printf("%s: failed ATTACH_BACKING: %d\n", sc->sc_dev.dv_xname,
531 resp.type);
532 return 1;
533 }
534
535 return 0;
536 }
537
538 int
viogpu_transfer_to_host_2d(struct viogpu_softc * sc,int resource_id,uint32_t width,uint32_t height)539 viogpu_transfer_to_host_2d(struct viogpu_softc *sc, int resource_id,
540 uint32_t width, uint32_t height)
541 {
542 struct virtio_gpu_transfer_to_host_2d tth = { 0 };
543 struct virtio_gpu_ctrl_hdr resp = { 0 };
544
545 tth.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
546 tth.resource_id = resource_id;
547 tth.r.width = width;
548 tth.r.height = height;
549
550 viogpu_send_cmd(sc, &tth, sizeof(tth), &resp, sizeof(resp));
551
552 if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
553 printf("%s: failed TRANSFER_TO_HOST: %d\n", sc->sc_dev.dv_xname,
554 resp.type);
555 return 1;
556 }
557
558 return 0;
559 }
560
561 int
viogpu_flush_resource(struct viogpu_softc * sc,int resource_id,uint32_t width,uint32_t height)562 viogpu_flush_resource(struct viogpu_softc *sc, int resource_id, uint32_t width,
563 uint32_t height)
564 {
565 struct virtio_gpu_resource_flush flush = { 0 };
566 struct virtio_gpu_ctrl_hdr resp = { 0 };
567
568 flush.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
569 flush.resource_id = resource_id;
570 flush.r.width = width;
571 flush.r.height = height;
572
573 viogpu_send_cmd(sc, &flush, sizeof(flush), &resp, sizeof(resp));
574
575 if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) {
576 printf("%s: failed RESOURCE_FLUSH: %d\n", sc->sc_dev.dv_xname,
577 resp.type);
578 return 1;
579 }
580
581 return 0;
582 }
583
584 int
viogpu_wsioctl(void * v,u_long cmd,caddr_t data,int flag,struct proc * p)585 viogpu_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
586 {
587 struct rasops_info *ri = v;
588 struct wsdisplay_param *dp = (struct wsdisplay_param *)data;
589 struct wsdisplay_fbinfo *wdf;
590
591 switch (cmd) {
592 case WSDISPLAYIO_GETPARAM:
593 if (ws_get_param)
594 return ws_get_param(dp);
595 return -1;
596 case WSDISPLAYIO_SETPARAM:
597 if (ws_set_param)
598 return ws_set_param(dp);
599 return -1;
600 case WSDISPLAYIO_GTYPE:
601 *(u_int *)data = WSDISPLAY_TYPE_VIOGPU;
602 break;
603 case WSDISPLAYIO_GINFO:
604 wdf = (struct wsdisplay_fbinfo *)data;
605 wdf->width = ri->ri_width;
606 wdf->height = ri->ri_height;
607 wdf->depth = ri->ri_depth;
608 wdf->stride = ri->ri_stride;
609 wdf->cmsize = 0;
610 wdf->offset = 0;
611 break;
612 case WSDISPLAYIO_LINEBYTES:
613 *(u_int *)data = ri->ri_stride;
614 break;
615 case WSDISPLAYIO_SMODE:
616 break;
617 case WSDISPLAYIO_GETSUPPORTEDDEPTH:
618 *(u_int *)data = WSDISPLAYIO_DEPTH_24_32;
619 break;
620 case WSDISPLAYIO_GVIDEO:
621 case WSDISPLAYIO_SVIDEO:
622 break;
623 default:
624 return -1;
625 }
626
627 return 0;
628 }
629
630 paddr_t
viogpu_wsmmap(void * v,off_t off,int prot)631 viogpu_wsmmap(void *v, off_t off, int prot)
632 {
633 struct rasops_info *ri = v;
634 struct viogpu_softc *sc = ri->ri_hw;
635 size_t size = sc->sc_fb_dma_size;
636
637 if (off < 0 || off >= size)
638 return -1;
639
640 return (((paddr_t)sc->sc_fb_dma_kva + off) | PMAP_NOCACHE);
641 }
642
643 int
viogpu_alloc_screen(void * v,const struct wsscreen_descr * type,void ** cookiep,int * curxp,int * curyp,uint32_t * attrp)644 viogpu_alloc_screen(void *v, const struct wsscreen_descr *type,
645 void **cookiep, int *curxp, int *curyp, uint32_t *attrp)
646 {
647 return rasops_alloc_screen(v, cookiep, curxp, curyp, attrp);
648 }
649
/*
 * NOTE(review): disabled DRM-style framebuffer probe kept for reference.
 * It references fields and helpers (sc->ro, drm_*, viogpu_framebuffer)
 * that do not exist in this driver; consider deleting this dead code.
 */
#if 0
int
viogpu_fb_probe(struct drm_fb_helper *helper,
    struct drm_fb_helper_surface_size *sizes)
{
	struct viogpu_softc *sc = helper->dev->dev_private;
	struct drm_device *ddev = helper->dev;
	struct viogpu_framebuffer *sfb = to_viogpu_framebuffer(helper->fb);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = helper->fb;
	struct wsemuldisplaydev_attach_args aa;
	struct rasops_info *ri = &sc->ro;
	struct viogpufb_attach_args sfa;
	unsigned int bytes_per_pixel;
	struct fb_info *info;
	size_t size;
	int error;

	if (viogpu_get_display_info(sc) != 0)
		return -1;

	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);

	mode_cmd.width = sc->sc_fb_width;
	mode_cmd.height = sc->sc_fb_height;
	mode_cmd.pitches[0] = sc->sc_fb_width * bytes_per_pixel;
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
	    sizes->surface_depth);

	size = roundup(mode_cmd.pitches[0] * mode_cmd.height, PAGE_SIZE);

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	drm_helper_mode_fill_fb_struct(ddev, fb, &mode_cmd);
	fb->format = drm_format_info(DRM_FORMAT_ARGB8888);
	fb->obj[0] = &sfb->obj->base;
	error = drm_framebuffer_init(ddev, fb, &viogpu_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		DRM_ERROR("Failed to allocate fb_info\n");
		return PTR_ERR(info);
	}
	info->par = helper;

	error = viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height);
	if (error)
		return error;

	error = viogpu_attach_backing(sc, 1, sfb->obj->dmamap);
	if (error)
		return error;

	error = viogpu_set_scanout(sc, 0, 1, sc->sc_fb_width, sc->sc_fb_height);
	if (error)
		return error;

	return 0;
}
#endif
718