/* $OpenBSD: viomb.c,v 1.6 2020/06/27 07:20:57 bket Exp $	 */
/* $NetBSD: viomb.c,v 1.1 2011/10/30 12:12:21 hannken Exp $	 */

/*
 * Copyright (c) 2012 Talypov Dinar <dinar@i-nk.ru>
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/task.h>
#include <sys/pool.h>
#include <sys/sensors.h>

#include <uvm/uvm_extern.h>

#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>

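/*
 * viomb(4): virtio memory balloon driver.  The host sets a target
 * balloon size (in pages) in the device configuration space; the
 * guest inflates (gives up pages) or deflates (reclaims pages) toward
 * that target by posting arrays of page frame numbers on the inflate
 * and deflate virtqueues.
 */
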
#if VIRTIO_PAGE_SIZE != PAGE_SIZE
#error non-4K page sizes are not supported yet
#endif

#define	DEVNAME(sc)	sc->sc_dev.dv_xname
#if VIRTIO_DEBUG
#define VIOMBDEBUG(sc, format, args...)					   \
		do { printf("%s: " format, sc->sc_dev.dv_xname, ##args); } \
		while (0)
#else
#define VIOMBDEBUG(...)
#endif

/*
 * Flags specifying the kind of vring operation; these arguably belong
 * in virtiovar.h.
 */
#define VRING_READ		0
#define VRING_WRITE		1

/* notify or don't notify */
#define VRING_NO_NOTIFY		0
#define VRING_NOTIFY		1

/* Configuration registers */
#define VIRTIO_BALLOON_CONFIG_NUM_PAGES	0	/* 32bit */
#define VIRTIO_BALLOON_CONFIG_ACTUAL	4	/* 32bit */

/* Feature bits */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST	(1ULL<<0)
#define VIRTIO_BALLOON_F_STATS_VQ	(1ULL<<1)

static const struct virtio_feature_name viomb_feature_names[] = {
#if VIRTIO_DEBUG
	{VIRTIO_BALLOON_F_MUST_TELL_HOST, "TellHost"},
	{VIRTIO_BALLOON_F_STATS_VQ, "StatVQ"},
#endif
	{0, NULL}
};
#define PGS_PER_REQ		256	/* 1MB, 4KB/page */
#define VQ_INFLATE	0
#define VQ_DEFLATE	1

struct balloon_req {
	bus_dmamap_t	 bl_dmamap;
	struct pglist	 bl_pglist;
	int		 bl_nentries;
	u_int32_t	*bl_pages;
};

struct viomb_softc {
	struct device		sc_dev;
	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[2];
	u_int32_t		sc_npages; /* desired pages */
	u_int32_t		sc_actual; /* current pages */
	struct balloon_req	sc_req;
	struct taskq		*sc_taskq;
	struct task		sc_task;
	struct pglist		sc_balloon_pages;
	struct ksensor		sc_sens[2];
	struct ksensordev	sc_sensdev;
};

int	viomb_match(struct device *, void *, void *);
void	viomb_attach(struct device *, struct device *, void *);
void	viomb_worker(void *);
void	viomb_inflate(struct viomb_softc *);
void	viomb_deflate(struct viomb_softc *);
int	viomb_config_change(struct virtio_softc *);
void	viomb_read_config(struct viomb_softc *);
int	viomb_vq_dequeue(struct virtqueue *);
int	viomb_inflate_intr(struct virtqueue *);
int	viomb_deflate_intr(struct virtqueue *);

struct cfattach viomb_ca = {
	sizeof(struct viomb_softc), viomb_match, viomb_attach
};

struct cfdriver viomb_cd = {
	NULL, "viomb", DV_DULL
};

int
viomb_match(struct device *parent, void *match, void *aux)
{
	struct virtio_softc *va = aux;
	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BALLOON)
		return (1);
	return (0);
}

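/*
 * Attach: negotiate features, set up the inflate and deflate
 * virtqueues, allocate the single (reused) balloon request and its
 * DMA map, create the worker taskq, and attach "desired"/"current"
 * sensors reporting the balloon size in bytes.
 */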
void
viomb_attach(struct device *parent, struct device *self, void *aux)
{
	struct viomb_softc *sc = (struct viomb_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	int i;

	if (vsc->sc_child != NULL) {
		printf("child already attached for %s; something wrong...\n",
		    parent->dv_xname);
		return;
	}

	/* fail on non-4K page size archs */
	if (VIRTIO_PAGE_SIZE != PAGE_SIZE) {
		printf("non-4K page size arch found, needs %d, got %d\n",
		    VIRTIO_PAGE_SIZE, PAGE_SIZE);
		return;
	}

	sc->sc_virtio = vsc;
	vsc->sc_vqs = &sc->sc_vq[VQ_INFLATE];
	vsc->sc_nvqs = 0;
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_config_change = viomb_config_change;

	vsc->sc_driver_features = VIRTIO_BALLOON_F_MUST_TELL_HOST;
	if (virtio_negotiate_features(vsc, viomb_feature_names) != 0)
		goto err;

	if (virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE,
	    sizeof(u_int32_t) * PGS_PER_REQ, 1, "inflate") != 0)
		goto err;
	vsc->sc_nvqs++;
	if (virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE,
	    sizeof(u_int32_t) * PGS_PER_REQ, 1, "deflate") != 0)
		goto err;
	vsc->sc_nvqs++;

	sc->sc_vq[VQ_INFLATE].vq_done = viomb_inflate_intr;
	sc->sc_vq[VQ_DEFLATE].vq_done = viomb_deflate_intr;
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_INFLATE]);
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_DEFLATE]);

	viomb_read_config(sc);
	TAILQ_INIT(&sc->sc_balloon_pages);

	if ((sc->sc_req.bl_pages = dma_alloc(sizeof(u_int32_t) * PGS_PER_REQ,
	    PR_NOWAIT|PR_ZERO)) == NULL) {
		printf("%s: Can't alloc DMA memory.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_create(vsc->sc_dmat, sizeof(u_int32_t) * PGS_PER_REQ,
			      1, sizeof(u_int32_t) * PGS_PER_REQ, 0,
			      BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) {
		printf("%s: dmamap creation failed.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap,
			    &sc->sc_req.bl_pages[0],
			    sizeof(u_int32_t) * PGS_PER_REQ,
			    NULL, BUS_DMA_NOWAIT)) {
		printf("%s: dmamap load failed.\n", DEVNAME(sc));
		goto err_dmamap;
	}

	sc->sc_taskq = taskq_create("viomb", 1, IPL_BIO, 0);
	if (sc->sc_taskq == NULL)
		goto err_dmamap;
	task_set(&sc->sc_task, viomb_worker, sc);

	strlcpy(sc->sc_sensdev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensdev.xname));
	strlcpy(sc->sc_sens[0].desc, "desired",
	    sizeof(sc->sc_sens[0].desc));
	sc->sc_sens[0].type = SENSOR_INTEGER;
	sensor_attach(&sc->sc_sensdev, &sc->sc_sens[0]);
	sc->sc_sens[0].value = sc->sc_npages << PAGE_SHIFT;

	strlcpy(sc->sc_sens[1].desc, "current",
	    sizeof(sc->sc_sens[1].desc));
	sc->sc_sens[1].type = SENSOR_INTEGER;
	sensor_attach(&sc->sc_sensdev, &sc->sc_sens[1]);
	sc->sc_sens[1].value = sc->sc_actual << PAGE_SHIFT;

	sensordev_install(&sc->sc_sensdev);

	printf("\n");
	return;
err_dmamap:
	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap);
err:
	if (sc->sc_req.bl_pages)
		dma_free(sc->sc_req.bl_pages, sizeof(u_int32_t) * PGS_PER_REQ);
	for (i = 0; i < vsc->sc_nvqs; i++)
		virtio_free_vq(vsc, &sc->sc_vq[i]);
	vsc->sc_nvqs = 0;
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * Config change
 */
int
viomb_config_change(struct virtio_softc *vsc)
{
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;

	task_add(sc->sc_taskq, &sc->sc_task);

	return (1);
}

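/*
 * Worker task: re-read the target from the configuration space,
 * inflate or deflate toward it, then refresh the sensors.  Runs at
 * splbio() to serialize against the vq interrupt handlers.
 */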
void
viomb_worker(void *arg1)
{
	struct viomb_softc *sc = (struct viomb_softc *)arg1;
	int s;

	s = splbio();
	viomb_read_config(sc);
	if (sc->sc_npages > sc->sc_actual) {
		VIOMBDEBUG(sc, "inflating balloon from %u to %u.\n",
			   sc->sc_actual, sc->sc_npages);
		viomb_inflate(sc);
	} else if (sc->sc_npages < sc->sc_actual) {
		VIOMBDEBUG(sc, "deflating balloon from %u to %u.\n",
			   sc->sc_actual, sc->sc_npages);
		viomb_deflate(sc);
	}

	sc->sc_sens[0].value = sc->sc_npages << PAGE_SHIFT;
	sc->sc_sens[1].value = sc->sc_actual << PAGE_SHIFT;

	splx(s);
}

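/*
 * Inflate: pull up to PGS_PER_REQ pages out of the free page pool,
 * record their page frame numbers (in VIRTIO_PAGE_SIZE units) in the
 * request's DMA buffer, and hand the array to the host on the inflate
 * vq.  The pages stay on bl_pglist until the completion interrupt
 * moves them onto sc_balloon_pages.
 */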
void
viomb_inflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];
	u_int32_t nvpages;
	int slot, error, i = 0;

	nvpages = sc->sc_npages - sc->sc_actual;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;

	if ((error = uvm_pglistalloc(nvpages * PAGE_SIZE, 0,
				     dma_constraint.ucr_high,
				     0, 0, &b->bl_pglist, nvpages,
				     UVM_PLA_NOWAIT))) {
		printf("%s: unable to allocate %u physmem pages, error %d\n",
		    DEVNAME(sc), nvpages, error);
		return;
	}

	b->bl_nentries = nvpages;
	TAILQ_FOREACH(p, &b->bl_pglist, pageq)
		b->bl_pages[i++] = p->phys_addr / VIRTIO_PAGE_SIZE;

	KASSERT(i == nvpages);

	if (virtio_enqueue_prep(vq, &slot)) {
		printf("%s: virtio_enqueue_prep() failed, vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s: virtio_enqueue_reserve() failed, vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages, BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
			 sizeof(u_int32_t) * nvpages, VRING_READ);
	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
	uvm_pglistfree(&b->bl_pglist);
	return;
}

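/*
 * Deflate: take up to PGS_PER_REQ pages back off sc_balloon_pages and
 * report their page frame numbers to the host on the deflate vq.
 * Unless VIRTIO_BALLOON_F_MUST_TELL_HOST was negotiated, the pages
 * may be returned to UVM before the host acknowledges; otherwise they
 * are freed in the completion interrupt.
 */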
void
viomb_deflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];
	u_int64_t nvpages;
	int i, slot;

	nvpages = sc->sc_actual - sc->sc_npages;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;
	b->bl_nentries = nvpages;

	TAILQ_INIT(&b->bl_pglist);
	for (i = 0; i < nvpages; i++) {
		p = TAILQ_FIRST(&sc->sc_balloon_pages);
		if (p == NULL) {
			/* i pages have been moved to bl_pglist so far */
			b->bl_nentries = i;
			break;
		}
		TAILQ_REMOVE(&sc->sc_balloon_pages, p, pageq);
		TAILQ_INSERT_TAIL(&b->bl_pglist, p, pageq);
		b->bl_pages[i] = p->phys_addr / VIRTIO_PAGE_SIZE;
	}

	if (virtio_enqueue_prep(vq, &slot)) {
		printf("%s: virtio_enqueue_prep() failed, vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s: virtio_enqueue_reserve() failed, vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
		    sizeof(u_int32_t) * nvpages,
		    BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
			 sizeof(u_int32_t) * nvpages, VRING_READ);

	if (!virtio_has_feature(vsc, VIRTIO_BALLOON_F_MUST_TELL_HOST))
		uvm_pglistfree(&b->bl_pglist);
	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
	while ((p = TAILQ_LAST(&b->bl_pglist, pglist))) {
		TAILQ_REMOVE(&b->bl_pglist, p, pageq);
		TAILQ_INSERT_HEAD(&sc->sc_balloon_pages, p, pageq);
	}
	return;
}

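/*
 * Refresh sc_npages (the host's target) and sc_actual (the current
 * balloon size) from the device configuration space.
 */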
void
viomb_read_config(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	u_int32_t reg;

	/* these values are explicitly specified as little-endian */
	reg = virtio_read_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_NUM_PAGES);
	sc->sc_npages = letoh32(reg);
	reg = virtio_read_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL);
	sc->sc_actual = letoh32(reg);
	VIOMBDEBUG(sc, "sc->sc_npages %u, sc->sc_actual %u\n",
		   sc->sc_npages, sc->sc_actual);
}

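/*
 * Common completion path: pop the finished request off the vq and
 * release its slot.
 */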
int
viomb_vq_dequeue(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	int r, slot;

	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r != 0) {
		printf("%s: dequeue failed, errno %d\n", DEVNAME(sc), r);
		return (r);
	}
	virtio_dequeue_commit(vq, slot);
	return (0);
}

/*
 * Interrupt handlers for the inflate and deflate virtqueues: dequeue
 * the completed request, update the device's notion of the actual
 * balloon size, and reschedule the worker if the target has not been
 * reached yet.
 */
int
viomb_inflate_intr(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	struct balloon_req *b;
	u_int64_t nvpages;

	if (viomb_vq_dequeue(vq))
		return (1);

	b = &sc->sc_req;
	nvpages = b->bl_nentries;
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages,
			BUS_DMASYNC_POSTWRITE);
	TAILQ_CONCAT(&sc->sc_balloon_pages, &b->bl_pglist, pageq);
	VIOMBDEBUG(sc, "updating sc->sc_actual from %u to %llu\n",
		   sc->sc_actual, sc->sc_actual + nvpages);
	virtio_write_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL,
				     sc->sc_actual + nvpages);
	viomb_read_config(sc);

	/* if we have more work to do, add it to the task list */
	if (sc->sc_npages > sc->sc_actual)
		task_add(sc->sc_taskq, &sc->sc_task);

	return (1);
}

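/*
 * Deflate completion: with VIRTIO_BALLOON_F_MUST_TELL_HOST the pages
 * may only be reused once the host has seen the request, so they are
 * released back to UVM here rather than in viomb_deflate().
 */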
int
viomb_deflate_intr(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	struct balloon_req *b;
	u_int64_t nvpages;

	if (viomb_vq_dequeue(vq))
		return (1);

	b = &sc->sc_req;
	nvpages = b->bl_nentries;
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages,
			BUS_DMASYNC_POSTWRITE);

	if (virtio_has_feature(vsc, VIRTIO_BALLOON_F_MUST_TELL_HOST))
		uvm_pglistfree(&b->bl_pglist);

	VIOMBDEBUG(sc, "updating sc->sc_actual from %u to %llu\n",
		sc->sc_actual, sc->sc_actual - nvpages);
	virtio_write_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL,
				     sc->sc_actual - nvpages);
	viomb_read_config(sc);

	/* if we have more work to do, add it to the task list */
	if (sc->sc_npages < sc->sc_actual)
		task_add(sc->sc_taskq, &sc->sc_task);

	return (1);
}