/* $OpenBSD: viomb.c,v 1.12 2024/08/27 18:44:12 sf Exp $	 */
/* $NetBSD: viomb.c,v 1.1 2011/10/30 12:12:21 hannken Exp $	 */

/*
 * Copyright (c) 2012 Talypov Dinar <dinar@i-nk.ru>
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/task.h>
#include <sys/pool.h>
#include <sys/sensors.h>

#include <uvm/uvm_extern.h>

#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>

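/*
 * viomb is the driver for the virtio memory balloon device.  The host
 * publishes a target balloon size in the NUM_PAGES config register; the
 * guest inflates the balloon by allocating physical pages and handing
 * their page frame numbers to the host over the inflate virtqueue, and
 * deflates it by reclaiming pages via the deflate virtqueue.  Progress
 * is reported back to the host through the ACTUAL config register.
 */
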
#if VIRTIO_PAGE_SIZE != PAGE_SIZE
#error non-4K page sizes are not supported yet
#endif

#define	DEVNAME(sc)	sc->sc_dev.dv_xname
#if VIRTIO_DEBUG
#define VIOMBDEBUG(sc, format, args...)					  \
		do { printf("%s: " format, sc->sc_dev.dv_xname, ##args);} \
		while (0)
#else
#define VIOMBDEBUG(...)
#endif

/*
 * Flags used to specify the kind of operation; these should eventually
 * be moved to virtiovar.h.
 */
#define VRING_READ		0
#define VRING_WRITE		1

/* notify or don't notify */
#define VRING_NO_NOTIFY		0
#define VRING_NOTIFY		1

/* Configuration registers */
#define VIRTIO_BALLOON_CONFIG_NUM_PAGES	0	/* 32bit */
#define VIRTIO_BALLOON_CONFIG_ACTUAL	4	/* 32bit */

/* Feature bits */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST (1ULL<<0)
#define VIRTIO_BALLOON_F_STATS_VQ	(1ULL<<1)

static const struct virtio_feature_name viomb_feature_names[] = {
#if VIRTIO_DEBUG
	{VIRTIO_BALLOON_F_MUST_TELL_HOST, "TellHost"},
	{VIRTIO_BALLOON_F_STATS_VQ, "StatVQ"},
#endif
	{0, NULL}
};
#define PGS_PER_REQ		256	/* 1MB, 4KB/page */
#define VQ_INFLATE	0
#define VQ_DEFLATE	1

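/*
 * A single in-flight balloon request: bl_pages is a DMA-able array of up
 * to PGS_PER_REQ 32-bit page frame numbers handed to the host, bl_pglist
 * holds the corresponding vm_page structures and bl_nentries the number
 * of valid entries.
 */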
struct balloon_req {
	bus_dmamap_t	 bl_dmamap;
	struct pglist	 bl_pglist;
	int		 bl_nentries;
	u_int32_t	*bl_pages;
};

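/*
 * Per-device softc: one inflate and one deflate virtqueue, the target
 * (sc_npages) and current (sc_actual) balloon size mirrored from config
 * space, a single request buffer, a task queue for deferred balloon
 * work, the list of pages currently in the balloon, and two sensors
 * exporting the desired and current sizes in bytes.
 */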
struct viomb_softc {
	struct device		sc_dev;
	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[2];
	u_int32_t		sc_npages; /* desired pages */
	u_int32_t		sc_actual; /* current pages */
	struct balloon_req	sc_req;
	struct taskq		*sc_taskq;
	struct task		sc_task;
	struct pglist		sc_balloon_pages;
	struct ksensor		sc_sens[2];
	struct ksensordev	sc_sensdev;
};

int	viomb_match(struct device *, void *, void *);
void	viomb_attach(struct device *, struct device *, void *);
void	viomb_worker(void *);
void	viomb_inflate(struct viomb_softc *);
void	viomb_deflate(struct viomb_softc *);
int	viomb_config_change(struct virtio_softc *);
void	viomb_read_config(struct viomb_softc *);
int	viomb_vq_dequeue(struct virtqueue *);
int	viomb_inflate_intr(struct virtqueue *);
int	viomb_deflate_intr(struct virtqueue *);

const struct cfattach viomb_ca = {
	sizeof(struct viomb_softc), viomb_match, viomb_attach
};

struct cfdriver viomb_cd = {
	NULL, "viomb", DV_DULL
};

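/*
 * Match on the virtio balloon device ID advertised by the transport.
 */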
int
viomb_match(struct device *parent, void *match, void *aux)
{
	struct virtio_attach_args *va = aux;
	if (va->va_devid == PCI_PRODUCT_VIRTIO_BALLOON)
		return (1);
	return (0);
}

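/*
 * Attach: negotiate features (only MUST_TELL_HOST is requested), allocate
 * the inflate and deflate virtqueues, set up a DMA buffer large enough
 * for PGS_PER_REQ page frame numbers, create the worker task queue and
 * the sensors, then mark the driver as ready.
 */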
void
viomb_attach(struct device *parent, struct device *self, void *aux)
{
	struct viomb_softc *sc = (struct viomb_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	int i;

	if (vsc->sc_child != NULL) {
		printf("child already attached for %s; something wrong...\n",
		    parent->dv_xname);
		return;
	}

	/* fail on non-4K page size archs */
	if (VIRTIO_PAGE_SIZE != PAGE_SIZE) {
		printf("non-4K page size arch found, needs %d, got %d\n",
		    VIRTIO_PAGE_SIZE, PAGE_SIZE);
		return;
	}

	sc->sc_virtio = vsc;
	vsc->sc_vqs = &sc->sc_vq[VQ_INFLATE];
	vsc->sc_nvqs = 0;
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_config_change = viomb_config_change;

	vsc->sc_driver_features = VIRTIO_BALLOON_F_MUST_TELL_HOST;
	if (virtio_negotiate_features(vsc, viomb_feature_names) != 0)
		goto err;

	if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE, 1,
	    "inflate") != 0))
		goto err;
	vsc->sc_nvqs++;
	if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE, 1,
	    "deflate") != 0))
		goto err;
	vsc->sc_nvqs++;

	sc->sc_vq[VQ_INFLATE].vq_done = viomb_inflate_intr;
	sc->sc_vq[VQ_DEFLATE].vq_done = viomb_deflate_intr;
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_INFLATE]);
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_DEFLATE]);

	viomb_read_config(sc);
	TAILQ_INIT(&sc->sc_balloon_pages);

	if ((sc->sc_req.bl_pages = dma_alloc(sizeof(u_int32_t) * PGS_PER_REQ,
	    PR_NOWAIT|PR_ZERO)) == NULL) {
		printf("%s: Can't alloc DMA memory.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_create(vsc->sc_dmat, sizeof(u_int32_t) * PGS_PER_REQ,
			      1, sizeof(u_int32_t) * PGS_PER_REQ, 0,
			      BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) {
		printf("%s: dmamap creation failed.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap,
			    &sc->sc_req.bl_pages[0],
			    sizeof(uint32_t) * PGS_PER_REQ,
			    NULL, BUS_DMA_NOWAIT)) {
		printf("%s: dmamap load failed.\n", DEVNAME(sc));
		goto err_dmamap;
	}

	sc->sc_taskq = taskq_create("viomb", 1, IPL_BIO, 0);
	if (sc->sc_taskq == NULL)
		goto err_dmamap;
	task_set(&sc->sc_task, viomb_worker, sc);

	strlcpy(sc->sc_sensdev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensdev.xname));
	strlcpy(sc->sc_sens[0].desc, "desired",
	    sizeof(sc->sc_sens[0].desc));
	sc->sc_sens[0].type = SENSOR_INTEGER;
	sensor_attach(&sc->sc_sensdev, &sc->sc_sens[0]);
	sc->sc_sens[0].value = sc->sc_npages << PAGE_SHIFT;

	strlcpy(sc->sc_sens[1].desc, "current",
	    sizeof(sc->sc_sens[1].desc));
	sc->sc_sens[1].type = SENSOR_INTEGER;
	sensor_attach(&sc->sc_sensdev, &sc->sc_sens[1]);
	sc->sc_sens[1].value = sc->sc_actual << PAGE_SHIFT;

	sensordev_install(&sc->sc_sensdev);

	printf("\n");
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return;
err_dmamap:
	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap);
err:
	if (sc->sc_req.bl_pages)
		dma_free(sc->sc_req.bl_pages, sizeof(u_int32_t) * PGS_PER_REQ);
	for (i = 0; i < vsc->sc_nvqs; i++)
		virtio_free_vq(vsc, &sc->sc_vq[i]);
	vsc->sc_nvqs = 0;
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * Config change: the host has updated the balloon target; defer the
 * actual inflate/deflate work to the task queue.
 */
int
viomb_config_change(struct virtio_softc *vsc)
{
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;

	task_add(sc->sc_taskq, &sc->sc_task);

	return (1);
}

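/*
 * Worker task: re-read the device configuration and inflate or deflate
 * the balloon toward the host's target, then refresh the sensors.  Runs
 * at splbio to keep the virtqueue interrupt handlers out while the
 * request buffer is being set up.
 */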
void
viomb_worker(void *arg1)
{
	struct viomb_softc *sc = (struct viomb_softc *)arg1;
	int s;

	s = splbio();
	viomb_read_config(sc);
	if (sc->sc_npages > sc->sc_actual) {
		VIOMBDEBUG(sc, "inflating balloon from %u to %u.\n",
			   sc->sc_actual, sc->sc_npages);
		viomb_inflate(sc);
	} else if (sc->sc_npages < sc->sc_actual) {
		VIOMBDEBUG(sc, "deflating balloon from %u to %u.\n",
			   sc->sc_actual, sc->sc_npages);
		viomb_deflate(sc);
	}

	sc->sc_sens[0].value = sc->sc_npages << PAGE_SHIFT;
	sc->sc_sens[1].value = sc->sc_actual << PAGE_SHIFT;

	splx(s);
}

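/*
 * Inflate: allocate up to PGS_PER_REQ physical pages within the DMA
 * constraint, record their VIRTIO_PAGE_SIZE frame numbers in bl_pages
 * and hand the array to the host on the inflate virtqueue.  The pages
 * are moved onto sc_balloon_pages once the request completes.
 */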
void
viomb_inflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];
	u_int32_t nvpages;
	int slot, error, i = 0;

	nvpages = sc->sc_npages - sc->sc_actual;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;

	if ((error = uvm_pglistalloc(nvpages * PAGE_SIZE, 0,
				     dma_constraint.ucr_high,
				     0, 0, &b->bl_pglist, nvpages,
				     UVM_PLA_NOWAIT))) {
		printf("%s: unable to allocate %u physmem pages, "
		    "error %d\n", DEVNAME(sc), nvpages, error);
		return;
	}

	b->bl_nentries = nvpages;
	TAILQ_FOREACH(p, &b->bl_pglist, pageq)
		b->bl_pages[i++] = p->phys_addr / VIRTIO_PAGE_SIZE;

	KASSERT(i == nvpages);

	if ((virtio_enqueue_prep(vq, &slot)) > 0) {
		printf("%s:virtio_enqueue_prep() vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s:virtio_enqueue_reserve vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages, BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
			 sizeof(u_int32_t) * nvpages, VRING_READ);
	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
	uvm_pglistfree(&b->bl_pglist);
	return;
}

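/*
 * Deflate: pull pages off sc_balloon_pages, pass their frame numbers to
 * the host on the deflate virtqueue and give them back to UVM.  If
 * MUST_TELL_HOST was negotiated, the pages are only freed from the
 * deflate interrupt handler, after the host has seen the request.
 */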
void
viomb_deflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];
	u_int64_t nvpages;
	int i, slot;

	nvpages = sc->sc_actual - sc->sc_npages;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;
	b->bl_nentries = nvpages;

	TAILQ_INIT(&b->bl_pglist);
	for (i = 0; i < nvpages; i++) {
		p = TAILQ_FIRST(&sc->sc_balloon_pages);
		if (p == NULL) {
			b->bl_nentries = i - 1;
			break;
		}
		TAILQ_REMOVE(&sc->sc_balloon_pages, p, pageq);
		TAILQ_INSERT_TAIL(&b->bl_pglist, p, pageq);
		b->bl_pages[i] = p->phys_addr / VIRTIO_PAGE_SIZE;
	}

	if (virtio_enqueue_prep(vq, &slot)) {
		printf("%s:virtio_get_slot(def) vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s:virtio_enqueue_reserve() vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
		    sizeof(u_int32_t) * nvpages,
		    BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
			 sizeof(u_int32_t) * nvpages, VRING_READ);

	if (!virtio_has_feature(vsc, VIRTIO_BALLOON_F_MUST_TELL_HOST))
		uvm_pglistfree(&b->bl_pglist);
	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
	TAILQ_CONCAT(&sc->sc_balloon_pages, &b->bl_pglist, pageq);
	return;
}

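/*
 * Read the host's balloon target (NUM_PAGES) and the current balloon
 * size (ACTUAL) from device config space into the softc.
 */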
void
viomb_read_config(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	u_int32_t reg;

	/* these values are explicitly specified as little-endian */
	reg = virtio_read_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_NUM_PAGES);
	sc->sc_npages = letoh32(reg);
	reg = virtio_read_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL);
	sc->sc_actual = letoh32(reg);
	VIOMBDEBUG(sc, "sc->sc_npages %u, sc->sc_actual %u\n",
		   sc->sc_npages, sc->sc_actual);
}

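/*
 * Dequeue the completed request from a virtqueue and release its slot.
 */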
int
viomb_vq_dequeue(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	int r, slot;

	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r != 0) {
		printf("%s: dequeue failed, errno %d\n", DEVNAME(sc), r);
		return(r);
	}
	virtio_dequeue_commit(vq, slot);
	return(0);
}

/*
 * interrupt handling for vq's
 */
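/*
 * Inflate completion: the host has consumed the page frame numbers, so
 * move the pages onto sc_balloon_pages, bump the ACTUAL register and
 * reschedule the worker if the target has not been reached yet.
 */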
int
viomb_inflate_intr(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	struct balloon_req *b;
	u_int64_t nvpages;

	if (viomb_vq_dequeue(vq))
		return(1);

	b = &sc->sc_req;
	nvpages = b->bl_nentries;
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages,
			BUS_DMASYNC_POSTWRITE);
	TAILQ_CONCAT(&sc->sc_balloon_pages, &b->bl_pglist, pageq);
	VIOMBDEBUG(sc, "updating sc->sc_actual from %u to %llu\n",
		   sc->sc_actual, sc->sc_actual + nvpages);
	virtio_write_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL,
				     sc->sc_actual + nvpages);
	viomb_read_config(sc);

	/* if we have more work to do, add it to the task list */
	if (sc->sc_npages > sc->sc_actual)
		task_add(sc->sc_taskq, &sc->sc_task);

	return (1);
}

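/*
 * Deflate completion: with MUST_TELL_HOST negotiated, the pages may only
 * be freed now that the host has acknowledged the request.  Lower the
 * ACTUAL register and reschedule the worker if more pages need to be
 * returned.
 */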
int
viomb_deflate_intr(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	struct balloon_req *b;
	u_int64_t nvpages;

	if (viomb_vq_dequeue(vq))
		return(1);

	b = &sc->sc_req;
	nvpages = b->bl_nentries;
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages,
			BUS_DMASYNC_POSTWRITE);

	if (virtio_has_feature(vsc, VIRTIO_BALLOON_F_MUST_TELL_HOST))
		uvm_pglistfree(&b->bl_pglist);

	VIOMBDEBUG(sc, "updating sc->sc_actual from %u to %llu\n",
		sc->sc_actual, sc->sc_actual - nvpages);
	virtio_write_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL,
				     sc->sc_actual - nvpages);
	viomb_read_config(sc);

	/* if we have more work to do, add it to the task list */
	if (sc->sc_npages < sc->sc_actual)
		task_add(sc->sc_taskq, &sc->sc_task);

	return(1);
}