/* xref: /freebsd/sys/powerpc/pseries/plpar_iommu.c (revision acf9bb33) */
/*-
 * Copyright (c) 2013, Nathan Whitehorn <nwhitehorn@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/vmem.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>

#include <powerpc/pseries/phyp-hvcall.h>
#include <powerpc/pseries/plpar_iommu.h>

MALLOC_DEFINE(M_PHYPIOMMU, "iommu", "IOMMU data for PAPR LPARs");

struct papr_iommu_map {
	uint32_t iobn;
	vmem_t *vmem;
	struct papr_iommu_map *next;
};

static SLIST_HEAD(iommu_maps, iommu_map) iommu_map_head =
    SLIST_HEAD_INITIALIZER(iommu_map_head);
static int papr_supports_stuff_tce = -1;

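/*
 * Per-window state shared by all devices behind the same DMA window: the
 * logical I/O bus number (iobn) passed to the TCE hypercalls and a vmem
 * arena that hands out device-visible bus addresses within that window.
 */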
struct iommu_map {
	uint32_t iobn;
	vmem_t *vmem;

	SLIST_ENTRY(iommu_map) entries;
};

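/*
 * Cookie attached to a busdma tag by phyp_iommu_set_dma_tag(): the bus
 * address bounds of the device's DMA window plus a pointer to the shared
 * per-window map above.
 */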
struct dma_window {
	struct iommu_map *map;
	bus_addr_t start;
	bus_addr_t end;
};

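/*
 * Attach IOMMU (TCE) translation information to a device's busdma tag.
 * Walk up the OF device tree from the device until a node carrying an
 * "ibm,my-dma-window" or "ibm,dma-window" property is found, decode the
 * window's I/O bus number, base, and size, look up (or create) the shared
 * map for that window, probe once for H_STUFF_TCE support, and hand the
 * resulting dma_window to bus_dma_tag_set_iommu().
 *
 * Rough usage sketch (hypothetical caller, not part of this file): a
 * pseries bus driver would typically call
 *	phyp_iommu_set_dma_tag(bus, child, bus_get_dma_tag(child));
 * when setting up DMA for a child device.
 */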
int
phyp_iommu_set_dma_tag(device_t bus, device_t dev, bus_dma_tag_t tag)
{
	device_t p;
	phandle_t node;
	cell_t dma_acells, dma_scells, dmawindow[5];
	struct iommu_map *i;

	for (p = dev; p != NULL; p = device_get_parent(p)) {
		if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
			break;
		if (ofw_bus_has_prop(p, "ibm,dma-window"))
			break;
	}

	if (p == NULL)
		return (ENXIO);

	node = ofw_bus_get_node(p);
	if (OF_getprop(node, "ibm,#dma-size-cells", &dma_scells,
	    sizeof(cell_t)) <= 0)
		OF_searchprop(node, "#size-cells", &dma_scells, sizeof(cell_t));
	if (OF_getprop(node, "ibm,#dma-address-cells", &dma_acells,
	    sizeof(cell_t)) <= 0)
		OF_searchprop(node, "#address-cells", &dma_acells,
		    sizeof(cell_t));

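	/*
	 * The window property holds one cell with the logical I/O bus
	 * number, followed by dma_acells cells of window base address and
	 * dma_scells cells of window length, decoded below.
	 */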
	if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
		OF_getprop(node, "ibm,my-dma-window", dmawindow,
		    sizeof(cell_t)*(dma_scells + dma_acells + 1));
	else
		OF_getprop(node, "ibm,dma-window", dmawindow,
		    sizeof(cell_t)*(dma_scells + dma_acells + 1));

	struct dma_window *window = malloc(sizeof(struct dma_window),
	    M_PHYPIOMMU, M_WAITOK);
	if (dma_acells == 1)
		window->start = dmawindow[1];
	else
		window->start = ((uint64_t)(dmawindow[1]) << 32) | dmawindow[2];
	if (dma_scells == 1)
		window->end = window->start + dmawindow[dma_acells + 1];
	else
		window->end = window->start +
		    (((uint64_t)(dmawindow[dma_acells + 1]) << 32) |
		    dmawindow[dma_acells + 2]);

	if (bootverbose)
		device_printf(dev, "Mapping IOMMU domain %#x\n", dmawindow[0]);
	window->map = NULL;
	SLIST_FOREACH(i, &iommu_map_head, entries) {
		if (i->iobn == dmawindow[0]) {
			window->map = i;
			break;
		}
	}

	if (window->map == NULL) {
		window->map = malloc(sizeof(struct iommu_map), M_PHYPIOMMU,
		    M_WAITOK);
		window->map->iobn = dmawindow[0];
		/*
		 * Allocate IOMMU range beginning at PAGE_SIZE. Some drivers
		 * (em(4), for example) do not like getting mappings at 0.
		 */
		window->map->vmem = vmem_create("IOMMU mappings", PAGE_SIZE,
		    trunc_page(VMEM_ADDR_MAX) - PAGE_SIZE, PAGE_SIZE, 0,
		    M_BESTFIT | M_NOWAIT);
		SLIST_INSERT_HEAD(&iommu_map_head, window->map, entries);
	}

	/*
	 * Check experimentally whether we can use H_STUFF_TCE. It is required
	 * by the spec but some firmware (e.g. QEMU) does not actually support
	 * it.
	 */
	if (papr_supports_stuff_tce == -1)
		papr_supports_stuff_tce = !(phyp_hcall(H_STUFF_TCE,
		    window->map->iobn, 0, 0, 0) == H_FUNCTION);

	bus_dma_tag_set_iommu(tag, bus, window);

	return (0);
}

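/*
 * Establish IOMMU mappings for a list of DMA segments. For each segment,
 * allocate a page-aligned range of bus addresses from the window's vmem
 * arena (honoring the caller's alignment and boundary constraints), install
 * read/write TCEs for that range (H_STUFF_TCE when supported, otherwise one
 * H_PUT_TCE per page), and rewrite ds_addr to point into the DMA window.
 */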
int
phyp_iommu_map(device_t dev, bus_dma_segment_t *segs, int *nsegs,
    bus_addr_t min, bus_addr_t max, bus_size_t alignment, bus_addr_t boundary,
    void *cookie)
{
	struct dma_window *window = cookie;
	bus_addr_t minaddr, maxaddr;
	bus_addr_t alloced;
	bus_size_t allocsize;
	int error, i, j;
	uint64_t tce;

	minaddr = window->start;
	maxaddr = window->end;

	/* XXX: handle exclusion range in a more useful way */
	if (min < maxaddr)
		maxaddr = min;

	/* XXX: consolidate segs? */
	for (i = 0; i < *nsegs; i++) {
		allocsize = round_page(segs[i].ds_len +
		    (segs[i].ds_addr & PAGE_MASK));
		error = vmem_xalloc(window->map->vmem, allocsize,
		    (alignment < PAGE_SIZE) ? PAGE_SIZE : alignment, 0,
		    boundary, minaddr, maxaddr, M_BESTFIT | M_NOWAIT, &alloced);
		if (error != 0) {
			panic("VMEM failure: %d\n", error);
			return (error);
		}
		KASSERT(alloced % PAGE_SIZE == 0, ("Alloc not page aligned"));
		KASSERT((alloced + (segs[i].ds_addr & PAGE_MASK)) %
		    alignment == 0,
		    ("Allocated segment does not match alignment constraint"));

		tce = trunc_page(segs[i].ds_addr);
		tce |= 0x3; /* read/write */
		if (papr_supports_stuff_tce) {
			error = phyp_hcall(H_STUFF_TCE, window->map->iobn,
			    alloced, tce, allocsize/PAGE_SIZE);
		} else {
			for (j = 0; j < allocsize; j += PAGE_SIZE)
				error = phyp_hcall(H_PUT_TCE, window->map->iobn,
				    alloced + j, tce + j);
		}

		segs[i].ds_addr = alloced + (segs[i].ds_addr & PAGE_MASK);
		KASSERT(segs[i].ds_addr > 0, ("Address needs to be positive"));
		KASSERT(segs[i].ds_addr + segs[i].ds_len < maxaddr,
		    ("Address not in range"));
		if (error < 0) {
			panic("IOMMU mapping error: %d\n", error);
			return (ENOMEM);
		}
	}

	return (0);
}

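/*
 * Tear down the mappings created by phyp_iommu_map(): clear the TCEs
 * covering each segment (H_STUFF_TCE with a zero TCE when supported,
 * otherwise one H_PUT_TCE per page) and return the bus address range to
 * the window's vmem arena.
 */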
int
phyp_iommu_unmap(device_t dev, bus_dma_segment_t *segs, int nsegs, void *cookie)
{
	struct dma_window *window = cookie;
	bus_addr_t pageround;
	bus_size_t roundedsize;
	int i;
	bus_addr_t j;

	for (i = 0; i < nsegs; i++) {
		pageround = trunc_page(segs[i].ds_addr);
		roundedsize = round_page(segs[i].ds_len +
		    (segs[i].ds_addr & PAGE_MASK));

		if (papr_supports_stuff_tce) {
			phyp_hcall(H_STUFF_TCE, window->map->iobn, pageround, 0,
			    roundedsize/PAGE_SIZE);
		} else {
			for (j = 0; j < roundedsize; j += PAGE_SIZE)
				phyp_hcall(H_PUT_TCE, window->map->iobn,
				    pageround + j, 0);
		}

		vmem_xfree(window->map->vmem, pageround, roundedsize);
	}

	return (0);
}