xref: /openbsd/sys/arch/hppa/dev/astro.c (revision 3b9d585e)
1 /*	$OpenBSD: astro.c,v 1.19 2024/04/13 23:44:11 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Mark Kettenis
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/extent.h>
23 #include <sys/malloc.h>
24 #include <sys/mutex.h>
25 #include <sys/reboot.h>
26 #include <sys/tree.h>
27 
28 #include <uvm/uvm_extern.h>
29 
30 #include <machine/iomod.h>
31 #include <machine/autoconf.h>
32 
33 #include <hppa/dev/cpudevs.h>
34 
/*
 * Astro IOA register layout.  The resv[] pads encode absolute register
 * offsets; e.g. resv1[0x0300 - 0x0010] places lmmio_direct0_base at 0x0300.
 */
struct astro_regs {
	u_int32_t	rid;		/* 0x0000: revision id (see attach) */
	u_int32_t	pad0000;
	u_int32_t	ioc_ctrl;	/* 0x0008: IOC control, ASTRO_IOC_CTRL_* */
	u_int32_t	pad0008;
	u_int8_t	resv1[0x0300 - 0x0010];
	/* 0x0300: LMMIO direct ranges 0-3 (base/mask/route triplets). */
	u_int64_t	lmmio_direct0_base;
	u_int64_t	lmmio_direct0_mask;
	u_int64_t	lmmio_direct0_route;
	u_int64_t	lmmio_direct1_base;
	u_int64_t	lmmio_direct1_mask;
	u_int64_t	lmmio_direct1_route;
	u_int64_t	lmmio_direct2_base;
	u_int64_t	lmmio_direct2_mask;
	u_int64_t	lmmio_direct2_route;
	u_int64_t	lmmio_direct3_base;
	u_int64_t	lmmio_direct3_mask;
	u_int64_t	lmmio_direct3_route;
	/* 0x0360: distributed LMMIO/GMMIO/IOS ranges. */
	u_int64_t	lmmio_dist_base;
	u_int64_t	lmmio_dist_mask;
	u_int64_t	lmmio_dist_route;
	u_int64_t	gmmio_dist_base;
	u_int64_t	gmmio_dist_mask;
	u_int64_t	gmmio_dist_route;
	u_int64_t	ios_dist_base;
	u_int64_t	ios_dist_mask;
	u_int64_t	ios_dist_route;
	u_int8_t	resv2[0x03c0 - 0x03a8];
	u_int64_t	ios_direct_base;	/* 0x03c0 */
	u_int64_t	ios_direct_mask;
	u_int64_t	ios_direct_route;
	u_int8_t	resv3[0x22000 - 0x03d8];
	u_int64_t	func_id;		/* 0x22000 */
	u_int64_t	func_class;
	u_int8_t	resv4[0x22040 - 0x22010];
	u_int64_t	rope_config;		/* 0x22040 */
	u_int8_t	resv5[0x22050 - 0x22048];
	u_int64_t	rope_debug;		/* 0x22050 */
	u_int8_t	resv6[0x22200 - 0x22058];
	/* 0x22200: per-rope control registers. */
	u_int64_t	rope0_control;
	u_int64_t	rope1_control;
	u_int64_t	rope2_control;
	u_int64_t	rope3_control;
	u_int64_t	rope4_control;
	u_int64_t	rope5_control;
	u_int64_t	rope6_control;
	u_int64_t	rope7_control;
	u_int8_t	resv7[0x22300 - 0x22240];
	/* 0x22300: I/O TLB control; see iommu setup in astro_attach(). */
	u_int32_t	tlb_ibase;	/* iova base / enable bit */
	u_int32_t	pad22300;
	u_int32_t	tlb_imask;	/* iova window mask */
	u_int32_t	pad22308;
	u_int32_t	tlb_pcom;	/* purge command */
	u_int32_t	pad22310;
	u_int32_t	tlb_tcnfg;	/* page size configuration */
	u_int32_t	pad22318;
	u_int64_t	tlb_pdir_base;	/* physical addr of I/O page directory */
};
93 
/* Bits in the IOC control register (struct astro_regs, ioc_ctrl). */
#define ASTRO_IOC_CTRL_TE	0x0001	/* TOC Enable */
#define ASTRO_IOC_CTRL_CE	0x0002	/* Coalesce Enable */
#define ASTRO_IOC_CTRL_DE	0x0004	/* Dillon Enable */
#define ASTRO_IOC_CTRL_IE	0x0008	/* IOS Enable */
#define ASTRO_IOC_CTRL_OS	0x0010	/* Outbound Synchronous */
#define ASTRO_IOC_CTRL_IS	0x0020	/* Inbound Synchronous */
#define ASTRO_IOC_CTRL_RC	0x0040	/* Read Current Enable */
#define ASTRO_IOC_CTRL_L0	0x0080	/* 0-length Read Enable */
#define ASTRO_IOC_CTRL_RM	0x0100	/* Real Mode */
#define ASTRO_IOC_CTRL_NC	0x0200	/* Non-coherent Mode */
#define ASTRO_IOC_CTRL_ID	0x0400	/* Interrupt Disable */
#define ASTRO_IOC_CTRL_D4	0x0800	/* Disable 4-byte Coalescing */
#define ASTRO_IOC_CTRL_CC	0x1000	/* Increase Coalescing counter value */
#define ASTRO_IOC_CTRL_DD	0x2000	/* Disable distr. range coalescing */
#define ASTRO_IOC_CTRL_DC	0x4000	/* Disable the coalescing counter */

/* Fields of an I/O TLB entry (IOTTE) in the page directory (sc_pdir). */
#define IOTTE_V		0x8000000000000000LL	/* Entry valid */
#define IOTTE_PAMASK	0x000000fffffff000LL	/* Physical page frame bits */
#define IOTTE_CI	0x00000000000000ffLL	/* Coherent index */
113 
struct astro_softc {
	struct device sc_dv;

	bus_dma_tag_t sc_dmat;			/* parent DMA tag */
	struct astro_regs volatile *sc_regs;	/* chip registers */
	u_int64_t *sc_pdir;			/* I/O page directory (IOTTEs) */

	char sc_dvmamapname[20];		/* extent name, "<dev>_dvma" */
	struct extent *sc_dvmamap;		/* DVMA address space allocator */
	struct mutex sc_dvmamtx;		/* protects sc_dvmamap */

	struct hppa_bus_dma_tag sc_dmatag;	/* DMA tag handed to children */
};
127 
128 /*
129  * per-map DVMA page table
130  */
131 struct iommu_page_entry {
132 	SPLAY_ENTRY(iommu_page_entry) ipe_node;
133 	paddr_t	ipe_pa;
134 	vaddr_t	ipe_va;
135 	bus_addr_t ipe_dva;
136 };
137 
struct iommu_page_map {
	SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;
	int ipm_maxpage;	/* Size of allocated page map */
	int ipm_pagecnt;	/* Number of entries in use */
	struct iommu_page_entry	ipm_map[1];	/* grows past end of struct */
};
144 
145 /*
146  * per-map IOMMU state
147  */
148 struct iommu_map_state {
149 	struct astro_softc *ims_sc;
150 	bus_addr_t ims_dvmastart;
151 	bus_size_t ims_dvmasize;
152 	struct extent_region ims_er;
153 	struct iommu_page_map ims_map;	/* map must be last (array at end) */
154 };
155 
int	astro_match(struct device *, void *, void *);
void	astro_attach(struct device *, struct device *, void *);

/* Autoconf glue. */
const struct cfattach astro_ca = {
	sizeof(struct astro_softc), astro_match, astro_attach
};

struct cfdriver astro_cd = {
	NULL, "astro", DV_DULL
};

/* bus_dma(9) methods implemented on top of the Astro IOMMU. */
int	iommu_dvmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t,
	    int, bus_dmamap_t *);
void	iommu_dvmamap_destroy(void *, bus_dmamap_t);
int	iommu_dvmamap_load(void *, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	iommu_iomap_load_map(struct astro_softc *, bus_dmamap_t, int);
int	iommu_dvmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
int	iommu_dvmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
int	iommu_dvmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, int);
void	iommu_dvmamap_unload(void *, bus_dmamap_t);
void	iommu_dvmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int	iommu_dvmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
	    bus_dma_segment_t *, int, int *, int);
void	iommu_dvmamem_free(void *, bus_dma_segment_t *, int);
int	iommu_dvmamem_map(void *, bus_dma_segment_t *, int, size_t,
	    caddr_t *, int);
void	iommu_dvmamem_unmap(void *, caddr_t, size_t);
paddr_t	iommu_dvmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);

/* IOMMU page-table maintenance. */
void	iommu_enter(struct astro_softc *, bus_addr_t, paddr_t, vaddr_t, int);
void	iommu_remove(struct astro_softc *, bus_addr_t);

/* Per-map iomap (page collection) helpers. */
struct iommu_map_state *iommu_iomap_create(int);
void	iommu_iomap_destroy(struct iommu_map_state *);
int	iommu_iomap_insert_page(struct iommu_map_state *, vaddr_t, paddr_t);
bus_addr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t);
void	iommu_iomap_clear_pages(struct iommu_map_state *);

/* DMA tag template; _cookie is filled in with the softc at attach time. */
const struct hppa_bus_dma_tag astro_dmat = {
	NULL,
	iommu_dvmamap_create, iommu_dvmamap_destroy,
	iommu_dvmamap_load, iommu_dvmamap_load_mbuf,
	iommu_dvmamap_load_uio, iommu_dvmamap_load_raw,
	iommu_dvmamap_unload, iommu_dvmamap_sync,

	iommu_dvmamem_alloc, iommu_dvmamem_free, iommu_dvmamem_map,
	iommu_dvmamem_unmap, iommu_dvmamem_mmap
};
206 
207 int
astro_match(struct device * parent,void * cfdata,void * aux)208 astro_match(struct device *parent, void *cfdata, void *aux)
209 {
210 	struct confargs *ca = aux;
211 
212 	/* Astro is a U-Turn variant. */
213 	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
214 	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
215 		return 0;
216 
217 	if (ca->ca_type.iodc_model == 0x58 &&
218 	    ca->ca_type.iodc_revision >= 0x20)
219 		return 1;
220 
221 	return 0;
222 }
223 
224 void
astro_attach(struct device * parent,struct device * self,void * aux)225 astro_attach(struct device *parent, struct device *self, void *aux)
226 {
227 	struct confargs *ca = aux, nca;
228 	struct astro_softc *sc = (struct astro_softc *)self;
229 	volatile struct astro_regs *r;
230 	bus_space_handle_t ioh;
231 	u_int32_t rid, ioc_ctrl;
232 	psize_t size;
233 	vaddr_t va;
234 	paddr_t pa;
235 	struct vm_page *m;
236 	struct pglist mlist;
237 	int iova_bits;
238 
239 	sc->sc_dmat = ca->ca_dmatag;
240 	if (bus_space_map(ca->ca_iot, ca->ca_hpa, sizeof(struct astro_regs),
241 	    0, &ioh)) {
242 		printf(": can't map IO space\n");
243 		return;
244 	}
245 	sc->sc_regs = r = (struct astro_regs *)ca->ca_hpa;
246 
247 	rid = letoh32(r->rid);
248 	printf(": Astro rev %d.%d\n", (rid & 7) + 1, (rid >> 3) & 3);
249 
250 	ioc_ctrl = letoh32(r->ioc_ctrl);
251 	ioc_ctrl &= ~ASTRO_IOC_CTRL_CE;
252 	ioc_ctrl &= ~ASTRO_IOC_CTRL_RM;
253 	ioc_ctrl &= ~ASTRO_IOC_CTRL_NC;
254 	r->ioc_ctrl = htole32(ioc_ctrl);
255 
256 	/*
257 	 * Setup the iommu.
258 	 */
259 
260 	/* XXX This gives us 256MB of iova space. */
261 	iova_bits = 28;
262 
263 	r->tlb_ibase = htole32(0);
264 	r->tlb_imask = htole32(0xffffffff << iova_bits);
265 
266 	/* Page size is 4K. */
267 	r->tlb_tcnfg = htole32(0);
268 
269 	/* Flush TLB. */
270 	r->tlb_pcom = htole32(31);
271 
272 	/*
273 	 * Allocate memory for I/O pagetables.  They need to be physically
274 	 * contiguous.
275 	 */
276 
277 	size = (1 << (iova_bits - PAGE_SHIFT)) * sizeof(u_int64_t);
278 	TAILQ_INIT(&mlist);
279 	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &mlist,
280 	    1, UVM_PLA_NOWAIT) != 0)
281 		panic("astrottach: no memory");
282 
283 	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, &kd_nowait);
284 	if (va == 0)
285 		panic("astroattach: no memory");
286 	sc->sc_pdir = (u_int64_t *)va;
287 
288 	m = TAILQ_FIRST(&mlist);
289 	r->tlb_pdir_base = htole64(VM_PAGE_TO_PHYS(m));
290 
291 	/* Map the pages. */
292 	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
293 		pa = VM_PAGE_TO_PHYS(m);
294 		pmap_enter(pmap_kernel(), va, pa,
295 		    PROT_READ | PROT_WRITE, PMAP_WIRED);
296 		va += PAGE_SIZE;
297 	}
298 	pmap_update(pmap_kernel());
299 	memset(sc->sc_pdir, 0, size);
300 
301 	/*
302 	 * The PDC might have set up some devices to do DMA.  It will do
303 	 * this for the onboard USB controller if an USB keyboard is used
304 	 * for console input.  In that case, bad things will happen if we
305 	 * enable iova space.  So reset the PDC devices before we do that.
306 	 * Don't do this if we're using a serial console though, since it
307 	 * will stop working if we do.  This is fine since the serial port
308 	 * doesn't do DMA.
309 	 */
310 	if (PAGE0->mem_cons.pz_class != PCL_DUPLEX)
311 		pdc_call((iodcio_t)pdc, 0, PDC_IO, PDC_IO_RESET_DEVICES);
312 
313 	/* Enable iova space. */
314 	r->tlb_ibase = htole32(1);
315 
316         /*
317          * Now all the hardware's working we need to allocate a dvma map.
318          */
319 	snprintf(sc->sc_dvmamapname, sizeof(sc->sc_dvmamapname),
320 	    "%s_dvma", sc->sc_dv.dv_xname);
321 	sc->sc_dvmamap = extent_create(sc->sc_dvmamapname, 0, (1 << iova_bits),
322 	    M_DEVBUF, NULL, 0, EX_NOWAIT | EX_NOCOALESCE);
323 	KASSERT(sc->sc_dvmamap);
324 	mtx_init(&sc->sc_dvmamtx, IPL_HIGH);
325 
326 	sc->sc_dmatag = astro_dmat;
327 	sc->sc_dmatag._cookie = sc;
328 
329 	nca = *ca;	/* clone from us */
330 	nca.ca_hpamask = HPPA_IOBEGIN;
331 	nca.ca_dmatag = &sc->sc_dmatag;
332 	pdc_scanbus(self, &nca, MAXMODBUS, 0, 0);
333 }
334 
335 int
iommu_dvmamap_create(void * v,bus_size_t size,int nsegments,bus_size_t maxsegsz,bus_size_t boundary,int flags,bus_dmamap_t * dmamap)336 iommu_dvmamap_create(void *v, bus_size_t size, int nsegments,
337     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
338 {
339 	struct astro_softc *sc = v;
340 	bus_dmamap_t map;
341 	struct iommu_map_state *ims;
342 	int error;
343 
344 	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
345 	    boundary, flags, &map);
346 	if (error)
347 		return (error);
348 
349 	ims = iommu_iomap_create(atop(round_page(size)));
350 	if (ims == NULL) {
351 		bus_dmamap_destroy(sc->sc_dmat, map);
352 		return (ENOMEM);
353 	}
354 
355 	ims->ims_sc = sc;
356 	map->_dm_cookie = ims;
357 	*dmamap = map;
358 
359 	return (0);
360 }
361 
362 void
iommu_dvmamap_destroy(void * v,bus_dmamap_t map)363 iommu_dvmamap_destroy(void *v, bus_dmamap_t map)
364 {
365 	struct astro_softc *sc = v;
366 
367 	/*
368 	 * The specification (man page) requires a loaded
369 	 * map to be unloaded before it is destroyed.
370 	 */
371 	if (map->dm_nsegs)
372 		iommu_dvmamap_unload(sc, map);
373 
374         if (map->_dm_cookie)
375                 iommu_iomap_destroy(map->_dm_cookie);
376 	map->_dm_cookie = NULL;
377 
378 	bus_dmamap_destroy(sc->sc_dmat, map);
379 }
380 
381 int
iommu_iomap_load_map(struct astro_softc * sc,bus_dmamap_t map,int flags)382 iommu_iomap_load_map(struct astro_softc *sc, bus_dmamap_t map, int flags)
383 {
384 	struct iommu_map_state *ims = map->_dm_cookie;
385 	struct iommu_page_map *ipm = &ims->ims_map;
386 	struct iommu_page_entry *e;
387 	int err, seg;
388 	paddr_t pa, paend;
389 	vaddr_t va;
390 	bus_size_t sgsize;
391 	bus_size_t align, boundary;
392 	u_long dvmaddr;
393 	bus_addr_t dva;
394 	int i;
395 
396 	/* XXX */
397 	boundary = map->_dm_boundary;
398 	align = PAGE_SIZE;
399 
400 	iommu_iomap_clear_pages(ims);
401 
402 	for (seg = 0; seg < map->dm_nsegs; seg++) {
403 		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
404 
405 		paend = round_page(ds->ds_addr + ds->ds_len);
406 		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
407 		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
408 			err = iommu_iomap_insert_page(ims, va, pa);
409 			if (err) {
410 				printf("iomap insert error: %d for "
411 				    "va 0x%lx pa 0x%lx\n", err, va, pa);
412 				bus_dmamap_unload(sc->sc_dmat, map);
413 				iommu_iomap_clear_pages(ims);
414 			}
415 		}
416 	}
417 
418 	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
419 	mtx_enter(&sc->sc_dvmamtx);
420 	err = extent_alloc_with_descr(sc->sc_dvmamap, sgsize, align, 0,
421 	    boundary, EX_NOWAIT | EX_BOUNDZERO, &ims->ims_er, &dvmaddr);
422 	mtx_leave(&sc->sc_dvmamtx);
423 	if (err)
424 		return (err);
425 
426 	ims->ims_dvmastart = dvmaddr;
427 	ims->ims_dvmasize = sgsize;
428 
429 	dva = dvmaddr;
430 	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
431 		e->ipe_dva = dva;
432 		iommu_enter(sc, e->ipe_dva, e->ipe_pa, e->ipe_va, flags);
433 		dva += PAGE_SIZE;
434 	}
435 
436 	for (seg = 0; seg < map->dm_nsegs; seg++) {
437 		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
438 		ds->ds_addr = iommu_iomap_translate(ims, ds->ds_addr);
439 	}
440 
441 	return (0);
442 }
443 
444 int
iommu_dvmamap_load(void * v,bus_dmamap_t map,void * addr,bus_size_t size,struct proc * p,int flags)445 iommu_dvmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
446     struct proc *p, int flags)
447 {
448 	struct astro_softc *sc = v;
449 	int err;
450 
451 	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
452 	if (err)
453 		return (err);
454 
455 	return iommu_iomap_load_map(sc, map, flags);
456 }
457 
458 int
iommu_dvmamap_load_mbuf(void * v,bus_dmamap_t map,struct mbuf * m,int flags)459 iommu_dvmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
460 {
461 	struct astro_softc *sc = v;
462 	int err;
463 
464 	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
465 	if (err)
466 		return (err);
467 
468 	return iommu_iomap_load_map(sc, map, flags);
469 }
470 
int
iommu_dvmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct astro_softc *sc = v;

	/*
	 * NOTE(review): unlike load/load_mbuf, this path does NOT call
	 * iommu_iomap_load_map(), and the printf looks like a debugging
	 * placeholder -- presumably this path is unused or unfinished;
	 * confirm before relying on it.
	 */
	printf("load_uio\n");

	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
}
480 
int
iommu_dvmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct astro_softc *sc = v;

	/*
	 * NOTE(review): like load_uio above, no IOMMU mapping is set up
	 * here and the printf looks like a debugging placeholder --
	 * presumably unused or unfinished; confirm before relying on it.
	 */
	printf("load_raw\n");

	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
}
491 
492 void
iommu_dvmamap_unload(void * v,bus_dmamap_t map)493 iommu_dvmamap_unload(void *v, bus_dmamap_t map)
494 {
495 	struct astro_softc *sc = v;
496 	struct iommu_map_state *ims = map->_dm_cookie;
497 	struct iommu_page_map *ipm = &ims->ims_map;
498 	struct iommu_page_entry *e;
499 	int err, i;
500 
501 	/* Remove the IOMMU entries. */
502 	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e)
503 		iommu_remove(sc, e->ipe_dva);
504 
505 	/* Clear the iomap. */
506 	iommu_iomap_clear_pages(ims);
507 
508 	bus_dmamap_unload(sc->sc_dmat, map);
509 
510 	mtx_enter(&sc->sc_dvmamtx);
511 	err = extent_free(sc->sc_dvmamap, ims->ims_dvmastart,
512 	    ims->ims_dvmasize, EX_NOWAIT);
513 	ims->ims_dvmastart = 0;
514 	ims->ims_dvmasize = 0;
515 	mtx_leave(&sc->sc_dvmamtx);
516 	if (err)
517 		printf("warning: %ld of DVMA space lost\n", ims->ims_dvmasize);
518 }
519 
/*
 * bus_dmamap_sync(9) hook: intentionally empty.
 */
void
iommu_dvmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
    bus_size_t len, int ops)
{
	/* Nothing to do; DMA is cache-coherent. */
}
526 
527 int
iommu_dvmamem_alloc(void * v,bus_size_t size,bus_size_t alignment,bus_size_t boundary,bus_dma_segment_t * segs,int nsegs,int * rsegs,int flags)528 iommu_dvmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
529     bus_size_t boundary, bus_dma_segment_t *segs,
530     int nsegs, int *rsegs, int flags)
531 {
532 	struct astro_softc *sc = v;
533 
534 	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
535 	    segs, nsegs, rsegs, flags));
536 }
537 
538 void
iommu_dvmamem_free(void * v,bus_dma_segment_t * segs,int nsegs)539 iommu_dvmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
540 {
541 	struct astro_softc *sc = v;
542 
543 	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
544 }
545 
546 int
iommu_dvmamem_map(void * v,bus_dma_segment_t * segs,int nsegs,size_t size,caddr_t * kvap,int flags)547 iommu_dvmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
548     caddr_t *kvap, int flags)
549 {
550 	struct astro_softc *sc = v;
551 
552 	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
553 }
554 
555 void
iommu_dvmamem_unmap(void * v,caddr_t kva,size_t size)556 iommu_dvmamem_unmap(void *v, caddr_t kva, size_t size)
557 {
558 	struct astro_softc *sc = v;
559 
560 	bus_dmamem_unmap(sc->sc_dmat, kva, size);
561 }
562 
563 paddr_t
iommu_dvmamem_mmap(void * v,bus_dma_segment_t * segs,int nsegs,off_t off,int prot,int flags)564 iommu_dvmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
565     int prot, int flags)
566 {
567 	struct astro_softc *sc = v;
568 
569 	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
570 }
571 
572 /*
573  * Utility function used by splay tree to order page entries by pa.
574  */
575 static inline int
iomap_compare(struct iommu_page_entry * a,struct iommu_page_entry * b)576 iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
577 {
578 	return ((a->ipe_pa > b->ipe_pa) ? 1 :
579 		(a->ipe_pa < b->ipe_pa) ? -1 : 0);
580 }
581 
/* Splay tree of page entries, keyed on the physical address. */
SPLAY_PROTOTYPE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);

SPLAY_GENERATE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
585 
586 /*
587  * Create a new iomap.
588  */
589 struct iommu_map_state *
iommu_iomap_create(int n)590 iommu_iomap_create(int n)
591 {
592 	struct iommu_map_state *ims;
593 
594 	/* Safety for heavily fragmented data, such as mbufs */
595 	n += 4;
596 	if (n < 16)
597 		n = 16;
598 
599 	ims = malloc(sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]),
600 	    M_DEVBUF, M_NOWAIT | M_ZERO);
601 	if (ims == NULL)
602 		return (NULL);
603 
604 	/* Initialize the map. */
605 	ims->ims_map.ipm_maxpage = n;
606 	SPLAY_INIT(&ims->ims_map.ipm_tree);
607 
608 	return (ims);
609 }
610 
611 /*
612  * Destroy an iomap.
613  */
614 void
iommu_iomap_destroy(struct iommu_map_state * ims)615 iommu_iomap_destroy(struct iommu_map_state *ims)
616 {
617 #ifdef DIAGNOSTIC
618 	if (ims->ims_map.ipm_pagecnt > 0)
619 		printf("iommu_iomap_destroy: %d page entries in use\n",
620 		    ims->ims_map.ipm_pagecnt);
621 #endif
622 
623 	free(ims, M_DEVBUF, 0);
624 }
625 
626 /*
627  * Insert a pa entry in the iomap.
628  */
629 int
iommu_iomap_insert_page(struct iommu_map_state * ims,vaddr_t va,paddr_t pa)630 iommu_iomap_insert_page(struct iommu_map_state *ims, vaddr_t va, paddr_t pa)
631 {
632 	struct iommu_page_map *ipm = &ims->ims_map;
633 	struct iommu_page_entry *e;
634 
635 	if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
636 		struct iommu_page_entry ipe;
637 
638 		ipe.ipe_pa = pa;
639 		if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &ipe))
640 			return (0);
641 
642 		return (ENOMEM);
643 	}
644 
645 	e = &ipm->ipm_map[ipm->ipm_pagecnt];
646 
647 	e->ipe_pa = pa;
648 	e->ipe_va = va;
649 	e->ipe_dva = 0;
650 
651 	e = SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e);
652 
653 	/* Duplicates are okay, but only count them once. */
654 	if (e)
655 		return (0);
656 
657 	++ipm->ipm_pagecnt;
658 
659 	return (0);
660 }
661 
662 /*
663  * Translate a physical address (pa) into a DVMA address.
664  */
665 bus_addr_t
iommu_iomap_translate(struct iommu_map_state * ims,paddr_t pa)666 iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
667 {
668 	struct iommu_page_map *ipm = &ims->ims_map;
669 	struct iommu_page_entry *e;
670 	struct iommu_page_entry pe;
671 	paddr_t offset = pa & PAGE_MASK;
672 
673 	pe.ipe_pa = trunc_page(pa);
674 
675 	e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);
676 
677 	if (e == NULL) {
678 		panic("couldn't find pa %lx", pa);
679 		return 0;
680 	}
681 
682 	return (e->ipe_dva | offset);
683 }
684 
685 /*
686  * Clear the iomap table and tree.
687  */
688 void
iommu_iomap_clear_pages(struct iommu_map_state * ims)689 iommu_iomap_clear_pages(struct iommu_map_state *ims)
690 {
691         ims->ims_map.ipm_pagecnt = 0;
692         SPLAY_INIT(&ims->ims_map.ipm_tree);
693 }
694 
695 /*
696  * Add an entry to the IOMMU table.
697  */
698 void
iommu_enter(struct astro_softc * sc,bus_addr_t dva,paddr_t pa,vaddr_t va,int flags)699 iommu_enter(struct astro_softc *sc, bus_addr_t dva, paddr_t pa, vaddr_t va,
700     int flags)
701 {
702 	volatile u_int64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
703 	u_int64_t tte;
704 	u_int32_t ci;
705 
706 #ifdef DEBUG
707 	printf("iommu_enter dva %lx, pa %lx, va %lx\n", dva, pa, va);
708 #endif
709 
710 #ifdef DIAGNOSTIC
711 	tte = letoh64(*tte_ptr);
712 
713 	if (tte & IOTTE_V) {
714 		printf("Overwriting valid tte entry (dva %lx pa %lx "
715 		    "&tte %p tte %llx)\n", dva, pa, tte_ptr, tte);
716 		extent_print(sc->sc_dvmamap);
717 		panic("IOMMU overwrite");
718 	}
719 #endif
720 
721 	mtsp(HPPA_SID_KERNEL, 1);
722 	__asm volatile("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (va));
723 
724 	tte = (pa & IOTTE_PAMASK) | ((ci >> 12) & IOTTE_CI);
725 	tte |= IOTTE_V;
726 
727 	*tte_ptr = htole64(tte);
728 	__asm volatile("fdc 0(%%sr1, %0)\n\tsync" : : "r" (tte_ptr));
729 }
730 
731 /*
732  * Remove an entry from the IOMMU table.
733  */
734 void
iommu_remove(struct astro_softc * sc,bus_addr_t dva)735 iommu_remove(struct astro_softc *sc, bus_addr_t dva)
736 {
737 	volatile struct astro_regs *r = sc->sc_regs;
738 	u_int64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
739 	u_int64_t tte;
740 
741 #ifdef DIAGNOSTIC
742 	if (dva != trunc_page(dva)) {
743 		printf("iommu_remove: unaligned dva: %lx\n", dva);
744 		dva = trunc_page(dva);
745 	}
746 #endif
747 
748 	tte = letoh64(*tte_ptr);
749 
750 #ifdef DIAGNOSTIC
751 	if ((tte & IOTTE_V) == 0) {
752 		printf("Removing invalid tte entry (dva %lx &tte %p "
753 		    "tte %llx)\n", dva, tte_ptr, tte);
754 		extent_print(sc->sc_dvmamap);
755 		panic("IOMMU remove overwrite");
756 	}
757 #endif
758 
759 	*tte_ptr = htole64(tte & ~IOTTE_V);
760 
761 	/* Flush IOMMU. */
762 	r->tlb_pcom = htole32(dva | PAGE_SHIFT);
763 }
764