xref: /openbsd/sys/arch/powerpc64/dev/phb.c (revision 09467b48)
1 /*	$OpenBSD: phb.c,v 1.12 2020/07/14 20:40:48 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/device.h>
21 #include <sys/extent.h>
22 
23 #include <machine/bus.h>
24 #include <machine/fdt.h>
25 #include <machine/opal.h>
26 
27 #include <dev/pci/pcidevs.h>
28 #include <dev/pci/pcireg.h>
29 #include <dev/pci/pcivar.h>
30 
31 #include <dev/ofw/openfirm.h>
32 #include <dev/ofw/fdt.h>
33 
34 extern paddr_t physmax;		/* machdep.c */
35 
36 #define IODA_TVE_SELECT		(1ULL << 59)
37 
/*
 * One address translation parsed from the FDT "ranges" property:
 * a window of PCI bus addresses mapped to CPU physical addresses.
 */
struct phb_range {
	uint32_t		flags;		/* OFW phys.hi word; bits 24-25 select the address space */
	uint64_t		pci_base;	/* start of the window in PCI address space */
	uint64_t		phys_base;	/* corresponding CPU physical address */
	uint64_t		size;		/* window size in bytes */
};
44 
struct phb_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;		/* parent bus space tag */
	bus_dma_tag_t		sc_dmat;	/* parent DMA tag */

	int			sc_node;	/* FDT node of this bridge */
	int			sc_acells;	/* #address-cells of this node */
	int			sc_scells;	/* #size-cells of this node */
	int			sc_pacells;	/* parent #address-cells */
	int			sc_pscells;	/* parent #size-cells */
	struct phb_range	*sc_ranges;	/* parsed "ranges" translations */
	int			sc_nranges;

	sc_phb_id is the OPAL handle identifying this host bridge.
	uint64_t		sc_phb_id;
	uint64_t		sc_pe_number;	/* single PE used for all devices */
	uint32_t		sc_msi_ranges[2]; /* base interrupt number, count */
	uint32_t		sc_xive;	/* next MSI XIVE to hand out */

	struct bus_space	sc_bus_iot;	/* I/O space tag handed to children */
	struct bus_space	sc_bus_memt;	/* mem space tag handed to children */
	struct machine_bus_dma_tag sc_bus_dmat;	/* DMA tag handed to children */

	struct ppc64_pci_chipset sc_pc;
	struct extent		*sc_busex;	/* PCI bus numbers */
	struct extent		*sc_memex;	/* PCI memory space */
	struct extent		*sc_ioex;	/* PCI I/O space */
	int			sc_bus;		/* first bus number below us */
};
73 
/* Autoconf glue. */
int	phb_match(struct device *, void *, void *);
void	phb_attach(struct device *, struct device *, void *);

struct cfattach	phb_ca = {
	sizeof (struct phb_softc), phb_match, phb_attach
};

struct cfdriver phb_cd = {
	NULL, "phb", DV_DULL
};

/* pci_chipset_tag_t methods. */
void	phb_attach_hook(struct device *, struct device *,
	    struct pcibus_attach_args *);
int	phb_bus_maxdevs(void *, int);
pcitag_t phb_make_tag(void *, int, int, int);
void	phb_decompose_tag(void *, pcitag_t, int *, int *, int *);
int	phb_conf_size(void *, pcitag_t);
pcireg_t phb_conf_read(void *, pcitag_t, int);
void	phb_conf_write(void *, pcitag_t, int, pcireg_t);

/* Interrupt handling (INTx via the FDT interrupt map, MSI/MSI-X via OPAL). */
int	phb_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
const char *phb_intr_string(void *, pci_intr_handle_t);
void	*phb_intr_establish(void *, pci_intr_handle_t, int,
	    int (*)(void *), void *, char *);
void	phb_intr_disestablish(void *, void *);

/* Bus space / bus DMA overrides that apply this bridge's translations. */
int	phb_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
int	phb_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
int	phb_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, paddr_t *, int *, int);
int	phb_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
108 
109 int
110 phb_match(struct device *parent, void *match, void *aux)
111 {
112 	struct fdt_attach_args *faa = aux;
113 
114 	return OF_is_compatible(faa->fa_node, "ibm,ioda3-phb");
115 }
116 
/*
 * Attach a host bridge: reset the IODA tables, map everything below
 * the bridge into a single PE, enable DMA bypass, parse the FDT
 * "ranges" translations, program and enable the 64-bit mmio windows,
 * set up bus space/DMA tags and extents, and attach the PCI bus.
 */
void
phb_attach(struct device *parent, struct device *self, void *aux)
{
	struct phb_softc *sc = (struct phb_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct pcibus_attach_args pba;
	uint32_t bus_range[2];
	uint32_t *ranges;
	uint32_t m64window[6];
	uint32_t m64ranges[2];
	int i, j, nranges, rangeslen;
	uint32_t window;
	uint32_t chip_id;
	int64_t error;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;
	/* The PHB ID is the handle passed to every OPAL call below. */
	sc->sc_phb_id = OF_getpropint64(sc->sc_node, "ibm,opal-phbid", 0);
	sc->sc_pe_number = 0;

	if (OF_getproplen(sc->sc_node, "ibm,chip-id") == sizeof(chip_id)) {
		chip_id = OF_getpropint(sc->sc_node, "ibm,chip-id", 0);
		printf(": chip 0x%x", chip_id);
	}

	/*
	 * Reset the IODA tables.  Should clear any gunk left behind
	 * by Linux.
	 */
	error = opal_pci_reset(sc->sc_phb_id, OPAL_RESET_PCI_IODA_TABLE,
	    OPAL_ASSERT_RESET);
	if (error != OPAL_SUCCESS) {
		printf(": can't reset IODA table\n");
		return;
	}

	/*
	 * Keep things simple and use a single PE for everything below
	 * this host bridge.
	 */
	error = opal_pci_set_pe(sc->sc_phb_id, sc->sc_pe_number, 0,
	    OPAL_IGNORE_RID_BUS_NUMBER, OPAL_IGNORE_RID_DEVICE_NUMBER,
	    OPAL_IGNORE_RID_FUNCTION_NUMBER, OPAL_MAP_PE);
	if (error != OPAL_SUCCESS) {
		printf(": can't map PHB PE\n");
		return;
	}

	/*
	 * Enable bypass mode: devices DMA straight to physical memory
	 * (up to physmax) through the TVE-selected window, so segment
	 * addresses only need the IODA_TVE_SELECT bit set.
	 */
	error = opal_pci_map_pe_dma_window_real(sc->sc_phb_id,
	    sc->sc_pe_number, (sc->sc_pe_number << 1) | 1,
	    IODA_TVE_SELECT, physmax);
	if (error != OPAL_SUCCESS) {
		printf(": can't enable DMA bypass\n");
		return;
	}

	/*
	 * Parse address ranges such that we can do the appropriate
	 * address translations.
	 */

	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
	    faa->fa_acells);
	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
	    faa->fa_scells);
	sc->sc_pacells = faa->fa_acells;
	sc->sc_pscells = faa->fa_scells;

	/* Each entry is (child address, parent address, size) cells. */
	rangeslen = OF_getproplen(sc->sc_node, "ranges");
	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
	     (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
	     sc->sc_pacells + sc->sc_scells)) {
		printf(": invalid ranges property\n");
		return;
	}

	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges,
	    rangeslen);

	/*
	 * Reserve an extra slot here and make sure it is filled
	 * with zeroes.
	 */
	nranges = (rangeslen / sizeof(uint32_t)) /
	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
	sc->sc_ranges = mallocarray(nranges + 1,
	    sizeof(struct phb_range), M_DEVBUF, M_ZERO | M_WAITOK);
	sc->sc_nranges = nranges + 1;

	/*
	 * The first child address cell is the phys.hi flags word; the
	 * remaining one or two cells form the PCI address proper.
	 */
	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
		sc->sc_ranges[i].flags = ranges[j++];
		sc->sc_ranges[i].pci_base = ranges[j++];
		if (sc->sc_acells - 1 == 2) {
			sc->sc_ranges[i].pci_base <<= 32;
			sc->sc_ranges[i].pci_base |= ranges[j++];
		}
		sc->sc_ranges[i].phys_base = ranges[j++];
		if (sc->sc_pacells == 2) {
			sc->sc_ranges[i].phys_base <<= 32;
			sc->sc_ranges[i].phys_base |= ranges[j++];
		}
		sc->sc_ranges[i].size = ranges[j++];
		if (sc->sc_scells == 2) {
			sc->sc_ranges[i].size <<= 32;
			sc->sc_ranges[i].size |= ranges[j++];
		}
	}

	free(ranges, M_TEMP, rangeslen);

	/*
	 * IBM has chosen a non-standard way to encode 64-bit mmio
	 * ranges.  Stick the information into the slot we reserved
	 * above.
	 */
	if (OF_getpropintarray(sc->sc_node, "ibm,opal-m64-window",
	    m64window, sizeof(m64window)) == sizeof(m64window)) {
		/* 0x03000000 marks a 64-bit memory space range. */
		sc->sc_ranges[sc->sc_nranges - 1].flags = 0x03000000;
		sc->sc_ranges[sc->sc_nranges - 1].pci_base =
		    (uint64_t)m64window[0] << 32 | m64window[1];
		sc->sc_ranges[sc->sc_nranges - 1].phys_base =
		    (uint64_t)m64window[2] << 32 | m64window[3];
		sc->sc_ranges[sc->sc_nranges - 1].size =
		    (uint64_t)m64window[4] << 32 | m64window[5];
	}

	/*
	 * Enable all the 64-bit mmio windows we found.
	 */
	m64ranges[0] = 1; m64ranges[1] = 0;	/* defaults if property is absent */
	OF_getpropintarray(sc->sc_node, "ibm,opal-available-m64-ranges",
	    m64ranges, sizeof(m64ranges));
	window = m64ranges[0];
	for (i = 0; i < sc->sc_nranges; i++) {
		/* Skip non-64-bit ranges. */
		if ((sc->sc_ranges[i].flags & 0x03000000) != 0x03000000)
			continue;

		/* Bail if we're out of 64-bit mmio windows. */
		if (window > m64ranges[1]) {
			printf(": no 64-bit mmio window available\n");
			return;
		}

		error = opal_pci_set_phb_mem_window(sc->sc_phb_id,
		    OPAL_M64_WINDOW_TYPE, window, sc->sc_ranges[i].phys_base,
		    sc->sc_ranges[i].pci_base, sc->sc_ranges[i].size);
		if (error != OPAL_SUCCESS) {
			printf(": can't set 64-bit mmio window\n");
			return;
		}
		error = opal_pci_phb_mmio_enable(sc->sc_phb_id,
		    OPAL_M64_WINDOW_TYPE, window, OPAL_ENABLE_M64_SPLIT);
		if (error != OPAL_SUCCESS) {
			printf(": can't enable 64-bit mmio window\n");
			return;
		}

		window++;
	}

	/* MSI interrupt numbers handed out by phb_intr_establish(). */
	OF_getpropintarray(sc->sc_node, "ibm,opal-msi-ranges",
	    sc->sc_msi_ranges, sizeof(sc->sc_msi_ranges));

	/* Create extents for our address spaces. */
	sc->sc_busex = extent_create("pcibus", 0, 255,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_FILLED);
	sc->sc_memex = extent_create("pcimem", 0, (u_long)-1,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_FILLED);
	sc->sc_ioex = extent_create("pciio", 0, 0xffffffff,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_FILLED);

	/* Set up bus range; fall back to the full 0-255 range. */
	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
	    sizeof(bus_range)) != sizeof(bus_range) ||
	    bus_range[0] >= 256 || bus_range[1] >= 256) {
		bus_range[0] = 0;
		bus_range[1] = 255;
	}
	sc->sc_bus = bus_range[0];
	/* Extents start EX_FILLED; freeing makes the region allocatable. */
	extent_free(sc->sc_busex, bus_range[0],
	    bus_range[1] - bus_range[0] + 1, EX_WAITOK);

	/* Set up mmio ranges. */
	for (i = 0; i < sc->sc_nranges; i++) {
		if ((sc->sc_ranges[i].flags & 0x02000000) != 0x02000000)
			continue;

		extent_free(sc->sc_memex, sc->sc_ranges[i].pci_base,
		    sc->sc_ranges[i].size, EX_WAITOK);
	}

	printf("\n");

	/*
	 * Clone the parent bus space tags, overriding the map functions
	 * with ones that apply our PCI-to-physical translations, and
	 * forcing little-endian accessors for PCI space.
	 */
	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
	sc->sc_bus_iot.bus_private = sc;
	sc->sc_bus_iot._space_map = phb_bs_iomap;
	sc->sc_bus_iot._space_read_2 = little_space_read_2;
	sc->sc_bus_iot._space_read_4 = little_space_read_4;
	sc->sc_bus_iot._space_read_8 = little_space_read_8;
	sc->sc_bus_iot._space_write_2 = little_space_write_2;
	sc->sc_bus_iot._space_write_4 = little_space_write_4;
	sc->sc_bus_iot._space_write_8 = little_space_write_8;
	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
	sc->sc_bus_memt.bus_private = sc;
	sc->sc_bus_memt._space_map = phb_bs_memmap;
	sc->sc_bus_memt._space_read_2 = little_space_read_2;
	sc->sc_bus_memt._space_read_4 = little_space_read_4;
	sc->sc_bus_memt._space_read_8 = little_space_read_8;
	sc->sc_bus_memt._space_write_2 = little_space_write_2;
	sc->sc_bus_memt._space_write_4 = little_space_write_4;
	sc->sc_bus_memt._space_write_8 = little_space_write_8;

	/* Clone the DMA tag, tagging segments for the bypass window. */
	memcpy(&sc->sc_bus_dmat, sc->sc_dmat, sizeof(sc->sc_bus_dmat));
	sc->sc_bus_dmat._cookie = sc;
	sc->sc_bus_dmat._dmamap_load_buffer = phb_dmamap_load_buffer;
	sc->sc_bus_dmat._dmamap_load_raw = phb_dmamap_load_raw;

	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = phb_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = phb_bus_maxdevs;
	sc->sc_pc.pc_make_tag = phb_make_tag;
	sc->sc_pc.pc_decompose_tag = phb_decompose_tag;
	sc->sc_pc.pc_conf_size = phb_conf_size;
	sc->sc_pc.pc_conf_read = phb_conf_read;
	sc->sc_pc.pc_conf_write = phb_conf_write;

	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = phb_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = phb_intr_string;
	sc->sc_pc.pc_intr_establish = phb_intr_establish;
	sc->sc_pc.pc_intr_disestablish = phb_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = &sc->sc_bus_iot;
	pba.pba_memt = &sc->sc_bus_memt;
	pba.pba_dmat = &sc->sc_bus_dmat;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_busex = sc->sc_busex;
	pba.pba_memex = sc->sc_memex;
	pba.pba_ioex = sc->sc_ioex;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;
	pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;

	config_found(self, &pba, NULL);
}
375 
/*
 * PCI bus attach hook; nothing to do for this bridge.
 */
void
phb_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}
381 
382 int
383 phb_bus_maxdevs(void *v, int bus)
384 {
385 	struct phb_softc *sc = v;
386 
387 	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
388 		return 1;
389 	return 32;
390 }
391 
392 pcitag_t
393 phb_make_tag(void *v, int bus, int device, int function)
394 {
395 	/* Return OPAL bus_dev_func. */
396 	return ((bus << 8) | (device << 3) | (function << 0));
397 }
398 
399 void
400 phb_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
401 {
402 	if (bp != NULL)
403 		*bp = (tag >> 8) & 0xff;
404 	if (dp != NULL)
405 		*dp = (tag >> 3) & 0x1f;
406 	if (fp != NULL)
407 		*fp = (tag >> 0) & 0x7;
408 }
409 
/*
 * All devices below this bridge expose the full PCIe extended
 * configuration space.
 */
int
phb_conf_size(void *v, pcitag_t tag)
{
	return PCIE_CONFIG_SPACE_SIZE;
}
415 
416 pcireg_t
417 phb_conf_read(void *v, pcitag_t tag, int reg)
418 {
419 	struct phb_softc *sc = v;
420 	int64_t error;
421 	uint32_t data;
422 	uint16_t pci_error_state;
423 	uint8_t freeze_state;
424 
425 	error = opal_pci_config_read_word(sc->sc_phb_id,
426 	    tag, reg, opal_phys(&data));
427 	if (error == OPAL_SUCCESS && data != 0xffffffff)
428 		return data;
429 
430 	/*
431 	 * Probing hardware that isn't there may ut the host bridge in
432 	 * an error state.  Clear the error.
433 	 */
434 	error = opal_pci_eeh_freeze_status(sc->sc_phb_id, sc->sc_pe_number,
435 	    opal_phys(&freeze_state), opal_phys(&pci_error_state), NULL);
436 	if (freeze_state)
437 		opal_pci_eeh_freeze_clear(sc->sc_phb_id, sc->sc_pe_number,
438 		    OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
439 
440 	return 0xffffffff;
441 }
442 
443 void
444 phb_conf_write(void *v, pcitag_t tag, int reg, pcireg_t data)
445 {
446 	struct phb_softc *sc = v;
447 
448 	opal_pci_config_write_word(sc->sc_phb_id, tag, reg, data);
449 }
450 
451 int
452 phb_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
453 {
454 	int pin = pa->pa_rawintrpin;
455 
456 	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
457 		return -1;
458 
459 	if (pa->pa_tag == 0)
460 		return -1;
461 
462 	ihp->ih_pc = pa->pa_pc;
463 	ihp->ih_tag = pa->pa_intrtag;
464 	ihp->ih_intrpin = pa->pa_intrpin;
465 	ihp->ih_type = PCI_INTX;
466 
467 	return 0;
468 }
469 
470 const char *
471 phb_intr_string(void *v, pci_intr_handle_t ih)
472 {
473 	switch (ih.ih_type) {
474 	case PCI_MSI32:
475 	case PCI_MSI64:
476 		return "msi";
477 	case PCI_MSIX:
478 		return "msix";
479 	}
480 
481 	return "intx";
482 }
483 
/*
 * Establish an interrupt handler.  MSI/MSI-X vectors are allocated
 * from this bridge's XIVE range through OPAL; INTx interrupts are
 * routed through the FDT interrupt map on the bridge's child node.
 * Returns an interrupt cookie or NULL on failure.
 */
void *
phb_intr_establish(void *v, pci_intr_handle_t ih, int level,
    int (*func)(void *), void *arg, char *name)
{
	struct phb_softc *sc = v;
	void *cookie = NULL;

	KASSERT(ih.ih_type != PCI_NONE);

	if (ih.ih_type != PCI_INTX) {
		uint32_t addr32, data;
		uint64_t addr;
		uint32_t xive;
		int64_t error;

		/* Out of MSI vectors? */
		if (sc->sc_xive >= sc->sc_msi_ranges[1])
			return NULL;

		/*
		 * Allocate an MSI.  NOTE(review): sc_xive is never
		 * given back on the failure paths below, so a failed
		 * establish leaks one vector.
		 */
		xive = sc->sc_xive++;

		/* Bind the XIVE to our single PE. */
		error = opal_pci_set_xive_pe(sc->sc_phb_id,
		    sc->sc_pe_number, xive);
		if (error != OPAL_SUCCESS)
			return NULL;

		/* Ask OPAL for the doorbell address/data pair. */
		if (ih.ih_type == PCI_MSI32) {
			error = opal_get_msi_32(sc->sc_phb_id, 0, xive,
			    1, opal_phys(&addr32), opal_phys(&data));
			addr = addr32;
		} else {
			error = opal_get_msi_64(sc->sc_phb_id, 0, xive,
			    1, opal_phys(&addr), opal_phys(&data));
		}
		if (error != OPAL_SUCCESS)
			return NULL;

		/* Hook the handler before enabling the device's MSI. */
		cookie = intr_establish(sc->sc_msi_ranges[0] + xive,
		    IST_EDGE, level, func, arg, name);
		if (cookie == NULL)
			return NULL;

		if (ih.ih_type == PCI_MSIX) {
			pci_msix_enable(ih.ih_pc, ih.ih_tag,
			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
		} else
			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
	} else {
		int bus, dev, fn;
		uint32_t reg[4];
		int node;

		phb_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);

		/* Build the interrupt-map unit address: phys.hi + pin. */
		reg[0] = bus << 16 | dev << 11 | fn << 8;
		reg[1] = reg[2] = 0;
		reg[3] = ih.ih_intrpin;

		/* Host bridge child node holds the interrupt map. */
		node = OF_child(sc->sc_node);
		if (node == 0)
			return NULL;

		cookie = fdt_intr_establish_imap(node, reg, sizeof(reg),
		    level, func, arg, name);
	}

	return cookie;
}
553 
/*
 * Tear down an interrupt handler.  Not implemented; established
 * interrupts (and their MSI vectors) are never released.
 */
void
phb_intr_disestablish(void *v, void *cookie)
{
}
558 
559 int
560 phb_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
561     int flags, bus_space_handle_t *bshp)
562 {
563 	struct phb_softc *sc = t->bus_private;
564 	int i;
565 
566 	for (i = 0; i < sc->sc_nranges; i++) {
567 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
568 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
569 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
570 
571 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
572 		    addr >= pci_start && addr + size <= pci_end) {
573 			return bus_space_map(sc->sc_iot,
574 			    addr - pci_start + phys_start, size, flags, bshp);
575 		}
576 	}
577 
578 	return ENXIO;
579 }
580 
581 int
582 phb_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
583     int flags, bus_space_handle_t *bshp)
584 {
585 	struct phb_softc *sc = t->bus_private;
586 	int i;
587 
588 	for (i = 0; i < sc->sc_nranges; i++) {
589 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
590 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
591 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
592 
593 		if ((sc->sc_ranges[i].flags & 0x02000000) == 0x02000000 &&
594 		    addr >= pci_start && addr + size <= pci_end) {
595 			return bus_space_map(sc->sc_iot,
596 			    addr - pci_start + phys_start, size, flags, bshp);
597 		}
598 	}
599 
600 	return ENXIO;
601 }
602 
603 int
604 phb_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
605     bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
606     int *segp, int first)
607 {
608 	struct phb_softc *sc = t->_cookie;
609 	int seg, firstseg = *segp;
610 	int error;
611 
612 	error = sc->sc_dmat->_dmamap_load_buffer(sc->sc_dmat, map, buf, buflen,
613 	    p, flags, lastaddrp, segp, first);
614 	if (error)
615 		return error;
616 
617 	/* For each segment. */
618 	for (seg = firstseg; seg <= *segp; seg++)
619 		map->dm_segs[seg].ds_addr |= IODA_TVE_SELECT;
620 
621 	return 0;
622 }
623 
624 int
625 phb_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
626     bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
627 {
628 	struct phb_softc *sc = t->_cookie;
629 	int seg, error;
630 
631 	error = sc->sc_dmat->_dmamap_load_raw(sc->sc_dmat, map,
632 	     segs, nsegs, size, flags);
633 	if (error)
634 		return error;
635 
636 	/* For each segment. */
637 	for (seg = 0; seg < nsegs; seg++)
638 		map->dm_segs[seg].ds_addr |= IODA_TVE_SELECT;
639 
640 	return 0;
641 }
642