/*	$NetBSD: bus_space.c,v 1.32 2011/01/10 16:59:09 jruoho Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_space.c,v 1.32 2011/01/10 16:59:09 jruoho Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/extent.h>

#include <uvm/uvm_extern.h>

#include <dev/isa/isareg.h>

#include <sys/bus.h>
#include <machine/pio.h>
#include <machine/isa_machdep.h>

#ifdef XEN
#include <xen/hypervisor.h>
#endif

/*
 * Macros for sanity-checking the alignment of addresses passed to
 * bus space ops.  Alignment is not strictly required on the x86, but
 * aligned accesses can perform better, and the checks help catch
 * driver bugs that would surface on stricter architectures.
 */
#ifdef BUS_SPACE_DEBUG
#define	BUS_SPACE_ALIGNED_ADDRESS(p, t)				\
	((((u_long)(p)) & (sizeof(t)-1)) == 0)

#define	BUS_SPACE_ADDRESS_SANITY(p, t, d)				\
({									\
	if (BUS_SPACE_ALIGNED_ADDRESS((p), t) == 0) {			\
		printf("%s 0x%lx not aligned to %zu bytes %s:%d\n",	\
		    d, (u_long)(p), sizeof(t), __FILE__, __LINE__);	\
	}								\
	(void) 0;							\
})
#else
#define	BUS_SPACE_ADDRESS_SANITY(p,t,d)	(void) 0
#endif /* BUS_SPACE_DEBUG */
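
/*
 * Illustration only (hypothetical values): with BUS_SPACE_DEBUG
 * enabled, a misaligned access such as
 *
 *	bus_space_set_multi_2(t, h, 0x03, v, n);	(h page-aligned)
 *
 * trips BUS_SPACE_ADDRESS_SANITY() in the routines below and logs
 * "bus addr 0x...3 not aligned to 2 bytes", since h + 0x03 is not
 * 2-byte aligned.
 */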

/*
 * Extent maps to manage I/O and memory space.  Statically allocate
 * storage for 16 I/O port regions and 64 memory regions up front.
 * Later, ioport_malloc_safe will indicate that it's safe to use
 * malloc() to dynamically allocate region descriptors.
 *
 * N.B. At least two regions are _always_ allocated from the iomem
 * extent map; (0 -> ISA hole) and (end of ISA hole -> end of RAM).
 *
 * The extent maps are not static!  Machine-dependent ISA and EISA
 * routines need access to them for bus address space allocation.
 */
static	long ioport_ex_storage[EXTENT_FIXED_STORAGE_SIZE(16) / sizeof(long)];
static	long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(64) / sizeof(long)];
struct	extent *ioport_ex;
struct	extent *iomem_ex;
static	int ioport_malloc_safe;

static struct bus_space_tag x86_io = { .bst_type = X86_BUS_SPACE_IO };
static struct bus_space_tag x86_mem = { .bst_type = X86_BUS_SPACE_MEM };

bus_space_tag_t x86_bus_space_io = &x86_io;
bus_space_tag_t x86_bus_space_mem = &x86_mem;

int x86_mem_add_mapping(bus_addr_t, bus_size_t,
	    int, bus_space_handle_t *);

static inline bool
x86_bus_space_is_io(bus_space_tag_t t)
{
	return t->bst_type == X86_BUS_SPACE_IO;
}

static inline bool
x86_bus_space_is_mem(bus_space_tag_t t)
{
	return t->bst_type == X86_BUS_SPACE_MEM;
}

void
x86_bus_space_init(void)
{
	/*
	 * Initialize the I/O port and I/O mem extent maps.
	 * Note: we don't have to check the return value since
	 * creation of a fixed extent map will never fail (since
	 * descriptor storage has already been allocated).
	 *
	 * N.B. The iomem extent manages _all_ physical addresses
	 * on the machine.  When the amount of RAM is found, the two
	 * extents of RAM are allocated from the map (0 -> ISA hole
	 * and end of ISA hole -> end of RAM).
	 */
	ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
	    (void *)ioport_ex_storage, sizeof(ioport_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);
	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
	    (void *)iomem_ex_storage, sizeof(iomem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);

#ifdef XEN
	/* We are a privileged guest OS and should have I/O privileges. */
	if (xendomain_is_privileged()) {
		struct physdev_op physop;
		physop.cmd = PHYSDEVOP_SET_IOPL;
		physop.u.set_iopl.iopl = 1;
		if (HYPERVISOR_physdev_op(&physop) != 0)
			panic("Unable to obtain IOPL, "
			    "despite being SIF_PRIVILEGED");
	}
#endif	/* XEN */
}
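
/*
 * Sketch (illustration only; the real code lives in the MD machdep
 * and may differ): once memory has been sized, machine-dependent code
 * is expected to reserve the RAM ranges from iomem_ex so that
 * bus_space_map() cannot hand them out, along the lines of:
 *
 *	extent_alloc_region(iomem_ex, 0, IOM_BEGIN, EX_NOWAIT);
 *	extent_alloc_region(iomem_ex, IOM_END, avail_end - IOM_END,
 *	    EX_NOWAIT);
 *
 * where avail_end stands for the end of RAM.
 */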

void
x86_bus_space_mallocok(void)
{

	ioport_malloc_safe = 1;
}

int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
		int flags, bus_space_handle_t *bshp)
{
	int error;
	struct extent *ex;

	/*
	 * Pick the appropriate extent map.
	 */
	if (x86_bus_space_is_io(t)) {
		if (flags & BUS_SPACE_MAP_LINEAR)
			return (EOPNOTSUPP);
		ex = ioport_ex;
	} else if (x86_bus_space_is_mem(t))
		ex = iomem_ex;
	else
		panic("x86_memio_map: bad bus space tag");

	/*
	 * Before we go any further, let's make sure that this
	 * region is available.
	 */
	error = extent_alloc_region(ex, bpa, size,
	    EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0));
	if (error)
		return (error);

	/*
	 * For I/O space, that's all she wrote.
	 */
	if (x86_bus_space_is_io(t)) {
		*bshp = bpa;
		return (0);
	}

#ifndef XEN
	if (bpa >= IOM_BEGIN && (bpa + size) != 0 && (bpa + size) <= IOM_END) {
		*bshp = (bus_space_handle_t)ISA_HOLE_VADDR(bpa);
		return(0);
	}
#endif	/* !XEN */

	/*
	 * For memory space, map the bus physical address to
	 * a kernel virtual address.
	 */
	error = x86_mem_add_mapping(bpa, size, flags, bshp);
	if (error) {
		if (extent_free(ex, bpa, size, EX_NOWAIT |
		    (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
			printf("x86_memio_map: pa 0x%jx, size 0x%jx\n",
			    (uintmax_t)bpa, (uintmax_t)size);
			printf("x86_memio_map: can't free region\n");
		}
	}

	return (error);
}
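
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * a driver that knows its device sits at "membase" would typically
 * map, access and unmap a register window like this.  membase and
 * the register offsets are made-up values.
 *
 *	bus_space_handle_t memh;
 *	uint32_t sts;
 *
 *	if (bus_space_map(x86_bus_space_mem, membase, 0x1000, 0, &memh))
 *		return;				(region busy or no KVA)
 *	sts = bus_space_read_4(x86_bus_space_mem, memh, 0x04);
 *	bus_space_write_4(x86_bus_space_mem, memh, 0x08, sts | 1);
 *	bus_space_unmap(x86_bus_space_mem, memh, 0x1000);
 */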

int
_x86_memio_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
		int flags, bus_space_handle_t *bshp)
{

	/*
	 * For I/O space, just fill in the handle.
	 */
	if (x86_bus_space_is_io(t)) {
		if (flags & BUS_SPACE_MAP_LINEAR)
			return (EOPNOTSUPP);
		*bshp = bpa;
		return (0);
	}

	/*
	 * For memory space, map the bus physical address to
	 * a kernel virtual address.
	 */
	return x86_mem_add_mapping(bpa, size, flags, bshp);
}

int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
		bus_size_t size, bus_size_t alignment, bus_size_t boundary,
		int flags, bus_addr_t *bpap, bus_space_handle_t *bshp)
{
	struct extent *ex;
	u_long bpa;
	int error;

	/*
	 * Pick the appropriate extent map.
	 */
	if (x86_bus_space_is_io(t)) {
		if (flags & BUS_SPACE_MAP_LINEAR)
			return (EOPNOTSUPP);
		ex = ioport_ex;
	} else if (x86_bus_space_is_mem(t))
		ex = iomem_ex;
	else
		panic("x86_memio_alloc: bad bus space tag");

	/*
	 * Sanity check the allocation against the extent's boundaries.
	 */
	if (rstart < ex->ex_start || rend > ex->ex_end)
		panic("x86_memio_alloc: bad region start/end");

	/*
	 * Do the requested allocation.
	 */
	error = extent_alloc_subregion(ex, rstart, rend, size, alignment,
	    boundary,
	    EX_FAST | EX_NOWAIT | (ioport_malloc_safe ?  EX_MALLOCOK : 0),
	    &bpa);

	if (error)
		return (error);

	/*
	 * For I/O space, that's all she wrote.
	 */
	if (x86_bus_space_is_io(t)) {
		*bshp = *bpap = bpa;
		return (0);
	}

	/*
	 * For memory space, map the bus physical address to
	 * a kernel virtual address.
	 */
	error = x86_mem_add_mapping(bpa, size, flags, bshp);
	if (error) {
		if (extent_free(iomem_ex, bpa, size, EX_NOWAIT |
		    (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
			printf("x86_memio_alloc: pa 0x%jx, size 0x%jx\n",
			    (uintmax_t)bpa, (uintmax_t)size);
			printf("x86_memio_alloc: can't free region\n");
		}
	}

	*bpap = bpa;

	return (error);
}
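
/*
 * Usage sketch (hypothetical, for illustration): letting the extent
 * map pick any free, 32-byte-aligned I/O port range instead of naming
 * a fixed address, and then releasing it with bus_space_free():
 *
 *	bus_addr_t iobase;
 *	bus_space_handle_t ioh;
 *
 *	if (bus_space_alloc(x86_bus_space_io, 0x1000, 0xffff, 0x20,
 *	    0x20, 0, 0, &iobase, &ioh) == 0)
 *		bus_space_free(x86_bus_space_io, ioh, 0x20);
 */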

int
x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size,
		int flags, bus_space_handle_t *bshp)
{
	paddr_t pa, endpa;
	vaddr_t va, sva;
	u_int pmapflags;

	pa = x86_trunc_page(bpa);
	endpa = x86_round_page(bpa + size);

	pmapflags = PMAP_NOCACHE;
	if ((flags & BUS_SPACE_MAP_CACHEABLE) != 0)
		pmapflags = 0;
	else if (flags & BUS_SPACE_MAP_PREFETCHABLE)
		pmapflags = PMAP_WRITE_COMBINE;

#ifdef DIAGNOSTIC
	if (endpa != 0 && endpa <= pa)
		panic("x86_mem_add_mapping: overflow");
#endif

#ifdef XEN
	if (bpa >= IOM_BEGIN && (bpa + size) != 0 && (bpa + size) <= IOM_END) {
		sva = (vaddr_t)ISA_HOLE_VADDR(pa);
	} else
#endif	/* XEN */
	{
		sva = uvm_km_alloc(kernel_map, endpa - pa, 0,
		    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
		if (sva == 0)
			return (ENOMEM);
	}

	*bshp = (bus_space_handle_t)(sva + (bpa & PGOFSET));

	for (va = sva; pa != endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_ma(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
	}
	pmap_update(pmap_kernel());

	return 0;
}

bool
bus_space_is_equal(bus_space_tag_t t1, bus_space_tag_t t2)
{
	if (t1 == NULL || t2 == NULL)
		return false;
	return t1->bst_type == t2->bst_type;
}

/*
 * void _x86_memio_unmap(bus_space_tag_t bst, bus_space_handle_t bsh,
 *                       bus_size_t size, bus_addr_t *adrp)
 *
 *   Unmap memory or I/O space mapped by _x86_memio_map().  This works
 *   much like bus_space_unmap(), but it does not consult the kernel's
 *   built-in extent maps; instead it hands the bus physical address
 *   back through *adrp, for the convenience of an external extent
 *   manager.
 */
void
_x86_memio_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
		bus_size_t size, bus_addr_t *adrp)
{
	u_long va, endva;
	bus_addr_t bpa;

	/*
	 * Find the correct extent and bus physical address.
	 */
	if (x86_bus_space_is_io(t)) {
		bpa = bsh;
	} else if (x86_bus_space_is_mem(t)) {
		if (bsh >= atdevbase && (bsh + size) != 0 &&
		    (bsh + size) <= (atdevbase + IOM_SIZE)) {
			bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
		} else {

			va = x86_trunc_page(bsh);
			endva = x86_round_page(bsh + size);

#ifdef DIAGNOSTIC
			if (endva <= va) {
				panic("_x86_memio_unmap: overflow");
			}
#endif

			if (pmap_extract_ma(pmap_kernel(), va, &bpa) == FALSE) {
				panic("_x86_memio_unmap:"
				    " wrong virtual address");
			}
			bpa += (bsh & PGOFSET);
			pmap_kremove(va, endva - va);
			pmap_update(pmap_kernel());

			/*
			 * Free the kernel virtual mapping.
			 */
			uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY);
		}
	} else {
		panic("_x86_memio_unmap: bad bus space tag");
	}

	if (adrp != NULL) {
		*adrp = bpa;
	}
}

void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	struct extent *ex;
	u_long va, endva;
	bus_addr_t bpa;

	/*
	 * Find the correct extent and bus physical address.
	 */
	if (x86_bus_space_is_io(t)) {
		ex = ioport_ex;
		bpa = bsh;
	} else if (x86_bus_space_is_mem(t)) {
		ex = iomem_ex;

		if (bsh >= atdevbase && (bsh + size) != 0 &&
		    (bsh + size) <= (atdevbase + IOM_SIZE)) {
			bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
			goto ok;
		}

		va = x86_trunc_page(bsh);
		endva = x86_round_page(bsh + size);

#ifdef DIAGNOSTIC
		if (endva <= va)
			panic("x86_memio_unmap: overflow");
#endif

		(void) pmap_extract_ma(pmap_kernel(), va, &bpa);
		bpa += (bsh & PGOFSET);

		pmap_kremove(va, endva - va);
		pmap_update(pmap_kernel());

		/*
		 * Free the kernel virtual mapping.
		 */
		uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY);
	} else
		panic("x86_memio_unmap: bad bus space tag");

ok:
	if (extent_free(ex, bpa, size,
	    EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
		printf("x86_memio_unmap: %s 0x%jx, size 0x%jx\n",
		    x86_bus_space_is_io(t) ? "port" : "pa",
		    (uintmax_t)bpa, (uintmax_t)size);
		printf("x86_memio_unmap: can't free region\n");
	}
}

void
bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{

	/* bus_space_unmap() does all that we need to do. */
	bus_space_unmap(t, bsh, size);
}

int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t bsh,
    bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp)
{

	*nbshp = bsh + offset;
	return (0);
}
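
/*
 * Usage sketch (hypothetical offsets): carving a handle for one
 * function block out of a larger mapping, so the per-block code
 * never has to know its offset within the parent window:
 *
 *	bus_space_handle_t uarth;
 *
 *	if (bus_space_subregion(t, memh, 0x400, 0x100, &uarth) == 0)
 *		bus_space_write_1(t, uarth, 0, 0x01);
 */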

paddr_t
bus_space_mmap(bus_space_tag_t t, bus_addr_t addr, off_t off, int prot,
    int flags)
{

	/* Can't mmap I/O space. */
	if (x86_bus_space_is_io(t))
		return (-1);

	/*
	 * "addr" is the base address of the device we're mapping.
	 * "off" is the offset into that device.
	 *
	 * Note we are called for each "page" in the device that
	 * the upper layers want to map.
	 */
	return (x86_btop(addr + off));
}

void
bus_space_set_multi_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
		      uint8_t v, size_t c)
{
	vaddr_t addr = h + o;

	if (x86_bus_space_is_io(t))
		while (c--)
			outb(addr, v);
	else
		while (c--)
			*(volatile uint8_t *)(addr) = v;
}

void
bus_space_set_multi_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
		      uint16_t v, size_t c)
{
	vaddr_t addr = h + o;

	BUS_SPACE_ADDRESS_SANITY(addr, uint16_t, "bus addr");

	if (x86_bus_space_is_io(t))
		while (c--)
			outw(addr, v);
	else
		while (c--)
			*(volatile uint16_t *)(addr) = v;
}

void
bus_space_set_multi_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
		      uint32_t v, size_t c)
{
	vaddr_t addr = h + o;

	BUS_SPACE_ADDRESS_SANITY(addr, uint32_t, "bus addr");

	if (x86_bus_space_is_io(t))
		while (c--)
			outl(addr, v);
	else
		while (c--)
			*(volatile uint32_t *)(addr) = v;
}

void
bus_space_set_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
		      uint8_t v, size_t c)
{
	vaddr_t addr = h + o;

	if (x86_bus_space_is_io(t))
		for (; c != 0; c--, addr++)
			outb(addr, v);
	else
		for (; c != 0; c--, addr++)
			*(volatile uint8_t *)(addr) = v;
}

void
bus_space_set_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
		       uint16_t v, size_t c)
{
	vaddr_t addr = h + o;

	BUS_SPACE_ADDRESS_SANITY(addr, uint16_t, "bus addr");

	if (x86_bus_space_is_io(t))
		for (; c != 0; c--, addr += 2)
			outw(addr, v);
	else
		for (; c != 0; c--, addr += 2)
			*(volatile uint16_t *)(addr) = v;
}

void
bus_space_set_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
		       uint32_t v, size_t c)
{
	vaddr_t addr = h + o;

	BUS_SPACE_ADDRESS_SANITY(addr, uint32_t, "bus addr");

	if (x86_bus_space_is_io(t))
		for (; c != 0; c--, addr += 4)
			outl(addr, v);
	else
		for (; c != 0; c--, addr += 4)
			*(volatile uint32_t *)(addr) = v;
}
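
/*
 * Usage sketch (hypothetical): the set_multi routines above hammer a
 * single register or FIFO, while the set_region routines write to
 * consecutive addresses, e.g. blanking a mapped frame buffer:
 *
 *	bus_space_set_region_4(t, fb_memh, 0, 0x00000000, 0x10000 / 4);
 *
 * fb_memh is an assumed handle for a previously mapped 64KB window.
 */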

void
bus_space_copy_region_1(bus_space_tag_t t, bus_space_handle_t h1,
			bus_size_t o1, bus_space_handle_t h2,
			bus_size_t o2, size_t c)
{
	vaddr_t addr1 = h1 + o1;
	vaddr_t addr2 = h2 + o2;

	if (x86_bus_space_is_io(t)) {
		if (addr1 >= addr2) {
			/* src after dest: copy forward */
			for (; c != 0; c--, addr1++, addr2++)
				outb(addr2, inb(addr1));
		} else {
			/* dest after src: copy backwards */
			for (addr1 += (c - 1), addr2 += (c - 1);
			    c != 0; c--, addr1--, addr2--)
				outb(addr2, inb(addr1));
		}
	} else {
		if (addr1 >= addr2) {
			/* src after dest: copy forward */
			for (; c != 0; c--, addr1++, addr2++)
				*(volatile uint8_t *)(addr2) =
				    *(volatile uint8_t *)(addr1);
		} else {
			/* dest after src: copy backwards */
			for (addr1 += (c - 1), addr2 += (c - 1);
			    c != 0; c--, addr1--, addr2--)
				*(volatile uint8_t *)(addr2) =
				    *(volatile uint8_t *)(addr1);
		}
	}
}

void
bus_space_copy_region_2(bus_space_tag_t t, bus_space_handle_t h1,
			bus_size_t o1, bus_space_handle_t h2,
			bus_size_t o2, size_t c)
{
	vaddr_t addr1 = h1 + o1;
	vaddr_t addr2 = h2 + o2;

	BUS_SPACE_ADDRESS_SANITY(addr1, uint16_t, "bus addr 1");
	BUS_SPACE_ADDRESS_SANITY(addr2, uint16_t, "bus addr 2");

	if (x86_bus_space_is_io(t)) {
		if (addr1 >= addr2) {
			/* src after dest: copy forward */
			for (; c != 0; c--, addr1 += 2, addr2 += 2)
				outw(addr2, inw(addr1));
		} else {
			/* dest after src: copy backwards */
			for (addr1 += 2 * (c - 1), addr2 += 2 * (c - 1);
			    c != 0; c--, addr1 -= 2, addr2 -= 2)
				outw(addr2, inw(addr1));
		}
	} else {
		if (addr1 >= addr2) {
			/* src after dest: copy forward */
			for (; c != 0; c--, addr1 += 2, addr2 += 2)
				*(volatile uint16_t *)(addr2) =
				    *(volatile uint16_t *)(addr1);
		} else {
			/* dest after src: copy backwards */
			for (addr1 += 2 * (c - 1), addr2 += 2 * (c - 1);
			    c != 0; c--, addr1 -= 2, addr2 -= 2)
				*(volatile uint16_t *)(addr2) =
				    *(volatile uint16_t *)(addr1);
		}
	}
}

void
bus_space_copy_region_4(bus_space_tag_t t, bus_space_handle_t h1,
			bus_size_t o1, bus_space_handle_t h2,
			bus_size_t o2, size_t c)
{
	vaddr_t addr1 = h1 + o1;
	vaddr_t addr2 = h2 + o2;

	BUS_SPACE_ADDRESS_SANITY(addr1, uint32_t, "bus addr 1");
	BUS_SPACE_ADDRESS_SANITY(addr2, uint32_t, "bus addr 2");

	if (x86_bus_space_is_io(t)) {
		if (addr1 >= addr2) {
			/* src after dest: copy forward */
			for (; c != 0; c--, addr1 += 4, addr2 += 4)
				outl(addr2, inl(addr1));
		} else {
			/* dest after src: copy backwards */
			for (addr1 += 4 * (c - 1), addr2 += 4 * (c - 1);
			    c != 0; c--, addr1 -= 4, addr2 -= 4)
				outl(addr2, inl(addr1));
		}
	} else {
		if (addr1 >= addr2) {
			/* src after dest: copy forward */
			for (; c != 0; c--, addr1 += 4, addr2 += 4)
				*(volatile uint32_t *)(addr2) =
				    *(volatile uint32_t *)(addr1);
		} else {
			/* dest after src: copy backwards */
			for (addr1 += 4 * (c - 1), addr2 += 4 * (c - 1);
			    c != 0; c--, addr1 -= 4, addr2 -= 4)
				*(volatile uint32_t *)(addr2) =
				    *(volatile uint32_t *)(addr1);
		}
	}
}
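
/*
 * Usage sketch (hypothetical): the copy_region routines handle
 * overlapping source and destination within one mapping, e.g.
 * scrolling an 80x25 text display up by one row (crth is an assumed
 * handle for the mapped character buffer):
 *
 *	bus_space_copy_region_2(t, crth, 80 * 2, crth, 0, (25 - 1) * 80);
 */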

void
bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t bsh,
		  bus_size_t offset, bus_size_t len, int flags)
{

	/* Function call is enough to prevent reordering of loads. */
}
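
/*
 * Usage sketch (hypothetical register offsets): portable drivers
 * still place barriers between ordering-sensitive accesses; on x86
 * the call itself acts as a compiler barrier and nothing more is
 * needed:
 *
 *	bus_space_write_4(t, h, FOO_CMD, cmd);
 *	bus_space_barrier(t, h, FOO_CMD, 4,
 *	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 *	sts = bus_space_read_4(t, h, FOO_STS);
 */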

void *
bus_space_vaddr(bus_space_tag_t tag, bus_space_handle_t bsh)
{

	return x86_bus_space_is_mem(tag) ? (void *)bsh : NULL;
}