xref: /dragonfly/sys/dev/drm/linux_iomapping.c (revision 52cb6762)
1 /*
2  * Copyright (c) 2014-2016 François Tigeot
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <machine/pmap.h>
28 #include <vm/pmap.h>
29 #include <vm/vm.h>
30 
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/bug.h>
34 #include <asm/page.h>
35 #include <asm/io.h>
36 
/* List of all active iomappings created by __ioremap_common(), consulted and pruned by iounmap() */
SLIST_HEAD(iomap_list_head, iomap) iomap_list = SLIST_HEAD_INITIALIZER(iomap_list);
38 
39 void __iomem *
40 __ioremap_common(unsigned long phys_addr, unsigned long size, int cache_mode)
41 {
42 	struct iomap *imp;
43 
44 	/* Ensure mappings are page-aligned */
45 	BUG_ON(phys_addr & PAGE_MASK);
46 	BUG_ON(size & PAGE_MASK);
47 
48 	imp = kmalloc(sizeof(struct iomap), M_DRM, M_WAITOK);
49 	imp->paddr = phys_addr;
50 	imp->npages = size / PAGE_SIZE;
51 	imp->pmap_addr = pmap_mapdev_attr(phys_addr, size, cache_mode);
52 	SLIST_INSERT_HEAD(&iomap_list, imp, im_iomaps);
53 
54 	return imp->pmap_addr;
55 }
56 
57 void iounmap(void __iomem *ptr)
58 {
59 	struct iomap *imp, *tmp_imp;
60 	int found = 0;
61 	int indx;
62 	vm_paddr_t paddr_end;
63 
64 	SLIST_FOREACH_MUTABLE(imp, &iomap_list, im_iomaps, tmp_imp) {
65 		if (imp->pmap_addr == ptr) {
66 			found = 1;
67 			break;
68 		}
69 	}
70 
71 	if (!found) {
72 		kprintf("iounmap: invalid address %p\n", ptr);
73 		return;
74 	}
75 
76 	paddr_end = imp->paddr + (imp->npages * PAGE_SIZE) - 1;
77 	/* Is this address space range backed by regular memory ? */
78 	for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
79 		vm_paddr_t range_start = phys_avail[indx];
80 		vm_paddr_t size = phys_avail[indx + 1] - phys_avail[indx];
81 		vm_paddr_t range_end = range_start + size - 1;
82 
83 		if ((imp->paddr >= range_start) && (paddr_end <= range_end)) {
84 			/* Yes, change page caching attributes */
85 			pmap_change_attr(imp->paddr, imp->npages, PAT_WRITE_BACK);
86 			break;
87 		}
88 
89 	}
90 
91 	pmap_unmapdev((vm_offset_t)imp->pmap_addr, imp->npages * PAGE_SIZE);
92 
93 	SLIST_REMOVE(&iomap_list, imp, iomap, im_iomaps);
94 	kfree(imp);
95 }
96