/*-
 * Copyright (c) 2013 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/* Routines for mapping device memory. */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <machine/vmparam.h>

static const struct devmap_entry *devmap_table;
static boolean_t devmap_bootstrap_done = false;

/*
 * The allocated-kva (akva) devmap table and metadata.  Platforms can call
 * devmap_add_entry() to add static device mappings to this table using
 * automatically allocated virtual addresses carved out of the top of kva space.
 * Allocation begins immediately below the max kernel virtual address.
 */
#define	AKVA_DEVMAP_MAX_ENTRIES	32
static struct devmap_entry	akva_devmap_entries[AKVA_DEVMAP_MAX_ENTRIES];
static u_int			akva_devmap_idx;
static vm_offset_t		akva_devmap_vaddr = DEVMAP_MAX_VADDR;

#if defined(__aarch64__) || defined(__riscv)
extern int early_boot;
#endif

/*
 * Print the contents of the static mapping table using the provided printf-like
 * output function (which will be either printf or db_printf).
 */
static void
devmap_dump_table(int (*prfunc)(const char *, ...))
{
	const struct devmap_entry *pd;

	if (devmap_table == NULL || devmap_table[0].pd_size == 0) {
		prfunc("No static device mappings.\n");
		return;
	}

	prfunc("Static device mappings:\n");
	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		prfunc("  0x%08jx - 0x%08jx mapped at VA 0x%08jx\n",
		    (uintmax_t)pd->pd_pa,
		    (uintmax_t)(pd->pd_pa + pd->pd_size - 1),
		    (uintmax_t)pd->pd_va);
	}
}

/*
 * Print the contents of the static mapping table.  Used for bootverbose.
 */
void
devmap_print_table(void)
{
	devmap_dump_table(printf);
}

/*
 * Return the "last" kva address used by the registered devmap table.  It's
 * actually the lowest address used by the static mappings, i.e., the address of
 * the first unusable byte of KVA.
 */
vm_offset_t
devmap_lastaddr(void)
{
	const struct devmap_entry *pd;
	vm_offset_t lowaddr;

	if (akva_devmap_idx > 0)
		return (akva_devmap_vaddr);

	lowaddr = DEVMAP_MAX_VADDR;
	for (pd = devmap_table; pd != NULL && pd->pd_size != 0; ++pd) {
		if (lowaddr > pd->pd_va)
			lowaddr = pd->pd_va;
	}

	return (lowaddr);
}

/*
 * Add an entry to the internal "akva" static devmap table using the given
 * physical address and size and a virtual address allocated from the top of
 * kva.  The akva table is registered automatically on the first call, so all a
 * platform has to do is call this routine to install as many mappings as it
 * needs; when the platform-specific init function calls devmap_bootstrap(), it
 * picks up all the entries in the akva table automatically.
 */
void
devmap_add_entry(vm_paddr_t pa, vm_size_t sz)
{
	struct devmap_entry *m;

	if (devmap_bootstrap_done)
		panic("devmap_add_entry() after devmap_bootstrap()");

	if (akva_devmap_idx == (AKVA_DEVMAP_MAX_ENTRIES - 1))
		panic("AKVA_DEVMAP_MAX_ENTRIES is too small");

	if (akva_devmap_idx == 0)
		devmap_register_table(akva_devmap_entries);

	/* Allocate virtual address space from the top of kva downwards. */
#ifdef __arm__
	/*
	 * If the range being mapped is aligned and sized to 1MB boundaries then
	 * also align the virtual address to the next-lower 1MB boundary so that
	 * we end up with a nice efficient section mapping.
	 */
	if ((pa & 0x000fffff) == 0 && (sz & 0x000fffff) == 0) {
		akva_devmap_vaddr = trunc_1mpage(akva_devmap_vaddr - sz);
	} else
#endif
	{
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - sz);
	}
	m = &akva_devmap_entries[akva_devmap_idx++];
	m->pd_va    = akva_devmap_vaddr;
	m->pd_pa    = pa;
	m->pd_size  = sz;
}
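
/*
 * Usage sketch (illustration only; the addresses and sizes below are
 * hypothetical, not taken from any real SoC): platform early-MD code calls
 * devmap_add_entry() once per device window, and the mappings take effect
 * when the platform-specific init function later calls devmap_bootstrap():
 *
 *	devmap_add_entry(0x44e09000, 0x1000);	(hypothetical UART window)
 *	devmap_add_entry(0x48200000, 0x2000);	(hypothetical intr controller)
 *	...
 *	devmap_bootstrap();
 */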

/*
 * Register the given table as the one to use in devmap_bootstrap().
 */
void
devmap_register_table(const struct devmap_entry *table)
{

	devmap_table = table;
}
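
/*
 * Usage sketch (illustration only; the table name and addresses are
 * hypothetical): a platform that wants to choose its own virtual addresses can
 * build a table by hand and register it instead of using the akva allocator.
 * The table is scanned up to the first entry whose pd_size is zero, so it must
 * end with a zeroed terminator entry:
 *
 *	static const struct devmap_entry mydev_devmap[] = {
 *		{ .pd_va = 0xf1000000, .pd_pa = 0xd0000000, .pd_size = 0x100000 },
 *		{ 0 },
 *	};
 *	devmap_register_table(mydev_devmap);
 */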

/*
 * Map all of the static regions in the devmap table, and remember the devmap
 * table so the mapdev, ptov, and vtop functions can do lookups later.
 */
void
devmap_bootstrap(void)
{
	const struct devmap_entry *pd;

	devmap_bootstrap_done = true;

	/*
	 * If a table was previously registered, use it.  Otherwise, no work to
	 * do.
	 */
	if (devmap_table == NULL)
		return;

	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
#if defined(__arm__)
		pmap_preboot_map_attr(pd->pd_pa, pd->pd_va, pd->pd_size,
		    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
#elif defined(__aarch64__) || defined(__riscv)
		pmap_kenter_device(pd->pd_va, pd->pd_size, pd->pd_pa);
#endif
	}
}

/*
 * Look up the given physical address in the static mapping data and return the
 * corresponding virtual address, or NULL if not found.
 */
void *
devmap_ptov(vm_paddr_t pa, vm_size_t size)
{
	const struct devmap_entry *pd;

	if (devmap_table == NULL)
		return (NULL);

	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		if (pa >= pd->pd_pa && pa + size <= pd->pd_pa + pd->pd_size)
			return ((void *)(pd->pd_va + (pa - pd->pd_pa)));
	}

	return (NULL);
}

/*
 * Look up the given virtual address in the static mapping data and return the
 * corresponding physical address, or DEVMAP_PADDR_NOTFOUND if not found.
 */
vm_paddr_t
devmap_vtop(void * vpva, vm_size_t size)
{
	const struct devmap_entry *pd;
	vm_offset_t va;

	if (devmap_table == NULL)
		return (DEVMAP_PADDR_NOTFOUND);

	va = (vm_offset_t)vpva;
	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		if (va >= pd->pd_va && va + size <= pd->pd_va + pd->pd_size)
			return ((vm_paddr_t)(pd->pd_pa + (va - pd->pd_va)));
	}

	return (DEVMAP_PADDR_NOTFOUND);
}
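
/*
 * Translation example for devmap_ptov() and devmap_vtop() (illustration only;
 * the entry shown is hypothetical): with a registered entry of pd_pa =
 * 0xd0000000, pd_va = 0xf1000000, pd_size = 0x100000,
 *
 *	devmap_ptov(0xd0001000, 0x100)          -> (void *)0xf1001000
 *	devmap_vtop((void *)0xf1001000, 0x100)  -> 0xd0001000
 *
 * A range not entirely covered by some entry yields NULL or
 * DEVMAP_PADDR_NOTFOUND, respectively.
 */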

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped.
 *
 * This uses a pre-established static mapping if one exists for the requested
 * range, otherwise it allocates kva space and maps the physical pages into it.
 *
 * This routine is intended to be used for mapping device memory, NOT real
 * memory; the mapping type is inherently VM_MEMATTR_DEVICE in
 * pmap_kenter_device().
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, offset;
	void * rva;

	/* First look in the static mapping table. */
	if ((rva = devmap_ptov(pa, size)) != NULL)
		return (rva);

	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = round_page(size + offset);

#if defined(__aarch64__) || defined(__riscv)
	if (early_boot) {
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
		va = akva_devmap_vaddr;
		KASSERT(va >= VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE,
		    ("Too many early devmap mappings"));
	} else
#endif
#ifdef __aarch64__
	if (size >= L2_SIZE && (pa & L2_OFFSET) == 0)
		va = kva_alloc_aligned(size, L2_SIZE);
	else if (size >= L3C_SIZE && (pa & L3C_OFFSET) == 0)
		va = kva_alloc_aligned(size, L3C_SIZE);
	else
#endif
		va = kva_alloc(size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pmap_kenter_device(va, size, pa);

	return ((void *)(va + offset));
}
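
/*
 * Usage sketch (illustration only; the address, size, and variable name are
 * hypothetical): callers pair pmap_mapdev() with pmap_unmapdev().  The
 * physical address need not be page-aligned; the offset within the page is
 * preserved in the returned pointer:
 *
 *	void *regs = pmap_mapdev(0xd0002004, 0x10);
 *	... access device registers through regs ...
 *	pmap_unmapdev(regs, 0x10);
 *
 * If the range is covered by a static devmap entry, no kva is allocated and
 * pmap_unmapdev() leaves the static mapping in place.
 */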

#if defined(__aarch64__) || defined(__riscv)
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, offset;
	void * rva;

	/* First look in the static mapping table. */
	if ((rva = devmap_ptov(pa, size)) != NULL)
		return (rva);

	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = round_page(size + offset);

	if (early_boot) {
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
		va = akva_devmap_vaddr;
		KASSERT(va >= (VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE)),
		    ("Too many early devmap mappings 2"));
	} else
#ifdef __aarch64__
	if (size >= L2_SIZE && (pa & L2_OFFSET) == 0)
		va = kva_alloc_aligned(size, L2_SIZE);
	else if (size >= L3C_SIZE && (pa & L3C_OFFSET) == 0)
		va = kva_alloc_aligned(size, L3C_SIZE);
	else
#endif
		va = kva_alloc(size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pmap_kenter(va, size, pa, ma);

	return ((void *)(va + offset));
}
#endif

/*
 * Unmap device memory and free the kva space.
 */
void
pmap_unmapdev(void *p, vm_size_t size)
{
	vm_offset_t offset, va;

	/* Nothing to do if we find the mapping in the static table. */
	if (devmap_vtop(p, size) != DEVMAP_PADDR_NOTFOUND)
		return;

	va = (vm_offset_t)p;
	offset = va & PAGE_MASK;
	va = trunc_page(va);
	size = round_page(size + offset);

	pmap_kremove_device(va, size);
	kva_free(va, size);
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND_FLAGS(devmap, db_show_devmap, DB_CMD_MEMSAFE)
{
	devmap_dump_table(db_printf);
}

#endif /* DDB */