xref: /freebsd/sys/powerpc/powerpc/pmap_dispatch.c (revision aa0a1e58)
/*-
 * Copyright (c) 2005 Peter Grehan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Dispatch MI pmap calls to the appropriate MMU implementation
 * through a previously registered kernel object.
 *
 * Before pmap_bootstrap() can be called, a CPU module must have
 * called pmap_mmu_install(). This may be called multiple times:
 * the highest priority call will be installed as the default
 * MMU handler when pmap_bootstrap() is called.
 *
 * It is required that mutex_init() be called before pmap_bootstrap(),
 * as the PMAP layer makes extensive use of mutexes.
 */
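
/*
 * For illustration only (a sketch, not code from this file): a CPU or
 * platform module would typically register its implementation during
 * early startup with a call such as
 *
 *	pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
 *
 * where the MMU_TYPE_* name strings come from <machine/mmuvar.h>; the
 * particular name and priority used here are only example values.
 */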

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>
#include <machine/smp.h>

#include "mmu_if.h"

static mmu_def_t	*mmu_def_impl;
static mmu_t		mmu_obj;
static struct mmu_kobj	mmu_kernel_obj;
static struct kobj_ops	mmu_kernel_kops;

/*
 * pmap globals
 */
struct pmap kernel_pmap_store;

struct msgbuf *msgbufp;
vm_offset_t    msgbuf_phys;

vm_offset_t kernel_vm_end;
vm_offset_t phys_avail[PHYS_AVAIL_SZ];
vm_offset_t virtual_avail;
vm_offset_t virtual_end;

int pmap_bootstrapped;

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %u)", __func__, pmap, va, wired);
	MMU_CHANGE_WIRING(mmu_obj, pmap, va, wired);
}

void
pmap_clear_modify(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_MODIFY(mmu_obj, m);
}

void
pmap_clear_reference(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_REFERENCE(mmu_obj, m);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
	    src_pmap, dst_addr, len, src_addr);
	MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
	MMU_COPY_PAGE(mmu_obj, src, dst);
}

void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
    vm_prot_t prot, boolean_t wired)
{

	CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %#x, %p, %#x, %u)", pmap, va,
	    access, p, prot, wired);
	MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
	    end, m_start, prot);
	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
	MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_EXTRACT(mmu_obj, pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
	return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}

void
pmap_growkernel(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	MMU_GROWKERNEL(mmu_obj, va);
}

void
pmap_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	MMU_INIT(mmu_obj);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_MODIFIED(mmu_obj, m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_REFERENCED(mmu_obj, m));
}

boolean_t
pmap_ts_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_TS_REFERENCED(mmu_obj, m));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
	    prot);
	return (MMU_MAP(mmu_obj, virt, start, end, prot));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
	    object, pindex, size);
	MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
	return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}

void
pmap_page_init(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_PAGE_INIT(mmu_obj, m);
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}

int
pmap_pinit(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT(mmu_obj, pmap);
	return (1);
}

void
pmap_pinit0(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT0(mmu_obj, pmap);
}

void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
	    prot);
	MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}

void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

	CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
	MMU_QENTER(mmu_obj, start, m, count);
}

void
pmap_qremove(vm_offset_t start, int count)
{

	CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
	MMU_QREMOVE(mmu_obj, start, count);
}

void
pmap_release(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_RELEASE(mmu_obj, pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_REMOVE(mmu_obj, pmap, start, end);
}

void
pmap_remove_all(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_ALL(mmu_obj, m);
}

void
pmap_remove_pages(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_REMOVE_PAGES(mmu_obj, pmap);
}

void
pmap_remove_write(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_WRITE(mmu_obj, m);
}

void
pmap_zero_page(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE(mmu_obj, m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}

void
pmap_zero_page_idle(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
	return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}

void
pmap_activate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_ACTIVATE(mmu_obj, td);
}

void
pmap_deactivate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_DEACTIVATE(mmu_obj, td);
}

/*
 *	Increase the starting virtual address of the given mapping if a
 *	different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
	    size);
	MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}

/*
 * Routines used in machine-dependent code
 */
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
	mmu_obj = &mmu_kernel_obj;

	/*
	 * Take care of compiling the selected class, and
	 * then statically initialise the MMU object
	 */
	kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
	kobj_init((kobj_t)mmu_obj, mmu_def_impl);

	MMU_BOOTSTRAP(mmu_obj, start, end);
}
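
/*
 * A sketch of the expected boot-time ordering, as implied by the
 * comment at the top of this file (illustration only):
 *
 *	pmap_mmu_install(name, prio);	called by candidate CPU modules
 *	pmap_bootstrap(start, end);	compiles and installs the winner
 *	pmap_init();			later MI VM initialisation
 */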

void
pmap_cpu_bootstrap(int ap)
{
	/*
	 * No KTR here because our console probably doesn't work yet
	 */

	MMU_CPU_BOOTSTRAP(mmu_obj, ap);
}

void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_MAPDEV(mmu_obj, pa, size));
}

void *
pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t attr)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
	return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
	MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma);
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
	MMU_UNMAPDEV(mmu_obj, va, size);
}

vm_offset_t
pmap_kextract(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	return (MMU_KEXTRACT(mmu_obj, va));
}

void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
	MMU_KENTER(mmu_obj, va, pa);
}

void
pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
	MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}

boolean_t
pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
	MMU_SYNC_ICACHE(mmu_obj, pm, va, sz);
}

vm_offset_t
pmap_dumpsys_map(struct pmap_md *md, vm_size_t ofs, vm_size_t *sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, *sz);
	return (MMU_DUMPSYS_MAP(mmu_obj, md, ofs, sz));
}

void
pmap_dumpsys_unmap(struct pmap_md *md, vm_size_t ofs, vm_offset_t va)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, va);
	MMU_DUMPSYS_UNMAP(mmu_obj, md, ofs, va);
}

struct pmap_md *
pmap_scan_md(struct pmap_md *prev)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, prev);
	return (MMU_SCAN_MD(mmu_obj, prev));
}

/*
 * MMU install routines. The highest priority wins; an equal priority
 * also overrides, so among equal priorities the most recent call wins.
 */
SET_DECLARE(mmu_set, mmu_def_t);

boolean_t
pmap_mmu_install(char *name, int prio)
{
	mmu_def_t	**mmupp, *mmup;
	static int	curr_prio = 0;

	/*
	 * Try to locate the MMU kobj corresponding to the name
	 */
	SET_FOREACH(mmupp, mmu_set) {
		mmup = *mmupp;

		if (mmup->name &&
		    !strcmp(mmup->name, name) &&
		    (prio >= curr_prio || mmu_def_impl == NULL)) {
			curr_prio = prio;
			mmu_def_impl = mmup;
			return (TRUE);
		}
	}

	return (FALSE);
}
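
/*
 * For illustration (a sketch; the names below follow the usual
 * FreeBSD/powerpc pattern and are not defined in this file): an MMU
 * class adds itself to mmu_set with the MMU_DEF() macro from
 * <machine/mmuvar.h>, e.g.
 *
 *	MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
 *
 * creating the entry that pmap_mmu_install() above matches by name.
 */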
553