/*-
 * Copyright (c) 2005 Peter Grehan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Dispatch MI pmap calls to the appropriate MMU implementation
 * through a previously registered kernel object.
 *
 * Before pmap_bootstrap() can be called, a CPU module must have
 * called pmap_mmu_install(). This may be called multiple times:
 * the highest priority call will be installed as the default
 * MMU handler when pmap_bootstrap() is called.
 *
 * It is required that mutex_init() be called before pmap_bootstrap(),
 * as the PMAP layer makes extensive use of mutexes.
 */
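
/*
 * Example (illustrative only; the class name and priority are
 * placeholders for whatever the CPU module actually uses): early
 * machine-dependent startup code registers its implementation and
 * priority before the MI layer bootstraps the pmap, roughly:
 *
 *	pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
 *	...
 *	pmap_bootstrap(kernelstart, kernelend);
 *
 * See machine/mmuvar.h for the MMU_TYPE_* class names.
 */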

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/dump.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/smp.h>

#include "mmu_if.h"

static mmu_def_t	*mmu_def_impl;
static mmu_t		mmu_obj;
static struct mmu_kobj	mmu_kernel_obj;
static struct kobj_ops	mmu_kernel_kops;

/*
 * pmap globals
 */
struct pmap kernel_pmap_store;

struct msgbuf *msgbufp;
vm_offset_t    msgbuf_phys;

vm_offset_t kernel_vm_end;
vm_paddr_t phys_avail[PHYS_AVAIL_SZ];
vm_offset_t virtual_avail;
vm_offset_t virtual_end;

int pmap_bootstrapped;

#ifdef AIM
int
pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
{
	if (PVO_VADDR(a) < PVO_VADDR(b))
		return (-1);
	else if (PVO_VADDR(a) > PVO_VADDR(b))
		return (1);
	return (0);
}
RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
#endif


void
pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, start, end,
	    advice);
	MMU_ADVISE(mmu_obj, pmap, start, end, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_MODIFY(mmu_obj, m);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
	    src_pmap, dst_addr, len, src_addr);
	MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
	MMU_COPY_PAGE(mmu_obj, src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
	    a_offset, mb, b_offset, xfersize);
	MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
	    p, prot, flags, psind);
	return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
	    end, m_start, prot);
	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
	MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_EXTRACT(mmu_obj, pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
	return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}

void
pmap_growkernel(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	MMU_GROWKERNEL(mmu_obj, va);
}

void
pmap_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	MMU_INIT(mmu_obj);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_MODIFIED(mmu_obj, m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_REFERENCED(mmu_obj, m));
}

int
pmap_ts_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_TS_REFERENCED(mmu_obj, m));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
	    prot);
	return (MMU_MAP(mmu_obj, virt, start, end, prot));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
	    object, pindex, size);
	MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
	return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}

void
pmap_page_init(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_PAGE_INIT(mmu_obj, m);
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}

int
pmap_pinit(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT(mmu_obj, pmap);
	return (1);
}

void
pmap_pinit0(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT0(mmu_obj, pmap);
}

void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
	    prot);
	MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}

void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

	CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
	MMU_QENTER(mmu_obj, start, m, count);
}

void
pmap_qremove(vm_offset_t start, int count)
{

	CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
	MMU_QREMOVE(mmu_obj, start, count);
}

void
pmap_release(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_RELEASE(mmu_obj, pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_REMOVE(mmu_obj, pmap, start, end);
}

void
pmap_remove_all(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_ALL(mmu_obj, m);
}

void
pmap_remove_pages(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_REMOVE_PAGES(mmu_obj, pmap);
}

void
pmap_remove_write(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_WRITE(mmu_obj, m);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_UNWIRE(mmu_obj, pmap, start, end);
}

void
pmap_zero_page(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE(mmu_obj, m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
	return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}

void
pmap_activate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_ACTIVATE(mmu_obj, td);
}

void
pmap_deactivate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_DEACTIVATE(mmu_obj, td);
}

/*
 *	Increase the starting virtual address of the given mapping if a
 *	different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
	    size);
	MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}

/*
 * Routines used in machine-dependent code
 */
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
	mmu_obj = &mmu_kernel_obj;

	/*
	 * Take care of compiling the selected class, and
	 * then statically initialise the MMU object. The static
	 * kobj variants are used because this runs long before
	 * dynamic memory allocation is available.
	 */
	kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
	kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);

	MMU_BOOTSTRAP(mmu_obj, start, end);
}

void
pmap_cpu_bootstrap(int ap)
{
	/*
	 * No KTR here because our console probably doesn't work yet
	 */

	return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_MAPDEV(mmu_obj, pa, size));
}

void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
	return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
	return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
	MMU_UNMAPDEV(mmu_obj, va, size);
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	return (MMU_KEXTRACT(mmu_obj, va));
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
	MMU_KENTER(mmu_obj, va, pa);
}

void
pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
	MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}

void
pmap_kremove(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	return (MMU_KREMOVE(mmu_obj, va));
}

boolean_t
pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
	return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
}

void
dumpsys_map_chunk(vm_paddr_t pa, size_t sz, void **va)
{

	CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
	return (MMU_DUMPSYS_MAP(mmu_obj, pa, sz, va));
}

void
dumpsys_unmap_chunk(vm_paddr_t pa, size_t sz, void *va)
{

	CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
	return (MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va));
}

void
dumpsys_pa_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	return (MMU_SCAN_INIT(mmu_obj));
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_QUICK_ENTER_PAGE(mmu_obj, m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{
	CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
	MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
}

int
pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
{
	CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
	return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
}

/*
 * MMU install routines. Highest priority wins; equal priority also
 * overrides, allowing the most recently set implementation to win.
 */
SET_DECLARE(mmu_set, mmu_def_t);

boolean_t
pmap_mmu_install(char *name, int prio)
{
	mmu_def_t	**mmupp, *mmup;
	static int	curr_prio = 0;

	/*
	 * Try to locate the MMU kobj corresponding to the name
	 */
	SET_FOREACH(mmupp, mmu_set) {
		mmup = *mmupp;

		if (mmup->name &&
		    !strcmp(mmup->name, name) &&
		    (prio >= curr_prio || mmu_def_impl == NULL)) {
			curr_prio = prio;
			mmu_def_impl = mmup;
			return (TRUE);
		}
	}

	return (FALSE);
}
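
/*
 * Illustrative sequence (hypothetical class names, assuming both
 * appear in mmu_set) showing the priority rules above:
 *
 *	pmap_mmu_install("mmu_a", 0);	TRUE:  mmu_a installed
 *	pmap_mmu_install("mmu_b", 0);	TRUE:  equal priority, so the
 *					       later call, mmu_b, wins
 *	pmap_mmu_install("mmu_a", -1);	FALSE: lower priority, ignored
 */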

int unmapped_buf_allowed;