1 /*	$NetBSD: pmap.c,v 1.335 2016/07/14 15:51:41 skrll Exp $	*/
2 
3 /*
4  * Copyright 2003 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Steve C. Woodford for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed for the NetBSD Project by
20  *      Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 2002-2003 Wasabi Systems, Inc.
40  * Copyright (c) 2001 Richard Earnshaw
41  * Copyright (c) 2001-2002 Christopher Gilbert
42  * All rights reserved.
43  *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. The name of the company nor the name of the author may be used to
50  *    endorse or promote products derived from this software without specific
51  *    prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
54  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
55  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
57  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  */
65 
66 /*-
67  * Copyright (c) 1999 The NetBSD Foundation, Inc.
68  * All rights reserved.
69  *
70  * This code is derived from software contributed to The NetBSD Foundation
71  * by Charles M. Hannum.
72  *
73  * Redistribution and use in source and binary forms, with or without
74  * modification, are permitted provided that the following conditions
75  * are met:
76  * 1. Redistributions of source code must retain the above copyright
77  *    notice, this list of conditions and the following disclaimer.
78  * 2. Redistributions in binary form must reproduce the above copyright
79  *    notice, this list of conditions and the following disclaimer in the
80  *    documentation and/or other materials provided with the distribution.
81  *
82  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
83  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
84  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
85  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
86  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
87  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
88  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
89  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
90  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
91  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
92  * POSSIBILITY OF SUCH DAMAGE.
93  */
94 
95 /*
96  * Copyright (c) 1994-1998 Mark Brinicombe.
97  * Copyright (c) 1994 Brini.
98  * All rights reserved.
99  *
100  * This code is derived from software written for Brini by Mark Brinicombe
101  *
102  * Redistribution and use in source and binary forms, with or without
103  * modification, are permitted provided that the following conditions
104  * are met:
105  * 1. Redistributions of source code must retain the above copyright
106  *    notice, this list of conditions and the following disclaimer.
107  * 2. Redistributions in binary form must reproduce the above copyright
108  *    notice, this list of conditions and the following disclaimer in the
109  *    documentation and/or other materials provided with the distribution.
110  * 3. All advertising materials mentioning features or use of this software
111  *    must display the following acknowledgement:
112  *	This product includes software developed by Mark Brinicombe.
113  * 4. The name of the author may not be used to endorse or promote products
114  *    derived from this software without specific prior written permission.
115  *
116  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
117  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
118  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
119  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
120  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
121  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
122  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
123  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
124  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
125  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
126  * RiscBSD kernel project
127  *
128  * pmap.c
129  *
130  * Machine dependent vm stuff
131  *
132  * Created      : 20/09/94
133  */
134 
135 /*
136  * armv6 and VIPT cache support by 3am Software Foundry,
137  * Copyright (c) 2007 Microsoft
138  */
139 
140 /*
141  * Performance improvements, UVM changes, overhauls and part-rewrites
142  * were contributed by Neil A. Carson <neil@causality.com>.
143  */
144 
145 /*
146  * Overhauled again to speedup the pmap, use MMU Domains so that L1 tables
147  * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi
148  * Systems, Inc.
149  *
150  * There are still a few things outstanding at this time:
151  *
152  *   - There are some unresolved issues for MP systems:
153  *
154  *     o The L1 metadata needs a lock, or more specifically, some places
155  *       need to acquire an exclusive lock when modifying L1 translation
156  *       table entries.
157  *
158  *     o When one cpu modifies an L1 entry, and that L1 table is also
159  *       being used by another cpu, then the latter will need to be told
160  *       that a tlb invalidation may be necessary. (But only if the old
161  *       domain number in the L1 entry being over-written is currently
162  *       the active domain on that cpu). I guess there are lots more tlb
163  *       shootdown issues too...
164  *
165  *     o If the vector_page is at 0x00000000 instead of in kernel VA space,
166  *       then MP systems will lose big-time because of the MMU domain hack.
167  *       The only way this can be solved (apart from moving the vector
168  *       page to 0xffff0000) is to reserve the first 1MB of user address
169  *       space for kernel use only. This would require re-linking all
170  *       applications so that the text section starts above this 1MB
171  *       boundary.
172  *
173  *     o Tracking which VM space is resident in the cache/tlb has not yet
174  *       been implemented for MP systems.
175  *
176  *     o Finally, there is a pathological condition where two cpus running
177  *       two separate processes (not lwps) which happen to share an L1
178  *       can get into a fight over one or more L1 entries. This will result
179  *       in a significant slow-down if both processes are in tight loops.
180  */
181 
182 /*
183  * Special compilation symbols
184  * PMAP_DEBUG		- Build in pmap_debug_level code
185  */
186 
187 /* Include header files */
188 
189 #include "opt_arm_debug.h"
190 #include "opt_cpuoptions.h"
191 #include "opt_pmap_debug.h"
192 #include "opt_ddb.h"
193 #include "opt_lockdebug.h"
194 #include "opt_multiprocessor.h"
195 
196 #ifdef MULTIPROCESSOR
197 #define _INTR_PRIVATE
198 #endif
199 
200 #include <sys/param.h>
201 #include <sys/types.h>
202 #include <sys/kernel.h>
203 #include <sys/systm.h>
204 #include <sys/proc.h>
205 #include <sys/intr.h>
206 #include <sys/pool.h>
207 #include <sys/kmem.h>
208 #include <sys/cdefs.h>
209 #include <sys/cpu.h>
210 #include <sys/sysctl.h>
211 #include <sys/bus.h>
212 #include <sys/atomic.h>
213 #include <sys/kernhist.h>
214 
215 #include <uvm/uvm.h>
216 #include <uvm/pmap/pmap_pvt.h>
217 
218 #include <arm/locore.h>
219 
220 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.335 2016/07/14 15:51:41 skrll Exp $");
221 
222 //#define PMAP_DEBUG
223 #ifdef PMAP_DEBUG
224 
225 /* XXX need to get rid of all refs to this */
226 int pmap_debug_level = 0;
227 
228 /*
229  * for switching to potentially finer grained debugging
230  */
231 #define	PDB_FOLLOW	0x0001
232 #define	PDB_INIT	0x0002
233 #define	PDB_ENTER	0x0004
234 #define	PDB_REMOVE	0x0008
235 #define	PDB_CREATE	0x0010
236 #define	PDB_PTPAGE	0x0020
237 #define	PDB_GROWKERN	0x0040
238 #define	PDB_BITS	0x0080
239 #define	PDB_COLLECT	0x0100
240 #define	PDB_PROTECT	0x0200
241 #define	PDB_MAP_L1	0x0400
242 #define	PDB_BOOTSTRAP	0x1000
243 #define	PDB_PARANOIA	0x2000
244 #define	PDB_WIRING	0x4000
245 #define	PDB_PVDUMP	0x8000
246 #define	PDB_VAC		0x10000
247 #define	PDB_KENTER	0x20000
248 #define	PDB_KREMOVE	0x40000
249 #define	PDB_EXEC	0x80000
250 
251 int debugmap = 1;
252 int pmapdebug = 0;
253 #define	NPDEBUG(_lev_,_stat_) \
254 	if (pmapdebug & (_lev_)) \
255         	((_stat_))
256 
257 #else	/* PMAP_DEBUG */
258 #define NPDEBUG(_lev_,_stat_) /* Nothing */
259 #endif	/* PMAP_DEBUG */
260 
261 /*
262  * pmap_kernel() points here
263  */
264 static struct pmap	kernel_pmap_store = {
265 #ifndef ARM_MMU_EXTENDED
266 	.pm_activated = true,
267 	.pm_domain = PMAP_DOMAIN_KERNEL,
268 	.pm_cstate.cs_all = PMAP_CACHE_STATE_ALL,
269 #endif
270 };
271 struct pmap * const	kernel_pmap_ptr = &kernel_pmap_store;
272 #undef pmap_kernel
273 #define pmap_kernel()	(&kernel_pmap_store)
274 #ifdef PMAP_NEED_ALLOC_POOLPAGE
275 int			arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT;
276 #endif
277 
278 /*
279  * Pool and cache that pmap structures are allocated from.
280  * We use a cache to avoid clearing the pm_l2[] array (1KB)
281  * in pmap_create().
282  */
283 static struct pool_cache pmap_cache;
284 
285 /*
286  * Pool of PV structures
287  */
288 static struct pool pmap_pv_pool;
289 static void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
290 static void pmap_bootstrap_pv_page_free(struct pool *, void *);
291 static struct pool_allocator pmap_bootstrap_pv_allocator = {
292 	pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
293 };
294 
295 /*
296  * Pool and cache of l2_dtable structures.
297  * We use a cache to avoid clearing the structures when they're
298  * allocated. (196 bytes)
299  */
300 static struct pool_cache pmap_l2dtable_cache;
301 static vaddr_t pmap_kernel_l2dtable_kva;
302 
303 /*
304  * Pool and cache of L2 page descriptors.
305  * We use a cache to avoid clearing the descriptor table
306  * when they're allocated. (1KB)
307  */
308 static struct pool_cache pmap_l2ptp_cache;
309 static vaddr_t pmap_kernel_l2ptp_kva;
310 static paddr_t pmap_kernel_l2ptp_phys;
311 
312 #ifdef PMAPCOUNTERS
313 #define	PMAP_EVCNT_INITIALIZER(name) \
314 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
315 
316 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
317 static struct evcnt pmap_ev_vac_clean_one =
318    PMAP_EVCNT_INITIALIZER("clean page (1 color)");
319 static struct evcnt pmap_ev_vac_flush_one =
320    PMAP_EVCNT_INITIALIZER("flush page (1 color)");
321 static struct evcnt pmap_ev_vac_flush_lots =
322    PMAP_EVCNT_INITIALIZER("flush page (2+ colors)");
323 static struct evcnt pmap_ev_vac_flush_lots2 =
324    PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)");
325 EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one);
326 EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one);
327 EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots);
328 EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2);
329 
330 static struct evcnt pmap_ev_vac_color_new =
331    PMAP_EVCNT_INITIALIZER("new page color");
332 static struct evcnt pmap_ev_vac_color_reuse =
333    PMAP_EVCNT_INITIALIZER("ok first page color");
334 static struct evcnt pmap_ev_vac_color_ok =
335    PMAP_EVCNT_INITIALIZER("ok page color");
336 static struct evcnt pmap_ev_vac_color_blind =
337    PMAP_EVCNT_INITIALIZER("blind page color");
338 static struct evcnt pmap_ev_vac_color_change =
339    PMAP_EVCNT_INITIALIZER("change page color");
340 static struct evcnt pmap_ev_vac_color_erase =
341    PMAP_EVCNT_INITIALIZER("erase page color");
342 static struct evcnt pmap_ev_vac_color_none =
343    PMAP_EVCNT_INITIALIZER("no page color");
344 static struct evcnt pmap_ev_vac_color_restore =
345    PMAP_EVCNT_INITIALIZER("restore page color");
346 
347 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new);
348 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse);
349 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok);
350 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind);
351 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change);
352 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase);
353 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none);
354 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore);
355 #endif
356 
357 static struct evcnt pmap_ev_mappings =
358    PMAP_EVCNT_INITIALIZER("pages mapped");
359 static struct evcnt pmap_ev_unmappings =
360    PMAP_EVCNT_INITIALIZER("pages unmapped");
361 static struct evcnt pmap_ev_remappings =
362    PMAP_EVCNT_INITIALIZER("pages remapped");
363 
364 EVCNT_ATTACH_STATIC(pmap_ev_mappings);
365 EVCNT_ATTACH_STATIC(pmap_ev_unmappings);
366 EVCNT_ATTACH_STATIC(pmap_ev_remappings);
367 
368 static struct evcnt pmap_ev_kernel_mappings =
369    PMAP_EVCNT_INITIALIZER("kernel pages mapped");
370 static struct evcnt pmap_ev_kernel_unmappings =
371    PMAP_EVCNT_INITIALIZER("kernel pages unmapped");
372 static struct evcnt pmap_ev_kernel_remappings =
373    PMAP_EVCNT_INITIALIZER("kernel pages remapped");
374 
375 EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings);
376 EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings);
377 EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings);
378 
379 static struct evcnt pmap_ev_kenter_mappings =
380    PMAP_EVCNT_INITIALIZER("kenter pages mapped");
381 static struct evcnt pmap_ev_kenter_unmappings =
382    PMAP_EVCNT_INITIALIZER("kenter pages unmapped");
383 static struct evcnt pmap_ev_kenter_remappings =
384    PMAP_EVCNT_INITIALIZER("kenter pages remapped");
385 static struct evcnt pmap_ev_pt_mappings =
386    PMAP_EVCNT_INITIALIZER("page table pages mapped");
387 
388 EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings);
389 EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings);
390 EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings);
391 EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings);
392 
393 static struct evcnt pmap_ev_fixup_mod =
394    PMAP_EVCNT_INITIALIZER("page modification emulations");
395 static struct evcnt pmap_ev_fixup_ref =
396    PMAP_EVCNT_INITIALIZER("page reference emulations");
397 static struct evcnt pmap_ev_fixup_exec =
398    PMAP_EVCNT_INITIALIZER("exec pages fixed up");
399 static struct evcnt pmap_ev_fixup_pdes =
400    PMAP_EVCNT_INITIALIZER("pdes fixed up");
401 #ifndef ARM_MMU_EXTENDED
402 static struct evcnt pmap_ev_fixup_ptesync =
403    PMAP_EVCNT_INITIALIZER("ptesync fixed");
404 #endif
405 
406 EVCNT_ATTACH_STATIC(pmap_ev_fixup_mod);
407 EVCNT_ATTACH_STATIC(pmap_ev_fixup_ref);
408 EVCNT_ATTACH_STATIC(pmap_ev_fixup_exec);
409 EVCNT_ATTACH_STATIC(pmap_ev_fixup_pdes);
410 #ifndef ARM_MMU_EXTENDED
411 EVCNT_ATTACH_STATIC(pmap_ev_fixup_ptesync);
412 #endif
413 
414 #ifdef PMAP_CACHE_VIPT
415 static struct evcnt pmap_ev_exec_mappings =
416    PMAP_EVCNT_INITIALIZER("exec pages mapped");
417 static struct evcnt pmap_ev_exec_cached =
418    PMAP_EVCNT_INITIALIZER("exec pages cached");
419 
420 EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings);
421 EVCNT_ATTACH_STATIC(pmap_ev_exec_cached);
422 
423 static struct evcnt pmap_ev_exec_synced =
424    PMAP_EVCNT_INITIALIZER("exec pages synced");
425 static struct evcnt pmap_ev_exec_synced_map =
426    PMAP_EVCNT_INITIALIZER("exec pages synced (MP)");
427 #ifndef ARM_MMU_EXTENDED
428 static struct evcnt pmap_ev_exec_synced_unmap =
429    PMAP_EVCNT_INITIALIZER("exec pages synced (UM)");
430 static struct evcnt pmap_ev_exec_synced_remap =
431    PMAP_EVCNT_INITIALIZER("exec pages synced (RM)");
432 static struct evcnt pmap_ev_exec_synced_clearbit =
433    PMAP_EVCNT_INITIALIZER("exec pages synced (DG)");
434 static struct evcnt pmap_ev_exec_synced_kremove =
435    PMAP_EVCNT_INITIALIZER("exec pages synced (KU)");
436 #endif
437 
438 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced);
439 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map);
440 #ifndef ARM_MMU_EXTENDED
441 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap);
442 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap);
443 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit);
444 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove);
445 #endif
446 
447 static struct evcnt pmap_ev_exec_discarded_unmap =
448    PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)");
449 static struct evcnt pmap_ev_exec_discarded_zero =
450    PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)");
451 static struct evcnt pmap_ev_exec_discarded_copy =
452    PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)");
453 static struct evcnt pmap_ev_exec_discarded_page_protect =
454    PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)");
455 static struct evcnt pmap_ev_exec_discarded_clearbit =
456    PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)");
457 static struct evcnt pmap_ev_exec_discarded_kremove =
458    PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)");
459 #ifdef ARM_MMU_EXTENDED
460 static struct evcnt pmap_ev_exec_discarded_modfixup =
461    PMAP_EVCNT_INITIALIZER("exec pages discarded (MF)");
462 #endif
463 
464 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap);
465 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero);
466 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy);
467 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect);
468 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit);
469 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove);
470 #ifdef ARM_MMU_EXTENDED
471 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_modfixup);
472 #endif
473 #endif /* PMAP_CACHE_VIPT */
474 
475 static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates");
476 static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects");
477 static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations");
478 
479 EVCNT_ATTACH_STATIC(pmap_ev_updates);
480 EVCNT_ATTACH_STATIC(pmap_ev_collects);
481 EVCNT_ATTACH_STATIC(pmap_ev_activations);
482 
483 #define	PMAPCOUNT(x)	((void)(pmap_ev_##x.ev_count++))
484 #else
485 #define	PMAPCOUNT(x)	((void)0)
486 #endif
487 
488 /*
489  * pmap copy/zero page, and mem(5) hook point
490  */
491 static pt_entry_t *csrc_pte, *cdst_pte;
492 static vaddr_t csrcp, cdstp;
493 #ifdef MULTIPROCESSOR
494 static size_t cnptes;
495 #define	cpu_csrc_pte(o)	(csrc_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT))
496 #define	cpu_cdst_pte(o)	(cdst_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT))
497 #define	cpu_csrcp(o)	(csrcp + L2_S_SIZE * cnptes * cpu_number() + (o))
498 #define	cpu_cdstp(o)	(cdstp + L2_S_SIZE * cnptes * cpu_number() + (o))
499 #else
500 #define	cpu_csrc_pte(o)	(csrc_pte + ((o) >> L2_S_SHIFT))
501 #define	cpu_cdst_pte(o)	(cdst_pte + ((o) >> L2_S_SHIFT))
502 #define	cpu_csrcp(o)	(csrcp + (o))
503 #define	cpu_cdstp(o)	(cdstp + (o))
504 #endif
505 vaddr_t memhook;			/* used by mem.c & others */
506 kmutex_t memlock __cacheline_aligned;	/* used by mem.c & others */
507 kmutex_t pmap_lock __cacheline_aligned;
508 extern void *msgbufaddr;
509 int pmap_kmpages;
510 /*
511  * Flag to indicate if pmap_init() has done its thing
512  */
513 bool pmap_initialized;
514 
515 #if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
516 /*
517  * Virtual end of direct-mapped memory
518  */
519 vaddr_t pmap_directlimit;
520 #endif
521 
522 /*
523  * Misc. locking data structures
524  */
525 
526 static inline void
527 pmap_acquire_pmap_lock(pmap_t pm)
528 {
529 	if (pm == pmap_kernel()) {
530 #ifdef MULTIPROCESSOR
531 		KERNEL_LOCK(1, NULL);
532 #endif
533 	} else {
534 		mutex_enter(pm->pm_lock);
535 	}
536 }
537 
538 static inline void
539 pmap_release_pmap_lock(pmap_t pm)
540 {
541 	if (pm == pmap_kernel()) {
542 #ifdef MULTIPROCESSOR
543 		KERNEL_UNLOCK_ONE(NULL);
544 #endif
545 	} else {
546 		mutex_exit(pm->pm_lock);
547 	}
548 }
549 
550 static inline void
551 pmap_acquire_page_lock(struct vm_page_md *md)
552 {
553 	mutex_enter(&pmap_lock);
554 }
555 
556 static inline void
557 pmap_release_page_lock(struct vm_page_md *md)
558 {
559 	mutex_exit(&pmap_lock);
560 }
561 
562 #ifdef DIAGNOSTIC
563 static inline int
564 pmap_page_locked_p(struct vm_page_md *md)
565 {
566 	return mutex_owned(&pmap_lock);
567 }
568 #endif
569 
570 
571 /*
572  * Metadata for L1 translation tables.
573  */
574 #ifndef ARM_MMU_EXTENDED
575 struct l1_ttable {
576 	/* Entry on the L1 Table list */
577 	SLIST_ENTRY(l1_ttable) l1_link;
578 
579 	/* Entry on the L1 Least Recently Used list */
580 	TAILQ_ENTRY(l1_ttable) l1_lru;
581 
582 	/* Track how many domains are allocated from this L1 */
583 	volatile u_int l1_domain_use_count;
584 
585 	/*
586 	 * A free-list of domain numbers for this L1.
587 	 * We avoid using ffs() and a bitmap to track domains since ffs()
588 	 * is slow on ARM.
589 	 */
590 	uint8_t l1_domain_first;
591 	uint8_t l1_domain_free[PMAP_DOMAINS];
592 
593 	/* Physical address of this L1 page table */
594 	paddr_t l1_physaddr;
595 
596 	/* KVA of this L1 page table */
597 	pd_entry_t *l1_kva;
598 };
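/*
 * Illustrative sketch (not compiled): the domain free-list above is
 * threaded through l1_domain_free[] itself, with domain numbers acting
 * as both array indices and "next" links, so allocating or freeing a
 * domain is O(1) and needs no ffs():
 *
 *	allocate:	domain = l1->l1_domain_first;
 *			l1->l1_domain_first = l1->l1_domain_free[domain];
 *
 *	free:		l1->l1_domain_free[domain] = l1->l1_domain_first;
 *			l1->l1_domain_first = domain;
 *
 * pmap_alloc_l1() and pmap_free_l1() below use this pattern, storing
 * the allocated number in the pmap as "domain + 1".
 */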
599 
600 /*
601  * L1 Page Tables are tracked using a Least Recently Used list.
602  *  - New L1s are allocated from the HEAD.
603  *  - Freed L1s are added to the TAIL.
604  *  - Recently accessed L1s (where an 'access' is some change to one of
605  *    the userland pmaps which owns this L1) are moved to the TAIL.
606  */
607 static TAILQ_HEAD(, l1_ttable) l1_lru_list;
608 static kmutex_t l1_lru_lock __cacheline_aligned;
609 
610 /*
611  * A list of all L1 tables
612  */
613 static SLIST_HEAD(, l1_ttable) l1_list;
614 #endif /* ARM_MMU_EXTENDED */
615 
616 /*
617  * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
618  *
619  * This is normally 16MB worth of L2 page descriptors for any given pmap.
620  * Reference counts are maintained for L2 descriptors so they can be
621  * freed when empty.
622  */
623 struct l2_bucket {
624 	pt_entry_t *l2b_kva;		/* KVA of L2 Descriptor Table */
625 	paddr_t l2b_pa;			/* Physical address of same */
626 	u_short l2b_l1slot;		/* This L2 table's L1 index */
627 	u_short l2b_occupancy;		/* How many active descriptors */
628 };
629 
630 struct l2_dtable {
631 	/* The number of L2 page descriptors allocated to this l2_dtable */
632 	u_int l2_occupancy;
633 
634 	/* List of L2 page descriptors */
635 	struct l2_bucket l2_bucket[L2_BUCKET_SIZE];
636 };
637 
638 /*
639  * Given an L1 table index, calculate the corresponding l2_dtable index
640  * and bucket index within the l2_dtable.
641  */
642 #define L2_BUCKET_XSHIFT	(L2_BUCKET_XLOG2 - L1_S_SHIFT)
643 #define L2_BUCKET_XFRAME	(~(vaddr_t)0 << L2_BUCKET_XLOG2)
644 #define L2_BUCKET_IDX(l1slot)	((l1slot) >> L2_BUCKET_XSHIFT)
645 #define L2_IDX(l1slot)		(L2_BUCKET_IDX(l1slot) >> L2_BUCKET_LOG2)
646 #define L2_BUCKET(l1slot)	(L2_BUCKET_IDX(l1slot) & (L2_BUCKET_SIZE - 1))
647 
648 __CTASSERT(0x100000000ULL == ((uint64_t)L2_SIZE * L2_BUCKET_SIZE * L1_S_SIZE));
649 __CTASSERT(L2_BUCKET_XFRAME == ~(L2_BUCKET_XSIZE-1));
650 
651 /*
652  * Given a virtual address, this macro returns the
653  * virtual address required to drop into the next L2 bucket.
654  */
655 #define	L2_NEXT_BUCKET_VA(va)	(((va) & L2_BUCKET_XFRAME) + L2_BUCKET_XSIZE)
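/*
 * Worked example (a sketch only, assuming the common configuration with
 * 1MB L1 sections, L2_BUCKET_XSHIFT == 0 and L2_BUCKET_SIZE == 16, so
 * each l2_dtable spans 16MB of VA):
 *
 *	va = 0x01234567
 *	l1slot = l1pte_index(va) = va >> 20 = 18
 *	L2_IDX(l1slot)    = 18 >> 4 = 1	-> pm->pm_l2[1]
 *	L2_BUCKET(l1slot) = 18 & 15 = 2	-> ...->l2_bucket[2]
 *	L2_NEXT_BUCKET_VA(va) = 0x01300000 (start of the next bucket)
 */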
656 
657 /*
658  * L2 allocation.
659  */
660 #define	pmap_alloc_l2_dtable()		\
661 	    pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
662 #define	pmap_free_l2_dtable(l2)		\
663 	    pool_cache_put(&pmap_l2dtable_cache, (l2))
664 #define pmap_alloc_l2_ptp(pap)		\
665 	    ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
666 	    PR_NOWAIT, (pap)))
667 
668 /*
669  * We try to map the page tables write-through, if possible.  However, not
670  * all CPUs have a write-through cache mode, so on those we have to sync
671  * the cache when we frob page tables.
672  *
673  * We try to evaluate this at compile time, if possible.  However, it's
674  * not always possible to do that, hence this run-time var.
675  */
676 int	pmap_needs_pte_sync;
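/*
 * Illustrative sketch (not compiled): when a sync is required, every
 * store to a page table entry is followed by a write-back of the cache
 * line holding that entry so the table walker sees the update, e.g.
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);
 *
 * as done in pmap_l2ptp_ctor() and PTE_SYNC_CURRENT() below.
 */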
677 
678 /*
679  * Real definition of pv_entry.
680  */
681 struct pv_entry {
682 	SLIST_ENTRY(pv_entry) pv_link;	/* next pv_entry */
683 	pmap_t		pv_pmap;        /* pmap where mapping lies */
684 	vaddr_t		pv_va;          /* virtual address for mapping */
685 	u_int		pv_flags;       /* flags */
686 };
687 
688 /*
689  * Macros to determine if a mapping might be resident in the
690  * instruction/data cache and/or TLB
691  */
692 #if ARM_MMU_V7 > 0 && !defined(ARM_MMU_EXTENDED)
693 /*
694  * Speculative loads by Cortex cores can cause TLB entries to be filled even if
695  * there are no explicit accesses, so there may always be TLB entries to
696  * flush.  If we used ASIDs then this would not be a problem.
697  */
698 #define	PV_BEEN_EXECD(f)  (((f) & PVF_EXEC) == PVF_EXEC)
699 #define	PV_BEEN_REFD(f)   (true)
700 #else
701 #define	PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
702 #define	PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)
703 #endif
704 #define	PV_IS_EXEC_P(f)   (((f) & PVF_EXEC) != 0)
705 #define	PV_IS_KENTRY_P(f) (((f) & PVF_KENTRY) != 0)
706 #define	PV_IS_WRITE_P(f)  (((f) & PVF_WRITE) != 0)
707 
708 /*
709  * Local prototypes
710  */
711 static bool		pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t, size_t);
712 static void		pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
713 			    pt_entry_t **);
714 static bool		pmap_is_current(pmap_t) __unused;
715 static bool		pmap_is_cached(pmap_t);
716 static void		pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *,
717 			    pmap_t, vaddr_t, u_int);
718 static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t);
719 static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
720 static u_int		pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t,
721 			    u_int, u_int);
722 
723 static void		pmap_pinit(pmap_t);
724 static int		pmap_pmap_ctor(void *, void *, int);
725 
726 static void		pmap_alloc_l1(pmap_t);
727 static void		pmap_free_l1(pmap_t);
728 #ifndef ARM_MMU_EXTENDED
729 static void		pmap_use_l1(pmap_t);
730 #endif
731 
732 static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
733 static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
734 static void		pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
735 static int		pmap_l2ptp_ctor(void *, void *, int);
736 static int		pmap_l2dtable_ctor(void *, void *, int);
737 
738 static void		pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
739 #ifdef PMAP_CACHE_VIVT
740 static void		pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
741 static void		pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
742 #endif
743 
744 static void		pmap_clearbit(struct vm_page_md *, paddr_t, u_int);
745 #ifdef PMAP_CACHE_VIVT
746 static bool		pmap_clean_page(struct vm_page_md *, bool);
747 #endif
748 #ifdef PMAP_CACHE_VIPT
749 static void		pmap_syncicache_page(struct vm_page_md *, paddr_t);
750 enum pmap_flush_op {
751 	PMAP_FLUSH_PRIMARY,
752 	PMAP_FLUSH_SECONDARY,
753 	PMAP_CLEAN_PRIMARY
754 };
755 #ifndef ARM_MMU_EXTENDED
756 static void		pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op);
757 #endif
758 #endif
759 static void		pmap_page_remove(struct vm_page_md *, paddr_t);
760 static void		pmap_pv_remove(paddr_t);
761 
762 #ifndef ARM_MMU_EXTENDED
763 static void		pmap_init_l1(struct l1_ttable *, pd_entry_t *);
764 #endif
765 static vaddr_t		kernel_pt_lookup(paddr_t);
766 
767 
768 /*
769  * Misc variables
770  */
771 vaddr_t virtual_avail;
772 vaddr_t virtual_end;
773 vaddr_t pmap_curmaxkvaddr;
774 
775 paddr_t avail_start;
776 paddr_t avail_end;
777 
778 pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq);
779 pv_addr_t kernelpages;
780 pv_addr_t kernel_l1pt;
781 pv_addr_t systempage;
782 
783 /* Function to set the debug level of the pmap code */
784 
785 #ifdef PMAP_DEBUG
786 void
787 pmap_debug(int level)
788 {
789 	pmap_debug_level = level;
790 	printf("pmap_debug: level=%d\n", pmap_debug_level);
791 }
792 #endif	/* PMAP_DEBUG */
793 
794 #ifdef PMAP_CACHE_VIPT
795 #define PMAP_VALIDATE_MD_PAGE(md)	\
796 	KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \
797 	    "(md) %p: attrs=%#x urw=%u krw=%u", (md), \
798 	    (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings);
799 #endif /* PMAP_CACHE_VIPT */
800 /*
801  * A bunch of routines to conditionally flush the caches/TLB depending
802  * on whether the specified pmap actually needs to be flushed at any
803  * given time.
804  */
805 static inline void
806 pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags)
807 {
808 #ifdef ARM_MMU_EXTENDED
809 	pmap_tlb_invalidate_addr(pm, va);
810 #else
811 	if (pm->pm_cstate.cs_tlb_id != 0) {
812 		if (PV_BEEN_EXECD(flags)) {
813 			cpu_tlb_flushID_SE(va);
814 		} else if (PV_BEEN_REFD(flags)) {
815 			cpu_tlb_flushD_SE(va);
816 		}
817 	}
818 #endif /* ARM_MMU_EXTENDED */
819 }
820 
821 static inline void
822 pmap_tlb_flushID(pmap_t pm)
823 {
824 #ifdef ARM_MMU_EXTENDED
825 	pmap_tlb_asid_release_all(pm);
826 #else
827 	if (pm->pm_cstate.cs_tlb_id) {
828 		cpu_tlb_flushID();
829 #if ARM_MMU_V7 == 0
830 		/*
831 		 * Speculative loads by Cortex cores can cause TLB entries to
832 		 * be filled even if there are no explicit accesses, so there
833 		 * may always be TLB entries to flush.  If we used ASIDs
834 		 * then it would not be a problem.
835 		 * This is not true for other CPUs.
836 		 */
837 		pm->pm_cstate.cs_tlb = 0;
838 #endif /* ARM_MMU_V7 */
839 	}
840 #endif /* ARM_MMU_EXTENDED */
841 }
842 
843 #ifndef ARM_MMU_EXTENDED
844 static inline void
845 pmap_tlb_flushD(pmap_t pm)
846 {
847 	if (pm->pm_cstate.cs_tlb_d) {
848 		cpu_tlb_flushD();
849 #if ARM_MMU_V7 == 0
850 		/*
851 		 * Speculative loads by Cortex cores can cause TLB entries to
852 		 * be filled even if there are no explicit accesses, so there
853 		 * may always be TLB entries to flush.  If we used ASIDs
854 		 * then it would not be a problem.
855 		 * This is not true for other CPUs.
856 		 */
857 		pm->pm_cstate.cs_tlb_d = 0;
858 #endif /* ARM_MMU_V7 */
859 	}
860 }
861 #endif /* ARM_MMU_EXTENDED */
862 
863 #ifdef PMAP_CACHE_VIVT
864 static inline void
865 pmap_cache_wbinv_page(pmap_t pm, vaddr_t va, bool do_inv, u_int flags)
866 {
867 	if (PV_BEEN_EXECD(flags) && pm->pm_cstate.cs_cache_id) {
868 		cpu_idcache_wbinv_range(va, PAGE_SIZE);
869 	} else if (PV_BEEN_REFD(flags) && pm->pm_cstate.cs_cache_d) {
870 		if (do_inv) {
871 			if (flags & PVF_WRITE)
872 				cpu_dcache_wbinv_range(va, PAGE_SIZE);
873 			else
874 				cpu_dcache_inv_range(va, PAGE_SIZE);
875 		} else if (flags & PVF_WRITE) {
876 			cpu_dcache_wb_range(va, PAGE_SIZE);
877 		}
878 	}
879 }
880 
881 static inline void
882 pmap_cache_wbinv_all(pmap_t pm, u_int flags)
883 {
884 	if (PV_BEEN_EXECD(flags)) {
885 		if (pm->pm_cstate.cs_cache_id) {
886 			cpu_idcache_wbinv_all();
887 			pm->pm_cstate.cs_cache = 0;
888 		}
889 	} else if (pm->pm_cstate.cs_cache_d) {
890 		cpu_dcache_wbinv_all();
891 		pm->pm_cstate.cs_cache_d = 0;
892 	}
893 }
894 #endif /* PMAP_CACHE_VIVT */
895 
896 static inline uint8_t
897 pmap_domain(pmap_t pm)
898 {
899 #ifdef ARM_MMU_EXTENDED
900 	return pm == pmap_kernel() ? PMAP_DOMAIN_KERNEL : PMAP_DOMAIN_USER;
901 #else
902 	return pm->pm_domain;
903 #endif
904 }
905 
906 static inline pd_entry_t *
907 pmap_l1_kva(pmap_t pm)
908 {
909 #ifdef ARM_MMU_EXTENDED
910 	return pm->pm_l1;
911 #else
912 	return pm->pm_l1->l1_kva;
913 #endif
914 }
915 
916 static inline bool
917 pmap_is_current(pmap_t pm)
918 {
919 	if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm)
920 		return true;
921 
922 	return false;
923 }
924 
925 static inline bool
926 pmap_is_cached(pmap_t pm)
927 {
928 #ifdef ARM_MMU_EXTENDED
929 	if (pm == pmap_kernel())
930 		return true;
931 #ifdef MULTIPROCESSOR
932 	// Is this pmap active on any CPU?
933 	if (!kcpuset_iszero(pm->pm_active))
934 		return true;
935 #else
936 	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
937 	// Is this pmap active?
938 	if (PMAP_PAI_ASIDVALID_P(PMAP_PAI(pm, ti), ti))
939 		return true;
940 #endif
941 #else
942 	struct cpu_info * const ci = curcpu();
943 	if (pm == pmap_kernel() || ci->ci_pmap_lastuser == NULL
944 	    || ci->ci_pmap_lastuser == pm)
945 		return true;
946 #endif /* ARM_MMU_EXTENDED */
947 
948 	return false;
949 }
950 
951 /*
952  * PTE_SYNC_CURRENT:
953  *
954  *     Make sure the pte is written out to RAM.
955  *     We need to do this in any of the following cases:
956  *       - We're dealing with the kernel pmap
957  *       - There is no pmap active in the cache/tlb.
958  *       - The specified pmap is 'active' in the cache/tlb.
959  */
960 
961 static inline void
962 pmap_pte_sync_current(pmap_t pm, pt_entry_t *ptep)
963 {
964 	if (PMAP_NEEDS_PTE_SYNC && pmap_is_cached(pm))
965 		PTE_SYNC(ptep);
966 	arm_dsb();
967 }
968 
969 #ifdef PMAP_INCLUDE_PTE_SYNC
970 #define	PTE_SYNC_CURRENT(pm, ptep)	pmap_pte_sync_current(pm, ptep)
971 #else
972 #define	PTE_SYNC_CURRENT(pm, ptep)	/* nothing */
973 #endif
974 
975 /*
976  * main pv_entry manipulation functions:
977  *   pmap_enter_pv: enter a mapping onto a vm_page list
978  *   pmap_remove_pv: remove a mapping from a vm_page list
979  *
980  * NOTE: pmap_enter_pv expects to lock the pvh itself
981  *       pmap_remove_pv expects the caller to lock the pvh before calling
982  */
983 
984 /*
985  * pmap_enter_pv: enter a mapping onto a vm_page list
986  *
987  * => caller should hold the proper lock on pmap_main_lock
988  * => caller should have pmap locked
989  * => we will gain the lock on the vm_page and allocate the new pv_entry
990  * => caller should adjust ptp's wire_count before calling
991  * => caller should not adjust pmap's wire_count
992  */
993 static void
994 pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm,
995     vaddr_t va, u_int flags)
996 {
997 	struct pv_entry **pvp;
998 
999 	NPDEBUG(PDB_PVDUMP,
1000 	    printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags));
1001 
1002 	pv->pv_pmap = pm;
1003 	pv->pv_va = va;
1004 	pv->pv_flags = flags;
1005 
1006 	pvp = &SLIST_FIRST(&md->pvh_list);
1007 #ifdef PMAP_CACHE_VIPT
1008 	/*
1009 	 * Insert unmanaged entries, writeable first, at the head of
1010 	 * the pv list.
1011 	 */
1012 	if (__predict_true(!PV_IS_KENTRY_P(flags))) {
1013 		while (*pvp != NULL && PV_IS_KENTRY_P((*pvp)->pv_flags))
1014 			pvp = &SLIST_NEXT(*pvp, pv_link);
1015 	}
1016 	if (!PV_IS_WRITE_P(flags)) {
1017 		while (*pvp != NULL && PV_IS_WRITE_P((*pvp)->pv_flags))
1018 			pvp = &SLIST_NEXT(*pvp, pv_link);
1019 	}
1020 #endif
1021 	SLIST_NEXT(pv, pv_link) = *pvp;		/* add to ... */
1022 	*pvp = pv;				/* ... locked list */
1023 	md->pvh_attrs |= flags & (PVF_REF | PVF_MOD);
1024 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
1025 	if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE)
1026 		md->pvh_attrs |= PVF_KMOD;
1027 	if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
1028 		md->pvh_attrs |= PVF_DIRTY;
1029 	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1030 #endif
1031 	if (pm == pmap_kernel()) {
1032 		PMAPCOUNT(kernel_mappings);
1033 		if (flags & PVF_WRITE)
1034 			md->krw_mappings++;
1035 		else
1036 			md->kro_mappings++;
1037 	} else {
1038 		if (flags & PVF_WRITE)
1039 			md->urw_mappings++;
1040 		else
1041 			md->uro_mappings++;
1042 	}
1043 
1044 #ifdef PMAP_CACHE_VIPT
1045 #ifndef ARM_MMU_EXTENDED
1046 	/*
1047 	 * Even though pmap_vac_me_harder will set PVF_WRITE for us,
1048 	 * do it here as well to keep the mappings & PVF_WRITE consistent.
1049 	 */
1050 	if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) {
1051 		md->pvh_attrs |= PVF_WRITE;
1052 	}
1053 #endif
1054 	/*
1055 	 * If this is an exec mapping and it's the first exec mapping
1056 	 * for this page, make sure to sync the I-cache.
1057 	 */
1058 	if (PV_IS_EXEC_P(flags)) {
1059 #ifndef ARM_MMU_EXTENDED
1060 		if (!PV_IS_EXEC_P(md->pvh_attrs)) {
1061 			pmap_syncicache_page(md, pa);
1062 			PMAPCOUNT(exec_synced_map);
1063 		}
1064 #endif
1065 		PMAPCOUNT(exec_mappings);
1066 	}
1067 #endif
1068 
1069 	PMAPCOUNT(mappings);
1070 
1071 	if (pv->pv_flags & PVF_WIRED)
1072 		++pm->pm_stats.wired_count;
1073 }
1074 
1075 /*
1076  *
1077  * pmap_find_pv: Find a pv entry
1078  *
1079  * => caller should hold lock on vm_page
1080  */
1081 static inline struct pv_entry *
1082 pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va)
1083 {
1084 	struct pv_entry *pv;
1085 
1086 	SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1087 		if (pm == pv->pv_pmap && va == pv->pv_va)
1088 			break;
1089 	}
1090 
1091 	return (pv);
1092 }
1093 
1094 /*
1095  * pmap_remove_pv: try to remove a mapping from a pv_list
1096  *
1097  * => caller should hold proper lock on pmap_main_lock
1098  * => pmap should be locked
1099  * => caller should hold lock on vm_page [so that attrs can be adjusted]
1100  * => caller should adjust ptp's wire_count and free PTP if needed
1101  * => caller should NOT adjust pmap's wire_count
1102  * => we return the removed pv
1103  */
1104 static struct pv_entry *
1105 pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1106 {
1107 	struct pv_entry *pv, **prevptr;
1108 
1109 	NPDEBUG(PDB_PVDUMP,
1110 	    printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va));
1111 
1112 	prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */
1113 	pv = *prevptr;
1114 
1115 	while (pv) {
1116 		if (pv->pv_pmap == pm && pv->pv_va == va) {	/* match? */
1117 			NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md "
1118 			    "%p, flags 0x%x\n", pm, md, pv->pv_flags));
1119 			if (pv->pv_flags & PVF_WIRED) {
1120 				--pm->pm_stats.wired_count;
1121 			}
1122 			*prevptr = SLIST_NEXT(pv, pv_link);	/* remove it! */
1123 			if (pm == pmap_kernel()) {
1124 				PMAPCOUNT(kernel_unmappings);
1125 				if (pv->pv_flags & PVF_WRITE)
1126 					md->krw_mappings--;
1127 				else
1128 					md->kro_mappings--;
1129 			} else {
1130 				if (pv->pv_flags & PVF_WRITE)
1131 					md->urw_mappings--;
1132 				else
1133 					md->uro_mappings--;
1134 			}
1135 
1136 			PMAPCOUNT(unmappings);
1137 #ifdef PMAP_CACHE_VIPT
1138 			if (!(pv->pv_flags & PVF_WRITE))
1139 				break;
1140 			/*
1141 			 * If this page has had an exec mapping, then if
1142 			 * this was the last mapping, discard the contents,
1143 			 * otherwise sync the i-cache for this page.
1144 			 */
1145 			if (PV_IS_EXEC_P(md->pvh_attrs)) {
1146 #ifdef ARM_MMU_EXTENDED
1147 				md->pvh_attrs &= ~PVF_EXEC;
1148 				PMAPCOUNT(exec_discarded_unmap);
1149 #else
1150 				if (SLIST_EMPTY(&md->pvh_list)) {
1151 					md->pvh_attrs &= ~PVF_EXEC;
1152 					PMAPCOUNT(exec_discarded_unmap);
1153 				} else {
1154 					pmap_syncicache_page(md, pa);
1155 					PMAPCOUNT(exec_synced_unmap);
1156 				}
1157 #endif /* ARM_MMU_EXTENDED */
1158 			}
1159 #endif /* PMAP_CACHE_VIPT */
1160 			break;
1161 		}
1162 		prevptr = &SLIST_NEXT(pv, pv_link);	/* previous pointer */
1163 		pv = *prevptr;				/* advance */
1164 	}
1165 
1166 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
1167 	/*
1168 	 * If we no longer have a WRITEABLE KENTRY at the head of list,
1169 	 * clear the KMOD attribute from the page.
1170 	 */
1171 	if (SLIST_FIRST(&md->pvh_list) == NULL
1172 	    || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE)
1173 		md->pvh_attrs &= ~PVF_KMOD;
1174 
1175 	/*
1176 	 * If this was a writeable page and there are no more writeable
1177 	 * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
1178 	 * the contents to memory.
1179 	 */
1180 	if (arm_cache_prefer_mask != 0) {
1181 		if (md->krw_mappings + md->urw_mappings == 0)
1182 			md->pvh_attrs &= ~PVF_WRITE;
1183 		PMAP_VALIDATE_MD_PAGE(md);
1184 	}
1185 	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1186 #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */
1187 
1188 	return(pv);				/* return removed pv */
1189 }
1190 
1191 /*
1192  *
1193  * pmap_modify_pv: Update pv flags
1194  *
1195  * => caller should hold lock on vm_page [so that attrs can be adjusted]
1196  * => caller should NOT adjust pmap's wire_count
1197  * => caller must call pmap_vac_me_harder() if writable status of a page
1198  *    may have changed.
1199  * => we return the old flags
1200  *
1201  * Modify a physical-virtual mapping in the pv table
1202  */
1203 static u_int
1204 pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va,
1205     u_int clr_mask, u_int set_mask)
1206 {
1207 	struct pv_entry *npv;
1208 	u_int flags, oflags;
1209 
1210 	KASSERT(!PV_IS_KENTRY_P(clr_mask));
1211 	KASSERT(!PV_IS_KENTRY_P(set_mask));
1212 
1213 	if ((npv = pmap_find_pv(md, pm, va)) == NULL)
1214 		return (0);
1215 
1216 	NPDEBUG(PDB_PVDUMP,
1217 	    printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags));
1218 
1219 	/*
1220 	 * There is at least one VA mapping this page.
1221 	 */
1222 
1223 	if (clr_mask & (PVF_REF | PVF_MOD)) {
1224 		md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
1225 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
1226 		if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
1227 			md->pvh_attrs |= PVF_DIRTY;
1228 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1229 #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */
1230 	}
1231 
1232 	oflags = npv->pv_flags;
1233 	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
1234 
1235 	if ((flags ^ oflags) & PVF_WIRED) {
1236 		if (flags & PVF_WIRED)
1237 			++pm->pm_stats.wired_count;
1238 		else
1239 			--pm->pm_stats.wired_count;
1240 	}
1241 
1242 	if ((flags ^ oflags) & PVF_WRITE) {
1243 		if (pm == pmap_kernel()) {
1244 			if (flags & PVF_WRITE) {
1245 				md->krw_mappings++;
1246 				md->kro_mappings--;
1247 			} else {
1248 				md->kro_mappings++;
1249 				md->krw_mappings--;
1250 			}
1251 		} else {
1252 			if (flags & PVF_WRITE) {
1253 				md->urw_mappings++;
1254 				md->uro_mappings--;
1255 			} else {
1256 				md->uro_mappings++;
1257 				md->urw_mappings--;
1258 			}
1259 		}
1260 	}
1261 #ifdef PMAP_CACHE_VIPT
1262 	if (arm_cache_prefer_mask != 0) {
1263 		if (md->urw_mappings + md->krw_mappings == 0) {
1264 			md->pvh_attrs &= ~PVF_WRITE;
1265 		} else {
1266 			md->pvh_attrs |= PVF_WRITE;
1267 		}
1268 	}
1269 #ifndef ARM_MMU_EXTENDED
1270 	/*
1271 	 * We have two cases here: the first is from enter_pv (new exec
1272 	 * page), the second is a combined pmap_remove_pv/pmap_enter_pv.
1273 	 * Since in the latter pmap_enter_pv won't do anything, we just have
1274 	 * to do what pmap_remove_pv would do.
1275 	 */
1276 	if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs))
1277 	    || (PV_IS_EXEC_P(md->pvh_attrs)
1278 		|| (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) {
1279 		pmap_syncicache_page(md, pa);
1280 		PMAPCOUNT(exec_synced_remap);
1281 	}
1282 	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1283 #endif /* !ARM_MMU_EXTENDED */
1284 #endif /* PMAP_CACHE_VIPT */
1285 
1286 	PMAPCOUNT(remappings);
1287 
1288 	return (oflags);
1289 }
1290 
1291 /*
1292  * Allocate an L1 translation table for the specified pmap.
1293  * This is called at pmap creation time.
1294  */
1295 static void
1296 pmap_alloc_l1(pmap_t pm)
1297 {
1298 #ifdef ARM_MMU_EXTENDED
1299 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
1300 	struct vm_page *pg;
1301 	bool ok __diagused;
1302 	for (;;) {
1303 #ifdef PMAP_NEED_ALLOC_POOLPAGE
1304 		pg = arm_pmap_alloc_poolpage(UVM_PGA_ZERO);
1305 #else
1306 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
1307 #endif
1308 		if (pg != NULL)
1309 			break;
1310 		uvm_wait("pmapl1alloc");
1311 	}
1312 	pm->pm_l1_pa = VM_PAGE_TO_PHYS(pg);
1313 	vaddr_t va = pmap_direct_mapped_phys(pm->pm_l1_pa, &ok, 0);
1314 	KASSERT(ok);
1315 	KASSERT(va >= KERNEL_BASE);
1316 
1317 #else
1318 	KASSERTMSG(kernel_map != NULL, "pm %p", pm);
1319 	vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
1320 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
1321 	KASSERT(va);
1322 	pmap_extract(pmap_kernel(), va, &pm->pm_l1_pa);
1323 #endif
1324 	pm->pm_l1 = (pd_entry_t *)va;
1325 	PTE_SYNC_RANGE(pm->pm_l1, PAGE_SIZE / sizeof(pt_entry_t));
1326 #else
1327 	struct l1_ttable *l1;
1328 	uint8_t domain;
1329 
1330 	/*
1331 	 * Remove the L1 at the head of the LRU list
1332 	 */
1333 	mutex_spin_enter(&l1_lru_lock);
1334 	l1 = TAILQ_FIRST(&l1_lru_list);
1335 	KDASSERT(l1 != NULL);
1336 	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1337 
1338 	/*
1339 	 * Pick the first available domain number, and update
1340 	 * the link to the next number.
1341 	 */
1342 	domain = l1->l1_domain_first;
1343 	l1->l1_domain_first = l1->l1_domain_free[domain];
1344 
1345 	/*
1346 	 * If there are still free domain numbers in this L1,
1347 	 * put it back on the TAIL of the LRU list.
1348 	 */
1349 	if (++l1->l1_domain_use_count < PMAP_DOMAINS)
1350 		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1351 
1352 	mutex_spin_exit(&l1_lru_lock);
1353 
1354 	/*
1355 	 * Fix up the relevant bits in the pmap structure
1356 	 */
1357 	pm->pm_l1 = l1;
1358 	pm->pm_domain = domain + 1;
1359 #endif
1360 }
1361 
1362 /*
1363  * Free an L1 translation table.
1364  * This is called at pmap destruction time.
1365  */
1366 static void
1367 pmap_free_l1(pmap_t pm)
1368 {
1369 #ifdef ARM_MMU_EXTENDED
1370 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
1371 	struct vm_page *pg = PHYS_TO_VM_PAGE(pm->pm_l1_pa);
1372 	uvm_pagefree(pg);
1373 #else
1374 	uvm_km_free(kernel_map, (vaddr_t)pm->pm_l1, PAGE_SIZE, UVM_KMF_WIRED);
1375 #endif
1376 	pm->pm_l1 = NULL;
1377 	pm->pm_l1_pa = 0;
1378 #else
1379 	struct l1_ttable *l1 = pm->pm_l1;
1380 
1381 	mutex_spin_enter(&l1_lru_lock);
1382 
1383 	/*
1384 	 * If this L1 is currently on the LRU list, remove it.
1385 	 */
1386 	if (l1->l1_domain_use_count < PMAP_DOMAINS)
1387 		TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1388 
1389 	/*
1390 	 * Free up the domain number which was allocated to the pmap
1391 	 */
1392 	l1->l1_domain_free[pmap_domain(pm) - 1] = l1->l1_domain_first;
1393 	l1->l1_domain_first = pmap_domain(pm) - 1;
1394 	l1->l1_domain_use_count--;
1395 
1396 	/*
1397 	 * The L1 now must have at least 1 free domain, so add
1398 	 * it back to the LRU list. If the use count is zero,
1399 	 * put it at the head of the list, otherwise it goes
1400 	 * to the tail.
1401 	 */
1402 	if (l1->l1_domain_use_count == 0)
1403 		TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
1404 	else
1405 		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1406 
1407 	mutex_spin_exit(&l1_lru_lock);
1408 #endif /* ARM_MMU_EXTENDED */
1409 }
1410 
1411 #ifndef ARM_MMU_EXTENDED
1412 static inline void
1413 pmap_use_l1(pmap_t pm)
1414 {
1415 	struct l1_ttable *l1;
1416 
1417 	/*
1418 	 * Do nothing if we're in interrupt context.
1419 	 * Access to an L1 by the kernel pmap must not affect
1420 	 * the LRU list.
1421 	 */
1422 	if (cpu_intr_p() || pm == pmap_kernel())
1423 		return;
1424 
1425 	l1 = pm->pm_l1;
1426 
1427 	/*
1428 	 * If the L1 is not currently on the LRU list, just return
1429 	 */
1430 	if (l1->l1_domain_use_count == PMAP_DOMAINS)
1431 		return;
1432 
1433 	mutex_spin_enter(&l1_lru_lock);
1434 
1435 	/*
1436 	 * Check the use count again, now that we've acquired the lock
1437 	 */
1438 	if (l1->l1_domain_use_count == PMAP_DOMAINS) {
1439 		mutex_spin_exit(&l1_lru_lock);
1440 		return;
1441 	}
1442 
1443 	/*
1444 	 * Move the L1 to the back of the LRU list
1445 	 */
1446 	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1447 	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1448 
1449 	mutex_spin_exit(&l1_lru_lock);
1450 }
1451 #endif /* !ARM_MMU_EXTENDED */
1452 
1453 /*
1454  * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *)
1455  *
1456  * Free an L2 descriptor table.
1457  */
1458 static inline void
1459 #if defined(PMAP_INCLUDE_PTE_SYNC) && defined(PMAP_CACHE_VIVT)
1460 pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa)
1461 #else
1462 pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
1463 #endif
1464 {
1465 #if defined(PMAP_INCLUDE_PTE_SYNC) && defined(PMAP_CACHE_VIVT)
1466 	/*
1467 	 * Note: With a write-back cache, we may need to sync this
1468 	 * L2 table before re-using it.
1469 	 * This is because it may have belonged to a non-current
1470 	 * pmap, in which case the cache syncs would have been
1471 	 * skipped for the pages that were being unmapped. If the
1472 	 * L2 table were then to be immediately re-allocated to
1473 	 * the *current* pmap, it may well contain stale mappings
1474 	 * which have not yet been cleared by a cache write-back
1475 	 * and so would still be visible to the mmu.
1476 	 */
1477 	if (need_sync)
1478 		PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
1479 #endif /* PMAP_INCLUDE_PTE_SYNC && PMAP_CACHE_VIVT */
1480 	pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa);
1481 }
1482 
1483 /*
1484  * Returns a pointer to the L2 bucket associated with the specified pmap
1485  * and VA, or NULL if no L2 bucket exists for the address.
1486  */
1487 static inline struct l2_bucket *
1488 pmap_get_l2_bucket(pmap_t pm, vaddr_t va)
1489 {
1490 	const size_t l1slot = l1pte_index(va);
1491 	struct l2_dtable *l2;
1492 	struct l2_bucket *l2b;
1493 
1494 	if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL ||
1495 	    (l2b = &l2->l2_bucket[L2_BUCKET(l1slot)])->l2b_kva == NULL)
1496 		return (NULL);
1497 
1498 	return (l2b);
1499 }
1500 
1501 /*
1502  * Returns a pointer to the L2 bucket associated with the specified pmap
1503  * and VA.
1504  *
1505  * If no L2 bucket exists, perform the necessary allocations to put an L2
1506  * bucket/page table in place.
1507  *
1508  * Note that if a new L2 bucket/page was allocated, the caller *must*
1509  * increment the bucket occupancy counter appropriately *before*
1510  * releasing the pmap's lock to ensure no other thread or cpu deallocates
1511  * the bucket/page in the meantime.
1512  */
1513 static struct l2_bucket *
1514 pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va)
1515 {
1516 	const size_t l1slot = l1pte_index(va);
1517 	struct l2_dtable *l2;
1518 
1519 	if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL) {
1520 		/*
1521 		 * No mapping at this address, as there is
1522 		 * no entry in the L1 table.
1523 		 * Need to allocate a new l2_dtable.
1524 		 */
1525 		if ((l2 = pmap_alloc_l2_dtable()) == NULL)
1526 			return (NULL);
1527 
1528 		/*
1529 		 * Link it into the parent pmap
1530 		 */
1531 		pm->pm_l2[L2_IDX(l1slot)] = l2;
1532 	}
1533 
1534 	struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)];
1535 
1536 	/*
1537 	 * Fetch pointer to the L2 page table associated with the address.
1538 	 */
1539 	if (l2b->l2b_kva == NULL) {
1540 		pt_entry_t *ptep;
1541 
1542 		/*
1543 		 * No L2 page table has been allocated. Chances are, this
1544 		 * is because we just allocated the l2_dtable, above.
1545 		 */
1546 		if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_pa)) == NULL) {
1547 			/*
1548 			 * Oops, no more L2 page tables available at this
1549 			 * time. We may need to deallocate the l2_dtable
1550 			 * if we allocated a new one above.
1551 			 */
1552 			if (l2->l2_occupancy == 0) {
1553 				pm->pm_l2[L2_IDX(l1slot)] = NULL;
1554 				pmap_free_l2_dtable(l2);
1555 			}
1556 			return (NULL);
1557 		}
1558 
1559 		l2->l2_occupancy++;
1560 		l2b->l2b_kva = ptep;
1561 		l2b->l2b_l1slot = l1slot;
1562 
1563 #ifdef ARM_MMU_EXTENDED
1564 		/*
1565 		 * We know there will be a mapping here, so simply
1566 		 * enter this PTP into the L1 now.
1567 		 */
1568 		pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot;
1569 		pd_entry_t npde = L1_C_PROTO | l2b->l2b_pa
1570 		    | L1_C_DOM(pmap_domain(pm));
1571 		KASSERT(*pdep == 0);
1572 		l1pte_setone(pdep, npde);
1573 		PDE_SYNC(pdep);
1574 #endif
1575 	}
1576 
1577 	return (l2b);
1578 }
1579 
1580 /*
1581  * One or more mappings in the specified L2 descriptor table have just been
1582  * invalidated.
1583  *
1584  * Garbage collect the metadata and descriptor table itself if necessary.
1585  *
1586  * The pmap lock must be acquired when this is called (not necessary
1587  * for the kernel pmap).
1588  */
1589 static void
1590 pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
1591 {
1592 	KDASSERT(count <= l2b->l2b_occupancy);
1593 
1594 	/*
1595 	 * Update the bucket's reference count according to how many
1596 	 * PTEs the caller has just invalidated.
1597 	 */
1598 	l2b->l2b_occupancy -= count;
1599 
1600 	/*
1601 	 * Note:
1602 	 *
1603 	 * Level 2 page tables allocated to the kernel pmap are never freed
1604 	 * as that would require checking all Level 1 page tables and
1605 	 * removing any references to the Level 2 page table. See also the
1606 	 * comment elsewhere about never freeing bootstrap L2 descriptors.
1607 	 *
1608 	 * We make do with just invalidating the mapping in the L2 table.
1609 	 *
1610 	 * This isn't really a big deal in practice and, in fact, leads
1611 	 * to a performance win over time as we don't need to continually
1612 	 * alloc/free.
1613 	 */
1614 	if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
1615 		return;
1616 
1617 	/*
1618 	 * There are no more valid mappings in this level 2 page table.
1619 	 * Go ahead and NULL-out the pointer in the bucket, then
1620 	 * free the page table.
1621 	 */
1622 	const size_t l1slot = l2b->l2b_l1slot;
1623 	pt_entry_t * const ptep = l2b->l2b_kva;
1624 	l2b->l2b_kva = NULL;
1625 
1626 	pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot;
1627 	pd_entry_t pde __diagused = *pdep;
1628 
1629 #ifdef ARM_MMU_EXTENDED
1630 	/*
1631 	 * Invalidate the L1 slot.
1632 	 */
1633 	KASSERT((pde & L1_TYPE_MASK) == L1_TYPE_C);
1634 #else
1635 	/*
1636 	 * If the L1 slot matches the pmap's domain number, then invalidate it.
1637 	 */
1638 	if ((pde & (L1_C_DOM_MASK|L1_TYPE_MASK))
1639 	    == (L1_C_DOM(pmap_domain(pm))|L1_TYPE_C)) {
1640 #endif
1641 		l1pte_setone(pdep, 0);
1642 		PDE_SYNC(pdep);
1643 #ifndef ARM_MMU_EXTENDED
1644 	}
1645 #endif
1646 
1647 	/*
1648 	 * Release the L2 descriptor table back to the pool cache.
1649 	 */
1650 #if defined(PMAP_INCLUDE_PTE_SYNC) && defined(PMAP_CACHE_VIVT)
1651 	pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_pa);
1652 #else
1653 	pmap_free_l2_ptp(ptep, l2b->l2b_pa);
1654 #endif
1655 
1656 	/*
1657 	 * Update the reference count in the associated l2_dtable
1658 	 */
1659 	struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)];
1660 	if (--l2->l2_occupancy > 0)
1661 		return;
1662 
1663 	/*
1664 	 * There are no more valid mappings in any of the Level 1
1665 	 * slots managed by this l2_dtable. Go ahead and NULL-out
1666 	 * the pointer in the parent pmap and free the l2_dtable.
1667 	 */
1668 	pm->pm_l2[L2_IDX(l1slot)] = NULL;
1669 	pmap_free_l2_dtable(l2);
1670 }
1671 
1672 /*
1673  * Pool cache constructors for L2 descriptor tables, metadata and pmap
1674  * structures.
1675  */
1676 static int
1677 pmap_l2ptp_ctor(void *arg, void *v, int flags)
1678 {
1679 #ifndef PMAP_INCLUDE_PTE_SYNC
1680 	vaddr_t va = (vaddr_t)v & ~PGOFSET;
1681 
1682 	/*
1683 	 * The mappings for these page tables were initially made using
1684 	 * pmap_kenter_pa() by the pool subsystem. Therefore, the cache-
1685 	 * mode will not be right for page table mappings. To avoid
1686 	 * polluting the pmap_kenter_pa() code with a special case for
1687 	 * page tables, we simply fix up the cache-mode here if it's not
1688 	 * correct.
1689 	 */
1690 	if (pte_l2_s_cache_mode != pte_l2_s_cache_mode_pt) {
1691 		const struct l2_bucket * const l2b =
1692 		    pmap_get_l2_bucket(pmap_kernel(), va);
1693 		KASSERTMSG(l2b != NULL, "%#lx", va);
1694 		pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
1695 		const pt_entry_t opte = *ptep;
1696 
1697 		if ((opte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
1698 			/*
1699 			 * Page tables must have the cache-mode set correctly.
1700 			 */
1701 			const pt_entry_t npte = (opte & ~L2_S_CACHE_MASK)
1702 			    | pte_l2_s_cache_mode_pt;
1703 			l2pte_set(ptep, npte, opte);
1704 			PTE_SYNC(ptep);
1705 			cpu_tlb_flushD_SE(va);
1706 			cpu_cpwait();
1707 		}
1708 	}
1709 #endif
1710 
1711 	memset(v, 0, L2_TABLE_SIZE_REAL);
1712 	PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
1713 	return (0);
1714 }
1715 
1716 static int
1717 pmap_l2dtable_ctor(void *arg, void *v, int flags)
1718 {
1719 
1720 	memset(v, 0, sizeof(struct l2_dtable));
1721 	return (0);
1722 }
1723 
1724 static int
1725 pmap_pmap_ctor(void *arg, void *v, int flags)
1726 {
1727 
1728 	memset(v, 0, sizeof(struct pmap));
1729 	return (0);
1730 }
1731 
1732 static void
1733 pmap_pinit(pmap_t pm)
1734 {
1735 #ifndef ARM_HAS_VBAR
1736 	struct l2_bucket *l2b;
1737 
1738 	if (vector_page < KERNEL_BASE) {
1739 		/*
1740 		 * Map the vector page.
1741 		 */
1742 		pmap_enter(pm, vector_page, systempage.pv_pa,
1743 		    VM_PROT_READ | VM_PROT_EXECUTE,
1744 		    VM_PROT_READ | VM_PROT_EXECUTE | PMAP_WIRED);
1745 		pmap_update(pm);
1746 
1747 		pm->pm_pl1vec = pmap_l1_kva(pm) + l1pte_index(vector_page);
1748 		l2b = pmap_get_l2_bucket(pm, vector_page);
1749 		KASSERTMSG(l2b != NULL, "%#lx", vector_page);
1750 		pm->pm_l1vec = l2b->l2b_pa | L1_C_PROTO |
1751 		    L1_C_DOM(pmap_domain(pm));
1752 	} else
1753 		pm->pm_pl1vec = NULL;
1754 #endif
1755 }
1756 
1757 #ifdef PMAP_CACHE_VIVT
1758 /*
1759  * Since we have a virtually indexed cache, we may need to inhibit caching if
1760  * there is more than one mapping and at least one of them is writable.
1761  * Since we purge the cache on every context switch, we only need to check for
1762  * other mappings within the same pmap, or kernel_pmap.
1763  * This function is also called when a page is unmapped, to possibly reenable
1764  * caching on any remaining mappings.
1765  *
1766  * The code implements the following logic, where:
1767  *
1768  * KW = # of kernel read/write pages
1769  * KR = # of kernel read only pages
1770  * UW = # of user read/write pages
1771  * UR = # of user read only pages
1772  *
1773  * KC = kernel mapping is cacheable
1774  * UC = user mapping is cacheable
1775  *
1776  *               KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
1777  *             +---------------------------------------------
1778  * UW=0,UR=0   | ---        KC=1       KC=1       KC=0
1779  * UW=0,UR>0   | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
1780  * UW=1,UR=0   | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
1781  * UW>1,UR>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
1782  */
1783 
1784 static const int pmap_vac_flags[4][4] = {
1785 	{-1,		0,		0,		PVF_KNC},
1786 	{0,		0,		PVF_NC,		PVF_NC},
1787 	{0,		PVF_NC,		PVF_NC,		PVF_NC},
1788 	{PVF_UNC,	PVF_NC,		PVF_NC,		PVF_NC}
1789 };
1790 
1791 static inline int
1792 pmap_get_vac_flags(const struct vm_page_md *md)
1793 {
1794 	int kidx, uidx;
1795 
1796 	kidx = 0;
1797 	if (md->kro_mappings || md->krw_mappings > 1)
1798 		kidx |= 1;
1799 	if (md->krw_mappings)
1800 		kidx |= 2;
1801 
1802 	uidx = 0;
1803 	if (md->uro_mappings || md->urw_mappings > 1)
1804 		uidx |= 1;
1805 	if (md->urw_mappings)
1806 		uidx |= 2;
1807 
1808 	return (pmap_vac_flags[uidx][kidx]);
1809 }
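
/*
 * Worked example (added for clarity, not in the original): with one
 * kernel read/write mapping (krw_mappings == 1, kro_mappings == 0) and
 * one user read-only mapping (uro_mappings == 1, urw_mappings == 0),
 * kidx == 2 and uidx == 1, so pmap_vac_flags[1][2] == PVF_NC and both
 * mappings are made non-cacheable, matching the KW=1,KR=0 / UW=0,UR>0
 * entry in the table above.  The -1 entry ([0][0]) means "no mappings",
 * which pmap_vac_me_harder() treats as "clear PVF_NC".
 */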
1810 
1811 static inline void
1812 pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1813 {
1814 	int nattr;
1815 
1816 	nattr = pmap_get_vac_flags(md);
1817 
1818 	if (nattr < 0) {
1819 		md->pvh_attrs &= ~PVF_NC;
1820 		return;
1821 	}
1822 
1823 	if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0)
1824 		return;
1825 
1826 	if (pm == pmap_kernel())
1827 		pmap_vac_me_kpmap(md, pa, pm, va);
1828 	else
1829 		pmap_vac_me_user(md, pa, pm, va);
1830 
1831 	md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr;
1832 }
1833 
1834 static void
1835 pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1836 {
1837 	u_int u_cacheable, u_entries;
1838 	struct pv_entry *pv;
1839 	pmap_t last_pmap = pm;
1840 
1841 	/*
1842 	 * Pass one, see if there are both kernel and user pmaps for
1843 	 * this page.  Calculate whether there are user-writable or
1844 	 * kernel-writable pages.
1845 	 */
1846 	u_cacheable = 0;
1847 	SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1848 		if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
1849 			u_cacheable++;
1850 	}
1851 
1852 	u_entries = md->urw_mappings + md->uro_mappings;
1853 
1854 	/*
1855 	 * We know we have just been updating a kernel entry, so if
1856 	 * all user pages are already cacheable, then there is nothing
1857 	 * further to do.
1858 	 */
1859 	if (md->k_mappings == 0 && u_cacheable == u_entries)
1860 		return;
1861 
1862 	if (u_entries) {
1863 		/*
1864 		 * Scan over the list again; for each entry that
1865 		 * might not be set correctly, call pmap_vac_me_user
1866 		 * to recalculate the settings.
1867 		 */
1868 		SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1869 			/*
1870 			 * We know kernel mappings will get set
1871 			 * correctly in other calls.  We also know
1872 			 * that if the pmap is the same as last_pmap
1873 			 * then we've just handled this entry.
1874 			 */
1875 			if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
1876 				continue;
1877 
1878 			/*
1879 			 * If there are kernel entries and this page
1880 			 * is writable but non-cacheable, then we can
1881 			 * skip this entry also.
1882 			 */
1883 			if (md->k_mappings &&
1884 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
1885 			    (PVF_NC | PVF_WRITE))
1886 				continue;
1887 
1888 			/*
1889 			 * Similarly if there are no kernel-writable
1890 			 * entries and the page is already
1891 			 * read-only/cacheable.
1892 			 */
1893 			if (md->krw_mappings == 0 &&
1894 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
1895 				continue;
1896 
1897 			/*
1898 			 * For some of the remaining cases, we know
1899 			 * that we must recalculate, but for others we
1900 			 * can't tell if they are correct or not, so
1901 			 * we recalculate anyway.
1902 			 */
1903 			pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0);
1904 		}
1905 
1906 		if (md->k_mappings == 0)
1907 			return;
1908 	}
1909 
1910 	pmap_vac_me_user(md, pa, pm, va);
1911 }
1912 
1913 static void
1914 pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1915 {
1916 	pmap_t kpmap = pmap_kernel();
1917 	struct pv_entry *pv, *npv = NULL;
1918 	u_int entries = 0;
1919 	u_int writable = 0;
1920 	u_int cacheable_entries = 0;
1921 	u_int kern_cacheable = 0;
1922 	u_int other_writable = 0;
1923 
1924 	/*
1925 	 * Count mappings and writable mappings in this pmap.
1926 	 * Include kernel mappings as part of our own.
1927 	 * Keep a pointer to the first one.
1928 	 */
1929 	npv = NULL;
1930 	KASSERT(pmap_page_locked_p(md));
1931 	SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1932 		/* Count mappings in the same pmap */
1933 		if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
1934 			if (entries++ == 0)
1935 				npv = pv;
1936 
1937 			/* Cacheable mappings */
1938 			if ((pv->pv_flags & PVF_NC) == 0) {
1939 				cacheable_entries++;
1940 				if (kpmap == pv->pv_pmap)
1941 					kern_cacheable++;
1942 			}
1943 
1944 			/* Writable mappings */
1945 			if (pv->pv_flags & PVF_WRITE)
1946 				++writable;
1947 		} else
1948 		if (pv->pv_flags & PVF_WRITE)
1949 			other_writable = 1;
1950 	}
1951 
1952 	/*
1953 	 * Enable or disable caching as necessary.
1954 	 * Note: the first entry might be part of the kernel pmap,
1955 	 * so we can't assume this is indicative of the state of the
1956 	 * other (maybe non-kpmap) entries.
1957 	 */
1958 	if ((entries > 1 && writable) ||
1959 	    (entries > 0 && pm == kpmap && other_writable)) {
1960 		if (cacheable_entries == 0) {
1961 			return;
1962 		}
1963 
1964 		for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
1965 			if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
1966 			    (pv->pv_flags & PVF_NC))
1967 				continue;
1968 
1969 			pv->pv_flags |= PVF_NC;
1970 
1971 			struct l2_bucket * const l2b
1972 			    = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
1973 			KASSERTMSG(l2b != NULL, "%#lx", va);
1974 			pt_entry_t * const ptep
1975 			    = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
1976 			const pt_entry_t opte = *ptep;
1977 			pt_entry_t npte = opte & ~L2_S_CACHE_MASK;
1978 
1979 			if ((va != pv->pv_va || pm != pv->pv_pmap)
1980 			    && l2pte_valid_p(npte)) {
1981 #ifdef PMAP_CACHE_VIVT
1982 				pmap_cache_wbinv_page(pv->pv_pmap, pv->pv_va,
1983 				    true, pv->pv_flags);
1984 #endif
1985 				pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va,
1986 				    pv->pv_flags);
1987 			}
1988 
1989 			l2pte_set(ptep, npte, opte);
1990 			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
1991 		}
1992 		cpu_cpwait();
1993 	} else
1994 	if (entries > cacheable_entries) {
1995 		/*
1996 		 * Turn caching back on for some pages.  If it is a kernel
1997 		 * page, only do so if there are no other writable pages.
1998 		 */
1999 		for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
2000 			if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
2001 			    (kpmap != pv->pv_pmap || other_writable)))
2002 				continue;
2003 
2004 			pv->pv_flags &= ~PVF_NC;
2005 
2006 			struct l2_bucket * const l2b
2007 			    = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
2008 			KASSERTMSG(l2b != NULL, "%#lx", va);
2009 			pt_entry_t * const ptep
2010 			    = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2011 			const pt_entry_t opte = *ptep;
2012 			pt_entry_t npte = (opte & ~L2_S_CACHE_MASK)
2013 			    | pte_l2_s_cache_mode;
2014 
2015 			if (l2pte_valid_p(opte)) {
2016 				pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va,
2017 				    pv->pv_flags);
2018 			}
2019 
2020 			l2pte_set(ptep, npte, opte);
2021 			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
2022 		}
2023 	}
2024 }
2025 #endif
2026 
2027 #ifdef PMAP_CACHE_VIPT
2028 static void
2029 pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
2030 {
2031 #ifndef ARM_MMU_EXTENDED
2032 	struct pv_entry *pv;
2033 	vaddr_t tst_mask;
2034 	bool bad_alias;
2035 	const u_int
2036 	    rw_mappings = md->urw_mappings + md->krw_mappings,
2037 	    ro_mappings = md->uro_mappings + md->kro_mappings;
2038 
2039 	/* do we need to do anything? */
2040 	if (arm_cache_prefer_mask == 0)
2041 		return;
2042 
2043 	NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n",
2044 	    md, pm, va));
2045 
2046 	KASSERT(!va || pm);
2047 	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2048 
2049 	/* Already a conflict? */
2050 	if (__predict_false(md->pvh_attrs & PVF_NC)) {
2051 		/* just an add, things are already non-cached */
2052 		KASSERT(!(md->pvh_attrs & PVF_DIRTY));
2053 		KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2054 		bad_alias = false;
2055 		if (va) {
2056 			PMAPCOUNT(vac_color_none);
2057 			bad_alias = true;
2058 			KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2059 			goto fixup;
2060 		}
2061 		pv = SLIST_FIRST(&md->pvh_list);
2062 		/* the list can't be empty; if it were, the page would be cacheable */
2063 		if (md->pvh_attrs & PVF_KMPAGE) {
2064 			tst_mask = md->pvh_attrs;
2065 		} else {
2066 			KASSERT(pv);
2067 			tst_mask = pv->pv_va;
2068 			pv = SLIST_NEXT(pv, pv_link);
2069 		}
2070 		/*
2071 		 * Only check for a bad alias if we have writable mappings.
2072 		 */
2073 		tst_mask &= arm_cache_prefer_mask;
2074 		if (rw_mappings > 0) {
2075 			for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) {
2076 				/* if there's a bad alias, stop checking. */
2077 				if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
2078 					bad_alias = true;
2079 			}
2080 			md->pvh_attrs |= PVF_WRITE;
2081 			if (!bad_alias)
2082 				md->pvh_attrs |= PVF_DIRTY;
2083 		} else {
2084 			/*
2085 			 * We have only read-only mappings.  Let's see if there
2086 			 * are multiple colors in use or if we mapped a KMPAGE.
2087 			 * If the latter, we have a bad alias.  If the former,
2088 			 * we need to remember that.
2089 			 */
2090 			for (; pv; pv = SLIST_NEXT(pv, pv_link)) {
2091 				if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) {
2092 					if (md->pvh_attrs & PVF_KMPAGE)
2093 						bad_alias = true;
2094 					break;
2095 				}
2096 			}
2097 			md->pvh_attrs &= ~PVF_WRITE;
2098 			/*
2099 			 * No KMPAGE and we exited early, so we must have
2100 			 * multiple color mappings.
2101 			 */
2102 			if (!bad_alias && pv != NULL)
2103 				md->pvh_attrs |= PVF_MULTCLR;
2104 		}
2105 
2106 		/* If no conflicting colors, set everything back to cached */
2107 		if (!bad_alias) {
2108 #ifdef DEBUG
2109 			if ((md->pvh_attrs & PVF_WRITE)
2110 			    || ro_mappings < 2) {
2111 				SLIST_FOREACH(pv, &md->pvh_list, pv_link)
2112 					KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
2113 			}
2114 #endif
2115 			md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
2116 			md->pvh_attrs |= tst_mask | PVF_COLORED;
2117 			/*
2118 			 * Restore DIRTY bit if page is modified
2119 			 */
2120 			if (md->pvh_attrs & PVF_DMOD)
2121 				md->pvh_attrs |= PVF_DIRTY;
2122 			PMAPCOUNT(vac_color_restore);
2123 		} else {
2124 			KASSERT(SLIST_FIRST(&md->pvh_list) != NULL);
2125 			KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL);
2126 		}
2127 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2128 		KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2129 	} else if (!va) {
2130 		KASSERT(pmap_is_page_colored_p(md));
2131 		KASSERT(!(md->pvh_attrs & PVF_WRITE)
2132 		    || (md->pvh_attrs & PVF_DIRTY));
2133 		if (rw_mappings == 0) {
2134 			md->pvh_attrs &= ~PVF_WRITE;
2135 			if (ro_mappings == 1
2136 			    && (md->pvh_attrs & PVF_MULTCLR)) {
2137 				/*
2138 				 * If this is the last readonly mapping
2139 				 * but it doesn't match the current color
2140 				 * for the page, change the current color
2141 				 * to match this last readonly mapping.
2142 				 */
2143 				pv = SLIST_FIRST(&md->pvh_list);
2144 				tst_mask = (md->pvh_attrs ^ pv->pv_va)
2145 				    & arm_cache_prefer_mask;
2146 				if (tst_mask) {
2147 					md->pvh_attrs ^= tst_mask;
2148 					PMAPCOUNT(vac_color_change);
2149 				}
2150 			}
2151 		}
2152 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2153 		KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2154 		return;
2155 	} else if (!pmap_is_page_colored_p(md)) {
2156 		/* not colored so we just use its color */
2157 		KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY));
2158 		KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2159 		PMAPCOUNT(vac_color_new);
2160 		md->pvh_attrs &= PAGE_SIZE - 1;
2161 		md->pvh_attrs |= PVF_COLORED
2162 		    | (va & arm_cache_prefer_mask)
2163 		    | (rw_mappings > 0 ? PVF_WRITE : 0);
2164 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2165 		KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2166 		return;
2167 	} else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
2168 		bad_alias = false;
2169 		if (rw_mappings > 0) {
2170 			/*
2171 			 * We now have writeable mappings and if we have
2172 			 * readonly mappings in more than one color, we have
2173 			 * an aliasing problem.  Regardless, mark the page as
2174 			 * writeable.
2175 			 */
2176 			if (md->pvh_attrs & PVF_MULTCLR) {
2177 				if (ro_mappings < 2) {
2178 					/*
2179 					 * If we have fewer than two
2180 					 * read-only mappings, just flush the
2181 					 * non-primary colors from the cache.
2182 					 */
2183 					pmap_flush_page(md, pa,
2184 					    PMAP_FLUSH_SECONDARY);
2185 				} else {
2186 					bad_alias = true;
2187 				}
2188 			}
2189 			md->pvh_attrs |= PVF_WRITE;
2190 		}
2191 		/* If no conflicting colors, set everything back to cached */
2192 		if (!bad_alias) {
2193 #ifdef DEBUG
2194 			if (rw_mappings > 0
2195 			    || (md->pvh_attrs & PMAP_KMPAGE)) {
2196 				tst_mask = md->pvh_attrs & arm_cache_prefer_mask;
2197 				SLIST_FOREACH(pv, &md->pvh_list, pv_link)
2198 					KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
2199 			}
2200 #endif
2201 			if (SLIST_EMPTY(&md->pvh_list))
2202 				PMAPCOUNT(vac_color_reuse);
2203 			else
2204 				PMAPCOUNT(vac_color_ok);
2205 
2206 			/* matching color, just return */
2207 			KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2208 			KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2209 			return;
2210 		}
2211 		KASSERT(SLIST_FIRST(&md->pvh_list) != NULL);
2212 		KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL);
2213 
2214 		/* color conflict.  evict from cache. */
2215 
2216 		pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
2217 		md->pvh_attrs &= ~PVF_COLORED;
2218 		md->pvh_attrs |= PVF_NC;
2219 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2220 		KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2221 		PMAPCOUNT(vac_color_erase);
2222 	} else if (rw_mappings == 0
2223 		   && (md->pvh_attrs & PVF_KMPAGE) == 0) {
2224 		KASSERT((md->pvh_attrs & PVF_WRITE) == 0);
2225 
2226 		/*
2227 		 * If the page has dirty cache lines, clean it.
2228 		 */
2229 		if (md->pvh_attrs & PVF_DIRTY)
2230 			pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
2231 
2232 		/*
2233 		 * If this is the first remapping (we know that there are no
2234 		 * writeable mappings), then this is a simple color change.
2235 		 * Otherwise this is a secondary r/o mapping, which means
2236 		 * we don't have to do anything.
2237 		 */
2238 		if (ro_mappings == 1) {
2239 			KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0);
2240 			md->pvh_attrs &= PAGE_SIZE - 1;
2241 			md->pvh_attrs |= (va & arm_cache_prefer_mask);
2242 			PMAPCOUNT(vac_color_change);
2243 		} else {
2244 			PMAPCOUNT(vac_color_blind);
2245 		}
2246 		md->pvh_attrs |= PVF_MULTCLR;
2247 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2248 		KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2249 		return;
2250 	} else {
2251 		if (rw_mappings > 0)
2252 			md->pvh_attrs |= PVF_WRITE;
2253 
2254 		/* color conflict.  evict from cache. */
2255 		pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
2256 
2257 		/* the list can't be empty because this was an enter/modify */
2258 		pv = SLIST_FIRST(&md->pvh_list);
2259 		if ((md->pvh_attrs & PVF_KMPAGE) == 0) {
2260 			KASSERT(pv);
2261 			/*
2262 			 * If there's only one mapped page, change color to the
2263 			 * page's new color and return.  Restore the DIRTY bit
2264 			 * that was erased by pmap_flush_page.
2265 			 */
2266 			if (SLIST_NEXT(pv, pv_link) == NULL) {
2267 				md->pvh_attrs &= PAGE_SIZE - 1;
2268 				md->pvh_attrs |= (va & arm_cache_prefer_mask);
2269 				if (md->pvh_attrs & PVF_DMOD)
2270 					md->pvh_attrs |= PVF_DIRTY;
2271 				PMAPCOUNT(vac_color_change);
2272 				KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2273 				KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2274 				KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2275 				return;
2276 			}
2277 		}
2278 		bad_alias = true;
2279 		md->pvh_attrs &= ~PVF_COLORED;
2280 		md->pvh_attrs |= PVF_NC;
2281 		PMAPCOUNT(vac_color_erase);
2282 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2283 	}
2284 
2285   fixup:
2286 	KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2287 
2288 	/*
2289 	 * Turn caching on/off for all pages.
2290 	 */
2291 	SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
2292 		struct l2_bucket * const l2b = pmap_get_l2_bucket(pv->pv_pmap,
2293 		    pv->pv_va);
2294 		KASSERTMSG(l2b != NULL, "%#lx", va);
2295 		pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2296 		const pt_entry_t opte = *ptep;
2297 		pt_entry_t npte = opte & ~L2_S_CACHE_MASK;
2298 		if (bad_alias) {
2299 			pv->pv_flags |= PVF_NC;
2300 		} else {
2301 			pv->pv_flags &= ~PVF_NC;
2302 			npte |= pte_l2_s_cache_mode;
2303 		}
2304 
2305 		if (opte == npte)	/* only update if there's a change */
2306 			continue;
2307 
2308 		if (l2pte_valid_p(npte)) {
2309 			pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, pv->pv_flags);
2310 		}
2311 
2312 		l2pte_set(ptep, npte, opte);
2313 		PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
2314 	}
2315 #endif /* !ARM_MMU_EXTENDED */
2316 }
2317 #endif	/* PMAP_CACHE_VIPT */
2318 
2319 
2320 /*
2321  * Modify pte bits for all ptes corresponding to the given physical address.
2322  * We use `maskbits' rather than `clearbits' because we're always passing
2323  * constants and the latter would require an extra inversion at run-time.
2324  */
2325 static void
2326 pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits)
2327 {
2328 	struct pv_entry *pv;
2329 #ifdef PMAP_CACHE_VIPT
2330 	const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
2331 #ifdef ARM_MMU_EXTENDED
2332 	const u_int execbits = (maskbits & PVF_EXEC) ? L2_XS_XN : 0;
2333 #else
2334 	const u_int execbits = 0;
2335 	bool need_vac_me_harder = false;
2336 	bool need_syncicache = false;
2337 #endif
2338 #else
2339 	const u_int execbits = 0;
2340 #endif
2341 
2342 	NPDEBUG(PDB_BITS,
2343 	    printf("pmap_clearbit: md %p mask 0x%x\n",
2344 	    md, maskbits));
2345 
2346 #ifdef PMAP_CACHE_VIPT
2347 	/*
2348 	 * If we might want to sync the I-cache and we've modified it,
2349 	 * then we know we definitely need to sync or discard it.
2350 	 */
2351 	if (want_syncicache) {
2352 #ifdef ARM_MMU_EXTENDED
2353 		if (md->pvh_attrs & PVF_MOD)
2354 			md->pvh_attrs &= ~PVF_EXEC;
2355 #else
2356 		need_syncicache = md->pvh_attrs & PVF_MOD;
2357 #endif
2358 	}
2359 #endif
2360 	KASSERT(pmap_page_locked_p(md));
2361 
2362 	/*
2363 	 * Clear saved attributes (modify, reference)
2364 	 */
2365 	md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
2366 
2367 	if (SLIST_EMPTY(&md->pvh_list)) {
2368 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
2369 		if (need_syncicache) {
2370 			/*
2371 			 * No one has it mapped, so just discard it.  The next
2372 			 * exec remapping will cause it to be synced.
2373 			 */
2374 			md->pvh_attrs &= ~PVF_EXEC;
2375 			PMAPCOUNT(exec_discarded_clearbit);
2376 		}
2377 #endif
2378 		return;
2379 	}
2380 
2381 	/*
2382 	 * Loop over all current mappings, setting/clearing as appropriate
2383 	 */
2384 	SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
2385 		pmap_t pm = pv->pv_pmap;
2386 		const vaddr_t va = pv->pv_va;
2387 		const u_int oflags = pv->pv_flags;
2388 #ifndef ARM_MMU_EXTENDED
2389 		/*
2390 		 * Kernel entries are unmanaged and as such not to be changed.
2391 		 */
2392 		if (PV_IS_KENTRY_P(oflags))
2393 			continue;
2394 #endif
2395 		pv->pv_flags &= ~maskbits;
2396 
2397 		pmap_release_page_lock(md);
2398 		pmap_acquire_pmap_lock(pm);
2399 
2400 		struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va);
2401 		if (l2b == NULL) {
2402 			pmap_release_pmap_lock(pm);
2403 			pmap_acquire_page_lock(md);
2404 			continue;
2405 		}
2406 		KASSERTMSG(l2b != NULL, "%#lx", va);
2407 
2408 		pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
2409 		const pt_entry_t opte = *ptep;
2410 		pt_entry_t npte = opte | execbits;
2411 
2412 #ifdef ARM_MMU_EXTENDED
2413 		KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 0 : L2_XS_nG));
2414 #endif
2415 
2416 		NPDEBUG(PDB_BITS,
2417 		    printf( "%s: pv %p, pm %p, va 0x%08lx, flag 0x%x\n",
2418 			__func__, pv, pm, va, oflags));
2419 
2420 		if (maskbits & (PVF_WRITE|PVF_MOD)) {
2421 #ifdef PMAP_CACHE_VIVT
2422 			if ((oflags & PVF_NC)) {
2423 				/*
2424 				 * Entry is not cacheable:
2425 				 *
2426 				 * Don't turn caching on again if this is a
2427 				 * modified emulation. This would be
2428 				 * inconsistent with the settings created by
2429 				 * pmap_vac_me_harder(). Otherwise, it's safe
2430 				 * to re-enable cacheing.
2431 				 *
2432 				 * There's no need to call pmap_vac_me_harder()
2433 				 * here: all pages are losing their write
2434 				 * permission.
2435 				 */
2436 				if (maskbits & PVF_WRITE) {
2437 					npte |= pte_l2_s_cache_mode;
2438 					pv->pv_flags &= ~PVF_NC;
2439 				}
2440 			} else
2441 			if (l2pte_writable_p(opte)) {
2442 				/*
2443 				 * Entry is writable/cacheable: if the pmap
2444 				 * is current, flush the page from the cache;
2445 				 * otherwise it won't be in the cache anyway.
2446 				 */
2447 				pmap_cache_wbinv_page(pm, va,
2448 				    (maskbits & PVF_REF) != 0,
2449 				    oflags|PVF_WRITE);
2450 			}
2451 #endif
2452 
2453 			/* make the pte read only */
2454 			npte = l2pte_set_readonly(npte);
2455 
2456 			pmap_acquire_page_lock(md);
2457 #ifdef MULTIPROCESSOR
2458 			pv = pmap_find_pv(md, pm, va);
2459 #endif
2460 			if (pv != NULL && (maskbits & oflags & PVF_WRITE)) {
2461 				/*
2462 				 * Keep alias accounting up to date
2463 				 */
2464 				if (pm == pmap_kernel()) {
2465 					md->krw_mappings--;
2466 					md->kro_mappings++;
2467 				} else {
2468 					md->urw_mappings--;
2469 					md->uro_mappings++;
2470 				}
2471 #ifdef PMAP_CACHE_VIPT
2472 				if (arm_cache_prefer_mask != 0) {
2473 					if (md->urw_mappings + md->krw_mappings == 0) {
2474 						md->pvh_attrs &= ~PVF_WRITE;
2475 					} else {
2476 						PMAP_VALIDATE_MD_PAGE(md);
2477 					}
2478 				}
2479 #ifndef ARM_MMU_EXTENDED
2480 				if (want_syncicache)
2481 					need_syncicache = true;
2482 				need_vac_me_harder = true;
2483 #endif
2484 #endif /* PMAP_CACHE_VIPT */
2485 			}
2486 			pmap_release_page_lock(md);
2487 		}
2488 
2489 		if (maskbits & PVF_REF) {
2490 			if (true
2491 #ifndef ARM_MMU_EXTENDED
2492 			    && (oflags & PVF_NC) == 0
2493 #endif
2494 			    && (maskbits & (PVF_WRITE|PVF_MOD)) == 0
2495 			    && l2pte_valid_p(npte)) {
2496 #ifdef PMAP_CACHE_VIVT
2497 				/*
2498 				 * Check npte here; we may have already
2499 				 * done the wbinv above, and the validity
2500 				 * of the PTE is the same for opte and
2501 				 * npte.
2502 				 */
2503 				pmap_cache_wbinv_page(pm, va, true, oflags);
2504 #endif
2505 			}
2506 
2507 			/*
2508 			 * Make the PTE invalid so that we will take a
2509 			 * page fault the next time the mapping is
2510 			 * referenced.
2511 			 */
2512 			npte &= ~L2_TYPE_MASK;
2513 			npte |= L2_TYPE_INV;
2514 		}
2515 
2516 		if (npte != opte) {
2517 			l2pte_reset(ptep);
2518 			PTE_SYNC(ptep);
2519 
2520 			/* Flush the TLB entry if a current pmap. */
2521 			pmap_tlb_flush_SE(pm, va, oflags);
2522 
2523 			l2pte_set(ptep, npte, 0);
2524 			PTE_SYNC(ptep);
2525 		}
2526 
2527 		pmap_release_pmap_lock(pm);
2528 		pmap_acquire_page_lock(md);
2529 
2530 		NPDEBUG(PDB_BITS,
2531 		    printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n",
2532 		    pm, va, opte, npte));
2533 	}
2534 
2535 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
2536 	/*
2537 	 * If we need to sync the I-cache and we haven't done it yet, do it.
2538 	 */
2539 	if (need_syncicache) {
2540 		pmap_release_page_lock(md);
2541 		pmap_syncicache_page(md, pa);
2542 		pmap_acquire_page_lock(md);
2543 		PMAPCOUNT(exec_synced_clearbit);
2544 	}
2545 
2546 	/*
2547 	 * If we are changing this to read-only, we need to call vac_me_harder
2548 	 * so we can change all the read-only pages to cacheable.  We treat
2549 	 * this as a page deletion.
2550 	 */
2551 	if (need_vac_me_harder) {
2552 		if (md->pvh_attrs & PVF_NC)
2553 			pmap_vac_me_harder(md, pa, NULL, 0);
2554 	}
2555 #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */
2556 }
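
/*
 * Hedged sketch (added, not part of the original source): typical
 * pmap_clearbit() usage.  The real ref/mod helpers elsewhere in this file
 * may differ; the names below are illustrative only.  Note the page lock
 * must be held across the call, matching the KASSERT above.
 */
#if 0
static void
example_clear_modify(struct vm_page_md *md, paddr_t pa)
{
	pmap_acquire_page_lock(md);
	pmap_clearbit(md, pa, PVF_MOD);		/* clear the modified bit */
	pmap_release_page_lock(md);
}

static void
example_write_protect(struct vm_page_md *md, paddr_t pa)
{
	pmap_acquire_page_lock(md);
	pmap_clearbit(md, pa, PVF_WRITE);	/* make all mappings read-only */
	pmap_release_page_lock(md);
}
#endif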
2557 
2558 /*
2559  * pmap_clean_page()
2560  *
2561  * This is a local function used to work out the best strategy to clean
2562  * a single page referenced by its entry in the PV table. It's used by
2563  * pmap_copy_page, pmap_zero_page and maybe some others later on.
2564  *
2565  * Its policy is effectively:
2566  *  o If there are no mappings, we don't bother doing anything with the cache.
2567  *  o If there is one mapping, we clean just that page.
2568  *  o If there are multiple mappings, we clean the entire cache.
2569  *
2570  * So that some functions can be further optimised, it returns 0 if it didn't
2571  * clean the entire cache, or 1 if it did.
2572  *
2573  * XXX One bug in this routine is that if the pv_entry has a single page
2574  * mapped at 0x00000000 a whole cache clean will be performed rather than
2575  * just the one page. This should not occur in everyday use and, if it does,
2576  * it merely results in a less efficient clean for the page.
2577  */
2578 #ifdef PMAP_CACHE_VIVT
2579 static bool
2580 pmap_clean_page(struct vm_page_md *md, bool is_src)
2581 {
2582 	struct pv_entry *pv;
2583 	pmap_t pm_to_clean = NULL;
2584 	bool cache_needs_cleaning = false;
2585 	vaddr_t page_to_clean = 0;
2586 	u_int flags = 0;
2587 
2588 	/*
2589 	 * Since we flush the cache each time we change to a different
2590 	 * user vmspace, we only need to flush the page if it is in the
2591 	 * current pmap.
2592 	 */
2593 	KASSERT(pmap_page_locked_p(md));
2594 	SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
2595 		if (pmap_is_current(pv->pv_pmap)) {
2596 			flags |= pv->pv_flags;
2597 			/*
2598 			 * The page is mapped non-cacheable in
2599 			 * this map.  No need to flush the cache.
2600 			 */
2601 			if (pv->pv_flags & PVF_NC) {
2602 #ifdef DIAGNOSTIC
2603 				KASSERT(!cache_needs_cleaning);
2604 #endif
2605 				break;
2606 			} else if (is_src && (pv->pv_flags & PVF_WRITE) == 0)
2607 				continue;
2608 			if (cache_needs_cleaning) {
2609 				page_to_clean = 0;
2610 				break;
2611 			} else {
2612 				page_to_clean = pv->pv_va;
2613 				pm_to_clean = pv->pv_pmap;
2614 			}
2615 			cache_needs_cleaning = true;
2616 		}
2617 	}
2618 
2619 	if (page_to_clean) {
2620 		pmap_cache_wbinv_page(pm_to_clean, page_to_clean,
2621 		    !is_src, flags | PVF_REF);
2622 	} else if (cache_needs_cleaning) {
2623 		pmap_t const pm = curproc->p_vmspace->vm_map.pmap;
2624 
2625 		pmap_cache_wbinv_all(pm, flags);
2626 		return true;
2627 	}
2628 	return false;
2629 }
2630 #endif
2631 
2632 #ifdef PMAP_CACHE_VIPT
2633 /*
2634  * Sync a page with the I-cache.  Since this is a VIPT, we must pick the
2635  * right cache alias to make sure we flush the right stuff.
2636  */
2637 void
2638 pmap_syncicache_page(struct vm_page_md *md, paddr_t pa)
2639 {
2640 	pmap_t kpm = pmap_kernel();
2641 	const size_t way_size = arm_pcache.icache_type == CACHE_TYPE_PIPT
2642 	    ? PAGE_SIZE
2643 	    : arm_pcache.icache_way_size;
2644 
2645 	NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: md=%p (attrs=%#x)\n",
2646 	    md, md->pvh_attrs));
2647 	/*
2648 	 * No need to clean the page if it's non-cached.
2649 	 */
2650 #ifndef ARM_MMU_EXTENDED
2651 	if (md->pvh_attrs & PVF_NC)
2652 		return;
2653 	KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED);
2654 #endif
2655 
2656 	pt_entry_t * const ptep = cpu_cdst_pte(0);
2657 	const vaddr_t dstp = cpu_cdstp(0);
2658 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
2659 	if (way_size <= PAGE_SIZE) {
2660 		bool ok = false;
2661 		vaddr_t vdstp = pmap_direct_mapped_phys(pa, &ok, dstp);
2662 		if (ok) {
2663 			cpu_icache_sync_range(vdstp, way_size);
2664 			return;
2665 		}
2666 	}
2667 #endif
2668 
2669 	/*
2670 	 * We don't worry about the color of the exec page: we map the
2671 	 * same physical page at each page offset within the way and then
2672 	 * do the icache_sync on the entire way to make sure it is clean.
2673 	 */
2674 	const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode
2675 	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
2676 
2677 	for (size_t i = 0, j = 0; i < way_size;
2678 	     i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) {
2679 		l2pte_reset(ptep + j);
2680 		PTE_SYNC(ptep + j);
2681 
2682 		pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC);
2683 		/*
2684 		 * Set up a PTE with which to flush these cache lines.
2685 		 */
2686 		l2pte_set(ptep + j, npte, 0);
2687 	}
2688 	PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE);
2689 
2690 	/*
2691 	 * Flush it.
2692 	 */
2693 	cpu_icache_sync_range(dstp, way_size);
2694 
2695 	for (size_t i = 0, j = 0; i < way_size;
2696 	     i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) {
2697 		/*
2698 		 * Unmap the page(s).
2699 		 */
2700 		l2pte_reset(ptep + j);
2701 		pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC);
2702 	}
2703 	PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE);
2704 
2705 	md->pvh_attrs |= PVF_EXEC;
2706 	PMAPCOUNT(exec_synced);
2707 }
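
/*
 * Example (added for clarity, not in the original): on a hypothetical
 * 32KB 4-way set-associative VIPT I-cache with 4KB pages,
 * icache_way_size is 8KB, so the loop above maps the page at two
 * consecutive page-sized aliases and the 8KB icache_sync covers every
 * index the page could occupy, regardless of its color.
 */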
2708 
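/*
 * Added summary of the flush operations handled below: PMAP_FLUSH_PRIMARY
 * writes back and invalidates the page's current color (all colors if
 * PVF_MULTCLR is set) and clears PVF_DIRTY; PMAP_FLUSH_SECONDARY does the
 * same for every color except the current one; PMAP_CLEAN_PRIMARY only
 * writes back the current color without invalidating it.
 */
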
2709 #ifndef ARM_MMU_EXTENDED
2710 void
2711 pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush)
2712 {
2713 	vsize_t va_offset, end_va;
2714 	bool wbinv_p;
2715 
2716 	if (arm_cache_prefer_mask == 0)
2717 		return;
2718 
2719 	switch (flush) {
2720 	case PMAP_FLUSH_PRIMARY:
2721 		if (md->pvh_attrs & PVF_MULTCLR) {
2722 			va_offset = 0;
2723 			end_va = arm_cache_prefer_mask;
2724 			md->pvh_attrs &= ~PVF_MULTCLR;
2725 			PMAPCOUNT(vac_flush_lots);
2726 		} else {
2727 			va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2728 			end_va = va_offset;
2729 			PMAPCOUNT(vac_flush_one);
2730 		}
2731 		/*
2732 		 * Mark that the page is no longer dirty.
2733 		 */
2734 		md->pvh_attrs &= ~PVF_DIRTY;
2735 		wbinv_p = true;
2736 		break;
2737 	case PMAP_FLUSH_SECONDARY:
2738 		va_offset = 0;
2739 		end_va = arm_cache_prefer_mask;
2740 		wbinv_p = true;
2741 		md->pvh_attrs &= ~PVF_MULTCLR;
2742 		PMAPCOUNT(vac_flush_lots);
2743 		break;
2744 	case PMAP_CLEAN_PRIMARY:
2745 		va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2746 		end_va = va_offset;
2747 		wbinv_p = false;
2748 		/*
2749 		 * Mark that the page is no longer dirty.
2750 		 */
2751 		if ((md->pvh_attrs & PVF_DMOD) == 0)
2752 			md->pvh_attrs &= ~PVF_DIRTY;
2753 		PMAPCOUNT(vac_clean_one);
2754 		break;
2755 	default:
2756 		return;
2757 	}
2758 
2759 	KASSERT(!(md->pvh_attrs & PVF_NC));
2760 
2761 	NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n",
2762 	    md, md->pvh_attrs));
2763 
2764 	const size_t scache_line_size = arm_scache.dcache_line_size;
2765 
2766 	for (; va_offset <= end_va; va_offset += PAGE_SIZE) {
2767 		pt_entry_t * const ptep = cpu_cdst_pte(va_offset);
2768 		const vaddr_t dstp = cpu_cdstp(va_offset);
2769 		const pt_entry_t opte = *ptep;
2770 
2771 		if (flush == PMAP_FLUSH_SECONDARY
2772 		    && va_offset == (md->pvh_attrs & arm_cache_prefer_mask))
2773 			continue;
2774 
2775 		pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC);
2776 		/*
2777 		 * Set up a PTE with the right coloring to flush
2778 		 * existing cache entries.
2779 		 */
2780 		const pt_entry_t npte = L2_S_PROTO
2781 		    | pa
2782 		    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
2783 		    | pte_l2_s_cache_mode;
2784 		l2pte_set(ptep, npte, opte);
2785 		PTE_SYNC(ptep);
2786 
2787 		/*
2788 		 * Flush it.  Make sure to flush secondary cache too since
2789 		 * bus_dma will ignore uncached pages.
2790 		 */
2791 		if (scache_line_size != 0) {
2792 			cpu_dcache_wb_range(dstp, PAGE_SIZE);
2793 			if (wbinv_p) {
2794 				cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE);
2795 				cpu_dcache_inv_range(dstp, PAGE_SIZE);
2796 			} else {
2797 				cpu_sdcache_wb_range(dstp, pa, PAGE_SIZE);
2798 			}
2799 		} else {
2800 			if (wbinv_p) {
2801 				cpu_dcache_wbinv_range(dstp, PAGE_SIZE);
2802 			} else {
2803 				cpu_dcache_wb_range(dstp, PAGE_SIZE);
2804 			}
2805 		}
2806 
2807 		/*
2808 		 * Restore the page table entry since we might have interrupted
2809 		 * pmap_zero_page or pmap_copy_page which was already using
2810 		 * this pte.
2811 		 */
2812 		if (opte) {
2813 			l2pte_set(ptep, opte, npte);
2814 		} else {
2815 			l2pte_reset(ptep);
2816 		}
2817 		PTE_SYNC(ptep);
2818 		pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC);
2819 	}
2820 }
2821 #endif /* ARM_MMU_EXTENDED */
2822 #endif /* PMAP_CACHE_VIPT */
2823 
2824 /*
2825  * Routine:	pmap_page_remove
2826  * Function:
2827  *		Removes this physical page from
2828  *		all physical maps in which it resides.
2829  *		Reflects back modify bits to the pager.
2830  */
2831 static void
2832 pmap_page_remove(struct vm_page_md *md, paddr_t pa)
2833 {
2834 	struct l2_bucket *l2b;
2835 	struct pv_entry *pv;
2836 	pt_entry_t *ptep;
2837 #ifndef ARM_MMU_EXTENDED
2838 	bool flush = false;
2839 #endif
2840 	u_int flags = 0;
2841 
2842 	NPDEBUG(PDB_FOLLOW,
2843 	    printf("pmap_page_remove: md %p (0x%08lx)\n", md,
2844 	    pa));
2845 
2846 	struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list);
2847 	pmap_acquire_page_lock(md);
2848 	if (*pvp == NULL) {
2849 #ifdef PMAP_CACHE_VIPT
2850 		/*
2851 		 * We *know* the page contents are about to be replaced.
2852 		 * Discard the exec contents
2853 		 */
2854 		if (PV_IS_EXEC_P(md->pvh_attrs))
2855 			PMAPCOUNT(exec_discarded_page_protect);
2856 		md->pvh_attrs &= ~PVF_EXEC;
2857 		PMAP_VALIDATE_MD_PAGE(md);
2858 #endif
2859 		pmap_release_page_lock(md);
2860 		return;
2861 	}
2862 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
2863 	KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md));
2864 #endif
2865 
2866 	/*
2867 	 * Clear alias counts
2868 	 */
2869 #ifdef PMAP_CACHE_VIVT
2870 	md->k_mappings = 0;
2871 #endif
2872 	md->urw_mappings = md->uro_mappings = 0;
2873 
2874 #ifdef PMAP_CACHE_VIVT
2875 	pmap_clean_page(md, false);
2876 #endif
2877 
2878 	while ((pv = *pvp) != NULL) {
2879 		pmap_t pm = pv->pv_pmap;
2880 #ifndef ARM_MMU_EXTENDED
2881 		if (flush == false && pmap_is_current(pm))
2882 			flush = true;
2883 #endif
2884 
2885 		if (pm == pmap_kernel()) {
2886 #ifdef PMAP_CACHE_VIPT
2887 			/*
2888 			 * If this was an unmanaged mapping, it must be preserved.
2889 			 * Move it back on the list and advance the end-of-list
2890 			 * pointer.
2891 			 */
2892 			if (PV_IS_KENTRY_P(pv->pv_flags)) {
2893 				*pvp = pv;
2894 				pvp = &SLIST_NEXT(pv, pv_link);
2895 				continue;
2896 			}
2897 			if (pv->pv_flags & PVF_WRITE)
2898 				md->krw_mappings--;
2899 			else
2900 				md->kro_mappings--;
2901 #endif
2902 			PMAPCOUNT(kernel_unmappings);
2903 		}
2904 		*pvp = SLIST_NEXT(pv, pv_link); /* remove from list */
2905 		PMAPCOUNT(unmappings);
2906 
2907 		pmap_release_page_lock(md);
2908 		pmap_acquire_pmap_lock(pm);
2909 
2910 		l2b = pmap_get_l2_bucket(pm, pv->pv_va);
2911 		KASSERTMSG(l2b != NULL, "%#lx", pv->pv_va);
2912 
2913 		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2914 
2915 		/*
2916 		 * Update statistics
2917 		 */
2918 		--pm->pm_stats.resident_count;
2919 
2920 		/* Wired bit */
2921 		if (pv->pv_flags & PVF_WIRED)
2922 			--pm->pm_stats.wired_count;
2923 
2924 		flags |= pv->pv_flags;
2925 
2926 		/*
2927 		 * Invalidate the PTEs.
2928 		 */
2929 		l2pte_reset(ptep);
2930 		PTE_SYNC_CURRENT(pm, ptep);
2931 
2932 #ifdef ARM_MMU_EXTENDED
2933 		pmap_tlb_invalidate_addr(pm, pv->pv_va);
2934 #endif
2935 
2936 		pmap_free_l2_bucket(pm, l2b, PAGE_SIZE / L2_S_SIZE);
2937 
2938 		pmap_release_pmap_lock(pm);
2939 
2940 		pool_put(&pmap_pv_pool, pv);
2941 		pmap_acquire_page_lock(md);
2942 #ifdef MULTIPROCESSOR
2943 		/*
2944 		 * Restart at the beginning of the list.
2945 		 */
2946 		pvp = &SLIST_FIRST(&md->pvh_list);
2947 #endif
2948 	}
2949 	/*
2950 	 * if we reach the end of the list and there are still mappings, they
2951 	 * might be able to be cached now.  And they must be kernel mappings.
2952 	 */
2953 	if (!SLIST_EMPTY(&md->pvh_list)) {
2954 		pmap_vac_me_harder(md, pa, pmap_kernel(), 0);
2955 	}
2956 
2957 #ifdef PMAP_CACHE_VIPT
2958 	/*
2959 	 * Its EXEC cache is now gone.
2960 	 */
2961 	if (PV_IS_EXEC_P(md->pvh_attrs))
2962 		PMAPCOUNT(exec_discarded_page_protect);
2963 	md->pvh_attrs &= ~PVF_EXEC;
2964 	KASSERT(md->urw_mappings == 0);
2965 	KASSERT(md->uro_mappings == 0);
2966 #ifndef ARM_MMU_EXTENDED
2967 	if (arm_cache_prefer_mask != 0) {
2968 		if (md->krw_mappings == 0)
2969 			md->pvh_attrs &= ~PVF_WRITE;
2970 		PMAP_VALIDATE_MD_PAGE(md);
2971 	}
2972 #endif /* ARM_MMU_EXTENDED */
2973 #endif /* PMAP_CACHE_VIPT */
2974 	pmap_release_page_lock(md);
2975 
2976 #ifndef ARM_MMU_EXTENDED
2977 	if (flush) {
2978 		/*
2979 		 * Note: We can't use pmap_tlb_flush{I,D}() here since that
2980 		 * would need a subsequent call to pmap_update() to ensure
2981 		 * curpm->pm_cstate.cs_all is reset. Our callers are not
2982 		 * required to do that (see pmap(9)), so we can't modify
2983 		 * the current pmap's state.
2984 		 */
2985 		if (PV_BEEN_EXECD(flags))
2986 			cpu_tlb_flushID();
2987 		else
2988 			cpu_tlb_flushD();
2989 	}
2990 	cpu_cpwait();
2991 #endif /* ARM_MMU_EXTENDED */
2992 }
2993 
2994 /*
2995  * pmap_t pmap_create(void)
2996  *
2997  *      Create a new pmap structure from scratch.
2998  */
2999 pmap_t
3000 pmap_create(void)
3001 {
3002 	pmap_t pm;
3003 
3004 	pm = pool_cache_get(&pmap_cache, PR_WAITOK);
3005 
3006 	mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
3007 	uvm_obj_init(&pm->pm_obj, NULL, false, 1);
3008 	uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
3009 
3010 	pm->pm_stats.wired_count = 0;
3011 	pm->pm_stats.resident_count = 1;
3012 #ifdef ARM_MMU_EXTENDED
3013 #ifdef MULTIPROCESSOR
3014 	kcpuset_create(&pm->pm_active, true);
3015 	kcpuset_create(&pm->pm_onproc, true);
3016 #endif
3017 #else
3018 	pm->pm_cstate.cs_all = 0;
3019 #endif
3020 	pmap_alloc_l1(pm);
3021 
3022 	/*
3023 	 * Note: The pool cache ensures that the pm_l2[] array is already
3024 	 * initialised to zero.
3025 	 */
3026 
3027 	pmap_pinit(pm);
3028 
3029 	return (pm);
3030 }
3031 
3032 u_int
3033 arm32_mmap_flags(paddr_t pa)
3034 {
3035 	/*
3036 	 * the upper 8 bits in pmap_enter()'s flags are reserved for MD stuff,
3037 	 * and we're using the upper bits in page numbers to pass flags around,
3038 	 * so we might as well use the same bits.
3039 	 */
3040 	return (u_int)pa & PMAP_MD_MASK;
3041 }
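
/*
 * Hedged sketch (added, not part of the original source): a device mmap()
 * handler could pass one of the ARM32_MMAP_* hints down to pmap_enter()
 * by or'ing it into the page number it returns.  The device and the names
 * below are hypothetical.
 */
#if 0
static paddr_t
exampledev_mmap(dev_t dev, off_t off, int prot)
{
	const paddr_t pa = EXAMPLEDEV_BASE + off;	/* hypothetical register window */

	/* hint that this range should be mapped write-combining */
	return atop(pa) | ARM32_MMAP_WRITECOMBINE;
}
#endif
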
3042 /*
3043  * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
3044  *      u_int flags)
3045  *
3046  *      Insert the given physical page (p) at
3047  *      the specified virtual address (v) in the
3048  *      target physical map with the protection requested.
3049  *
3050  *      NB:  This is the only routine which MAY NOT lazy-evaluate
3051  *      or lose information.  That is, this routine must actually
3052  *      insert this page into the given map NOW.
3053  */
3054 int
3055 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
3056 {
3057 	struct l2_bucket *l2b;
3058 	struct vm_page *pg, *opg;
3059 	u_int nflags;
3060 	u_int oflags;
3061 	const bool kpm_p = (pm == pmap_kernel());
3062 #ifdef ARM_HAS_VBAR
3063 	const bool vector_page_p = false;
3064 #else
3065 	const bool vector_page_p = (va == vector_page);
3066 #endif
3067 
3068 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
3069 
3070 	UVMHIST_LOG(maphist, " (pm %p va %#x pa %#x prot %#x",
3071 	    pm, va, pa, prot);
3072 	UVMHIST_LOG(maphist, "  flag %#x", flags, 0, 0, 0);
3073 
3074 	KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
3075 	KDASSERT(((va | pa) & PGOFSET) == 0);
3076 
3077 	/*
3078 	 * Get a pointer to the page.  Later on in this function, we
3079 	 * test for a managed page by checking pg != NULL.
3080 	 */
3081 	pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
3082 
3083 	nflags = 0;
3084 	if (prot & VM_PROT_WRITE)
3085 		nflags |= PVF_WRITE;
3086 	if (prot & VM_PROT_EXECUTE)
3087 		nflags |= PVF_EXEC;
3088 	if (flags & PMAP_WIRED)
3089 		nflags |= PVF_WIRED;
3090 
3091 	pmap_acquire_pmap_lock(pm);
3092 
3093 	/*
3094 	 * Fetch the L2 bucket which maps this page, allocating one if
3095 	 * necessary for user pmaps.
3096 	 */
3097 	if (kpm_p) {
3098 		l2b = pmap_get_l2_bucket(pm, va);
3099 	} else {
3100 		l2b = pmap_alloc_l2_bucket(pm, va);
3101 	}
3102 	if (l2b == NULL) {
3103 		if (flags & PMAP_CANFAIL) {
3104 			pmap_release_pmap_lock(pm);
3105 			return (ENOMEM);
3106 		}
3107 		panic("pmap_enter: failed to allocate L2 bucket");
3108 	}
3109 	pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)];
3110 	const pt_entry_t opte = *ptep;
3111 	pt_entry_t npte = pa;
3112 	oflags = 0;
3113 
3114 	if (opte) {
3115 		/*
3116 		 * There is already a mapping at this address.
3117 		 * If the physical address is different, lookup the
3118 		 * vm_page.
3119 		 */
3120 		if (l2pte_pa(opte) != pa) {
3121 			KASSERT(!pmap_pv_tracked(pa));
3122 			opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3123 		} else
3124 			opg = pg;
3125 	} else
3126 		opg = NULL;
3127 
3128 	struct pmap_page *pp = pmap_pv_tracked(pa);
3129 
3130 	if (pg || pp) {
3131 		KASSERT((pg != NULL) != (pp != NULL));
3132 		struct vm_page_md *md = (pg != NULL) ? VM_PAGE_TO_MD(pg) :
3133 		    PMAP_PAGE_TO_MD(pp);
3134 
3135 		/*
3136 		 * This is to be a managed mapping.
3137 		 */
3138 		pmap_acquire_page_lock(md);
3139 		if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) {
3140 			/*
3141 			 * - The access type indicates that we don't need
3142 			 *   to do referenced emulation.
3143 			 * OR
3144 			 * - The physical page has already been referenced
3145 			 *   so no need to re-do referenced emulation here.
3146 			 */
3147 			npte |= l2pte_set_readonly(L2_S_PROTO);
3148 
3149 			nflags |= PVF_REF;
3150 
3151 			if ((prot & VM_PROT_WRITE) != 0 &&
3152 			    ((flags & VM_PROT_WRITE) != 0 ||
3153 			     (md->pvh_attrs & PVF_MOD) != 0)) {
3154 				/*
3155 				 * This is a writable mapping, and the
3156 				 * page's mod state indicates it has
3157 				 * already been modified. Make it
3158 				 * writable from the outset.
3159 				 */
3160 				npte = l2pte_set_writable(npte);
3161 				nflags |= PVF_MOD;
3162 			}
3163 
3164 #ifdef ARM_MMU_EXTENDED
3165 			/*
3166 			 * If the page has been cleaned, then the pvh_attrs
3167 			 * will have PVF_EXEC set, so mark it execute so we
3168 			 * don't get an access fault when trying to execute
3169 			 * from it.
3170 			 */
3171 			if (md->pvh_attrs & nflags & PVF_EXEC) {
3172 				npte &= ~L2_XS_XN;
3173 			}
3174 #endif
3175 		} else {
3176 			/*
3177 			 * Need to do page referenced emulation.
3178 			 */
3179 			npte |= L2_TYPE_INV;
3180 		}
3181 
3182 		if (flags & ARM32_MMAP_WRITECOMBINE) {
3183 			npte |= pte_l2_s_wc_mode;
3184 		} else
3185 			npte |= pte_l2_s_cache_mode;
3186 
3187 		if (pg != NULL && pg == opg) {
3188 			/*
3189 			 * We're changing the attrs of an existing mapping.
3190 			 */
3191 			oflags = pmap_modify_pv(md, pa, pm, va,
3192 			    PVF_WRITE | PVF_EXEC | PVF_WIRED |
3193 			    PVF_MOD | PVF_REF, nflags);
3194 
3195 #ifdef PMAP_CACHE_VIVT
3196 			/*
3197 			 * We may need to flush the cache if we're
3198 			 * doing rw-ro...
3199 			 */
3200 			if (pm->pm_cstate.cs_cache_d &&
3201 			    (oflags & PVF_NC) == 0 &&
3202 			    l2pte_writable_p(opte) &&
3203 			    (prot & VM_PROT_WRITE) == 0)
3204 				cpu_dcache_wb_range(va, PAGE_SIZE);
3205 #endif
3206 		} else {
3207 			struct pv_entry *pv;
3208 			/*
3209 			 * New mapping, or changing the backing page
3210 			 * of an existing mapping.
3211 			 */
3212 			if (opg) {
3213 				struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
3214 				paddr_t opa = VM_PAGE_TO_PHYS(opg);
3215 
3216 				/*
3217 				 * Replacing an existing mapping with a new one.
3218 				 * It is part of our managed memory so we
3219 				 * must remove it from the PV list
3220 				 */
3221 				pv = pmap_remove_pv(omd, opa, pm, va);
3222 				pmap_vac_me_harder(omd, opa, pm, 0);
3223 				oflags = pv->pv_flags;
3224 
3225 #ifdef PMAP_CACHE_VIVT
3226 				/*
3227 				 * If the old mapping was valid (ref/mod
3228 				 * emulation creates 'invalid' mappings
3229 				 * initially) then make sure to frob
3230 				 * the cache.
3231 				 */
3232 				if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) {
3233 					pmap_cache_wbinv_page(pm, va, true,
3234 					    oflags);
3235 				}
3236 #endif
3237 			} else {
3238 				pmap_release_page_lock(md);
3239 				pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
3240 				if (pv == NULL) {
3241 					pmap_release_pmap_lock(pm);
3242 					if ((flags & PMAP_CANFAIL) == 0)
3243 						panic("pmap_enter: "
3244 						    "no pv entries");
3245 
3246 					pmap_free_l2_bucket(pm, l2b, 0);
3247 					UVMHIST_LOG(maphist, "  <-- done (ENOMEM)",
3248 					    0, 0, 0, 0);
3249 					return (ENOMEM);
3250 				}
3251 				pmap_acquire_page_lock(md);
3252 			}
3253 
3254 			pmap_enter_pv(md, pa, pv, pm, va, nflags);
3255 		}
3256 		pmap_release_page_lock(md);
3257 	} else {
3258 		/*
3259 		 * We're mapping an unmanaged page.
3260 		 * These are always readable, and possibly writable, from
3261 		 * the get go as we don't need to track ref/mod status.
3262 		 */
3263 		npte |= l2pte_set_readonly(L2_S_PROTO);
3264 		if (prot & VM_PROT_WRITE)
3265 			npte = l2pte_set_writable(npte);
3266 
3267 		/*
3268 		 * Make sure the vector table is mapped cacheable
3269 		 */
3270 		if ((vector_page_p && !kpm_p)
3271 		    || (flags & ARM32_MMAP_CACHEABLE)) {
3272 			npte |= pte_l2_s_cache_mode;
3273 #ifdef ARM_MMU_EXTENDED
3274 			npte &= ~L2_XS_XN;	/* and executable */
3275 #endif
3276 		} else if (flags & ARM32_MMAP_WRITECOMBINE) {
3277 			npte |= pte_l2_s_wc_mode;
3278 		}
3279 		if (opg) {
3280 			/*
3281 			 * Looks like there's an existing 'managed' mapping
3282 			 * at this address.
3283 			 */
3284 			struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
3285 			paddr_t opa = VM_PAGE_TO_PHYS(opg);
3286 
3287 			pmap_acquire_page_lock(omd);
3288 			struct pv_entry *pv = pmap_remove_pv(omd, opa, pm, va);
3289 			pmap_vac_me_harder(omd, opa, pm, 0);
3290 			oflags = pv->pv_flags;
3291 			pmap_release_page_lock(omd);
3292 
3293 #ifdef PMAP_CACHE_VIVT
3294 			if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) {
3295 				pmap_cache_wbinv_page(pm, va, true, oflags);
3296 			}
3297 #endif
3298 			pool_put(&pmap_pv_pool, pv);
3299 		}
3300 	}
3301 
3302 	/*
3303 	 * Make sure userland mappings get the right permissions
3304 	 */
3305 	if (!vector_page_p && !kpm_p) {
3306 		npte |= L2_S_PROT_U;
3307 #ifdef ARM_MMU_EXTENDED
3308 		npte |= L2_XS_nG;	/* user pages are not global */
3309 #endif
3310 	}
3311 
3312 	/*
3313 	 * Keep the stats up to date
3314 	 */
3315 	if (opte == 0) {
3316 		l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE;
3317 		pm->pm_stats.resident_count++;
3318 	}
3319 
3320 	UVMHIST_LOG(maphist, " opte %#x npte %#x", opte, npte, 0, 0);
3321 
3322 #if defined(ARM_MMU_EXTENDED)
3323 	/*
3324 	 * If exec protection was requested but the page hasn't been synced,
3325 	 * sync it now and allow execution from it.
3326 	 */
3327 	if ((nflags & PVF_EXEC) && (npte & L2_XS_XN)) {
3328 		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3329 		npte &= ~L2_XS_XN;
3330 		pmap_syncicache_page(md, pa);
3331 		PMAPCOUNT(exec_synced_map);
3332 	}
3333 #endif
3334 	/*
3335 	 * If this is just a wiring change, the two PTEs will be
3336 	 * identical, so there's no need to update the page table.
3337 	 */
3338 	if (npte != opte) {
3339 		l2pte_reset(ptep);
3340 		PTE_SYNC(ptep);
3341 		if (l2pte_valid_p(opte)) {
3342 			pmap_tlb_flush_SE(pm, va, oflags);
3343 		}
3344 		l2pte_set(ptep, npte, 0);
3345 		PTE_SYNC(ptep);
3346 #ifndef ARM_MMU_EXTENDED
3347 		bool is_cached = pmap_is_cached(pm);
3348 		if (is_cached) {
3349 			/*
3350 			 * We only need to frob the cache/tlb if this pmap
3351 			 * is current
3352 			 */
3353 			if (!vector_page_p && l2pte_valid_p(npte)) {
3354 				/*
3355 				 * This mapping is likely to be accessed as
3356 				 * soon as we return to userland. Fix up the
3357 				 * L1 entry to avoid taking another
3358 				 * page/domain fault.
3359 				 */
3360 				pd_entry_t *pdep = pmap_l1_kva(pm)
3361 				     + l1pte_index(va);
3362 				pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa
3363 				    | L1_C_DOM(pmap_domain(pm));
3364 				if (*pdep != pde) {
3365 					l1pte_setone(pdep, pde);
3366 					PDE_SYNC(pdep);
3367 				}
3368 			}
3369 		}
3370 #endif /* !ARM_MMU_EXTENDED */
3371 
3372 #ifndef ARM_MMU_EXTENDED
3373 		UVMHIST_LOG(maphist, "  is_cached %d cs 0x%08x\n",
3374 		    is_cached, pm->pm_cstate.cs_all, 0, 0);
3375 
3376 		if (pg != NULL) {
3377 			struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3378 
3379 			pmap_acquire_page_lock(md);
3380 			pmap_vac_me_harder(md, pa, pm, va);
3381 			pmap_release_page_lock(md);
3382 		}
3383 #endif
3384 	}
3385 #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
3386 	if (pg) {
3387 		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3388 
3389 		pmap_acquire_page_lock(md);
3390 #ifndef ARM_MMU_EXTENDED
3391 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
3392 #endif
3393 		PMAP_VALIDATE_MD_PAGE(md);
3394 		pmap_release_page_lock(md);
3395 	}
3396 #endif
3397 
3398 	pmap_release_pmap_lock(pm);
3399 
3400 	return (0);
3401 }
3402 
3403 /*
3404  * pmap_remove()
3405  *
3406  * pmap_remove is responsible for nuking a number of mappings for a range
3407  * of virtual address space in the current pmap. To do this efficiently
3408  * is interesting, because in a number of cases a wide virtual address
3409  * range may be supplied that contains few actual mappings. So, the
3410  * optimisations are:
3411  *  1. Skip over hunks of address space for which no L1 or L2 entry exists.
3412  *  2. Build up a list of pages we've hit, up to a maximum, so we can
3413  *     maybe do just a partial cache clean. This path of execution is
3414  *     complicated by the fact that the cache must be flushed _before_
3415  *     the PTE is nuked, being a VAC :-)
3416  *  3. If we're called after UVM calls pmap_remove_all(), we can defer
3417  *     all invalidations until pmap_update(), since pmap_remove_all() has
3418  *     already flushed the cache.
3419  *  4. Maybe later fast-case a single page, but I don't think this is
3420  *     going to make _that_ much difference overall.
3421  */
3422 
3423 #define	PMAP_REMOVE_CLEAN_LIST_SIZE	3
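
/*
 * Illustrative sketch (not compiled): the caller-side contract that
 * optimisation 3 above relies on.  "pm", "sva" and "eva" stand for
 * whatever range UVM is tearing down; the only point is that TLB
 * invalidations deferred by pmap_remove() are not guaranteed to have
 * completed until the matching pmap_update() call.
 */
#if 0
	pmap_remove(pm, sva, eva);	/* may defer TLB invalidations */
	pmap_update(pm);		/* deferred work must finish here */
#endif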
3424 
3425 void
3426 pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
3427 {
3428 	vaddr_t next_bucket;
3429 	u_int cleanlist_idx, total, cnt;
3430 	struct {
3431 		vaddr_t va;
3432 		pt_entry_t *ptep;
3433 	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
3434 	u_int mappings;
3435 
3436 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
3437 	UVMHIST_LOG(maphist, " (pm=%p, sva=%#x, eva=%#x)", pm, sva, eva, 0);
3438 
3439 	/*
3440 	 * we lock in the pmap => pv_head direction
3441 	 */
3442 	pmap_acquire_pmap_lock(pm);
3443 
3444 	if (pm->pm_remove_all || !pmap_is_cached(pm)) {
3445 		cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
3446 #ifndef ARM_MMU_EXTENDED
3447 		if (pm->pm_cstate.cs_tlb == 0)
3448 			pm->pm_remove_all = true;
3449 #endif
3450 	} else
3451 		cleanlist_idx = 0;
3452 
3453 	total = 0;
3454 
3455 	while (sva < eva) {
3456 		/*
3457 		 * Do one L2 bucket's worth at a time.
3458 		 */
3459 		next_bucket = L2_NEXT_BUCKET_VA(sva);
3460 		if (next_bucket > eva)
3461 			next_bucket = eva;
3462 
3463 		struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, sva);
3464 		if (l2b == NULL) {
3465 			sva = next_bucket;
3466 			continue;
3467 		}
3468 
3469 		pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)];
3470 
3471 		for (mappings = 0;
3472 		     sva < next_bucket;
3473 		     sva += PAGE_SIZE, ptep += PAGE_SIZE / L2_S_SIZE) {
3474 			pt_entry_t opte = *ptep;
3475 
3476 			if (opte == 0) {
3477 				/* Nothing here, move along */
3478 				continue;
3479 			}
3480 
3481 			u_int flags = PVF_REF;
3482 			paddr_t pa = l2pte_pa(opte);
3483 			struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
3484 
3485 			/*
3486 			 * Update flags. In a number of circumstances,
3487 			 * we could cluster a lot of these and do a
3488 			 * number of sequential pages in one go.
3489 			 */
3490 			if (pg != NULL) {
3491 				struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3492 				struct pv_entry *pv;
3493 
3494 				pmap_acquire_page_lock(md);
3495 				pv = pmap_remove_pv(md, pa, pm, sva);
3496 				pmap_vac_me_harder(md, pa, pm, 0);
3497 				pmap_release_page_lock(md);
3498 				if (pv != NULL) {
3499 					if (pm->pm_remove_all == false) {
3500 						flags = pv->pv_flags;
3501 					}
3502 					pool_put(&pmap_pv_pool, pv);
3503 				}
3504 			}
3505 			mappings += PAGE_SIZE / L2_S_SIZE;
3506 
3507 			if (!l2pte_valid_p(opte)) {
3508 				/*
3509 				 * Ref/Mod emulation is still active for this
3510 				 * mapping, therefore it has not yet been
3511 				 * accessed. No need to frob the cache/tlb.
3512 				 */
3513 				l2pte_reset(ptep);
3514 				PTE_SYNC_CURRENT(pm, ptep);
3515 				continue;
3516 			}
3517 
3518 #ifdef ARM_MMU_EXTENDED
3519 			if (pm == pmap_kernel()) {
3520 				l2pte_reset(ptep);
3521 				PTE_SYNC(ptep);
3522 				pmap_tlb_flush_SE(pm, sva, flags);
3523 				continue;
3524 			}
3525 #endif
3526 			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
3527 				/* Add to the clean list. */
3528 				cleanlist[cleanlist_idx].ptep = ptep;
3529 				cleanlist[cleanlist_idx].va =
3530 				    sva | (flags & PVF_EXEC);
3531 				cleanlist_idx++;
3532 			} else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
3533 				/* Nuke everything if needed. */
3534 #ifdef PMAP_CACHE_VIVT
3535 				pmap_cache_wbinv_all(pm, PVF_EXEC);
3536 #endif
3537 				/*
3538 				 * Roll back the previous PTE list,
3539 				 * and zero out the current PTE.
3540 				 */
3541 				for (cnt = 0;
3542 				     cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
3543 					l2pte_reset(cleanlist[cnt].ptep);
3544 					PTE_SYNC(cleanlist[cnt].ptep);
3545 				}
3546 				l2pte_reset(ptep);
3547 				PTE_SYNC(ptep);
3548 				cleanlist_idx++;
3549 				pm->pm_remove_all = true;
3550 			} else {
3551 				l2pte_reset(ptep);
3552 				PTE_SYNC(ptep);
3553 				if (pm->pm_remove_all == false) {
3554 					pmap_tlb_flush_SE(pm, sva, flags);
3555 				}
3556 			}
3557 		}
3558 
3559 		/*
3560 		 * Deal with any left overs
3561 		 */
3562 		if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
3563 			total += cleanlist_idx;
3564 			for (cnt = 0; cnt < cleanlist_idx; cnt++) {
3565 				l2pte_reset(cleanlist[cnt].ptep);
3566 				PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep);
3567 #ifdef ARM_MMU_EXTENDED
3568 				vaddr_t clva = cleanlist[cnt].va;
3569 				pmap_tlb_flush_SE(pm, clva, PVF_REF);
3570 #else
3571 				vaddr_t va = cleanlist[cnt].va;
3572 				if (pm->pm_cstate.cs_all != 0) {
3573 					vaddr_t clva = va & ~PAGE_MASK;
3574 					u_int flags = va & PVF_EXEC;
3575 #ifdef PMAP_CACHE_VIVT
3576 					pmap_cache_wbinv_page(pm, clva, true,
3577 					    PVF_REF | PVF_WRITE | flags);
3578 #endif
3579 					pmap_tlb_flush_SE(pm, clva,
3580 					    PVF_REF | flags);
3581 				}
3582 #endif /* ARM_MMU_EXTENDED */
3583 			}
3584 
3585 			/*
3586 			 * If it looks like we're removing a whole bunch
3587 			 * of mappings, it's faster to just write-back
3588 			 * the whole cache now and defer TLB flushes until
3589 			 * pmap_update() is called.
3590 			 */
3591 			if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
3592 				cleanlist_idx = 0;
3593 			else {
3594 				cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
3595 #ifdef PMAP_CACHE_VIVT
3596 				pmap_cache_wbinv_all(pm, PVF_EXEC);
3597 #endif
3598 				pm->pm_remove_all = true;
3599 			}
3600 		}
3601 
3602 
3603 		pmap_free_l2_bucket(pm, l2b, mappings);
3604 		pm->pm_stats.resident_count -= mappings / (PAGE_SIZE/L2_S_SIZE);
3605 	}
3606 
3607 	pmap_release_pmap_lock(pm);
3608 }
3609 
3610 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
3611 static struct pv_entry *
3612 pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
3613 {
3614 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3615 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
3616 	struct pv_entry *pv;
3617 
3618 	KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC));
3619 	KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0);
3620 	KASSERT(pmap_page_locked_p(md));
3621 
3622 	pv = pmap_remove_pv(md, pa, pmap_kernel(), va);
3623 	KASSERTMSG(pv, "pg %p (pa %#lx) va %#lx", pg, pa, va);
3624 	KASSERT(PV_IS_KENTRY_P(pv->pv_flags));
3625 
3626 	/*
3627 	 * If we are removing a writeable mapping to a cached exec page
3628 	 * and it's the last mapping, clear its exec status; otherwise
3629 	 * sync the page to the icache.
3630 	 */
3631 	if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
3632 	    && (pv->pv_flags & PVF_WRITE) != 0) {
3633 		if (SLIST_EMPTY(&md->pvh_list)) {
3634 			md->pvh_attrs &= ~PVF_EXEC;
3635 			PMAPCOUNT(exec_discarded_kremove);
3636 		} else {
3637 			pmap_syncicache_page(md, pa);
3638 			PMAPCOUNT(exec_synced_kremove);
3639 		}
3640 	}
3641 	pmap_vac_me_harder(md, pa, pmap_kernel(), 0);
3642 
3643 	return pv;
3644 }
3645 #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */
3646 
3647 /*
3648  * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
3649  *
3650  * We assume there is already sufficient KVM space available
3651  * to do this, as we can't allocate L2 descriptor tables/metadata
3652  * from here.
3653  */
3654 void
3655 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
3656 {
3657 #ifdef PMAP_CACHE_VIVT
3658 	struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL;
3659 #endif
3660 #ifdef PMAP_CACHE_VIPT
3661 	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
3662 	struct vm_page *opg;
3663 #ifndef ARM_MMU_EXTENDED
3664 	struct pv_entry *pv = NULL;
3665 #endif
3666 #endif
3667 	struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL;
3668 
3669 	UVMHIST_FUNC(__func__);
3670 
3671 	if (pmap_initialized) {
3672 		UVMHIST_CALLED(maphist);
3673 		UVMHIST_LOG(maphist, " (va=%#x, pa=%#x, prot=%#x, flags=%#x)",
3674 		    va, pa, prot, flags);
3675 	}
3676 
3677 	pmap_t kpm = pmap_kernel();
3678 	pmap_acquire_pmap_lock(kpm);
3679 	struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va);
3680 	const size_t l1slot __diagused = l1pte_index(va);
3681 	KASSERTMSG(l2b != NULL,
3682 	    "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p",
3683 	    va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)],
3684 	    kpm->pm_l2[L2_IDX(l1slot)]
3685 		? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)]
3686 		: NULL,
3687 	    kpm->pm_l2[L2_IDX(l1slot)]
3688 		? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva
3689 		: NULL);
3690 	KASSERT(l2b->l2b_kva != NULL);
3691 
3692 	pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
3693 	const pt_entry_t opte = *ptep;
3694 
3695 	if (opte == 0) {
3696 		PMAPCOUNT(kenter_mappings);
3697 		l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE;
3698 	} else {
3699 		PMAPCOUNT(kenter_remappings);
3700 #ifdef PMAP_CACHE_VIPT
3701 		opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3702 #if !defined(ARM_MMU_EXTENDED) || defined(DIAGNOSTIC)
3703 		struct vm_page_md *omd __diagused = VM_PAGE_TO_MD(opg);
3704 #endif
3705 		if (opg && arm_cache_prefer_mask != 0) {
3706 			KASSERT(opg != pg);
3707 			KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0);
3708 			KASSERT((flags & PMAP_KMPAGE) == 0);
3709 #ifndef ARM_MMU_EXTENDED
3710 			pmap_acquire_page_lock(omd);
3711 			pv = pmap_kremove_pg(opg, va);
3712 			pmap_release_page_lock(omd);
3713 #endif
3714 		}
3715 #endif
3716 		if (l2pte_valid_p(opte)) {
3717 			l2pte_reset(ptep);
3718 			PTE_SYNC(ptep);
3719 #ifdef PMAP_CACHE_VIVT
3720 			cpu_dcache_wbinv_range(va, PAGE_SIZE);
3721 #endif
3722 			cpu_tlb_flushD_SE(va);
3723 			cpu_cpwait();
3724 		}
3725 	}
3726 	pmap_release_pmap_lock(kpm);
3727 
3728 	pt_entry_t npte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot)
3729 	    | ((flags & PMAP_NOCACHE)
3730 		? 0
3731 		: ((flags & PMAP_PTE)
3732 		    ? pte_l2_s_cache_mode_pt : pte_l2_s_cache_mode));
3733 #ifdef ARM_MMU_EXTENDED
3734 	if (prot & VM_PROT_EXECUTE)
3735 		npte &= ~L2_XS_XN;
3736 #endif
3737 	l2pte_set(ptep, npte, 0);
3738 	PTE_SYNC(ptep);
3739 
3740 	if (pg) {
3741 		if (flags & PMAP_KMPAGE) {
3742 			KASSERT(md->urw_mappings == 0);
3743 			KASSERT(md->uro_mappings == 0);
3744 			KASSERT(md->krw_mappings == 0);
3745 			KASSERT(md->kro_mappings == 0);
3746 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
3747 			KASSERT(pv == NULL);
3748 			KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0);
3749 			KASSERT((md->pvh_attrs & PVF_NC) == 0);
3750 			/* if there is a color conflict, evict from cache. */
3751 			if (pmap_is_page_colored_p(md)
3752 			    && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) {
3753 				PMAPCOUNT(vac_color_change);
3754 				pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
3755 			} else if (md->pvh_attrs & PVF_MULTCLR) {
3756 				/*
3757 				 * If this page has multiple colors, expunge
3758 				 * them.
3759 				 */
3760 				PMAPCOUNT(vac_flush_lots2);
3761 				pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY);
3762 			}
3763 			/*
3764 			 * Since this is a KMPAGE, there can be no contention
3765 			 * for this page so don't lock it.
3766 			 */
3767 			md->pvh_attrs &= PAGE_SIZE - 1;
3768 			md->pvh_attrs |= PVF_KMPAGE | PVF_COLORED | PVF_DIRTY
3769 			    | (va & arm_cache_prefer_mask);
3770 #else /* !PMAP_CACHE_VIPT || ARM_MMU_EXTENDED */
3771 			md->pvh_attrs |= PVF_KMPAGE;
3772 #endif
3773 			atomic_inc_32(&pmap_kmpages);
3774 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
3775 		} else if (arm_cache_prefer_mask != 0) {
3776 			if (pv == NULL) {
3777 				pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
3778 				KASSERT(pv != NULL);
3779 			}
3780 			pmap_acquire_page_lock(md);
3781 			pmap_enter_pv(md, pa, pv, pmap_kernel(), va,
3782 			    PVF_WIRED | PVF_KENTRY
3783 			    | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
3784 			if ((prot & VM_PROT_WRITE)
3785 			    && !(md->pvh_attrs & PVF_NC))
3786 				md->pvh_attrs |= PVF_DIRTY;
3787 			KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
3788 			pmap_vac_me_harder(md, pa, pmap_kernel(), va);
3789 			pmap_release_page_lock(md);
3790 #endif
3791 		}
3792 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
3793 	} else {
3794 		if (pv != NULL)
3795 			pool_put(&pmap_pv_pool, pv);
3796 #endif
3797 	}
3798 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
3799 	KASSERT(md == NULL || !pmap_page_locked_p(md));
3800 #endif
3801 	if (pmap_initialized) {
3802 		UVMHIST_LOG(maphist, "  <-- done (ptep %p: %#x -> %#x)",
3803 		    ptep, opte, npte, 0);
3804 	}
3805 
3806 }
3807 
3808 void
3809 pmap_kremove(vaddr_t va, vsize_t len)
3810 {
3811 #ifdef UVMHIST
3812 	u_int total_mappings = 0;
3813 #endif
3814 
3815 	PMAPCOUNT(kenter_unmappings);
3816 
3817 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
3818 
3819 	UVMHIST_LOG(maphist, " (va=%#x, len=%#x)", va, len, 0, 0);
3820 
3821 	const vaddr_t eva = va + len;
3822 
3823 	pmap_acquire_pmap_lock(pmap_kernel());
3824 
3825 	while (va < eva) {
3826 		vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va);
3827 		if (next_bucket > eva)
3828 			next_bucket = eva;
3829 
3830 		pmap_t kpm = pmap_kernel();
3831 		struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va);
3832 		KDASSERT(l2b != NULL);
3833 
3834 		pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)];
3835 		pt_entry_t *ptep = sptep;
3836 		u_int mappings = 0;
3837 
3838 		while (va < next_bucket) {
3839 			const pt_entry_t opte = *ptep;
3840 			struct vm_page *opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3841 			if (opg != NULL) {
3842 				struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
3843 
3844 				if (omd->pvh_attrs & PVF_KMPAGE) {
3845 					KASSERT(omd->urw_mappings == 0);
3846 					KASSERT(omd->uro_mappings == 0);
3847 					KASSERT(omd->krw_mappings == 0);
3848 					KASSERT(omd->kro_mappings == 0);
3849 					omd->pvh_attrs &= ~PVF_KMPAGE;
3850 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
3851 					if (arm_cache_prefer_mask != 0) {
3852 						omd->pvh_attrs &= ~PVF_WRITE;
3853 					}
3854 #endif
3855 					atomic_dec_32(&pmap_kmpages);
3856 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
3857 				} else if (arm_cache_prefer_mask != 0) {
3858 					pmap_acquire_page_lock(omd);
3859 					pool_put(&pmap_pv_pool,
3860 					    pmap_kremove_pg(opg, va));
3861 					pmap_release_page_lock(omd);
3862 #endif
3863 				}
3864 			}
3865 			if (l2pte_valid_p(opte)) {
3866 				l2pte_reset(ptep);
3867 				PTE_SYNC(ptep);
3868 #ifdef PMAP_CACHE_VIVT
3869 				cpu_dcache_wbinv_range(va, PAGE_SIZE);
3870 #endif
3871 				cpu_tlb_flushD_SE(va);
3872 
3873 				mappings += PAGE_SIZE / L2_S_SIZE;
3874 			}
3875 			va += PAGE_SIZE;
3876 			ptep += PAGE_SIZE / L2_S_SIZE;
3877 		}
3878 		KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u",
3879 		    mappings, l2b->l2b_occupancy);
3880 		l2b->l2b_occupancy -= mappings;
3881 		//PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
3882 #ifdef UVMHIST
3883 		total_mappings += mappings;
3884 #endif
3885 	}
3886 	pmap_release_pmap_lock(pmap_kernel());
3887 	cpu_cpwait();
3888 	UVMHIST_LOG(maphist, "  <--- done (%u mappings removed)",
3889 	    total_mappings, 0, 0, 0);
3890 }
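
/*
 * Illustrative sketch (not compiled): a typical consumer of
 * pmap_kenter_pa()/pmap_kremove(), mapping one physical page at a
 * kernel VA the caller has already reserved.  "kva" and "pa" are
 * hypothetical; no L2 tables are allocated by pmap_kenter_pa(), so
 * the VA must lie in a range for which PTEs already exist.
 */
#if 0
	pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	/* ... access the page through kva ... */
	pmap_kremove(kva, PAGE_SIZE);
	pmap_update(pmap_kernel());
#endif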
3891 
3892 bool
3893 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
3894 {
3895 	struct l2_dtable *l2;
3896 	pd_entry_t *pdep, pde;
3897 	pt_entry_t *ptep, pte;
3898 	paddr_t pa;
3899 	u_int l1slot;
3900 
3901 	pmap_acquire_pmap_lock(pm);
3902 
3903 	l1slot = l1pte_index(va);
3904 	pdep = pmap_l1_kva(pm) + l1slot;
3905 	pde = *pdep;
3906 
3907 	if (l1pte_section_p(pde)) {
3908 		/*
3909 		 * Section mappings should only exist for pmap_kernel()
3910 		 */
3911 		KDASSERT(pm == pmap_kernel());
3912 		pmap_release_pmap_lock(pm);
3913 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
3914 		if (l1pte_supersection_p(pde)) {
3915 			pa = (pde & L1_SS_FRAME) | (va & L1_SS_OFFSET);
3916 		} else
3917 #endif
3918 			pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET);
3919 	} else {
3920 		/*
3921 		 * Note that we can't rely on the validity of the L1
3922 		 * descriptor as an indication that a mapping exists.
3923 		 * We have to look it up in the L2 dtable.
3924 		 */
3925 		l2 = pm->pm_l2[L2_IDX(l1slot)];
3926 
3927 		if (l2 == NULL ||
3928 		    (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) {
3929 			pmap_release_pmap_lock(pm);
3930 			return false;
3931 		}
3932 
3933 		pte = ptep[l2pte_index(va)];
3934 		pmap_release_pmap_lock(pm);
3935 
3936 		if (pte == 0)
3937 			return false;
3938 
3939 		switch (pte & L2_TYPE_MASK) {
3940 		case L2_TYPE_L:
3941 			pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3942 			break;
3943 
3944 		default:
3945 			pa = (pte & ~PAGE_MASK) | (va & PAGE_MASK);
3946 			break;
3947 		}
3948 	}
3949 
3950 	if (pap != NULL)
3951 		*pap = pa;
3952 
3953 	return true;
3954 }
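
/*
 * Illustrative sketch (not compiled): pmap_extract() is the usual way
 * for MD code to recover the physical address behind a mapped kernel
 * VA, e.g. before handing a buffer to a device.  "buf" is hypothetical.
 */
#if 0
	paddr_t pa;

	if (!pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa))
		panic("buffer not mapped");
	/* "pa" can now be given to the DMA/device code */
#endif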
3955 
3956 /*
3957  * pmap_pv_remove: remove an unmanaged pv-tracked page from all pmaps
3958  *	that map it
3959  */
3960 
3961 static void
3962 pmap_pv_remove(paddr_t pa)
3963 {
3964 	struct pmap_page *pp;
3965 
3966 	pp = pmap_pv_tracked(pa);
3967 	if (pp == NULL)
3968 		panic("pmap_pv_remove: page not pv-tracked: 0x%"PRIxPADDR,
3969 		    pa);
3970 
3971 	struct vm_page_md *md = PMAP_PAGE_TO_MD(pp);
3972 	pmap_page_remove(md, pa);
3973 }
3974 
3975 void
3976 pmap_pv_protect(paddr_t pa, vm_prot_t prot)
3977 {
3978 
3979 	/* the only case is remove at the moment */
3980 	KASSERT(prot == VM_PROT_NONE);
3981 	pmap_pv_remove(pa);
3982 }
3983 
3984 void
3985 pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
3986 {
3987 	struct l2_bucket *l2b;
3988 	vaddr_t next_bucket;
3989 
3990 	NPDEBUG(PDB_PROTECT,
3991 	    printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n",
3992 	    pm, sva, eva, prot));
3993 
3994 	if ((prot & VM_PROT_READ) == 0) {
3995 		pmap_remove(pm, sva, eva);
3996 		return;
3997 	}
3998 
3999 	if (prot & VM_PROT_WRITE) {
4000 		/*
4001 		 * If this is a read->write transition, just ignore it and let
4002 		 * uvm_fault() take care of it later.
4003 		 */
4004 		return;
4005 	}
4006 
4007 	pmap_acquire_pmap_lock(pm);
4008 
4009 #ifndef ARM_MMU_EXTENDED
4010 	const bool flush = eva - sva >= PAGE_SIZE * 4;
4011 	u_int flags = 0;
4012 #endif
4013 	u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC);
4014 
4015 	while (sva < eva) {
4016 		next_bucket = L2_NEXT_BUCKET_VA(sva);
4017 		if (next_bucket > eva)
4018 			next_bucket = eva;
4019 
4020 		l2b = pmap_get_l2_bucket(pm, sva);
4021 		if (l2b == NULL) {
4022 			sva = next_bucket;
4023 			continue;
4024 		}
4025 
4026 		pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)];
4027 
4028 		while (sva < next_bucket) {
4029 			const pt_entry_t opte = *ptep;
4030 			if (l2pte_valid_p(opte) && l2pte_writable_p(opte)) {
4031 				struct vm_page *pg;
4032 #ifndef ARM_MMU_EXTENDED
4033 				u_int f;
4034 #endif
4035 
4036 #ifdef PMAP_CACHE_VIVT
4037 				/*
4038 				 * OK, at this point, we know we're doing
4039 				 * write-protect operation.  If the pmap is
4040 				 * active, write-back the page.
4041 				 */
4042 				pmap_cache_wbinv_page(pm, sva, false,
4043 				    PVF_REF | PVF_WRITE);
4044 #endif
4045 
4046 				pg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
4047 				pt_entry_t npte = l2pte_set_readonly(opte);
4048 				l2pte_reset(ptep);
4049 				PTE_SYNC(ptep);
4050 #ifdef ARM_MMU_EXTENDED
4051 				pmap_tlb_flush_SE(pm, sva, PVF_REF);
4052 #endif
4053 				l2pte_set(ptep, npte, 0);
4054 				PTE_SYNC(ptep);
4055 
4056 				if (pg != NULL) {
4057 					struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4058 					paddr_t pa = VM_PAGE_TO_PHYS(pg);
4059 
4060 					pmap_acquire_page_lock(md);
4061 #ifndef ARM_MMU_EXTENDED
4062 					f =
4063 #endif
4064 					    pmap_modify_pv(md, pa, pm, sva,
4065 					       clr_mask, 0);
4066 					pmap_vac_me_harder(md, pa, pm, sva);
4067 					pmap_release_page_lock(md);
4068 #ifndef ARM_MMU_EXTENDED
4069 				} else {
4070 					f = PVF_REF | PVF_EXEC;
4071 				}
4072 
4073 				if (flush) {
4074 					flags |= f;
4075 				} else {
4076 					pmap_tlb_flush_SE(pm, sva, f);
4077 #endif
4078 				}
4079 			}
4080 
4081 			sva += PAGE_SIZE;
4082 			ptep += PAGE_SIZE / L2_S_SIZE;
4083 		}
4084 	}
4085 
4086 #ifndef ARM_MMU_EXTENDED
4087 	if (flush) {
4088 		if (PV_BEEN_EXECD(flags)) {
4089 			pmap_tlb_flushID(pm);
4090 		} else if (PV_BEEN_REFD(flags)) {
4091 			pmap_tlb_flushD(pm);
4092 		}
4093 	}
4094 #endif
4095 
4096 	pmap_release_pmap_lock(pm);
4097 }
4098 
4099 void
4100 pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
4101 {
4102 	struct l2_bucket *l2b;
4103 	pt_entry_t *ptep;
4104 	vaddr_t next_bucket;
4105 	vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva;
4106 
4107 	NPDEBUG(PDB_EXEC,
4108 	    printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n",
4109 	    pm, sva, eva));
4110 
4111 	pmap_acquire_pmap_lock(pm);
4112 
4113 	while (sva < eva) {
4114 		next_bucket = L2_NEXT_BUCKET_VA(sva);
4115 		if (next_bucket > eva)
4116 			next_bucket = eva;
4117 
4118 		l2b = pmap_get_l2_bucket(pm, sva);
4119 		if (l2b == NULL) {
4120 			sva = next_bucket;
4121 			continue;
4122 		}
4123 
4124 		for (ptep = &l2b->l2b_kva[l2pte_index(sva)];
4125 		     sva < next_bucket;
4126 		     sva += page_size,
4127 		     ptep += PAGE_SIZE / L2_S_SIZE,
4128 		     page_size = PAGE_SIZE) {
4129 			if (l2pte_valid_p(*ptep)) {
4130 				cpu_icache_sync_range(sva,
4131 				    min(page_size, eva - sva));
4132 			}
4133 		}
4134 	}
4135 
4136 	pmap_release_pmap_lock(pm);
4137 }
4138 
4139 void
4140 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
4141 {
4142 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4143 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
4144 
4145 	NPDEBUG(PDB_PROTECT,
4146 	    printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n",
4147 	    md, pa, prot));
4148 
4149 	switch (prot) {
4150 	case VM_PROT_READ|VM_PROT_WRITE:
4151 #if defined(ARM_MMU_EXTENDED)
4152 		pmap_acquire_page_lock(md);
4153 		pmap_clearbit(md, pa, PVF_EXEC);
4154 		pmap_release_page_lock(md);
4155 		break;
4156 #endif
4157 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
4158 		break;
4159 
4160 	case VM_PROT_READ:
4161 #if defined(ARM_MMU_EXTENDED)
4162 		pmap_acquire_page_lock(md);
4163 		pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC);
4164 		pmap_release_page_lock(md);
4165 		break;
4166 #endif
4167 	case VM_PROT_READ|VM_PROT_EXECUTE:
4168 		pmap_acquire_page_lock(md);
4169 		pmap_clearbit(md, pa, PVF_WRITE);
4170 		pmap_release_page_lock(md);
4171 		break;
4172 
4173 	default:
4174 		pmap_page_remove(md, pa);
4175 		break;
4176 	}
4177 }
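
/*
 * Illustrative sketch (not compiled): how the pager side of UVM might
 * use pmap_page_protect() on a managed page "pg".  Lowering to
 * VM_PROT_READ write-protects every existing mapping (so later writes
 * fault and can be tracked); VM_PROT_NONE removes all mappings.
 */
#if 0
	pmap_page_protect(pg, VM_PROT_READ);	/* catch further writes */
	/* ... clean the page ... */
	pmap_page_protect(pg, VM_PROT_NONE);	/* page is going away */
#endif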
4178 
4179 /*
4180  * pmap_clear_modify:
4181  *
4182  *	Clear the "modified" attribute for a page.
4183  */
4184 bool
4185 pmap_clear_modify(struct vm_page *pg)
4186 {
4187 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4188 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
4189 	bool rv;
4190 
4191 	pmap_acquire_page_lock(md);
4192 
4193 	if (md->pvh_attrs & PVF_MOD) {
4194 		rv = true;
4195 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
4196 		/*
4197 		 * If we are going to clear the modified bit and there are
4198 		 * no other modified bits set, flush the page to memory and
4199 		 * mark it clean.
4200 		 */
4201 		if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD)
4202 			pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
4203 #endif
4204 		pmap_clearbit(md, pa, PVF_MOD);
4205 	} else {
4206 		rv = false;
4207 	}
4208 	pmap_release_page_lock(md);
4209 
4210 	return rv;
4211 }
4212 
4213 /*
4214  * pmap_clear_reference:
4215  *
4216  *	Clear the "referenced" attribute for a page.
4217  */
4218 bool
4219 pmap_clear_reference(struct vm_page *pg)
4220 {
4221 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4222 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
4223 	bool rv;
4224 
4225 	pmap_acquire_page_lock(md);
4226 
4227 	if (md->pvh_attrs & PVF_REF) {
4228 		rv = true;
4229 		pmap_clearbit(md, pa, PVF_REF);
4230 	} else {
4231 		rv = false;
4232 	}
4233 	pmap_release_page_lock(md);
4234 
4235 	return rv;
4236 }
4237 
4238 /*
4239  * pmap_is_modified:
4240  *
4241  *	Test if a page has the "modified" attribute.
4242  */
4243 /* See <arm/arm32/pmap.h> */
4244 
4245 /*
4246  * pmap_is_referenced:
4247  *
4248  *	Test if a page has the "referenced" attribute.
4249  */
4250 /* See <arm/arm32/pmap.h> */
4251 
4252 #if defined(ARM_MMU_EXTENDED) && 0
4253 int
4254 pmap_prefetchabt_fixup(void *v)
4255 {
4256 	struct trapframe * const tf = v;
4257 	vaddr_t va = trunc_page(tf->tf_pc);
4258 	int rv = ABORT_FIXUP_FAILED;
4259 
4260 	if (!TRAP_USERMODE(tf) && va < VM_MAXUSER_ADDRESS)
4261 		return rv;
4262 
4263 	kpreempt_disable();
4264 	pmap_t pm = curcpu()->ci_pmap_cur;
4265 	const size_t l1slot = l1pte_index(va);
4266 	struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)];
4267 	if (l2 == NULL)
4268 		goto out;
4269 
4270 	struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)];
4271 	if (l2b->l2b_kva == NULL)
4272 		goto out;
4273 
4274 	/*
4275 	 * Check the PTE itself.
4276 	 */
4277 	pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
4278 	const pt_entry_t opte = *ptep;
4279 	if ((opte & L2_S_PROT_U) == 0 || (opte & L2_XS_XN) == 0)
4280 		goto out;
4281 
4282 	paddr_t pa = l2pte_pa(opte);
4283 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
4284 	KASSERT(pg != NULL);
4285 
4286 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
4287 
4288 	pmap_acquire_page_lock(md);
4289 	struct pv_entry * const pv = pmap_find_pv(md, pm, va);
4290 	KASSERT(pv != NULL);
4291 
4292 	if (PV_IS_EXEC_P(pv->pv_flags)) {
4293 		l2pte_reset(ptep);
4294 		PTE_SYNC(ptep);
4295 		pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF);
4296 		if (!PV_IS_EXEC_P(md->pvh_attrs)) {
4297 			pmap_syncicache_page(md, pa);
4298 		}
4299 		rv = ABORT_FIXUP_RETURN;
4300 		l2pte_set(ptep, opte & ~L2_XS_XN, 0);
4301 		PTE_SYNC(ptep);
4302 	}
4303 	pmap_release_page_lock(md);
4304 
4305   out:
4306 	kpreempt_enable();
4307 	return rv;
4308 }
4309 #endif
4310 
4311 int
4312 pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
4313 {
4314 	struct l2_dtable *l2;
4315 	struct l2_bucket *l2b;
4316 	paddr_t pa;
4317 	const size_t l1slot = l1pte_index(va);
4318 	int rv = 0;
4319 
4320 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4321 
4322 	va = trunc_page(va);
4323 
4324 	KASSERT(!user || (pm != pmap_kernel()));
4325 
4326 	UVMHIST_LOG(maphist, " (pm=%#x, va=%#x, ftype=%#x, user=%d)",
4327 	    pm, va, ftype, user);
4328 #ifdef ARM_MMU_EXTENDED
4329 	UVMHIST_LOG(maphist, " ti=%#x pai=%#x asid=%#x",
4330 	    cpu_tlb_info(curcpu()), PMAP_PAI(pm, cpu_tlb_info(curcpu())),
4331 	    PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0);
4332 #endif
4333 
4334 	pmap_acquire_pmap_lock(pm);
4335 
4336 	/*
4337 	 * If there is no l2_dtable for this address, then the process
4338 	 * has no business accessing it.
4339 	 *
4340 	 * Note: This will catch userland processes trying to access
4341 	 * kernel addresses.
4342 	 */
4343 	l2 = pm->pm_l2[L2_IDX(l1slot)];
4344 	if (l2 == NULL) {
4345 		UVMHIST_LOG(maphist, " no l2 for l1slot %#x", l1slot, 0, 0, 0);
4346 		goto out;
4347 	}
4348 
4349 	/*
4350 	 * Likewise if there is no L2 descriptor table
4351 	 */
4352 	l2b = &l2->l2_bucket[L2_BUCKET(l1slot)];
4353 	if (l2b->l2b_kva == NULL) {
4354 		UVMHIST_LOG(maphist, " <-- done (no ptep for l1slot %#x)", l1slot, 0, 0, 0);
4355 		goto out;
4356 	}
4357 
4358 	/*
4359 	 * Check the PTE itself.
4360 	 */
4361 	pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
4362 	pt_entry_t const opte = *ptep;
4363 	if (opte == 0 || (opte & L2_TYPE_MASK) == L2_TYPE_L) {
4364 		UVMHIST_LOG(maphist, " <-- done (empty pde for l1slot %#x)", l1slot, 0, 0, 0);
4365 		goto out;
4366 	}
4367 
4368 #ifndef ARM_HAS_VBAR
4369 	/*
4370 	 * Catch a userland access to the vector page mapped at 0x0
4371 	 */
4372 	if (user && (opte & L2_S_PROT_U) == 0) {
4373 		UVMHIST_LOG(maphist, " <-- done (vector_page)", 0, 0, 0, 0);
4374 		goto out;
4375 	}
4376 #endif
4377 
4378 	pa = l2pte_pa(opte);
4379 
4380 	if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(opte)) {
4381 		/*
4382 		 * This looks like a good candidate for "page modified"
4383 		 * emulation...
4384 		 */
4385 		struct pv_entry *pv;
4386 		struct vm_page *pg;
4387 
4388 		/* Extract the physical address of the page */
4389 		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
4390 			UVMHIST_LOG(maphist, " <-- done (mod/ref unmanaged page)", 0, 0, 0, 0);
4391 			goto out;
4392 		}
4393 
4394 		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4395 
4396 		/* Get the current flags for this page. */
4397 		pmap_acquire_page_lock(md);
4398 		pv = pmap_find_pv(md, pm, va);
4399 		if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) {
4400 			pmap_release_page_lock(md);
4401 			UVMHIST_LOG(maphist, " <-- done (mod/ref emul: no PV)", 0, 0, 0, 0);
4402 			goto out;
4403 		}
4404 
4405 		/*
4406 		 * Do the flags say this page is writable? If not then it
4407 		 * is a genuine write fault. If yes then the write fault is
4408 		 * our fault as we did not reflect the write access in the
4409 		 * PTE. Now that we know a write has occurred, we can correct
4410 		 * this and also set the modified bit.
4411 		 */
4412 		if ((pv->pv_flags & PVF_WRITE) == 0) {
4413 			pmap_release_page_lock(md);
4414 			goto out;
4415 		}
4416 
4417 		md->pvh_attrs |= PVF_REF | PVF_MOD;
4418 		pv->pv_flags |= PVF_REF | PVF_MOD;
4419 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
4420 		/*
4421 		 * If there are cacheable mappings for this page, mark it dirty.
4422 		 */
4423 		if ((md->pvh_attrs & PVF_NC) == 0)
4424 			md->pvh_attrs |= PVF_DIRTY;
4425 #endif
4426 #ifdef ARM_MMU_EXTENDED
4427 		if (md->pvh_attrs & PVF_EXEC) {
4428 			md->pvh_attrs &= ~PVF_EXEC;
4429 			PMAPCOUNT(exec_discarded_modfixup);
4430 		}
4431 #endif
4432 		pmap_release_page_lock(md);
4433 
4434 		/*
4435 		 * Re-enable write permissions for the page.  No need to call
4436 		 * pmap_vac_me_harder(), since this is just a
4437 		 * modified-emulation fault, and the PVF_WRITE bit isn't
4438 		 * changing. We've already set the cacheable bits based on
4439 		 * the assumption that we can write to this page.
4440 		 */
4441 		const pt_entry_t npte =
4442 		    l2pte_set_writable((opte & ~L2_TYPE_MASK) | L2_S_PROTO)
4443 #ifdef ARM_MMU_EXTENDED
4444 		    | (pm != pmap_kernel() ? L2_XS_nG : 0)
4445 #endif
4446 		    | 0;
4447 		l2pte_reset(ptep);
4448 		PTE_SYNC(ptep);
4449 		pmap_tlb_flush_SE(pm, va,
4450 		    (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF);
4451 		l2pte_set(ptep, npte, 0);
4452 		PTE_SYNC(ptep);
4453 		PMAPCOUNT(fixup_mod);
4454 		rv = 1;
4455 		UVMHIST_LOG(maphist, " <-- done (mod/ref emul: changed pte from %#x to %#x)",
4456 		    opte, npte, 0, 0);
4457 	} else if ((opte & L2_TYPE_MASK) == L2_TYPE_INV) {
4458 		/*
4459 		 * This looks like a good candidate for "page referenced"
4460 		 * emulation.
4461 		 */
4462 		struct vm_page *pg;
4463 
4464 		/* Extract the physical address of the page */
4465 		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
4466 			UVMHIST_LOG(maphist, " <-- done (ref emul: unmanaged page)", 0, 0, 0, 0);
4467 			goto out;
4468 		}
4469 
4470 		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4471 
4472 		/* Get the current flags for this page. */
4473 		pmap_acquire_page_lock(md);
4474 		struct pv_entry *pv = pmap_find_pv(md, pm, va);
4475 		if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) {
4476 			pmap_release_page_lock(md);
4477 			UVMHIST_LOG(maphist, " <-- done (ref emul no PV)", 0, 0, 0, 0);
4478 			goto out;
4479 		}
4480 
4481 		md->pvh_attrs |= PVF_REF;
4482 		pv->pv_flags |= PVF_REF;
4483 
4484 		pt_entry_t npte =
4485 		    l2pte_set_readonly((opte & ~L2_TYPE_MASK) | L2_S_PROTO);
4486 #ifdef ARM_MMU_EXTENDED
4487 		if (pm != pmap_kernel()) {
4488 			npte |= L2_XS_nG;
4489 		}
4490 		/*
4491 		 * If we got called from prefetch abort, then ftype will have
4492 		 * VM_PROT_EXECUTE set.  Now see if we have no-execute set in
4493 		 * the PTE.
4494 		 */
4495 		if (user && (ftype & VM_PROT_EXECUTE) && (npte & L2_XS_XN)) {
4496 			/*
4497 			 * Is this a mapping of an executable page?
4498 			 */
4499 			if ((pv->pv_flags & PVF_EXEC) == 0) {
4500 				pmap_release_page_lock(md);
4501 				UVMHIST_LOG(maphist, " <-- done (ref emul: no exec)",
4502 				    0, 0, 0, 0);
4503 				goto out;
4504 			}
4505 			/*
4506 			 * If we haven't synced the page, do so now.
4507 			 */
4508 			if ((md->pvh_attrs & PVF_EXEC) == 0) {
4509 				UVMHIST_LOG(maphist, " ref emul: syncicache page #%#x",
4510 				    pa, 0, 0, 0);
4511 				pmap_syncicache_page(md, pa);
4512 				PMAPCOUNT(fixup_exec);
4513 			}
4514 			npte &= ~L2_XS_XN;
4515 		}
4516 #endif /* ARM_MMU_EXTENDED */
4517 		pmap_release_page_lock(md);
4518 		l2pte_reset(ptep);
4519 		PTE_SYNC(ptep);
4520 		pmap_tlb_flush_SE(pm, va,
4521 		    (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF);
4522 		l2pte_set(ptep, npte, 0);
4523 		PTE_SYNC(ptep);
4524 		PMAPCOUNT(fixup_ref);
4525 		rv = 1;
4526 		UVMHIST_LOG(maphist, " <-- done (ref emul: changed pte from %#x to %#x)",
4527 		    opte, npte, 0, 0);
4528 #ifdef ARM_MMU_EXTENDED
4529 	} else if (user && (ftype & VM_PROT_EXECUTE) && (opte & L2_XS_XN)) {
4530 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
4531 		if (pg == NULL) {
4532 			UVMHIST_LOG(maphist, " <-- done (unmanaged page)", 0, 0, 0, 0);
4533 			goto out;
4534 		}
4535 
4536 		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
4537 
4538 		/* Get the current flags for this page. */
4539 		pmap_acquire_page_lock(md);
4540 		struct pv_entry * const pv = pmap_find_pv(md, pm, va);
4541 		if (pv == NULL || (pv->pv_flags & PVF_EXEC) == 0) {
4542 			pmap_release_page_lock(md);
4543 			UVMHIST_LOG(maphist, " <-- done (no PV or not EXEC)", 0, 0, 0, 0);
4544 			goto out;
4545 		}
4546 
4547 		/*
4548 		 * If we haven't synced the page, do so now.
4549 		 */
4550 		if ((md->pvh_attrs & PVF_EXEC) == 0) {
4551 			UVMHIST_LOG(maphist, "syncicache page #%#x",
4552 			    pa, 0, 0, 0);
4553 			pmap_syncicache_page(md, pa);
4554 		}
4555 		pmap_release_page_lock(md);
4556 		/*
4557 		 * Turn off no-execute.
4558 		 */
4559 		KASSERT(opte & L2_XS_nG);
4560 		l2pte_reset(ptep);
4561 		PTE_SYNC(ptep);
4562 		pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF);
4563 		l2pte_set(ptep, opte & ~L2_XS_XN, 0);
4564 		PTE_SYNC(ptep);
4565 		rv = 1;
4566 		PMAPCOUNT(fixup_exec);
4567 		UVMHIST_LOG(maphist, "exec: changed pte from %#x to %#x",
4568 		    opte, opte & ~L2_XS_XN, 0, 0);
4569 #endif
4570 	}
4571 
4572 #ifndef ARM_MMU_EXTENDED
4573 	/*
4574 	 * We know there is a valid mapping here, so simply
4575 	 * fix up the L1 if necessary.
4576 	 */
4577 	pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot;
4578 	pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa | L1_C_DOM(pmap_domain(pm));
4579 	if (*pdep != pde) {
4580 		l1pte_setone(pdep, pde);
4581 		PDE_SYNC(pdep);
4582 		rv = 1;
4583 		PMAPCOUNT(fixup_pdes);
4584 	}
4585 #endif
4586 
4587 #ifdef CPU_SA110
4588 	/*
4589 	 * There are bugs in the rev K SA110.  This is a check for one
4590 	 * of them.
4591 	 */
4592 	if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
4593 	    curcpu()->ci_arm_cpurev < 3) {
4594 		/* Always current pmap */
4595 		if (l2pte_valid_p(opte)) {
4596 			extern int kernel_debug;
4597 			if (kernel_debug & 1) {
4598 				struct proc *p = curlwp->l_proc;
4599 				printf("prefetch_abort: page is already "
4600 				    "mapped - pte=%p *pte=%08x\n", ptep, opte);
4601 				printf("prefetch_abort: pc=%08lx proc=%p "
4602 				    "process=%s\n", va, p, p->p_comm);
4603 				printf("prefetch_abort: far=%08x fs=%x\n",
4604 				    cpu_faultaddress(), cpu_faultstatus());
4605 			}
4606 #ifdef DDB
4607 			if (kernel_debug & 2)
4608 				Debugger();
4609 #endif
4610 			rv = 1;
4611 		}
4612 	}
4613 #endif /* CPU_SA110 */
4614 
4615 #ifndef ARM_MMU_EXTENDED
4616 	/*
4617 	 * If 'rv == 0' at this point, it generally indicates that there is a
4618 	 * stale TLB entry for the faulting address.  That might be due to a
4619 	 * wrong setting of pmap_needs_pte_sync.  So set it and retry.
4620 	 */
4621 	if (rv == 0
4622 	    && pm->pm_l1->l1_domain_use_count == 1
4623 	    && pmap_needs_pte_sync == 0) {
4624 		pmap_needs_pte_sync = 1;
4625 		PTE_SYNC(ptep);
4626 		PMAPCOUNT(fixup_ptesync);
4627 		rv = 1;
4628 	}
4629 #endif
4630 
4631 #ifndef MULTIPROCESSOR
4632 #if defined(DEBUG) || 1
4633 	/*
4634 	 * If 'rv == 0' at this point, it generally indicates that there is a
4635 	 * stale TLB entry for the faulting address. This happens when two or
4636 	 * more processes are sharing an L1. Since we don't flush the TLB on
4637 	 * a context switch between such processes, we can take domain faults
4638 	 * for mappings which exist at the same VA in both processes. EVEN IF
4639 	 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
4640 	 * example.
4641 	 *
4642 	 * This is extremely likely to happen if pmap_enter() updated the L1
4643 	 * entry for a recently entered mapping. In this case, the TLB is
4644 	 * flushed for the new mapping, but there may still be TLB entries for
4645 	 * other mappings belonging to other processes in the 1MB range
4646 	 * covered by the L1 entry.
4647 	 *
4648 	 * Since 'rv == 0', we know that the L1 already contains the correct
4649 	 * value, so the fault must be due to a stale TLB entry.
4650 	 *
4651 	 * Since we always need to flush the TLB anyway in the case where we
4652 	 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
4653 	 * stale TLB entries dynamically.
4654 	 *
4655 	 * However, the above condition can ONLY happen if the current L1 is
4656 	 * being shared. If it happens when the L1 is unshared, it indicates
4657 	 * that other parts of the pmap are not doing their job WRT managing
4658 	 * the TLB.
4659 	 */
4660 	if (rv == 0
4661 #ifndef ARM_MMU_EXTENDED
4662 	    && pm->pm_l1->l1_domain_use_count == 1
4663 #endif
4664 	    && true) {
4665 #ifdef DEBUG
4666 		extern int last_fault_code;
4667 #else
4668 		int last_fault_code = ftype & VM_PROT_EXECUTE
4669 		    ? armreg_ifsr_read()
4670 		    : armreg_dfsr_read();
4671 #endif
4672 		printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
4673 		    pm, va, ftype);
4674 		printf("fixup: l2 %p, l2b %p, ptep %p, pte %#x\n",
4675 		    l2, l2b, ptep, opte);
4676 
4677 #ifndef ARM_MMU_EXTENDED
4678 		printf("fixup: pdep %p, pde %#x, fsr %#x\n",
4679 		    pdep, pde, last_fault_code);
4680 #else
4681 		printf("fixup: pdep %p, pde %#x, ttbcr %#x\n",
4682 		    &pmap_l1_kva(pm)[l1slot], pmap_l1_kva(pm)[l1slot],
4683 		   armreg_ttbcr_read());
4684 		printf("fixup: fsr %#x cpm %p casid %#x contextidr %#x dacr %#x\n",
4685 		    last_fault_code, curcpu()->ci_pmap_cur,
4686 		    curcpu()->ci_pmap_asid_cur,
4687 		    armreg_contextidr_read(), armreg_dacr_read());
4688 #ifdef _ARM_ARCH_7
4689 		if (ftype & VM_PROT_WRITE)
4690 			armreg_ats1cuw_write(va);
4691 		else
4692 			armreg_ats1cur_write(va);
4693 		arm_isb();
4694 		printf("fixup: par %#x\n", armreg_par_read());
4695 #endif
4696 #endif
4697 #ifdef DDB
4698 		extern int kernel_debug;
4699 
4700 		if (kernel_debug & 2) {
4701 			pmap_release_pmap_lock(pm);
4702 #ifdef UVMHIST
4703 			KERNHIST_DUMP(maphist);
4704 #endif
4705 			cpu_Debugger();
4706 			pmap_acquire_pmap_lock(pm);
4707 		}
4708 #endif
4709 	}
4710 #endif
4711 #endif
4712 
4713 #ifndef ARM_MMU_EXTENDED
4714 	/* Flush the TLB in the shared L1 case - see comment above */
4715 	pmap_tlb_flush_SE(pm, va,
4716 	    (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF);
4717 #endif
4718 
4719 	rv = 1;
4720 
4721 out:
4722 	pmap_release_pmap_lock(pm);
4723 
4724 	return (rv);
4725 }
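
/*
 * Illustrative sketch (not compiled): the ARM abort handlers are
 * expected to try pmap_fault_fixup() first and fall back to
 * uvm_fault() only when it returns 0, i.e. when the fault is not one
 * of the ref/mod/exec emulation cases handled above.  "map", "va",
 * "ftype" and "user" stand for values the real handler derives from
 * the trap frame and fault status registers.
 */
#if 0
	if (pmap_fault_fixup(vm_map_pmap(map), va, ftype, user)) {
		/* emulation fault serviced; return from the trap */
		return;
	}
	error = uvm_fault(map, va, ftype);
#endif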
4726 
4727 /*
4728  * Routine:	pmap_procwr
4729  *
4730  * Function:
4731  *	Synchronize caches corresponding to [addr, addr+len) in p.
4732  *
4733  */
4734 void
4735 pmap_procwr(struct proc *p, vaddr_t va, int len)
4736 {
4737 	/* We only need to do anything if it is the current process. */
4738 	if (p == curproc)
4739 		cpu_icache_sync_range(va, len);
4740 }
4741 
4742 /*
4743  * Routine:	pmap_unwire
4744  * Function:	Clear the wired attribute for a map/virtual-address pair.
4745  *
4746  * In/out conditions:
4747  *		The mapping must already exist in the pmap.
4748  */
4749 void
4750 pmap_unwire(pmap_t pm, vaddr_t va)
4751 {
4752 	struct l2_bucket *l2b;
4753 	pt_entry_t *ptep, pte;
4754 	struct vm_page *pg;
4755 	paddr_t pa;
4756 
4757 	NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va));
4758 
4759 	pmap_acquire_pmap_lock(pm);
4760 
4761 	l2b = pmap_get_l2_bucket(pm, va);
4762 	KDASSERT(l2b != NULL);
4763 
4764 	ptep = &l2b->l2b_kva[l2pte_index(va)];
4765 	pte = *ptep;
4766 
4767 	/* Extract the physical address of the page */
4768 	pa = l2pte_pa(pte);
4769 
4770 	if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
4771 		/* Update the wired bit in the pv entry for this page. */
4772 		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4773 
4774 		pmap_acquire_page_lock(md);
4775 		(void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0);
4776 		pmap_release_page_lock(md);
4777 	}
4778 
4779 	pmap_release_pmap_lock(pm);
4780 }
4781 
4782 void
4783 pmap_activate(struct lwp *l)
4784 {
4785 	struct cpu_info * const ci = curcpu();
4786 	extern int block_userspace_access;
4787 	pmap_t npm = l->l_proc->p_vmspace->vm_map.pmap;
4788 #ifdef ARM_MMU_EXTENDED
4789 	struct pmap_asid_info * const pai = PMAP_PAI(npm, cpu_tlb_info(ci));
4790 #endif
4791 
4792 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4793 
4794 	UVMHIST_LOG(maphist, "(l=%#x) pm=%#x", l, npm, 0, 0);
4795 
4796 	/*
4797 	 * If activating a non-current lwp or the current lwp is
4798 	 * already active, just return.
4799 	 */
4800 	if (false
4801 	    || l != curlwp
4802 #ifdef ARM_MMU_EXTENDED
4803 	    || (ci->ci_pmap_cur == npm &&
4804 		(npm == pmap_kernel()
4805 		 /* || PMAP_PAI_ASIDVALID_P(pai, cpu_tlb_info(ci)) */))
4806 #else
4807 	    || npm->pm_activated == true
4808 #endif
4809 	    || false) {
4810 		UVMHIST_LOG(maphist, " <-- (same pmap)", curlwp, l, 0, 0);
4811 		return;
4812 	}
4813 
4814 #ifndef ARM_MMU_EXTENDED
4815 	const uint32_t ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2))
4816 	    | (DOMAIN_CLIENT << (pmap_domain(npm) * 2));
4817 
4818 	/*
4819 	 * If TTB and DACR are unchanged, short-circuit all the
4820 	 * TLB/cache management stuff.
4821 	 */
4822 	pmap_t opm = ci->ci_lastlwp
4823 	    ? ci->ci_lastlwp->l_proc->p_vmspace->vm_map.pmap
4824 	    : NULL;
4825 	if (opm != NULL) {
4826 		uint32_t odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2))
4827 		    | (DOMAIN_CLIENT << (pmap_domain(opm) * 2));
4828 
4829 		if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr)
4830 			goto all_done;
4831 	}
4832 #endif /* !ARM_MMU_EXTENDED */
4833 
4834 	PMAPCOUNT(activations);
4835 	block_userspace_access = 1;
4836 
4837 #ifndef ARM_MMU_EXTENDED
4838 	/*
4839 	 * If switching to a user vmspace which is different to the
4840 	 * most recent one, and the most recent one is potentially
4841 	 * live in the cache, we must write-back and invalidate the
4842 	 * entire cache.
4843 	 */
4844 	pmap_t rpm = ci->ci_pmap_lastuser;
4845 #endif
4846 
4847 /*
4848  * XXXSCW: There's a corner case here which can leave turds in the cache as
4849  * reported in kern/41058. They're probably left over during tear-down and
4850  * switching away from an exiting process. Until the root cause is identified
4851  * and fixed, zap the cache when switching pmaps. This will result in a few
4852  * unnecessary cache flushes, but that's better than silently corrupting data.
4853  */
4854 #ifndef ARM_MMU_EXTENDED
4855 #if 0
4856 	if (npm != pmap_kernel() && rpm && npm != rpm &&
4857 	    rpm->pm_cstate.cs_cache) {
4858 		rpm->pm_cstate.cs_cache = 0;
4859 #ifdef PMAP_CACHE_VIVT
4860 		cpu_idcache_wbinv_all();
4861 #endif
4862 	}
4863 #else
4864 	if (rpm) {
4865 		rpm->pm_cstate.cs_cache = 0;
4866 		if (npm == pmap_kernel())
4867 			ci->ci_pmap_lastuser = NULL;
4868 #ifdef PMAP_CACHE_VIVT
4869 		cpu_idcache_wbinv_all();
4870 #endif
4871 	}
4872 #endif
4873 
4874 	/* No interrupts while we frob the TTB/DACR */
4875 	uint32_t oldirqstate = disable_interrupts(IF32_bits);
4876 #endif /* !ARM_MMU_EXTENDED */
4877 
4878 #ifndef ARM_HAS_VBAR
4879 	/*
4880 	 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
4881 	 * entry corresponding to 'vector_page' in the incoming L1 table
4882 	 * before switching to it otherwise subsequent interrupts/exceptions
4883 	 * (including domain faults!) will jump into hyperspace.
4884 	 */
4885 	if (npm->pm_pl1vec != NULL) {
4886 		cpu_tlb_flushID_SE((u_int)vector_page);
4887 		cpu_cpwait();
4888 		*npm->pm_pl1vec = npm->pm_l1vec;
4889 		PTE_SYNC(npm->pm_pl1vec);
4890 	}
4891 #endif
4892 
4893 #ifdef ARM_MMU_EXTENDED
4894 	/*
4895 	 * Assume that TTBR1 has only global mappings and TTBR0 only has
4896 	 * non-global mappings.  To prevent speculation from doing evil things
4897 	 * we disable translation table walks using TTBR0 before setting the
4898 	 * CONTEXTIDR (ASID) or new TTBR0 value.  Once both are set, table
4899 	 * walks are reenabled.
4900 	 */
4901 	UVMHIST_LOG(maphist, " acquiring asid", 0, 0, 0, 0);
4902 	const uint32_t old_ttbcr = armreg_ttbcr_read();
4903 	armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0);
4904 	arm_isb();
4905 	pmap_tlb_asid_acquire(npm, l);
4906 	UVMHIST_LOG(maphist, " setting ttbr pa=%#x asid=%#x", npm->pm_l1_pa, pai->pai_asid, 0, 0);
4907 	cpu_setttb(npm->pm_l1_pa, pai->pai_asid);
4908 	/*
4909 	 * Now we can reenable tablewalks since the CONTEXTIDR and TTBR0 have
4910 	 * been updated.
4911 	 */
4912 	arm_isb();
4913 	if (npm != pmap_kernel()) {
4914 		armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0);
4915 	}
4916 	cpu_cpwait();
4917 	ci->ci_pmap_asid_cur = pai->pai_asid;
4918 #else
4919 	cpu_domains(ndacr);
4920 	if (npm == pmap_kernel() || npm == rpm) {
4921 		/*
4922 		 * Switching to a kernel thread, or back to the
4923 		 * same user vmspace as before... Simply update
4924 		 * the TTB (no TLB flush required)
4925 		 */
4926 		cpu_setttb(npm->pm_l1->l1_physaddr, false);
4927 		cpu_cpwait();
4928 	} else {
4929 		/*
4930 		 * Otherwise, update TTB and flush TLB
4931 		 */
4932 		cpu_context_switch(npm->pm_l1->l1_physaddr);
4933 		if (rpm != NULL)
4934 			rpm->pm_cstate.cs_tlb = 0;
4935 	}
4936 
4937 	restore_interrupts(oldirqstate);
4938 #endif /* ARM_MMU_EXTENDED */
4939 
4940 	block_userspace_access = 0;
4941 
4942 #ifndef ARM_MMU_EXTENDED
4943  all_done:
4944 	/*
4945 	 * The new pmap is resident. Make sure it's marked
4946 	 * as resident in the cache/TLB.
4947 	 */
4948 	npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
4949 	if (npm != pmap_kernel())
4950 		ci->ci_pmap_lastuser = npm;
4951 
4952 	/* The old pmap is no longer active */
4953 	if (opm != npm) {
4954 		if (opm != NULL)
4955 			opm->pm_activated = false;
4956 
4957 		/* But the new one is */
4958 		npm->pm_activated = true;
4959 	}
4960 #endif
4961 	ci->ci_pmap_cur = npm;
4962 	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
4963 }
4964 
4965 void
4966 pmap_deactivate(struct lwp *l)
4967 {
4968 	pmap_t pm = l->l_proc->p_vmspace->vm_map.pmap;
4969 
4970 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4971 
4972 	UVMHIST_LOG(maphist, "(l=%#x) pm=%#x", l, pm, 0, 0);
4973 
4974 #ifdef ARM_MMU_EXTENDED
4975 	kpreempt_disable();
4976 	struct cpu_info * const ci = curcpu();
4977 	/*
4978 	 * Disable translation table walks from TTBR0 while no pmap has been
4979 	 * activated.
4980 	 */
4981 	const uint32_t old_ttbcr = armreg_ttbcr_read();
4982 	armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0);
4983 	arm_isb();
4984 	pmap_tlb_asid_deactivate(pm);
4985 	cpu_setttb(pmap_kernel()->pm_l1_pa, KERNEL_PID);
4986 	ci->ci_pmap_cur = pmap_kernel();
4987 	ci->ci_pmap_asid_cur = KERNEL_PID;
4988 	kpreempt_enable();
4989 #else
4990 	/*
4991 	 * If the process is exiting, make sure pmap_activate() does
4992 	 * a full MMU context-switch and cache flush, which we might
4993 	 * otherwise skip. See PR port-arm/38950.
4994 	 */
4995 	if (l->l_proc->p_sflag & PS_WEXIT)
4996 		curcpu()->ci_lastlwp = NULL;
4997 
4998 	pm->pm_activated = false;
4999 #endif
5000 	UVMHIST_LOG(maphist, "  <-- done", 0, 0, 0, 0);
5001 }
5002 
5003 void
5004 pmap_update(pmap_t pm)
5005 {
5006 
5007 	if (pm->pm_remove_all) {
5008 #ifdef ARM_MMU_EXTENDED
5009 		KASSERT(pm != pmap_kernel());
5010 
5011 		KASSERTMSG(curcpu()->ci_pmap_cur != pm
5012 		    || pm->pm_pai[0].pai_asid == curcpu()->ci_pmap_asid_cur,
5013 		    "pmap/asid %p/%#x != %s cur pmap/asid %p/%#x", pm,
5014 		    pm->pm_pai[0].pai_asid, curcpu()->ci_data.cpu_name,
5015 		    curcpu()->ci_pmap_cur, curcpu()->ci_pmap_asid_cur);
5016 
5017 #ifdef MULTIPROCESSOR
5018 		/*
5019 		 * Finish up the pmap_remove_all() optimisation by flushing
5020 		 * all our ASIDs.
5021 		 */
5022 		// This should be the last CPU with this pmap onproc
5023 		KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(curcpu())));
5024 		if (kcpuset_isset(pm->pm_onproc, cpu_index(curcpu()))) {
5025 			if (pm != pmap_kernel()) {
5026 				struct cpu_info * const ci = curcpu();
5027 				KASSERT(!cpu_intr_p());
5028 				/*
5029 				 * The bits in pm_onproc that belong to this
5030 				 * TLB can be changed while this TLB's lock is
5031 				 * not held as long as we use atomic ops.
5032 				 */
5033 				kcpuset_atomic_clear(pm->pm_onproc,
5034 				    cpu_index(ci));
5035 			}
5036 		}
5037 		KASSERT(kcpuset_iszero(pm->pm_onproc));
5038 #endif
5039 		struct pmap_asid_info * const pai =
5040 		    PMAP_PAI(pm, cpu_tlb_info(curcpu()));
5041 
5042 		tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
5043 
5044 #else
5045 		/*
5046 		 * Finish up the pmap_remove_all() optimisation by flushing
5047 		 * the TLB.
5048 		 */
5049 		pmap_tlb_flushID(pm);
5050 #endif
5051 		pm->pm_remove_all = false;
5052 	}
5053 
5054 #ifdef ARM_MMU_EXTENDED
5055 #if defined(MULTIPROCESSOR)
5056 	armreg_bpiallis_write(0);
5057 #else
5058 	armreg_bpiall_write(0);
5059 #endif
5060 
5061 #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
5062 	u_int pending = atomic_swap_uint(&pm->pm_shootdown_pending, 0);
5063 	if (pending && pmap_tlb_shootdown_bystanders(pm)) {
5064 		PMAP_COUNT(shootdown_ipis);
5065 	}
5066 #endif
5067 	KASSERTMSG(pm == pmap_kernel()
5068 	    || curcpu()->ci_pmap_cur != pm
5069 	    || pm->pm_pai[0].pai_asid == curcpu()->ci_pmap_asid_cur,
5070 	    "pmap/asid %p/%#x != %s cur pmap/asid %p/%#x", pm,
5071 	    pm->pm_pai[0].pai_asid, curcpu()->ci_data.cpu_name,
5072 	    curcpu()->ci_pmap_cur, curcpu()->ci_pmap_asid_cur);
5073 #else
5074 	if (pmap_is_current(pm)) {
5075 		/*
5076 		 * If we're dealing with a current userland pmap, move its L1
5077 		 * to the end of the LRU.
5078 		 */
5079 		if (pm != pmap_kernel())
5080 			pmap_use_l1(pm);
5081 
5082 		/*
5083 		 * We can assume we're done with frobbing the cache/tlb for
5084 		 * now. Make sure any future pmap ops don't skip cache/tlb
5085 		 * flushes.
5086 		 */
5087 		pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
5088 	}
5089 #endif
5090 
5091 	PMAPCOUNT(updates);
5092 
5093 	/*
5094 	 * make sure TLB/cache operations have completed.
5095 	 */
5096 	cpu_cpwait();
5097 }
5098 
5099 void
5100 pmap_remove_all(pmap_t pm)
5101 {
5102 
5103 	/*
5104 	 * The vmspace described by this pmap is about to be torn down.
5105 	 * Until pmap_update() is called, UVM will only make calls
5106 	 * to pmap_remove(). We can make life much simpler by flushing
5107 	 * the cache now, and deferring TLB invalidation to pmap_update().
5108 	 */
5109 #ifdef PMAP_CACHE_VIVT
5110 	pmap_cache_wbinv_all(pm, PVF_EXEC);
5111 #endif
5112 	pm->pm_remove_all = true;
5113 }
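
/*
 * Illustrative sketch (not compiled): roughly the teardown sequence
 * from UVM that the pm_remove_all optimisation depends on.  Nothing
 * else touches the pmap between the first and last call, so the
 * per-page cache/TLB work in pmap_remove() can be skipped and a
 * single flush done in pmap_update().
 */
#if 0
	pmap_remove_all(pm);		/* write back the cache once */
	pmap_remove(pm, sva1, eva1);	/* PTEs go, TLB flushes deferred */
	pmap_remove(pm, sva2, eva2);
	pmap_update(pm);		/* one deferred TLB invalidation */
#endif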
5114 
5115 /*
5116  * Retire the given physical map from service.
5117  * Should only be called if the map contains no valid mappings.
5118  */
5119 void
5120 pmap_destroy(pmap_t pm)
5121 {
5122 	u_int count;
5123 
5124 	if (pm == NULL)
5125 		return;
5126 
5127 	if (pm->pm_remove_all) {
5128 		pmap_tlb_flushID(pm);
5129 		pm->pm_remove_all = false;
5130 	}
5131 
5132 	/*
5133 	 * Drop reference count
5134 	 */
5135 	mutex_enter(pm->pm_lock);
5136 	count = --pm->pm_obj.uo_refs;
5137 	mutex_exit(pm->pm_lock);
5138 	if (count > 0) {
5139 #ifndef ARM_MMU_EXTENDED
5140 		if (pmap_is_current(pm)) {
5141 			if (pm != pmap_kernel())
5142 				pmap_use_l1(pm);
5143 			pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
5144 		}
5145 #endif
5146 		return;
5147 	}
5148 
5149 	/*
5150 	 * reference count is zero, free pmap resources and then free pmap.
5151 	 */
5152 
5153 #ifndef ARM_HAS_VBAR
5154 	if (vector_page < KERNEL_BASE) {
5155 		KDASSERT(!pmap_is_current(pm));
5156 
5157 		/* Remove the vector page mapping */
5158 		pmap_remove(pm, vector_page, vector_page + PAGE_SIZE);
5159 		pmap_update(pm);
5160 	}
5161 #endif
5162 
5163 	pmap_free_l1(pm);
5164 
5165 #ifdef ARM_MMU_EXTENDED
5166 #ifdef MULTIPROCESSOR
5167 	kcpuset_destroy(pm->pm_active);
5168 	kcpuset_destroy(pm->pm_onproc);
5169 #endif
5170 #else
5171 	struct cpu_info * const ci = curcpu();
5172 	if (ci->ci_pmap_lastuser == pm)
5173 		ci->ci_pmap_lastuser = NULL;
5174 #endif
5175 
5176 	uvm_obj_destroy(&pm->pm_obj, false);
5177 	mutex_destroy(&pm->pm_obj_lock);
5178 	pool_cache_put(&pmap_cache, pm);
5179 }
5180 
5181 
5182 /*
5183  * void pmap_reference(pmap_t pm)
5184  *
5185  * Add a reference to the specified pmap.
5186  */
5187 void
5188 pmap_reference(pmap_t pm)
5189 {
5190 
5191 	if (pm == NULL)
5192 		return;
5193 
5194 #ifndef ARM_MMU_EXTENDED
5195 	pmap_use_l1(pm);
5196 #endif
5197 
5198 	mutex_enter(pm->pm_lock);
5199 	pm->pm_obj.uo_refs++;
5200 	mutex_exit(pm->pm_lock);
5201 }
5202 
5203 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
5204 
5205 static struct evcnt pmap_prefer_nochange_ev =
5206     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange");
5207 static struct evcnt pmap_prefer_change_ev =
5208     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change");
5209 
5210 EVCNT_ATTACH_STATIC(pmap_prefer_change_ev);
5211 EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev);
5212 
5213 void
5214 pmap_prefer(vaddr_t hint, vaddr_t *vap, int td)
5215 {
5216 	vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1);
5217 	vaddr_t va = *vap;
5218 	vaddr_t diff = (hint - va) & mask;
5219 	if (diff == 0) {
5220 		pmap_prefer_nochange_ev.ev_count++;
5221 	} else {
5222 		pmap_prefer_change_ev.ev_count++;
5223 		if (__predict_false(td))
5224 			va -= mask + 1;
5225 		*vap = va + diff;
5226 	}
5227 }
5228 #endif /* ARM_MMU_V6 | ARM_MMU_V7 */
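
/*
 * Worked example for pmap_prefer() (the numbers are hypothetical):
 * with PAGE_SIZE 0x1000 and arm_cache_prefer_mask 0x7000 (a 32KB way
 * size), mask is 0x7fff.  For hint = 0x00203000 and *vap = 0x00401000:
 *
 *	diff = (0x00203000 - 0x00401000) & 0x7fff = 0x2000
 *	*vap = 0x00401000 + 0x2000               = 0x00403000
 *
 * The adjusted address now shares the hint's cache color (both are
 * 0x3000 modulo the way size).  If td (top-down allocation) is set,
 * va is first backed off by mask + 1 so the result never moves past
 * the caller's original address.
 */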
5229 
5230 /*
5231  * pmap_zero_page()
5232  *
5233  * Zero a given physical page by mapping it at a page hook point.
5234  * In doing the zero page op, the page we zero is mapped cachable, as with
5235  * In doing the zero page op, the page we zero is mapped cacheable, since
5236  * on StrongARM accesses to non-cached pages are non-burst, making writing
5237  * _any_ bulk data very slow.
5238 #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
5239 void
5240 pmap_zero_page_generic(paddr_t pa)
5241 {
5242 #if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
5243 	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
5244 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
5245 #endif
5246 #if defined(PMAP_CACHE_VIPT)
5247 	/* Choose the last page color it had, if any */
5248 	const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
5249 #else
5250 	const vsize_t va_offset = 0;
5251 #endif
5252 #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
5253 	/*
5254 	 * Is this page mapped at its natural color?
5255 	 * If we have all of memory mapped, then just convert PA to VA.
5256 	 */
5257 	bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
5258 	   || va_offset == (pa & arm_cache_prefer_mask);
5259 	const vaddr_t vdstp = okcolor
5260 	    ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset))
5261 	    : cpu_cdstp(va_offset);
5262 #else
5263 	const bool okcolor = false;
5264 	const vaddr_t vdstp = cpu_cdstp(va_offset);
5265 #endif
5266 	pt_entry_t * const ptep = cpu_cdst_pte(va_offset);
5267 
5268 
5269 #ifdef DEBUG
5270 	if (!SLIST_EMPTY(&md->pvh_list))
5271 		panic("pmap_zero_page: page has mappings");
5272 #endif
5273 
5274 	KDASSERT((pa & PGOFSET) == 0);
5275 
5276 	if (!okcolor) {
5277 		/*
5278 		 * Hook in the page, zero it, and purge the cache for that
5279 		 * zeroed page. Invalidate the TLB as needed.
5280 		 */
5281 		const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode
5282 		    | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE);
5283 		l2pte_set(ptep, npte, 0);
5284 		PTE_SYNC(ptep);
5285 		cpu_tlb_flushD_SE(vdstp);
5286 		cpu_cpwait();
5287 #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) \
5288     && !defined(ARM_MMU_EXTENDED)
5289 		/*
5290 		 * If we are direct-mapped and our color isn't ok, then before
5291 		 * we bzero the page invalidate its contents from the cache and
5292 		 * reset the color to its natural color.
5293 		 */
5294 		cpu_dcache_inv_range(vdstp, PAGE_SIZE);
5295 		md->pvh_attrs &= ~arm_cache_prefer_mask;
5296 		md->pvh_attrs |= (pa & arm_cache_prefer_mask);
5297 #endif
5298 	}
5299 	bzero_page(vdstp);
5300 	if (!okcolor) {
5301 		/*
5302 		 * Unmap the page.
5303 		 */
5304 		l2pte_reset(ptep);
5305 		PTE_SYNC(ptep);
5306 		cpu_tlb_flushD_SE(vdstp);
5307 #ifdef PMAP_CACHE_VIVT
5308 		cpu_dcache_wbinv_range(vdstp, PAGE_SIZE);
5309 #endif
5310 	}
5311 #ifdef PMAP_CACHE_VIPT
5312 	/*
5313 	 * This page is now cache resident so it now has a page color.
5314 	 * Any contents have been obliterated so clear the EXEC flag.
5315 	 */
5316 #ifndef ARM_MMU_EXTENDED
5317 	if (!pmap_is_page_colored_p(md)) {
5318 		PMAPCOUNT(vac_color_new);
5319 		md->pvh_attrs |= PVF_COLORED;
5320 	}
5321 	md->pvh_attrs |= PVF_DIRTY;
5322 #endif
5323 	if (PV_IS_EXEC_P(md->pvh_attrs)) {
5324 		md->pvh_attrs &= ~PVF_EXEC;
5325 		PMAPCOUNT(exec_discarded_zero);
5326 	}
5327 #endif
5328 }
5329 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
5330 
5331 #if ARM_MMU_XSCALE == 1
5332 void
5333 pmap_zero_page_xscale(paddr_t pa)
5334 {
5335 #ifdef DEBUG
5336 	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
5337 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
5338 
5339 	if (!SLIST_EMPTY(&md->pvh_list))
5340 		panic("pmap_zero_page: page has mappings");
5341 #endif
5342 
5343 	KDASSERT((pa & PGOFSET) == 0);
5344 
5345 	/*
5346 	 * Hook in the page, zero it, and purge the cache for that
5347 	 * zeroed page. Invalidate the TLB as needed.
5348 	 */
5349 
5350 	pt_entry_t npte = L2_S_PROTO | pa |
5351 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
5352 	    L2_C | L2_XS_T_TEX(TEX_XSCALE_X);	/* mini-data */
5353 	l2pte_set(cdst_pte, npte, 0);
5354 	PTE_SYNC(cdst_pte);
5355 	cpu_tlb_flushD_SE(cdstp);
5356 	cpu_cpwait();
5357 	bzero_page(cdstp);
5358 	xscale_cache_clean_minidata();
5359 	l2pte_reset(cdst_pte);
5360 	PTE_SYNC(cdst_pte);
5361 }
5362 #endif /* ARM_MMU_XSCALE == 1 */
5363 
5364 /*
5365  * pmap_pageidlezero()
5366  * The same as above, except that we assume that the page is not
5367  * mapped.  This means we never have to flush the cache first.  Called
5368  * from the idle loop.
5369  */
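/*
 * The zeroing loop below works in 64-bit words and polls
 * sched_curcpu_runnable_p() before each word, so the idle loop can back
 * out quickly once another LWP becomes runnable.  On an aborted run the
 * page is only partially zeroed, which is why false is returned and (on
 * VIVT caches) the cache purge is skipped.
 */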
5370 bool
5371 pmap_pageidlezero(paddr_t pa)
5372 {
5373 	bool rv = true;
5374 #if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
5375 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
5376 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
5377 #endif
5378 #ifdef PMAP_CACHE_VIPT
5379 	/* Choose the last page color it had, if any */
5380 	const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
5381 #else
5382 	const vsize_t va_offset = 0;
5383 #endif
5384 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
5385 	bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
5386 	   || va_offset == (pa & arm_cache_prefer_mask);
5387 	const vaddr_t vdstp = okcolor
5388 	    ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset))
5389 	    : cpu_cdstp(va_offset);
5390 #else
5391 	const bool okcolor = false;
5392 	const vaddr_t vdstp = cpu_cdstp(va_offset);
5393 #endif
5394 	pt_entry_t * const ptep = cpu_cdst_pte(va_offset);
5395 
5396 
5397 #ifdef DEBUG
5398 	if (!SLIST_EMPTY(&md->pvh_list))
5399 		panic("pmap_pageidlezero: page has mappings");
5400 #endif
5401 
5402 	KDASSERT((pa & PGOFSET) == 0);
5403 
5404 	if (!okcolor) {
5405 		/*
5406 		 * Hook in the page, zero it, and purge the cache for that
5407 		 * zeroed page. Invalidate the TLB as needed.
5408 		 */
5409 		const pt_entry_t npte = L2_S_PROTO | pa |
5410 		    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
5411 		l2pte_set(ptep, npte, 0);
5412 		PTE_SYNC(ptep);
5413 		cpu_tlb_flushD_SE(vdstp);
5414 		cpu_cpwait();
5415 	}
5416 
5417 	uint64_t *ptr = (uint64_t *)vdstp;
5418 	for (size_t i = 0; i < PAGE_SIZE / sizeof(*ptr); i++) {
5419 		if (sched_curcpu_runnable_p() != 0) {
5420 			/*
5421 			 * A process has become ready.  Abort now,
5422 			 * so we don't keep it waiting while we
5423 			 * do slow memory access to finish this
5424 			 * page.
5425 			 */
5426 			rv = false;
5427 			break;
5428 		}
5429 		*ptr++ = 0;
5430 	}
5431 
5432 #ifdef PMAP_CACHE_VIVT
5433 	if (rv)
5434 		/*
5435 		 * If we aborted, this page will be re-zeroed later, so only
5436 		 * purge the cache if we actually finished it.
5437 		 */
5438 		cpu_dcache_wbinv_range(vdstp, PAGE_SIZE);
5439 #elif defined(PMAP_CACHE_VIPT)
5440 	/*
5441 	 * This page is now cache resident so it now has a page color.
5442 	 * Any contents have been obliterated so clear the EXEC flag.
5443 	 */
5444 #ifndef ARM_MMU_EXTENDED
5445 	if (!pmap_is_page_colored_p(md)) {
5446 		PMAPCOUNT(vac_color_new);
5447 		md->pvh_attrs |= PVF_COLORED;
5448 	}
5449 #endif
5450 	if (PV_IS_EXEC_P(md->pvh_attrs)) {
5451 		md->pvh_attrs &= ~PVF_EXEC;
5452 		PMAPCOUNT(exec_discarded_zero);
5453 	}
5454 #endif
5455 	/*
5456 	 * Unmap the page.
5457 	 */
5458 	if (!okcolor) {
5459 		l2pte_reset(ptep);
5460 		PTE_SYNC(ptep);
5461 		cpu_tlb_flushD_SE(vdstp);
5462 	}
5463 
5464 	return rv;
5465 }
5466 
5467 /*
5468  * pmap_copy_page()
5469  *
5470  * Copy one physical page into another, by mapping the pages into
5471  * hook points. The same comment regarding cacheability as in
5472  * pmap_zero_page also applies here.
5473  */
5474 #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
5475 void
5476 pmap_copy_page_generic(paddr_t src, paddr_t dst)
5477 {
5478 	struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src);
5479 	struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg);
5480 #if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
5481 	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst);
5482 	struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg);
5483 #endif
5484 #ifdef PMAP_CACHE_VIPT
5485 	const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask;
5486 	const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask;
5487 #else
5488 	const vsize_t src_va_offset = 0;
5489 	const vsize_t dst_va_offset = 0;
5490 #endif
5491 #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
5492 	/*
5493 	 * Is this page mapped at its natural color?
5494 	 * If we have all of memory mapped, then just convert PA to VA.
5495 	 */
5496 	bool src_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
5497 	    || src_va_offset == (src & arm_cache_prefer_mask);
5498 	bool dst_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
5499 	    || dst_va_offset == (dst & arm_cache_prefer_mask);
5500 	const vaddr_t vsrcp = src_okcolor
5501 	    ? pmap_direct_mapped_phys(src, &src_okcolor,
5502 		cpu_csrcp(src_va_offset))
5503 	    : cpu_csrcp(src_va_offset);
5504 	const vaddr_t vdstp = pmap_direct_mapped_phys(dst, &dst_okcolor,
5505 	    cpu_cdstp(dst_va_offset));
5506 #else
5507 	const bool src_okcolor = false;
5508 	const bool dst_okcolor = false;
5509 	const vaddr_t vsrcp = cpu_csrcp(src_va_offset);
5510 	const vaddr_t vdstp = cpu_cdstp(dst_va_offset);
5511 #endif
5512 	pt_entry_t * const src_ptep = cpu_csrc_pte(src_va_offset);
5513 	pt_entry_t * const dst_ptep = cpu_cdst_pte(dst_va_offset);
5514 
5515 #ifdef DEBUG
5516 	if (!SLIST_EMPTY(&dst_md->pvh_list))
5517 		panic("pmap_copy_page: dst page has mappings");
5518 #endif
5519 
5520 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
5521 	KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC));
5522 #endif
5523 	KDASSERT((src & PGOFSET) == 0);
5524 	KDASSERT((dst & PGOFSET) == 0);
5525 
5526 	/*
5527 	 * Clean the source page.  Hold the source page's lock for
5528 	 * the duration of the copy so that no other mappings can
5529 	 * be created while we have a potentially aliased mapping.
5530 	 */
5531 #ifdef PMAP_CACHE_VIVT
5532 	pmap_acquire_page_lock(src_md);
5533 	(void) pmap_clean_page(src_md, true);
5534 	pmap_release_page_lock(src_md);
5535 #endif
5536 
5537 	/*
5538 	 * Map the pages into the page hook points, copy them, and purge
5539 	 * the cache for the appropriate page. Invalidate the TLB
5540 	 * as required.
5541 	 */
5542 	if (!src_okcolor) {
5543 		const pt_entry_t nsrc_pte = L2_S_PROTO
5544 		    | src
5545 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
5546 		    | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode)
5547 #else // defined(PMAP_CACHE_VIVT) || defined(ARM_MMU_EXTENDED)
5548 		    | pte_l2_s_cache_mode
5549 #endif
5550 		    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
5551 		l2pte_set(src_ptep, nsrc_pte, 0);
5552 		PTE_SYNC(src_ptep);
5553 		cpu_tlb_flushD_SE(vsrcp);
5554 		cpu_cpwait();
5555 	}
5556 	if (!dst_okcolor) {
5557 		const pt_entry_t ndst_pte = L2_S_PROTO | dst |
5558 		    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
5559 		l2pte_set(dst_ptep, ndst_pte, 0);
5560 		PTE_SYNC(dst_ptep);
5561 		cpu_tlb_flushD_SE(vdstp);
5562 		cpu_cpwait();
5563 #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
5564 		/*
5565 		 * If we are direct-mapped and our color isn't ok, then before
5566 		 * we bcopy to the new page invalidate its contents from the
5567 		 * cache and reset its color to its natural color.
5568 		 */
5569 		cpu_dcache_inv_range(vdstp, PAGE_SIZE);
5570 		dst_md->pvh_attrs &= ~arm_cache_prefer_mask;
5571 		dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask);
5572 #endif
5573 	}
5574 	bcopy_page(vsrcp, vdstp);
5575 #ifdef PMAP_CACHE_VIVT
5576 	cpu_dcache_inv_range(vsrcp, PAGE_SIZE);
5577 	cpu_dcache_wbinv_range(vdstp, PAGE_SIZE);
5578 #endif
5579 	/*
5580 	 * Unmap the pages.
5581 	 */
5582 	if (!src_okcolor) {
5583 		l2pte_reset(src_ptep);
5584 		PTE_SYNC(src_ptep);
5585 		cpu_tlb_flushD_SE(vsrcp);
5586 		cpu_cpwait();
5587 	}
5588 	if (!dst_okcolor) {
5589 		l2pte_reset(dst_ptep);
5590 		PTE_SYNC(dst_ptep);
5591 		cpu_tlb_flushD_SE(vdstp);
5592 		cpu_cpwait();
5593 	}
5594 #ifdef PMAP_CACHE_VIPT
5595 	/*
5596 	 * Now that the destination page is in the cache, mark it as colored.
5597 	 * If this was an exec page, discard it.
5598 	 */
5599 	pmap_acquire_page_lock(dst_md);
5600 #ifndef ARM_MMU_EXTENDED
5601 	if (arm_pcache.cache_type == CACHE_TYPE_PIPT) {
5602 		dst_md->pvh_attrs &= ~arm_cache_prefer_mask;
5603 		dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask);
5604 	}
5605 	if (!pmap_is_page_colored_p(dst_md)) {
5606 		PMAPCOUNT(vac_color_new);
5607 		dst_md->pvh_attrs |= PVF_COLORED;
5608 	}
5609 	dst_md->pvh_attrs |= PVF_DIRTY;
5610 #endif
5611 	if (PV_IS_EXEC_P(dst_md->pvh_attrs)) {
5612 		dst_md->pvh_attrs &= ~PVF_EXEC;
5613 		PMAPCOUNT(exec_discarded_copy);
5614 	}
5615 	pmap_release_page_lock(dst_md);
5616 #endif
5617 }
5618 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
5619 
5620 #if ARM_MMU_XSCALE == 1
5621 void
5622 pmap_copy_page_xscale(paddr_t src, paddr_t dst)
5623 {
5624 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
5625 	struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg);
5626 #ifdef DEBUG
5627 	struct vm_page_md *dst_md = VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst));
5628 
5629 	if (!SLIST_EMPTY(&dst_md->pvh_list))
5630 		panic("pmap_copy_page: dst page has mappings");
5631 #endif
5632 
5633 	KDASSERT((src & PGOFSET) == 0);
5634 	KDASSERT((dst & PGOFSET) == 0);
5635 
5636 	/*
5637 	 * Clean the source page.  Hold the source page's lock for
5638 	 * the duration of the copy so that no other mappings can
5639 	 * be created while we have a potentially aliased mapping.
5640 	 */
5641 #ifdef PMAP_CACHE_VIVT
5642 	pmap_acquire_page_lock(src_md);
5643 	(void) pmap_clean_page(src_md, true);
5644 	pmap_release_page_lock(src_md);
5645 #endif
5646 
5647 	/*
5648 	 * Map the pages into the page hook points, copy them, and purge
5649 	 * the cache for the appropriate page. Invalidate the TLB
5650 	 * as required.
5651 	 */
5652 	const pt_entry_t nsrc_pte = L2_S_PROTO | src
5653 	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ)
5654 	    | L2_C | L2_XS_T_TEX(TEX_XSCALE_X);	/* mini-data */
5655 	l2pte_set(csrc_pte, nsrc_pte, 0);
5656 	PTE_SYNC(csrc_pte);
5657 
5658 	const pt_entry_t ndst_pte = L2_S_PROTO | dst
5659 	    | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE)
5660 	    | L2_C | L2_XS_T_TEX(TEX_XSCALE_X);	/* mini-data */
5661 	l2pte_set(cdst_pte, ndst_pte, 0);
5662 	PTE_SYNC(cdst_pte);
5663 
5664 	cpu_tlb_flushD_SE(csrcp);
5665 	cpu_tlb_flushD_SE(cdstp);
5666 	cpu_cpwait();
5667 	bcopy_page(csrcp, cdstp);
5668 	xscale_cache_clean_minidata();
5669 	l2pte_reset(csrc_pte);
5670 	l2pte_reset(cdst_pte);
5671 	PTE_SYNC(csrc_pte);
5672 	PTE_SYNC(cdst_pte);
5673 }
5674 #endif /* ARM_MMU_XSCALE == 1 */
5675 
5676 /*
5677  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
5678  *
5679  * Return the start and end addresses of the kernel's virtual space.
5680  * These values are setup in pmap_bootstrap and are updated as pages
5681  * are allocated.
5682  */
5683 void
5684 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
5685 {
5686 	*start = virtual_avail;
5687 	*end = virtual_end;
5688 }
5689 
5690 /*
5691  * Helper function for pmap_grow_l2_bucket()
5692  */
5693 static inline int
5694 pmap_grow_map(vaddr_t va, paddr_t *pap)
5695 {
5696 	paddr_t pa;
5697 
5698 	if (uvm.page_init_done == false) {
5699 #ifdef PMAP_STEAL_MEMORY
5700 		pv_addr_t pv;
5701 		pmap_boot_pagealloc(PAGE_SIZE,
5702 #ifdef PMAP_CACHE_VIPT
5703 		    arm_cache_prefer_mask,
5704 		    va & arm_cache_prefer_mask,
5705 #else
5706 		    0, 0,
5707 #endif
5708 		    &pv);
5709 		pa = pv.pv_pa;
5710 #else
5711 		if (uvm_page_physget(&pa) == false)
5712 			return (1);
5713 #endif	/* PMAP_STEAL_MEMORY */
5714 	} else {
5715 		struct vm_page *pg;
5716 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
5717 		if (pg == NULL)
5718 			return (1);
5719 		pa = VM_PAGE_TO_PHYS(pg);
5720 		/*
5721 		 * This new page must not have any mappings.  Enter it via
5722 		 * pmap_kenter_pa and let that routine do the hard work.
5723 		 */
5724 		struct vm_page_md *md __diagused = VM_PAGE_TO_MD(pg);
5725 		KASSERT(SLIST_EMPTY(&md->pvh_list));
5726 		pmap_kenter_pa(va, pa,
5727 		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE|PMAP_PTE);
5728 	}
5729 
5730 	if (pap)
5731 		*pap = pa;
5732 
5733 	PMAPCOUNT(pt_mappings);
5734 #ifdef DEBUG
5735 	struct l2_bucket * const l2b = pmap_get_l2_bucket(pmap_kernel(), va);
5736 	KDASSERT(l2b != NULL);
5737 
5738 	pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
5739 	const pt_entry_t opte = *ptep;
5740 	KDASSERT((opte & L2_S_CACHE_MASK) == pte_l2_s_cache_mode_pt);
5741 #endif
5742 	memset((void *)va, 0, PAGE_SIZE);
5743 	return (0);
5744 }
5745 
5746 /*
5747  * This is the same as pmap_alloc_l2_bucket(), except that it is only
5748  * used by pmap_growkernel().
5749  */
5750 static inline struct l2_bucket *
5751 pmap_grow_l2_bucket(pmap_t pm, vaddr_t va)
5752 {
5753 	struct l2_dtable *l2;
5754 	struct l2_bucket *l2b;
5755 	u_short l1slot;
5756 	vaddr_t nva;
5757 
5758 	l1slot = l1pte_index(va);
5759 
5760 	if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL) {
5761 		/*
5762 		 * No mapping at this address, as there is
5763 		 * no entry in the L1 table.
5764 		 * Need to allocate a new l2_dtable.
5765 		 */
5766 		nva = pmap_kernel_l2dtable_kva;
5767 		if ((nva & PGOFSET) == 0) {
5768 			/*
5769 			 * Need to allocate a backing page
5770 			 */
5771 			if (pmap_grow_map(nva, NULL))
5772 				return (NULL);
5773 		}
5774 
5775 		l2 = (struct l2_dtable *)nva;
5776 		nva += sizeof(struct l2_dtable);
5777 
5778 		if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) {
5779 			/*
5780 			 * The new l2_dtable straddles a page boundary.
5781 			 * Map in another page to cover it.
5782 			 */
5783 			if (pmap_grow_map(nva, NULL))
5784 				return (NULL);
5785 		}
5786 
5787 		pmap_kernel_l2dtable_kva = nva;
5788 
5789 		/*
5790 		 * Link it into the parent pmap
5791 		 */
5792 		pm->pm_l2[L2_IDX(l1slot)] = l2;
5793 	}
5794 
5795 	l2b = &l2->l2_bucket[L2_BUCKET(l1slot)];
5796 
5797 	/*
5798 	 * Fetch pointer to the L2 page table associated with the address.
5799 	 */
5800 	if (l2b->l2b_kva == NULL) {
5801 		pt_entry_t *ptep;
5802 
5803 		/*
5804 		 * No L2 page table has been allocated. Chances are, this
5805 		 * is because we just allocated the l2_dtable, above.
5806 		 */
5807 		nva = pmap_kernel_l2ptp_kva;
5808 		ptep = (pt_entry_t *)nva;
5809 		if ((nva & PGOFSET) == 0) {
5810 			/*
5811 			 * Need to allocate a backing page
5812 			 */
5813 			if (pmap_grow_map(nva, &pmap_kernel_l2ptp_phys))
5814 				return (NULL);
5815 			PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
5816 		}
5817 
5818 		l2->l2_occupancy++;
5819 		l2b->l2b_kva = ptep;
5820 		l2b->l2b_l1slot = l1slot;
5821 		l2b->l2b_pa = pmap_kernel_l2ptp_phys;
5822 
5823 		pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
5824 		pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
5825 	}
5826 
5827 	return (l2b);
5828 }
5829 
5830 vaddr_t
5831 pmap_growkernel(vaddr_t maxkvaddr)
5832 {
5833 	pmap_t kpm = pmap_kernel();
5834 #ifndef ARM_MMU_EXTENDED
5835 	struct l1_ttable *l1;
5836 #endif
5837 	int s;
5838 
5839 	if (maxkvaddr <= pmap_curmaxkvaddr)
5840 		goto out;		/* we are OK */
5841 
5842 	NPDEBUG(PDB_GROWKERN,
5843 	    printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
5844 	    pmap_curmaxkvaddr, maxkvaddr));
5845 
5846 	KDASSERT(maxkvaddr <= virtual_end);
5847 
5848 	/*
5849 	 * whoops!   we need to add kernel PTPs
5850 	 */
5851 
5852 	s = splhigh();	/* to be safe */
5853 	mutex_enter(kpm->pm_lock);
5854 
5855 	/* Map 1MB at a time */
5856 	size_t l1slot = l1pte_index(pmap_curmaxkvaddr);
5857 #ifdef ARM_MMU_EXTENDED
5858 	pd_entry_t * const spdep = &kpm->pm_l1[l1slot];
5859 	pd_entry_t *pdep = spdep;
5860 #endif
5861 	for (;pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE,
5862 #ifdef ARM_MMU_EXTENDED
5863 	     pdep++,
5864 #endif
5865 	     l1slot++) {
5866 		struct l2_bucket *l2b =
5867 		    pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
5868 		KASSERT(l2b != NULL);
5869 
5870 		const pd_entry_t npde = L1_C_PROTO | l2b->l2b_pa
5871 		    | L1_C_DOM(PMAP_DOMAIN_KERNEL);
5872 #ifdef ARM_MMU_EXTENDED
5873 		l1pte_setone(pdep, npde);
5874 #else
5875 		/* Distribute new L1 entry to all other L1s */
5876 		SLIST_FOREACH(l1, &l1_list, l1_link) {
5877 			pd_entry_t * const pdep = &l1->l1_kva[l1slot];
5878 			l1pte_setone(pdep, npde);
5879 			PDE_SYNC(pdep);
5880 		}
5881 #endif
5882 	}
5883 #ifdef ARM_MMU_EXTENDED
5884 	PDE_SYNC_RANGE(spdep, pdep - spdep);
5885 #endif
5886 
5887 #ifdef PMAP_CACHE_VIVT
5888 	/*
5889 	 * flush out the cache, expensive but growkernel will happen so
5890 	 * rarely
5891 	 */
5892 	cpu_dcache_wbinv_all();
5893 	cpu_tlb_flushD();
5894 	cpu_cpwait();
5895 #endif
5896 
5897 	mutex_exit(kpm->pm_lock);
5898 	splx(s);
5899 
5900 out:
5901 	return (pmap_curmaxkvaddr);
5902 }
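/*
 * Worked example with hypothetical addresses: if pmap_curmaxkvaddr is
 * 0xc0800000 and uvm asks to grow to maxkvaddr = 0xc0a00000, the loop
 * above runs twice, once per 1MB L1 section.  Each iteration obtains an
 * L2 page-table page via pmap_grow_l2_bucket() and installs a coarse
 * (L1_C_PROTO) L1 entry for it, either directly (ARM_MMU_EXTENDED) or in
 * every L1 on the l1_list.
 */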
5903 
5904 /************************ Utility routines ****************************/
5905 
5906 #ifndef ARM_HAS_VBAR
5907 /*
5908  * vector_page_setprot:
5909  *
5910  *	Manipulate the protection of the vector page.
5911  */
5912 void
5913 vector_page_setprot(int prot)
5914 {
5915 	struct l2_bucket *l2b;
5916 	pt_entry_t *ptep;
5917 
5918 #if defined(CPU_ARMV7) || defined(CPU_ARM11)
5919 	/*
5920 	 * If we are using VBAR to provide the vectors from the kernel, the
5921 	 * page is already mapped in the kernel text, so nothing to do here.
5922 	 */
5923 	if (vector_page != ARM_VECTORS_LOW && vector_page != ARM_VECTORS_HIGH) {
5924 		KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0);
5925 		return;
5926 	}
5927 #endif
5928 
5929 	l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
5930 	KASSERT(l2b != NULL);
5931 
5932 	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
5933 
5934 	const pt_entry_t opte = *ptep;
5935 #ifdef ARM_MMU_EXTENDED
5936 	const pt_entry_t npte = (opte & ~(L2_S_PROT_MASK|L2_XS_XN))
5937 	    | L2_S_PROT(PTE_KERNEL, prot);
5938 #else
5939 	const pt_entry_t npte = (opte & ~L2_S_PROT_MASK)
5940 	    | L2_S_PROT(PTE_KERNEL, prot);
5941 #endif
5942 	l2pte_set(ptep, npte, opte);
5943 	PTE_SYNC(ptep);
5944 	cpu_tlb_flushD_SE(vector_page);
5945 	cpu_cpwait();
5946 }
5947 #endif
5948 
5949 /*
5950  * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
5951  * Returns true if the mapping exists, else false.
5952  *
5953  * NOTE: This function is only used by a couple of arm-specific modules.
5954  * It is not safe to take any pmap locks here, since we could be right
5955  * in the middle of debugging the pmap anyway...
5956  *
5957  * It is possible for this routine to return false even though a valid
5958  * mapping does exist. This is because we don't lock, so the metadata
5959  * state may be inconsistent.
5960  *
5961  * NOTE: We can return a NULL *ptp in the case where the L1 pde is
5962  * a "section" mapping.
5963  */
5964 bool
5965 pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp)
5966 {
5967 	struct l2_dtable *l2;
5968 	pd_entry_t *pdep, pde;
5969 	pt_entry_t *ptep;
5970 	u_short l1slot;
5971 
5972 	if (pm->pm_l1 == NULL)
5973 		return false;
5974 
5975 	l1slot = l1pte_index(va);
5976 	*pdp = pdep = pmap_l1_kva(pm) + l1slot;
5977 	pde = *pdep;
5978 
5979 	if (l1pte_section_p(pde)) {
5980 		*ptp = NULL;
5981 		return true;
5982 	}
5983 
5984 	l2 = pm->pm_l2[L2_IDX(l1slot)];
5985 	if (l2 == NULL ||
5986 	    (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) {
5987 		return false;
5988 	}
5989 
5990 	*ptp = &ptep[l2pte_index(va)];
5991 	return true;
5992 }
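/*
 * Hypothetical usage sketch for a debugger-style caller (names invented
 * for illustration):
 *
 *	pd_entry_t *pdep;
 *	pt_entry_t *ptep;
 *
 *	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep)) {
 *		if (ptep == NULL) {
 *			... va is covered by a section mapping; look at *pdep
 *		} else {
 *			... *ptep is the L2 PTE for va
 *		}
 *	}
 */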
5993 
5994 bool
5995 pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp)
5996 {
5997 
5998 	if (pm->pm_l1 == NULL)
5999 		return false;
6000 
6001 	*pdp = pmap_l1_kva(pm) + l1pte_index(va);
6002 
6003 	return true;
6004 }
6005 
6006 /************************ Bootstrapping routines ****************************/
6007 
6008 #ifndef ARM_MMU_EXTENDED
6009 static void
6010 pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
6011 {
6012 	int i;
6013 
6014 	l1->l1_kva = l1pt;
6015 	l1->l1_domain_use_count = 0;
6016 	l1->l1_domain_first = 0;
6017 
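	/*
	 * The free domains are kept on a simple free list threaded through
	 * the l1_domain_free[] array: entry i holds the number of the next
	 * free domain, and l1_domain_first names the head (domain 0 at this
	 * point, so domains are handed out in ascending order).
	 */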
6018 	for (i = 0; i < PMAP_DOMAINS; i++)
6019 		l1->l1_domain_free[i] = i + 1;
6020 
6021 	/*
6022 	 * Copy the kernel's L1 entries to each new L1.
6023 	 */
6024 	if (pmap_initialized)
6025 		memcpy(l1pt, pmap_l1_kva(pmap_kernel()), L1_TABLE_SIZE);
6026 
6027 	if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt,
6028 	    &l1->l1_physaddr) == false)
6029 		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
6030 
6031 	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
6032 	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
6033 }
6034 #endif /* !ARM_MMU_EXTENDED */
6035 
6036 /*
6037  * pmap_bootstrap() is called from the board-specific initarm() routine
6038  * once the kernel L1/L2 descriptors tables have been set up.
6039  *
6040  * This is a somewhat convoluted process since pmap bootstrap is, effectively,
6041  * spread over a number of disparate files/functions.
6042  *
6043  * We are passed the following parameters
6044  *  - kernel_l1pt
6045  *    This is a pointer to the base of the kernel's L1 translation table.
6046  *  - vstart
6047  *    1MB-aligned start of managed kernel virtual memory.
6048  *  - vend
6049  *    1MB-aligned end of managed kernel virtual memory.
6050  *
6051  * We use the first parameter to build the metadata (struct l1_ttable and
6052  * struct l2_dtable) necessary to track kernel mappings.
6053  */
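/*
 * For illustration only (symbol names vary from port to port): a typical
 * initarm() finishes building the kernel L1/L2 tables and then calls
 * something like
 *
 *	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);
 *
 * where both arguments are 1MB-aligned kernel virtual addresses.
 */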
6054 #define	PMAP_STATIC_L2_SIZE 16
6055 void
6056 pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
6057 {
6058 	static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
6059 #ifndef ARM_MMU_EXTENDED
6060 	static struct l1_ttable static_l1;
6061 	struct l1_ttable *l1 = &static_l1;
6062 #endif
6063 	struct l2_dtable *l2;
6064 	struct l2_bucket *l2b;
6065 	pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va;
6066 	pmap_t pm = pmap_kernel();
6067 	pt_entry_t *ptep;
6068 	paddr_t pa;
6069 	vsize_t size;
6070 	int nptes, l2idx, l2next = 0;
6071 
6072 #ifdef ARM_MMU_EXTENDED
6073 	KASSERT(pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt);
6074 	KASSERT(pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt);
6075 #endif
6076 
6077 #ifdef VERBOSE_INIT_ARM
6078 	printf("kpm ");
6079 #endif
6080 	/*
6081 	 * Initialise the kernel pmap object
6082 	 */
6083 	curcpu()->ci_pmap_cur = pm;
6084 #ifdef ARM_MMU_EXTENDED
6085 	pm->pm_l1 = l1pt;
6086 	pm->pm_l1_pa = kernel_l1pt.pv_pa;
6087 #ifdef VERBOSE_INIT_ARM
6088 	printf("tlb0 ");
6089 #endif
6090 	pmap_tlb_info_init(&pmap_tlb0_info);
6091 #ifdef MULTIPROCESSOR
6092 #ifdef VERBOSE_INIT_ARM
6093 	printf("kcpusets ");
6094 #endif
6095 	pm->pm_onproc = kcpuset_running;
6096 	pm->pm_active = kcpuset_running;
6097 #endif
6098 #else
6099 	pm->pm_l1 = l1;
6100 #endif
6101 
6102 #ifdef VERBOSE_INIT_ARM
6103 	printf("locks ");
6104 #endif
6105 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
6106 	if (arm_cache_prefer_mask != 0) {
6107 		mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_VM);
6108 	} else {
6109 #endif
6110 		mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE);
6111 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
6112 	}
6113 #endif
6114 	mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
6115 	uvm_obj_init(&pm->pm_obj, NULL, false, 1);
6116 	uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
6117 
6118 #ifdef VERBOSE_INIT_ARM
6119 	printf("l1pt ");
6120 #endif
6121 	/*
6122 	 * Scan the L1 translation table created by initarm() and create
6123 	 * the required metadata for all valid mappings found in it.
6124 	 */
6125 	for (size_t l1slot = 0;
6126 	     l1slot < L1_TABLE_SIZE / sizeof(pd_entry_t);
6127 	     l1slot++) {
6128 		pd_entry_t pde = l1pt[l1slot];
6129 
6130 		/*
6131 		 * We're only interested in Coarse mappings.
6132 		 * pmap_extract() can deal with section mappings without
6133 		 * recourse to checking L2 metadata.
6134 		 */
6135 		if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
6136 			continue;
6137 
6138 		/*
6139 		 * Lookup the KVA of this L2 descriptor table
6140 		 */
6141 		pa = l1pte_pa(pde);
6142 		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
6143 		if (ptep == NULL) {
6144 			panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
6145 			    (u_int)l1slot << L1_S_SHIFT, pa);
6146 		}
6147 
6148 		/*
6149 		 * Fetch the associated L2 metadata structure.
6150 		 * Allocate a new one if necessary.
6151 		 */
6152 		if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL) {
6153 			if (l2next == PMAP_STATIC_L2_SIZE)
6154 				panic("pmap_bootstrap: out of static L2s");
6155 			pm->pm_l2[L2_IDX(l1slot)] = l2 = &static_l2[l2next++];
6156 		}
6157 
6158 		/*
6159 		 * One more L1 slot tracked...
6160 		 */
6161 		l2->l2_occupancy++;
6162 
6163 		/*
6164 		 * Fill in the details of the L2 descriptor in the
6165 		 * appropriate bucket.
6166 		 */
6167 		l2b = &l2->l2_bucket[L2_BUCKET(l1slot)];
6168 		l2b->l2b_kva = ptep;
6169 		l2b->l2b_pa = pa;
6170 		l2b->l2b_l1slot = l1slot;
6171 
6172 		/*
6173 		 * Establish an initial occupancy count for this descriptor
6174 		 */
6175 		for (l2idx = 0;
6176 		    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
6177 		    l2idx++) {
6178 			if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
6179 				l2b->l2b_occupancy++;
6180 			}
6181 		}
6182 
6183 		/*
6184 		 * Make sure the descriptor itself has the correct cache mode.
6185 		 * If not, fix it, but whine about the problem. Port-meisters
6186 		 * should consider this a clue to fix up their initarm()
6187 		 * function. :)
6188 		 */
6189 		if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep, 1)) {
6190 			printf("pmap_bootstrap: WARNING! wrong cache mode for "
6191 			    "L2 pte @ %p\n", ptep);
6192 		}
6193 	}
6194 
6195 #ifdef VERBOSE_INIT_ARM
6196 	printf("cache(l1pt) ");
6197 #endif
6198 	/*
6199 	 * Ensure the primary (kernel) L1 has the correct cache mode for
6200 	 * a page table. Bitch if it is not correctly set.
6201 	 */
6202 	if (pmap_set_pt_cache_mode(l1pt, kernel_l1pt.pv_va,
6203 		    L1_TABLE_SIZE / L2_S_SIZE)) {
6204 		printf("pmap_bootstrap: WARNING! wrong cache mode for "
6205 		    "primary L1 @ 0x%lx\n", kernel_l1pt.pv_va);
6206 	}
6207 
6208 #ifdef PMAP_CACHE_VIVT
6209 	cpu_dcache_wbinv_all();
6210 	cpu_tlb_flushID();
6211 	cpu_cpwait();
6212 #endif
6213 
6214 	/*
6215 	 * now we allocate the "special" VAs which are used for tmp mappings
6216 	 * by the pmap (and other modules).  we allocate the VAs by advancing
6217 	 * virtual_avail (note that there are no pages mapped at these VAs).
6218 	 *
6219 	 * Managed KVM space start from wherever initarm() tells us.
6220 	 */
6221 	virtual_avail = vstart;
6222 	virtual_end = vend;
6223 
6224 #ifdef VERBOSE_INIT_ARM
6225 	printf("specials ");
6226 #endif
6227 #ifdef PMAP_CACHE_VIPT
6228 	/*
6229 	 * If we have a VIPT cache, we need one page/pte per possible alias
6230 	 * page so we won't violate cache aliasing rules.
6231 	 */
6232 	virtual_avail = (virtual_avail + arm_cache_prefer_mask) & ~arm_cache_prefer_mask;
6233 	nptes = (arm_cache_prefer_mask >> L2_S_SHIFT) + 1;
6234 	nptes = roundup(nptes, PAGE_SIZE / L2_S_SIZE);
6235 	if (arm_pcache.icache_type != CACHE_TYPE_PIPT
6236 	    && arm_pcache.icache_way_size > nptes * L2_S_SIZE) {
6237 		nptes = arm_pcache.icache_way_size >> L2_S_SHIFT;
6238 		nptes = roundup(nptes, PAGE_SIZE / L2_S_SIZE);
6239 	}
6240 #else
6241 	nptes = PAGE_SIZE / L2_S_SIZE;
6242 #endif
6243 #ifdef MULTIPROCESSOR
6244 	cnptes = nptes;
6245 	nptes *= arm_cpu_max;
6246 #endif
6247 	pmap_alloc_specials(&virtual_avail, nptes, &csrcp, &csrc_pte);
6248 	pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte, nptes);
6249 	pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte);
6250 	pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte, nptes);
6251 	pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL);
6252 	if (msgbufaddr == NULL) {
6253 		pmap_alloc_specials(&virtual_avail,
6254 		    round_page(MSGBUFSIZE) / PAGE_SIZE,
6255 		    (void *)&msgbufaddr, NULL);
6256 	}
6257 
6258 	/*
6259 	 * Allocate a range of kernel virtual address space to be used
6260 	 * for L2 descriptor tables and metadata allocation in
6261 	 * pmap_growkernel().
6262 	 */
6263 	size = ((virtual_end - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
6264 	pmap_alloc_specials(&virtual_avail,
6265 	    round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
6266 	    &pmap_kernel_l2ptp_kva, NULL);
6267 
6268 	size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
6269 	pmap_alloc_specials(&virtual_avail,
6270 	    round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
6271 	    &pmap_kernel_l2dtable_kva, NULL);
6272 
6273 #ifndef ARM_MMU_EXTENDED
6274 	/*
6275 	 * init the static-global locks and global pmap list.
6276 	 */
6277 	mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM);
6278 
6279 	/*
6280 	 * We can now initialise the first L1's metadata.
6281 	 */
6282 	SLIST_INIT(&l1_list);
6283 	TAILQ_INIT(&l1_lru_list);
6284 	pmap_init_l1(l1, l1pt);
6285 #endif /* !ARM_MMU_EXTENDED */
6286 
6287 #ifndef ARM_HAS_VBAR
6288 	/* Set up vector page L1 details, if necessary */
6289 	if (vector_page < KERNEL_BASE) {
6290 		pm->pm_pl1vec = pmap_l1_kva(pm) + l1pte_index(vector_page);
6291 		l2b = pmap_get_l2_bucket(pm, vector_page);
6292 		KDASSERT(l2b != NULL);
6293 		pm->pm_l1vec = l2b->l2b_pa | L1_C_PROTO |
6294 		    L1_C_DOM(pmap_domain(pm));
6295 	} else
6296 		pm->pm_pl1vec = NULL;
6297 #endif
6298 
6299 #ifdef VERBOSE_INIT_ARM
6300 	printf("pools ");
6301 #endif
6302 	/*
6303 	 * Initialize the pmap cache
6304 	 */
6305 	pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
6306 	    "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL);
6307 
6308 	/*
6309 	 * Initialize the pv pool.
6310 	 */
6311 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl",
6312 	    &pmap_bootstrap_pv_allocator, IPL_NONE);
6313 
6314 	/*
6315 	 * Initialize the L2 dtable pool and cache.
6316 	 */
6317 	pool_cache_bootstrap(&pmap_l2dtable_cache, sizeof(struct l2_dtable), 0,
6318 	    0, 0, "l2dtblpl", NULL, IPL_NONE, pmap_l2dtable_ctor, NULL, NULL);
6319 
6320 	/*
6321 	 * Initialise the L2 descriptor table pool and cache
6322 	 */
6323 	pool_cache_bootstrap(&pmap_l2ptp_cache, L2_TABLE_SIZE_REAL, 0,
6324 	    L2_TABLE_SIZE_REAL, 0, "l2ptppl", NULL, IPL_NONE,
6325 	    pmap_l2ptp_ctor, NULL, NULL);
6326 
6327 	mutex_init(&memlock, MUTEX_DEFAULT, IPL_NONE);
6328 
6329 	cpu_dcache_wbinv_all();
6330 }
6331 
6332 static bool
6333 pmap_set_pt_cache_mode(pd_entry_t *kl1, vaddr_t va, size_t nptes)
6334 {
6335 #ifdef ARM_MMU_EXTENDED
6336 	return false;
6337 #else
6338 	if (pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
6339 	    && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
6340 		return false;
6341 
6342 	const vaddr_t eva = va + nptes * PAGE_SIZE;
6343 	int rv = 0;
6344 
6345 	while (va < eva) {
6346 		/*
6347 		 * Make sure the descriptor itself has the correct cache mode
6348 		 */
6349 		pd_entry_t * const pdep = &kl1[l1pte_index(va)];
6350 		pd_entry_t pde = *pdep;
6351 
6352 		if (l1pte_section_p(pde)) {
6353 			__CTASSERT((L1_S_CACHE_MASK & L1_S_V6_SUPER) == 0);
6354 			if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
6355 				*pdep = (pde & ~L1_S_CACHE_MASK) |
6356 				    pte_l1_s_cache_mode_pt;
6357 				PDE_SYNC(pdep);
6358 				cpu_dcache_wbinv_range((vaddr_t)pdep,
6359 				    sizeof(*pdep));
6360 				rv = 1;
6361 			}
6362 			return rv;
6363 		}
6364 		vaddr_t pa = l1pte_pa(pde);
6365 		pt_entry_t *ptep = (pt_entry_t *)kernel_pt_lookup(pa);
6366 		if (ptep == NULL)
6367 			panic("pmap_bootstrap: No PTP for va %#lx\n", va);
6368 
6369 		ptep += l2pte_index(va);
6370 		const pt_entry_t opte = *ptep;
6371 		if ((opte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
6372 			const pt_entry_t npte = (opte & ~L2_S_CACHE_MASK)
6373 			    | pte_l2_s_cache_mode_pt;
6374 			l2pte_set(ptep, npte, opte);
6375 			PTE_SYNC(ptep);
6376 			cpu_dcache_wbinv_range((vaddr_t)ptep, sizeof(*ptep));
6377 			rv = 1;
6378 		}
6379 		va += PAGE_SIZE;
6380 	}
6381 
6382 	return (rv);
6383 #endif
6384 }
6385 
6386 static void
6387 pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep)
6388 {
6389 	vaddr_t va = *availp;
6390 	struct l2_bucket *l2b;
6391 
6392 	if (ptep) {
6393 		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
6394 		if (l2b == NULL)
6395 			panic("pmap_alloc_specials: no l2b for 0x%lx", va);
6396 
6397 		if (ptep)
6398 			*ptep = &l2b->l2b_kva[l2pte_index(va)];
6399 	}
6400 
6401 	*vap = va;
6402 	*availp = va + (PAGE_SIZE * pages);
6403 }
6404 
6405 void
6406 pmap_init(void)
6407 {
6408 
6409 	/*
6410 	 * Set the available memory vars - these do not map to real memory
6411 	 * addresses and cannot, as the physical memory is fragmented.
6412 	 * They are used by ps for %mem calculations.
6413 	 * One could argue whether this should be the entire memory or just
6414 	 * the memory that is usable in a user process.
6415 	 */
6416 	avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
6417 	avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
6418 
6419 	/*
6420 	 * Now we need to free enough pv_entry structures to allow us to get
6421 	 * the kmem_map/kmem_object allocated and inited (done after this
6422 	 * function is finished).  To do this we allocate one bootstrap page out
6423 	 * of kernel_map and use it to provide an initial pool of pv_entry
6424 	 * structures.  We never free this page.
6425 	 */
6426 	pool_setlowat(&pmap_pv_pool, (PAGE_SIZE / sizeof(struct pv_entry)) * 2);
6427 
6428 #ifdef ARM_MMU_EXTENDED
6429 	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);
6430 #endif
6431 
6432 	pmap_initialized = true;
6433 }
6434 
6435 static vaddr_t last_bootstrap_page = 0;
6436 static void *free_bootstrap_pages = NULL;
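/*
 * Until pmap_initialized is set, pv_entry pages are carved straight out
 * of kernel_map by the allocator below.  Pages given back during that
 * window are not returned to UVM; they are chained onto
 * free_bootstrap_pages through the first word of each page and handed
 * out again on the next allocation.
 */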
6437 
6438 static void *
6439 pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags)
6440 {
6441 	extern void *pool_page_alloc(struct pool *, int);
6442 	vaddr_t new_page;
6443 	void *rv;
6444 
6445 	if (pmap_initialized)
6446 		return (pool_page_alloc(pp, flags));
6447 
6448 	if (free_bootstrap_pages) {
6449 		rv = free_bootstrap_pages;
6450 		free_bootstrap_pages = *((void **)rv);
6451 		return (rv);
6452 	}
6453 
6454 	KASSERT(kernel_map != NULL);
6455 	new_page = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
6456 	    UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT));
6457 
6458 	KASSERT(new_page > last_bootstrap_page);
6459 	last_bootstrap_page = new_page;
6460 	return ((void *)new_page);
6461 }
6462 
6463 static void
6464 pmap_bootstrap_pv_page_free(struct pool *pp, void *v)
6465 {
6466 	extern void pool_page_free(struct pool *, void *);
6467 
6468 	if ((vaddr_t)v <= last_bootstrap_page) {
6469 		*((void **)v) = free_bootstrap_pages;
6470 		free_bootstrap_pages = v;
6471 		return;
6472 	}
6473 
6474 	if (pmap_initialized) {
6475 		pool_page_free(pp, v);
6476 		return;
6477 	}
6478 }
6479 
6480 /*
6481  * pmap_postinit()
6482  *
6483  * This routine is called after the vm and kmem subsystems have been
6484  * initialised. This allows the pmap code to perform any initialisation
6485  * that can only be done once the memory allocation is in place.
6486  */
6487 void
6488 pmap_postinit(void)
6489 {
6490 #ifndef ARM_MMU_EXTENDED
6491 	extern paddr_t physical_start, physical_end;
6492 	struct l1_ttable *l1;
6493 	struct pglist plist;
6494 	struct vm_page *m;
6495 	pd_entry_t *pdep;
6496 	vaddr_t va, eva;
6497 	u_int loop, needed;
6498 	int error;
6499 #endif
6500 
6501 	pool_cache_setlowat(&pmap_l2ptp_cache, (PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4);
6502 	pool_cache_setlowat(&pmap_l2dtable_cache,
6503 	    (PAGE_SIZE / sizeof(struct l2_dtable)) * 2);
6504 
6505 #ifndef ARM_MMU_EXTENDED
6506 	needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
6507 	needed -= 1;
6508 
6509 	l1 = kmem_alloc(sizeof(*l1) * needed, KM_SLEEP);
6510 
6511 	for (loop = 0; loop < needed; loop++, l1++) {
6512 		/* Allocate a L1 page table */
6513 		va = uvm_km_alloc(kernel_map, L1_TABLE_SIZE, 0, UVM_KMF_VAONLY);
6514 		if (va == 0)
6515 			panic("Cannot allocate L1 KVM");
6516 
6517 		error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start,
6518 		    physical_end, L1_TABLE_SIZE, 0, &plist, 1, 1);
6519 		if (error)
6520 			panic("Cannot allocate L1 physical pages");
6521 
6522 		m = TAILQ_FIRST(&plist);
6523 		eva = va + L1_TABLE_SIZE;
6524 		pdep = (pd_entry_t *)va;
6525 
6526 		while (m && va < eva) {
6527 			paddr_t pa = VM_PAGE_TO_PHYS(m);
6528 
6529 			pmap_kenter_pa(va, pa,
6530 			    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE|PMAP_PTE);
6531 
6532 			va += PAGE_SIZE;
6533 			m = TAILQ_NEXT(m, pageq.queue);
6534 		}
6535 
6536 #ifdef DIAGNOSTIC
6537 		if (m)
6538 			panic("pmap_alloc_l1pt: pglist not empty");
6539 #endif	/* DIAGNOSTIC */
6540 
6541 		pmap_init_l1(l1, pdep);
6542 	}
6543 
6544 #ifdef DEBUG
6545 	printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
6546 	    needed);
6547 #endif
6548 #endif /* !ARM_MMU_EXTENDED */
6549 }
6550 
6551 /*
6552  * Note that the following routines are used by board-specific initialisation
6553  * code to configure the initial kernel page tables.
6554  *
6555  * If ARM32_NEW_VM_LAYOUT is *not* defined, they operate on the assumption that
6556  * L2 page-table pages are 4KB in size and use 4 L1 slots. This mimics the
6557  * behaviour of the old pmap, and provides an easy migration path for
6558  * initial bring-up of the new pmap on existing ports. Fortunately,
6559  * pmap_bootstrap() compensates for this hackery. This is only a stop-gap and
6560  * will be deprecated.
6561  *
6562  * If ARM32_NEW_VM_LAYOUT *is* defined, these functions deal with 1KB L2 page
6563  * tables.
6564  */
6565 
6566 /*
6567  * This list exists for the benefit of pmap_map_chunk().  It keeps track
6568  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
6569  * find them as necessary.
6570  *
6571  * Note that the data on this list MUST remain valid after initarm() returns,
6572  * as pmap_bootstrap() uses it to construct L2 table metadata.
6573  */
6574 SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
6575 
6576 static vaddr_t
6577 kernel_pt_lookup(paddr_t pa)
6578 {
6579 	pv_addr_t *pv;
6580 
6581 	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
6582 		if (pv->pv_pa == (pa & ~PGOFSET))
6583 			return (pv->pv_va | (pa & PGOFSET));
6584 	}
6585 	return (0);
6586 }
6587 
6588 /*
6589  * pmap_map_section:
6590  *
6591  *	Create a single section mapping.
6592  */
6593 void
6594 pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
6595 {
6596 	pd_entry_t * const pdep = (pd_entry_t *) l1pt;
6597 	const size_t l1slot = l1pte_index(va);
6598 	pd_entry_t fl;
6599 
6600 	KASSERT(((va | pa) & L1_S_OFFSET) == 0);
6601 
6602 	switch (cache) {
6603 	case PTE_NOCACHE:
6604 	default:
6605 		fl = 0;
6606 		break;
6607 
6608 	case PTE_CACHE:
6609 		fl = pte_l1_s_cache_mode;
6610 		break;
6611 
6612 	case PTE_PAGETABLE:
6613 		fl = pte_l1_s_cache_mode_pt;
6614 		break;
6615 	}
6616 
6617 	const pd_entry_t npde = L1_S_PROTO | pa |
6618 	    L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
6619 	l1pte_setone(pdep + l1slot, npde);
6620 	PDE_SYNC(pdep + l1slot);
6621 }
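/*
 * Hypothetical example (addresses invented for illustration): to map a
 * 1MB device region at VA 0xf0000000 onto PA 0x80000000, read/write and
 * uncached, a port's early bootstrap code could do
 *
 *	pmap_map_section(l1pt_va, 0xf0000000, 0x80000000,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
 *
 * with l1pt_va the KVA of the kernel L1 table.  Both the VA and the PA
 * must be 1MB-aligned, as the KASSERT above enforces.
 */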
6622 
6623 /*
6624  * pmap_map_entry:
6625  *
6626  *	Create a single page mapping.
6627  */
6628 void
6629 pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
6630 {
6631 	pd_entry_t * const pdep = (pd_entry_t *) l1pt;
6632 	const size_t l1slot = l1pte_index(va);
6633 	pt_entry_t npte;
6634 	pt_entry_t *ptep;
6635 
6636 	KASSERT(((va | pa) & PGOFSET) == 0);
6637 
6638 	switch (cache) {
6639 	case PTE_NOCACHE:
6640 	default:
6641 		npte = 0;
6642 		break;
6643 
6644 	case PTE_CACHE:
6645 		npte = pte_l2_s_cache_mode;
6646 		break;
6647 
6648 	case PTE_PAGETABLE:
6649 		npte = pte_l2_s_cache_mode_pt;
6650 		break;
6651 	}
6652 
6653 	if ((pdep[l1slot] & L1_TYPE_MASK) != L1_TYPE_C)
6654 		panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
6655 
6656 	ptep = (pt_entry_t *) kernel_pt_lookup(l1pte_pa(pdep[l1slot]));
6657 	if (ptep == NULL)
6658 		panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
6659 
6660 	npte |= L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot);
6661 #ifdef ARM_MMU_EXTENDED
6662 	if (prot & VM_PROT_EXECUTE) {
6663 		npte &= ~L2_XS_XN;
6664 	}
6665 #endif
6666 	ptep += l2pte_index(va);
6667 	l2pte_set(ptep, npte, 0);
6668 	PTE_SYNC(ptep);
6669 }
6670 
6671 /*
6672  * pmap_link_l2pt:
6673  *
6674  *	Link the L2 page table specified by "l2pv" into the L1
6675  *	page table at the slot for "va".
6676  */
6677 void
6678 pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
6679 {
6680 	pd_entry_t * const pdep = (pd_entry_t *) l1pt + l1pte_index(va);
6681 
6682 	KASSERT((va & ((L1_S_SIZE * (PAGE_SIZE / L2_T_SIZE)) - 1)) == 0);
6683 	KASSERT((l2pv->pv_pa & PGOFSET) == 0);
6684 
6685 	const pd_entry_t npde = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO
6686 	    | l2pv->pv_pa;
6687 
6688 	l1pte_set(pdep, npde);
6689 	PDE_SYNC_RANGE(pdep, PAGE_SIZE / L2_T_SIZE);
6690 
6691 	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
6692 }
6693 
6694 /*
6695  * pmap_map_chunk:
6696  *
6697  *	Map a chunk of memory using the most efficient mappings
6698  *	possible (section, large page, small page) into the
6699  *	provided L1 and L2 tables at the specified virtual address.
6700  */
6701 vsize_t
6702 pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
6703     int prot, int cache)
6704 {
6705 	pd_entry_t * const pdep = (pd_entry_t *) l1pt;
6706 	pt_entry_t f1, f2s, f2l;
6707 	vsize_t resid;
6708 
6709 	resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
6710 
6711 	if (l1pt == 0)
6712 		panic("pmap_map_chunk: no L1 table provided");
6713 
6714 #ifdef VERBOSE_INIT_ARM
6715 	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
6716 	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
6717 #endif
6718 
6719 	switch (cache) {
6720 	case PTE_NOCACHE:
6721 	default:
6722 		f1 = 0;
6723 		f2l = 0;
6724 		f2s = 0;
6725 		break;
6726 
6727 	case PTE_CACHE:
6728 		f1 = pte_l1_s_cache_mode;
6729 		f2l = pte_l2_l_cache_mode;
6730 		f2s = pte_l2_s_cache_mode;
6731 		break;
6732 
6733 	case PTE_PAGETABLE:
6734 		f1 = pte_l1_s_cache_mode_pt;
6735 		f2l = pte_l2_l_cache_mode_pt;
6736 		f2s = pte_l2_s_cache_mode_pt;
6737 		break;
6738 	}
6739 
6740 	size = resid;
6741 
6742 	while (resid > 0) {
6743 		const size_t l1slot = l1pte_index(va);
6744 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
6745 		/* See if we can use a supersection mapping. */
6746 		if (L1_SS_PROTO && L1_SS_MAPPABLE_P(va, pa, resid)) {
6747 			/* Supersection are always domain 0 */
6748 			const pd_entry_t npde = L1_SS_PROTO | pa
6749 #ifdef ARM_MMU_EXTENDED
6750 			    | ((prot & VM_PROT_EXECUTE) ? 0 : L1_S_V6_XN)
6751 			    | (va & 0x80000000 ? 0 : L1_S_V6_nG)
6752 #endif
6753 			    | L1_S_PROT(PTE_KERNEL, prot) | f1;
6754 #ifdef VERBOSE_INIT_ARM
6755 			printf("sS");
6756 #endif
6757 			l1pte_set(&pdep[l1slot], npde);
6758 			PDE_SYNC_RANGE(&pdep[l1slot], L1_SS_SIZE / L1_S_SIZE);
6759 			va += L1_SS_SIZE;
6760 			pa += L1_SS_SIZE;
6761 			resid -= L1_SS_SIZE;
6762 			continue;
6763 		}
6764 #endif
6765 		/* See if we can use a section mapping. */
6766 		if (L1_S_MAPPABLE_P(va, pa, resid)) {
6767 			const pd_entry_t npde = L1_S_PROTO | pa
6768 #ifdef ARM_MMU_EXTENDED
6769 			    | ((prot & VM_PROT_EXECUTE) ? 0 : L1_S_V6_XN)
6770 			    | (va & 0x80000000 ? 0 : L1_S_V6_nG)
6771 #endif
6772 			    | L1_S_PROT(PTE_KERNEL, prot) | f1
6773 			    | L1_S_DOM(PMAP_DOMAIN_KERNEL);
6774 #ifdef VERBOSE_INIT_ARM
6775 			printf("S");
6776 #endif
6777 			l1pte_set(&pdep[l1slot], npde);
6778 			PDE_SYNC(&pdep[l1slot]);
6779 			va += L1_S_SIZE;
6780 			pa += L1_S_SIZE;
6781 			resid -= L1_S_SIZE;
6782 			continue;
6783 		}
6784 
6785 		/*
6786 		 * Ok, we're going to use an L2 table.  Make sure
6787 		 * one is actually in the corresponding L1 slot
6788 		 * for the current VA.
6789 		 */
6790 		if ((pdep[l1slot] & L1_TYPE_MASK) != L1_TYPE_C)
6791 			panic("%s: no L2 table for VA %#lx", __func__, va);
6792 
6793 		pt_entry_t *ptep = (pt_entry_t *) kernel_pt_lookup(l1pte_pa(pdep[l1slot]));
6794 		if (ptep == NULL)
6795 			panic("%s: can't find L2 table for VA %#lx", __func__,
6796 			    va);
6797 
6798 		ptep += l2pte_index(va);
6799 
6800 		/* See if we can use a L2 large page mapping. */
6801 		if (L2_L_MAPPABLE_P(va, pa, resid)) {
6802 			const pt_entry_t npte = L2_L_PROTO | pa
6803 #ifdef ARM_MMU_EXTENDED
6804 			    | ((prot & VM_PROT_EXECUTE) ? 0 : L2_XS_L_XN)
6805 			    | (va & 0x80000000 ? 0 : L2_XS_nG)
6806 #endif
6807 			    | L2_L_PROT(PTE_KERNEL, prot) | f2l;
6808 #ifdef VERBOSE_INIT_ARM
6809 			printf("L");
6810 #endif
6811 			l2pte_set(ptep, npte, 0);
6812 			PTE_SYNC_RANGE(ptep, L2_L_SIZE / L2_S_SIZE);
6813 			va += L2_L_SIZE;
6814 			pa += L2_L_SIZE;
6815 			resid -= L2_L_SIZE;
6816 			continue;
6817 		}
6818 
6819 #ifdef VERBOSE_INIT_ARM
6820 		printf("P");
6821 #endif
6822 		/* Use a small page mapping. */
6823 		pt_entry_t npte = L2_S_PROTO | pa
6824 #ifdef ARM_MMU_EXTENDED
6825 		    | ((prot & VM_PROT_EXECUTE) ? 0 : L2_XS_XN)
6826 		    | (va & 0x80000000 ? 0 : L2_XS_nG)
6827 #endif
6828 		    | L2_S_PROT(PTE_KERNEL, prot) | f2s;
6829 #ifdef ARM_MMU_EXTENDED
6830 		npte &= ((prot & VM_PROT_EXECUTE) ? ~L2_XS_XN : ~0);
6831 #endif
6832 		l2pte_set(ptep, npte, 0);
6833 		PTE_SYNC(ptep);
6834 		va += PAGE_SIZE;
6835 		pa += PAGE_SIZE;
6836 		resid -= PAGE_SIZE;
6837 	}
6838 #ifdef VERBOSE_INIT_ARM
6839 	printf("\n");
6840 #endif
6841 	return (size);
6842 }
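/*
 * Worked example (sizes invented for illustration): a 72KB chunk whose VA
 * and PA are both 64KB-aligned is too small for a section, so the loop
 * above emits one 64KB large-page mapping followed by two 4KB small-page
 * mappings; with VERBOSE_INIT_ARM enabled the trace would read "LPP".
 * A 1MB-aligned, 1MB-sized chunk would instead be covered by a single
 * section ("S"), and 16MB-aligned multiples of 16MB by supersections
 * ("sS") on v6/v7 MMUs.
 */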
6843 
6844 /********************** Static device map routines ***************************/
6845 
6846 static const struct pmap_devmap *pmap_devmap_table;
6847 
6848 /*
6849  * Register the devmap table.  This is provided in case early console
6850  * initialization needs to register mappings created by bootstrap code
6851  * before pmap_devmap_bootstrap() is called.
6852  */
6853 void
6854 pmap_devmap_register(const struct pmap_devmap *table)
6855 {
6856 
6857 	pmap_devmap_table = table;
6858 }
6859 
6860 /*
6861  * Map all of the static regions in the devmap table, and remember
6862  * the devmap table so other parts of the kernel can look up entries
6863  * later.
6864  */
6865 void
6866 pmap_devmap_bootstrap(vaddr_t l1pt, const struct pmap_devmap *table)
6867 {
6868 	int i;
6869 
6870 	pmap_devmap_table = table;
6871 
6872 	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
6873 #ifdef VERBOSE_INIT_ARM
6874 		printf("devmap: %08lx -> %08lx @ %08lx\n",
6875 		    pmap_devmap_table[i].pd_pa,
6876 		    pmap_devmap_table[i].pd_pa +
6877 			pmap_devmap_table[i].pd_size - 1,
6878 		    pmap_devmap_table[i].pd_va);
6879 #endif
6880 		pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
6881 		    pmap_devmap_table[i].pd_pa,
6882 		    pmap_devmap_table[i].pd_size,
6883 		    pmap_devmap_table[i].pd_prot,
6884 		    pmap_devmap_table[i].pd_cache);
6885 	}
6886 }
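/*
 * A board's static devmap might look roughly like this (illustrative
 * only; addresses are invented and ports normally build the table with
 * whatever helper macros they provide):
 *
 *	static const struct pmap_devmap mydev_devmap[] = {
 *		{
 *			.pd_va = 0xfd000000,
 *			.pd_pa = 0x48000000,
 *			.pd_size = 0x00100000,
 *			.pd_prot = VM_PROT_READ | VM_PROT_WRITE,
 *			.pd_cache = PTE_NOCACHE
 *		},
 *		{ .pd_size = 0 }	<- a zero size terminates the table
 *	};
 *
 *	pmap_devmap_bootstrap(l1pt_va, mydev_devmap);
 */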
6887 
6888 const struct pmap_devmap *
6889 pmap_devmap_find_pa(paddr_t pa, psize_t size)
6890 {
6891 	uint64_t endpa;
6892 	int i;
6893 
6894 	if (pmap_devmap_table == NULL)
6895 		return (NULL);
6896 
6897 	endpa = (uint64_t)pa + (uint64_t)(size - 1);
6898 
6899 	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
6900 		if (pa >= pmap_devmap_table[i].pd_pa &&
6901 		    endpa <= (uint64_t)pmap_devmap_table[i].pd_pa +
6902 			     (uint64_t)(pmap_devmap_table[i].pd_size - 1))
6903 			return (&pmap_devmap_table[i]);
6904 	}
6905 
6906 	return (NULL);
6907 }
6908 
6909 const struct pmap_devmap *
6910 pmap_devmap_find_va(vaddr_t va, vsize_t size)
6911 {
6912 	int i;
6913 
6914 	if (pmap_devmap_table == NULL)
6915 		return (NULL);
6916 
6917 	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
6918 		if (va >= pmap_devmap_table[i].pd_va &&
6919 		    va + size - 1 <= pmap_devmap_table[i].pd_va +
6920 				     pmap_devmap_table[i].pd_size - 1)
6921 			return (&pmap_devmap_table[i]);
6922 	}
6923 
6924 	return (NULL);
6925 }
6926 
6927 /********************** PTE initialization routines **************************/
6928 
6929 /*
6930  * These routines are called when the CPU type is identified to set up
6931  * the PTE prototypes, cache modes, etc.
6932  *
6933  * The variables are always here, just in case modules need to reference
6934  * them (though, they shouldn't).
6935  */
6936 
6937 pt_entry_t	pte_l1_s_cache_mode;
6938 pt_entry_t	pte_l1_s_wc_mode;
6939 pt_entry_t	pte_l1_s_cache_mode_pt;
6940 pt_entry_t	pte_l1_s_cache_mask;
6941 
6942 pt_entry_t	pte_l2_l_cache_mode;
6943 pt_entry_t	pte_l2_l_wc_mode;
6944 pt_entry_t	pte_l2_l_cache_mode_pt;
6945 pt_entry_t	pte_l2_l_cache_mask;
6946 
6947 pt_entry_t	pte_l2_s_cache_mode;
6948 pt_entry_t	pte_l2_s_wc_mode;
6949 pt_entry_t	pte_l2_s_cache_mode_pt;
6950 pt_entry_t	pte_l2_s_cache_mask;
6951 
6952 pt_entry_t	pte_l1_s_prot_u;
6953 pt_entry_t	pte_l1_s_prot_w;
6954 pt_entry_t	pte_l1_s_prot_ro;
6955 pt_entry_t	pte_l1_s_prot_mask;
6956 
6957 pt_entry_t	pte_l2_s_prot_u;
6958 pt_entry_t	pte_l2_s_prot_w;
6959 pt_entry_t	pte_l2_s_prot_ro;
6960 pt_entry_t	pte_l2_s_prot_mask;
6961 
6962 pt_entry_t	pte_l2_l_prot_u;
6963 pt_entry_t	pte_l2_l_prot_w;
6964 pt_entry_t	pte_l2_l_prot_ro;
6965 pt_entry_t	pte_l2_l_prot_mask;
6966 
6967 pt_entry_t	pte_l1_ss_proto;
6968 pt_entry_t	pte_l1_s_proto;
6969 pt_entry_t	pte_l1_c_proto;
6970 pt_entry_t	pte_l2_s_proto;
6971 
6972 void		(*pmap_copy_page_func)(paddr_t, paddr_t);
6973 void		(*pmap_zero_page_func)(paddr_t);
6974 
6975 #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
6976 void
6977 pmap_pte_init_generic(void)
6978 {
6979 
6980 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
6981 	pte_l1_s_wc_mode = L1_S_B;
6982 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
6983 
6984 	pte_l2_l_cache_mode = L2_B|L2_C;
6985 	pte_l2_l_wc_mode = L2_B;
6986 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
6987 
6988 	pte_l2_s_cache_mode = L2_B|L2_C;
6989 	pte_l2_s_wc_mode = L2_B;
6990 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
6991 
6992 	/*
6993 	 * If we have a write-through cache, set B and C.  If
6994 	 * we have a write-back cache, then we assume setting
6995 	 * only C will make those pages write-through (except for those
6996 	 * Cortex CPUs which can read the L1 caches).
6997 	 */
6998 	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop
6999 #if ARM_MMU_V7 > 0
7000 	    || CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)
7001 #endif
7002 #if ARM_MMU_V6 > 0
7003 	    || CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid) /* arm116 errata 399234 */
7004 #endif
7005 	    || false) {
7006 		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
7007 		pte_l2_l_cache_mode_pt = L2_B|L2_C;
7008 		pte_l2_s_cache_mode_pt = L2_B|L2_C;
7009 	} else {
7010 		pte_l1_s_cache_mode_pt = L1_S_C;	/* write through */
7011 		pte_l2_l_cache_mode_pt = L2_C;		/* write through */
7012 		pte_l2_s_cache_mode_pt = L2_C;		/* write through */
7013 	}
7014 
7015 	pte_l1_s_prot_u = L1_S_PROT_U_generic;
7016 	pte_l1_s_prot_w = L1_S_PROT_W_generic;
7017 	pte_l1_s_prot_ro = L1_S_PROT_RO_generic;
7018 	pte_l1_s_prot_mask = L1_S_PROT_MASK_generic;
7019 
7020 	pte_l2_s_prot_u = L2_S_PROT_U_generic;
7021 	pte_l2_s_prot_w = L2_S_PROT_W_generic;
7022 	pte_l2_s_prot_ro = L2_S_PROT_RO_generic;
7023 	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
7024 
7025 	pte_l2_l_prot_u = L2_L_PROT_U_generic;
7026 	pte_l2_l_prot_w = L2_L_PROT_W_generic;
7027 	pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
7028 	pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
7029 
7030 	pte_l1_ss_proto = L1_SS_PROTO_generic;
7031 	pte_l1_s_proto = L1_S_PROTO_generic;
7032 	pte_l1_c_proto = L1_C_PROTO_generic;
7033 	pte_l2_s_proto = L2_S_PROTO_generic;
7034 
7035 	pmap_copy_page_func = pmap_copy_page_generic;
7036 	pmap_zero_page_func = pmap_zero_page_generic;
7037 }
7038 
7039 #if defined(CPU_ARM8)
7040 void
7041 pmap_pte_init_arm8(void)
7042 {
7043 
7044 	/*
7045 	 * ARM8 is compatible with generic, but we need to use
7046 	 * the page tables uncached.
7047 	 */
7048 	pmap_pte_init_generic();
7049 
7050 	pte_l1_s_cache_mode_pt = 0;
7051 	pte_l2_l_cache_mode_pt = 0;
7052 	pte_l2_s_cache_mode_pt = 0;
7053 }
7054 #endif /* CPU_ARM8 */
7055 
7056 #if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
7057 void
7058 pmap_pte_init_arm9(void)
7059 {
7060 
7061 	/*
7062 	 * ARM9 is compatible with generic, but we want to use
7063 	 * write-through caching for now.
7064 	 */
7065 	pmap_pte_init_generic();
7066 
7067 	pte_l1_s_cache_mode = L1_S_C;
7068 	pte_l2_l_cache_mode = L2_C;
7069 	pte_l2_s_cache_mode = L2_C;
7070 
7071 	pte_l1_s_wc_mode = L1_S_B;
7072 	pte_l2_l_wc_mode = L2_B;
7073 	pte_l2_s_wc_mode = L2_B;
7074 
7075 	pte_l1_s_cache_mode_pt = L1_S_C;
7076 	pte_l2_l_cache_mode_pt = L2_C;
7077 	pte_l2_s_cache_mode_pt = L2_C;
7078 }
7079 #endif /* CPU_ARM9 && ARM9_CACHE_WRITE_THROUGH */
7080 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
7081 
7082 #if defined(CPU_ARM10)
7083 void
7084 pmap_pte_init_arm10(void)
7085 {
7086 
7087 	/*
7088 	 * ARM10 is compatible with generic, but we want to use
7089 	 * write-through caching for now.
7090 	 */
7091 	pmap_pte_init_generic();
7092 
7093 	pte_l1_s_cache_mode = L1_S_B | L1_S_C;
7094 	pte_l2_l_cache_mode = L2_B | L2_C;
7095 	pte_l2_s_cache_mode = L2_B | L2_C;
7096 
	pte_l1_s_wc_mode = L1_S_B;
	pte_l2_l_wc_mode = L2_B;
	pte_l2_s_wc_mode = L2_B;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;

}
#endif /* CPU_ARM10 */

#if defined(CPU_ARM11) && defined(ARM11_CACHE_WRITE_THROUGH)
void
pmap_pte_init_arm11(void)
{

	/*
	 * ARM11 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_C;
	pte_l2_l_cache_mode = L2_C;
	pte_l2_s_cache_mode = L2_C;

	pte_l1_s_wc_mode = L1_S_B;
	pte_l2_l_wc_mode = L2_B;
	pte_l2_s_wc_mode = L2_B;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
}
#endif /* CPU_ARM11 && ARM11_CACHE_WRITE_THROUGH */

#if ARM_MMU_SA1 == 1
void
pmap_pte_init_sa1(void)
{

	/*
	 * The StrongARM SA-1 cache does not have a write-through
	 * mode.  So, do the generic initialization, then reset
	 * the page table cache mode to B=1,C=1, and note that
	 * the PTEs need to be sync'd.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
	pte_l2_l_cache_mode_pt = L2_B|L2_C;
	pte_l2_s_cache_mode_pt = L2_B|L2_C;

	pmap_needs_pte_sync = 1;
}
#endif /* ARM_MMU_SA1 == 1*/

#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
static u_int xscale_use_minidata;
#endif

void
pmap_pte_init_xscale(void)
{
	uint32_t auxctl;
	int write_through = 0;

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_wc_mode = L1_S_B;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_wc_mode = L2_B;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_wc_mode = L2_B;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;

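	/*
	 * Page-table pages are mapped C-only (write-through) so that PTE
	 * updates are visible to the hardware table walker without an
	 * explicit clean of the write-back data cache; see the discussion
	 * in pmap_pte_init_generic().
	 */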
	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;

#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
	/*
	 * The XScale core has an enhanced mode where writes that
	 * miss the cache cause a cache line to be allocated.  This
	 * is significantly faster than the traditional write-through
	 * behavior for this case.
	 */
	pte_l1_s_cache_mode |= L1_S_XS_TEX(TEX_XSCALE_X);
	pte_l2_l_cache_mode |= L2_XS_L_TEX(TEX_XSCALE_X);
	pte_l2_s_cache_mode |= L2_XS_T_TEX(TEX_XSCALE_X);
#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */

#ifdef XSCALE_CACHE_WRITE_THROUGH
	/*
	 * Some versions of the XScale core have various bugs in
	 * their cache units, the work-around for which is to run
	 * the cache in write-through mode.  Unfortunately, this
	 * has a major (negative) impact on performance.  So, we
	 * go ahead and run fast-and-loose, in the hopes that we
	 * don't line up the planets in a way that will trip the
	 * bugs.
	 *
	 * However, we give you the option to be slow-but-correct.
	 */
	write_through = 1;
#elif defined(XSCALE_CACHE_WRITE_BACK)
	/* force write back cache mode */
	write_through = 0;
#elif defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270)
	/*
	 * Intel PXA2[15]0 processors are known to have a bug in
	 * write-back cache on revision 4 and earlier (stepping
	 * A[01] and B[012]).  Fixed for C0 and later.
	 */
	{
		uint32_t id, type;

		id = cpufunc_id();
		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);

		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
			if ((id & CPU_ID_REVISION_MASK) < 5) {
				/* write through for stepping A0-1 and B0-2 */
				write_through = 1;
			}
		}
	}
#endif /* XSCALE_CACHE_WRITE_THROUGH */

	if (write_through) {
		pte_l1_s_cache_mode = L1_S_C;
		pte_l2_l_cache_mode = L2_C;
		pte_l2_s_cache_mode = L2_C;
	}
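	/*
	 * Note that only the modes for normal mappings were downgraded
	 * above; the page-table modes were already set to write-through.
	 */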

#if (ARM_NMMUS > 1)
	xscale_use_minidata = 1;
#endif

	pte_l1_s_prot_u = L1_S_PROT_U_xscale;
	pte_l1_s_prot_w = L1_S_PROT_W_xscale;
	pte_l1_s_prot_ro = L1_S_PROT_RO_xscale;
	pte_l1_s_prot_mask = L1_S_PROT_MASK_xscale;

	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
	pte_l2_s_prot_ro = L2_S_PROT_RO_xscale;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

	pte_l2_l_prot_u = L2_L_PROT_U_xscale;
	pte_l2_l_prot_w = L2_L_PROT_W_xscale;
	pte_l2_l_prot_ro = L2_L_PROT_RO_xscale;
	pte_l2_l_prot_mask = L2_L_PROT_MASK_xscale;

	pte_l1_ss_proto = L1_SS_PROTO_xscale;
	pte_l1_s_proto = L1_S_PROTO_xscale;
	pte_l1_c_proto = L1_C_PROTO_xscale;
	pte_l2_s_proto = L2_S_PROTO_xscale;

	pmap_copy_page_func = pmap_copy_page_xscale;
	pmap_zero_page_func = pmap_zero_page_xscale;

	/*
	 * Disable ECC protection of page table access, for now.
	 */
	auxctl = armreg_auxctl_read();
	auxctl &= ~XSCALE_AUXCTL_P;
	armreg_auxctl_write(auxctl);
}

/*
 * xscale_setup_minidata:
 *
 *	Set up the mini-data cache clean area.  We require the
 *	caller to allocate the right amount of physically and
 *	virtually contiguous space.
 */
void
xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
{
	extern vaddr_t xscale_minidata_clean_addr;
	extern vsize_t xscale_minidata_clean_size; /* already initialized */
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	vsize_t size;
	uint32_t auxctl;

	xscale_minidata_clean_addr = va;

	/* Round it to page size. */
	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

	for (; size != 0;
	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
		const size_t l1slot = l1pte_index(va);
		pt_entry_t *ptep = (pt_entry_t *) kernel_pt_lookup(l1pte_pa(pde[l1slot]));
		if (ptep == NULL)
			panic("xscale_setup_minidata: can't find L2 table for "
			    "VA 0x%08lx", va);

		ptep += l2pte_index(va);
		pt_entry_t opte = *ptep;
		l2pte_set(ptep,
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ)
		    | L2_C | L2_XS_T_TEX(TEX_XSCALE_X), opte);
	}

	/*
	 * Configure the mini-data cache for write-back with
	 * read/write-allocate.
	 *
	 * NOTE: In order to reconfigure the mini-data cache, we must
	 * make sure it contains no valid data!  In order to do that,
	 * we must issue a global data cache invalidate command!
	 *
	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
	 * THIS IS VERY IMPORTANT!
	 */

	/* Invalidate data and mini-data. */
	__asm volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
	auxctl = armreg_auxctl_read();
	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
	armreg_auxctl_write(auxctl);
}

/*
 * Change the PTEs for the specified kernel mappings such that they
 * will use the mini data cache instead of the main data cache.
 */
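/*
 * Implementation note (assumption, cf. l2pte_minidata_p()): on XScale a
 * cacheable PTE with the X TEX bit set and the B bit clear selects the
 * mini-data cache, so clearing L2_B below is what switches a mapping from
 * the main data cache to the mini-data cache.
 */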
void
pmap_uarea(vaddr_t va)
{
	vaddr_t next_bucket, eva;

#if (ARM_NMMUS > 1)
	if (xscale_use_minidata == 0)
		return;
#endif

	eva = va + USPACE;

	while (va < eva) {
		next_bucket = L2_NEXT_BUCKET_VA(va);
		if (next_bucket > eva)
			next_bucket = eva;

		struct l2_bucket *l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		KDASSERT(l2b != NULL);

		pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)];
		pt_entry_t *ptep = sptep;

		while (va < next_bucket) {
			const pt_entry_t opte = *ptep;
			if (!l2pte_minidata_p(opte)) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_tlb_flushD_SE(va);
				l2pte_set(ptep, opte & ~L2_B, opte);
			}
			ptep += PAGE_SIZE / L2_S_SIZE;
			va += PAGE_SIZE;
		}
		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
	}
	cpu_cpwait();
}
#endif /* ARM_MMU_XSCALE == 1 */


#if defined(CPU_ARM11MPCORE)

void
pmap_pte_init_arm11mpcore(void)
{

	/* cache mode is controlled by 5 bits (B, C, TEX) */
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv6;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv6;
#if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
	/* use extended small page (without APn, with TEX) */
	pte_l2_s_cache_mask = L2_XS_CACHE_MASK_armv6;
#else
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv6c;
#endif

	/* write-back, write-allocate */
	pte_l1_s_cache_mode = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01);
	pte_l2_l_cache_mode = L2_C | L2_B | L2_V6_L_TEX(0x01);
#if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
	pte_l2_s_cache_mode = L2_C | L2_B | L2_V6_XS_TEX(0x01);
#else
	/* no TEX. read-allocate */
	pte_l2_s_cache_mode = L2_C | L2_B;
#endif
	/*
	 * write-back, write-allocate for page tables.
	 */
	pte_l1_s_cache_mode_pt = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01);
	pte_l2_l_cache_mode_pt = L2_C | L2_B | L2_V6_L_TEX(0x01);
#if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
	pte_l2_s_cache_mode_pt = L2_C | L2_B | L2_V6_XS_TEX(0x01);
#else
	pte_l2_s_cache_mode_pt = L2_C | L2_B;
#endif

	pte_l1_s_prot_u = L1_S_PROT_U_armv6;
	pte_l1_s_prot_w = L1_S_PROT_W_armv6;
	pte_l1_s_prot_ro = L1_S_PROT_RO_armv6;
	pte_l1_s_prot_mask = L1_S_PROT_MASK_armv6;

#if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
	pte_l2_s_prot_u = L2_S_PROT_U_armv6n;
	pte_l2_s_prot_w = L2_S_PROT_W_armv6n;
	pte_l2_s_prot_ro = L2_S_PROT_RO_armv6n;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_armv6n;

#else
	/* with AP[0..3] */
	pte_l2_s_prot_u = L2_S_PROT_U_generic;
	pte_l2_s_prot_w = L2_S_PROT_W_generic;
	pte_l2_s_prot_ro = L2_S_PROT_RO_generic;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
#endif

#ifdef	ARM11MPCORE_COMPAT_MMU
	/* with AP[0..3] */
	pte_l2_l_prot_u = L2_L_PROT_U_generic;
	pte_l2_l_prot_w = L2_L_PROT_W_generic;
	pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
	pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;

	pte_l1_ss_proto = L1_SS_PROTO_armv6;
	pte_l1_s_proto = L1_S_PROTO_armv6;
	pte_l1_c_proto = L1_C_PROTO_armv6;
	pte_l2_s_proto = L2_S_PROTO_armv6c;
#else
	pte_l2_l_prot_u = L2_L_PROT_U_armv6n;
	pte_l2_l_prot_w = L2_L_PROT_W_armv6n;
	pte_l2_l_prot_ro = L2_L_PROT_RO_armv6n;
	pte_l2_l_prot_mask = L2_L_PROT_MASK_armv6n;

	pte_l1_ss_proto = L1_SS_PROTO_armv6;
	pte_l1_s_proto = L1_S_PROTO_armv6;
	pte_l1_c_proto = L1_C_PROTO_armv6;
	pte_l2_s_proto = L2_S_PROTO_armv6n;
#endif

	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
	pmap_needs_pte_sync = 1;
}
#endif	/* CPU_ARM11MPCORE */


#if ARM_MMU_V7 == 1
void
pmap_pte_init_armv7(void)
{
	/*
	 * The ARMv7-A MMU is mostly compatible with generic. If the
	 * AP field is zero, that now means "no access" rather than
	 * read-only. The prototypes are a little different because of
	 * the XN bit.
	 */
	pmap_pte_init_generic();

	pmap_needs_pte_sync = 1;

	pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv7;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv7;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv7;

	/*
	 * If the core supports coherent walk, then updates to the
	 * translation tables do not require a clean to the point of
	 * unification to ensure visibility by subsequent translation
	 * table walks.  That means we can map everything shareable and
	 * cached and the right thing will happen.
	 */
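	/* ID_MMFR3 bits [23:20] are the "coherent walk" field. */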
	if (__SHIFTOUT(armreg_mmfr3_read(), __BITS(23,20))) {
		pmap_needs_pte_sync = 0;

		/*
		 * write-back, no write-allocate, shareable for normal pages.
		 */
		pte_l1_s_cache_mode |= L1_S_V6_S;
		pte_l2_l_cache_mode |= L2_XS_S;
		pte_l2_s_cache_mode |= L2_XS_S;
	}

	/*
	 * Page tables are just like all other memory.  We can use write-back
	 * since pmap_needs_pte_sync is 1 (or the MMU can read out of cache).
	 */
	pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode;
	pte_l2_l_cache_mode_pt = pte_l2_l_cache_mode;
	pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode;

	/*
	 * Check the Memory Model Features to see if this CPU supports
	 * the TLBIASID coproc op.
	 */
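	/*
	 * ID_MMFR2 bits [19:16] describe the unified TLB operations; a
	 * value of 2 or more indicates that invalidate-by-ASID (TLBIASID)
	 * is implemented.
	 */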
	if (__SHIFTOUT(armreg_mmfr2_read(), __BITS(16,19)) >= 2) {
		arm_has_tlbiasid_p = true;
	}

	pte_l1_s_prot_u = L1_S_PROT_U_armv7;
	pte_l1_s_prot_w = L1_S_PROT_W_armv7;
	pte_l1_s_prot_ro = L1_S_PROT_RO_armv7;
	pte_l1_s_prot_mask = L1_S_PROT_MASK_armv7;

	pte_l2_s_prot_u = L2_S_PROT_U_armv7;
	pte_l2_s_prot_w = L2_S_PROT_W_armv7;
	pte_l2_s_prot_ro = L2_S_PROT_RO_armv7;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_armv7;

	pte_l2_l_prot_u = L2_L_PROT_U_armv7;
	pte_l2_l_prot_w = L2_L_PROT_W_armv7;
	pte_l2_l_prot_ro = L2_L_PROT_RO_armv7;
	pte_l2_l_prot_mask = L2_L_PROT_MASK_armv7;

	pte_l1_ss_proto = L1_SS_PROTO_armv7;
	pte_l1_s_proto = L1_S_PROTO_armv7;
	pte_l1_c_proto = L1_C_PROTO_armv7;
	pte_l2_s_proto = L2_S_PROTO_armv7;

}
#endif /* ARM_MMU_V7 */

/*
 * return the PA of the current L1 table, for use when handling a crash dump
 */
uint32_t
pmap_kernel_L1_addr(void)
{
#ifdef ARM_MMU_EXTENDED
	return pmap_kernel()->pm_l1_pa;
#else
	return pmap_kernel()->pm_l1->l1_physaddr;
#endif
}

#if defined(DDB)
/*
 * A couple of ddb-callable functions for dumping pmaps
 */
void pmap_dump(pmap_t);

static pt_entry_t ncptes[64];
static void pmap_dump_ncpg(pmap_t);

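/*
 * pmap_dump() prints one character per page, 64 pages per row.  As can be
 * read off the switch below: '.' is an invalid PTE, 'D' uncached/unbuffered,
 * 'B' buffered only, 'C' cached only ('m' when bit 0x40 is also set) and
 * 'F' cached+buffered; the character is shifted to lower case when the
 * mapping is user-accessible.  PTEs that are neither cached nor buffered
 * are collected in ncptes[] and summarized by pmap_dump_ncpg() at the end
 * of each row.
 */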
void
pmap_dump(pmap_t pm)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vaddr_t l2_va, l2b_va, va;
	int i, j, k, occ, rows = 0;

	if (pm == pmap_kernel())
		printf("pmap_kernel (%p): ", pm);
	else
		printf("user pmap (%p): ", pm);

#ifdef ARM_MMU_EXTENDED
	printf("l1 at %p\n", pmap_l1_kva(pm));
#else
	printf("domain %d, l1 at %p\n", pmap_domain(pm), pmap_l1_kva(pm));
#endif

	l2_va = 0;
	for (i = 0; i < L2_SIZE; i++, l2_va += 0x01000000) {
		l2 = pm->pm_l2[i];

		if (l2 == NULL || l2->l2_occupancy == 0)
			continue;

		l2b_va = l2_va;
		for (j = 0; j < L2_BUCKET_SIZE; j++, l2b_va += 0x00100000) {
			l2b = &l2->l2_bucket[j];

			if (l2b->l2b_occupancy == 0 || l2b->l2b_kva == NULL)
				continue;

			ptep = l2b->l2b_kva;

			for (k = 0; k < 256 && ptep[k] == 0; k++)
				;

			k &= ~63;
			occ = l2b->l2b_occupancy;
			va = l2b_va + (k * 4096);
			for (; k < 256; k++, va += 0x1000) {
				char ch = ' ';
				if ((k % 64) == 0) {
					if ((rows % 8) == 0) {
						printf(
"          |0000   |8000   |10000  |18000  |20000  |28000  |30000  |38000\n");
					}
					printf("%08lx: ", va);
				}

				ncptes[k & 63] = 0;
				pte = ptep[k];
				if (pte == 0) {
					ch = '.';
				} else {
					occ--;
					switch (pte & 0x0c) {
					case 0x00:
						ch = 'D'; /* No cache No buff */
						break;
					case 0x04:
						ch = 'B'; /* No cache buff */
						break;
					case 0x08:
						if (pte & 0x40)
							ch = 'm';
						else
							ch = 'C'; /* Cache No buff */
						break;
					case 0x0c:
						ch = 'F'; /* Cache Buff */
						break;
					}

					if ((pte & L2_S_PROT_U) == L2_S_PROT_U)
						ch += 0x20;

					if ((pte & 0xc) == 0)
						ncptes[k & 63] = pte;
				}

				if ((k % 64) == 63) {
					rows++;
					printf("%c\n", ch);
					pmap_dump_ncpg(pm);
					if (occ == 0)
						break;
				} else
					printf("%c", ch);
			}
		}
	}
}

static void
pmap_dump_ncpg(pmap_t pm)
{
	struct vm_page *pg;
	struct vm_page_md *md;
	struct pv_entry *pv;
	int i;

	for (i = 0; i < 64; i++) {
		if (ncptes[i] == 0)
			continue;

		pg = PHYS_TO_VM_PAGE(l2pte_pa(ncptes[i]));
		if (pg == NULL)
			continue;
		md = VM_PAGE_TO_MD(pg);

		printf(" pa 0x%08lx: krw %d kro %d urw %d uro %d\n",
		    VM_PAGE_TO_PHYS(pg),
		    md->krw_mappings, md->kro_mappings,
		    md->urw_mappings, md->uro_mappings);

		SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
			printf("   %c va 0x%08lx, flags 0x%x\n",
			    (pm == pv->pv_pmap) ? '*' : ' ',
			    pv->pv_va, pv->pv_flags);
		}
	}
}
#endif

#ifdef PMAP_STEAL_MEMORY
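/*
 * pmap_boot_pageadd:
 *
 *	Add a chunk of boot memory to pmap_boot_freeq, keeping the queue
 *	sorted by physical address and coalescing the new entry with
 *	physically adjacent entries where possible.
 */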
void
pmap_boot_pageadd(pv_addr_t *newpv)
{
	pv_addr_t *pv, *npv;

	if ((pv = SLIST_FIRST(&pmap_boot_freeq)) != NULL) {
		if (newpv->pv_pa < pv->pv_va) {
			KASSERT(newpv->pv_pa + newpv->pv_size <= pv->pv_pa);
			if (newpv->pv_pa + newpv->pv_size == pv->pv_pa) {
				newpv->pv_size += pv->pv_size;
				SLIST_REMOVE_HEAD(&pmap_boot_freeq, pv_list);
			}
			pv = NULL;
		} else {
			for (; (npv = SLIST_NEXT(pv, pv_list)) != NULL;
			     pv = npv) {
				KASSERT(pv->pv_pa + pv->pv_size < npv->pv_pa);
				KASSERT(pv->pv_pa < newpv->pv_pa);
				if (newpv->pv_pa > npv->pv_pa)
					continue;
				if (pv->pv_pa + pv->pv_size == newpv->pv_pa) {
					pv->pv_size += newpv->pv_size;
					return;
				}
				if (newpv->pv_pa + newpv->pv_size < npv->pv_pa)
					break;
				newpv->pv_size += npv->pv_size;
				SLIST_INSERT_AFTER(pv, newpv, pv_list);
				SLIST_REMOVE_AFTER(newpv, pv_list);
				return;
			}
		}
	}

	if (pv) {
		SLIST_INSERT_AFTER(pv, newpv, pv_list);
	} else {
		SLIST_INSERT_HEAD(&pmap_boot_freeq, newpv, pv_list);
	}
}

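/*
 * pmap_boot_pagealloc:
 *
 *	Carve "amount" bytes out of the boot free queue, constrained so
 *	that (pa & mask) == match, zero the memory and return it in *rpv.
 *	If no queue entry is large enough, try to grow the last entry by
 *	mapping additional pages from the following physical segment.
 */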
void
pmap_boot_pagealloc(psize_t amount, psize_t mask, psize_t match,
	pv_addr_t *rpv)
{
	pv_addr_t *pv, **pvp;
	struct vm_physseg *ps;
	size_t i;

	KASSERT((amount & PGOFSET) == 0);
	KASSERT((mask & PGOFSET) == 0);
	KASSERT((match & PGOFSET) == 0);
	KASSERT(amount != 0);

	for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
	     (pv = *pvp) != NULL;
	     pvp = &SLIST_NEXT(pv, pv_list)) {
		pv_addr_t *newpv;
		psize_t off;
		/*
		 * If this entry is too small to satisfy the request...
		 */
		KASSERT(pv->pv_size > 0);
		if (pv->pv_size < amount)
			continue;

		for (off = 0; off <= mask; off += PAGE_SIZE) {
			if (((pv->pv_pa + off) & mask) == match
			    && off + amount <= pv->pv_size)
				break;
		}
		if (off > mask)
			continue;

		rpv->pv_va = pv->pv_va + off;
		rpv->pv_pa = pv->pv_pa + off;
		rpv->pv_size = amount;
		pv->pv_size -= amount;
		if (pv->pv_size == 0) {
			KASSERT(off == 0);
			KASSERT((vaddr_t) pv == rpv->pv_va);
			*pvp = SLIST_NEXT(pv, pv_list);
		} else if (off == 0) {
			KASSERT((vaddr_t) pv == rpv->pv_va);
			newpv = (pv_addr_t *) (rpv->pv_va + amount);
			*newpv = *pv;
			newpv->pv_pa += amount;
			newpv->pv_va += amount;
			*pvp = newpv;
		} else if (off < pv->pv_size) {
			newpv = (pv_addr_t *) (rpv->pv_va + amount);
			*newpv = *pv;
			newpv->pv_size -= off;
			newpv->pv_pa += off + amount;
			newpv->pv_va += off + amount;

			SLIST_NEXT(pv, pv_list) = newpv;
			pv->pv_size = off;
		} else {
			KASSERT((vaddr_t) pv != rpv->pv_va);
		}
		memset((void *)rpv->pv_va, 0, amount);
		return;
	}

	if (vm_nphysseg == 0)
		panic("pmap_boot_pagealloc: couldn't allocate memory");

	for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
	     (pv = *pvp) != NULL;
	     pvp = &SLIST_NEXT(pv, pv_list)) {
		if (SLIST_NEXT(pv, pv_list) == NULL)
			break;
	}
	KASSERT(mask == 0);
	for (i = 0; i < vm_nphysseg; i++) {
		ps = VM_PHYSMEM_PTR(i);
		if (ps->avail_start == atop(pv->pv_pa + pv->pv_size)
		    && pv->pv_va + pv->pv_size <= ptoa(ps->avail_end)) {
			rpv->pv_va = pv->pv_va;
			rpv->pv_pa = pv->pv_pa;
			rpv->pv_size = amount;
			*pvp = NULL;
			pmap_map_chunk(kernel_l1pt.pv_va,
			     ptoa(ps->avail_start) + (pv->pv_va - pv->pv_pa),
			     ptoa(ps->avail_start),
			     amount - pv->pv_size,
			     VM_PROT_READ|VM_PROT_WRITE,
			     PTE_CACHE);
			ps->avail_start += atop(amount - pv->pv_size);
			/*
			 * If we consumed the entire physseg, remove it.
			 */
			if (ps->avail_start == ps->avail_end) {
				for (--vm_nphysseg; i < vm_nphysseg; i++)
					VM_PHYSMEM_PTR_SWAP(i, i + 1);
			}
			memset((void *)rpv->pv_va, 0, rpv->pv_size);
			return;
		}
	}

	panic("pmap_boot_pagealloc: couldn't allocate memory");
}

vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	pv_addr_t pv;

	pmap_boot_pagealloc(size, 0, 0, &pv);

	return pv.pv_va;
}
#endif /* PMAP_STEAL_MEMORY */

SYSCTL_SETUP(sysctl_machdep_pmap_setup, "sysctl machdep.kmpages setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
			CTLFLAG_PERMANENT,
			CTLTYPE_NODE, "machdep", NULL,
			NULL, 0, NULL, 0,
			CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
			CTLFLAG_PERMANENT,
			CTLTYPE_INT, "kmpages",
			SYSCTL_DESCR("count of pages allocated to kernel memory allocators"),
			NULL, 0, &pmap_kmpages, 0,
			CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}

#ifdef PMAP_NEED_ALLOC_POOLPAGE
struct vm_page *
arm_pmap_alloc_poolpage(int flags)
{
	/*
	 * On some systems, only some pages may be "coherent" for DMA, and we
	 * want to prefer those for pool pages (think mbufs), but fall back to
	 * any page if none is available.  However, we can only fall back if
	 * we aren't direct-mapping memory, or if all of memory can be
	 * direct-mapped.  If that isn't true, pool pages can only come from
	 * direct-mapped memory.
	 */
	if (arm_poolpage_vmfreelist != VM_FREELIST_DEFAULT) {
		return uvm_pagealloc_strat(NULL, 0, NULL, flags,
		    UVM_PGA_STRAT_FALLBACK,
		    arm_poolpage_vmfreelist);
	}

	return uvm_pagealloc(NULL, 0, NULL, flags);
}
#endif

#if defined(ARM_MMU_EXTENDED) && defined(MULTIPROCESSOR)
void
pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
	/* nothing */
}

int
pic_ipi_shootdown(void *arg)
{
#if PMAP_TLB_NEED_SHOOTDOWN
	pmap_tlb_shootdown_process();
#endif
	return 1;
}
#endif /* ARM_MMU_EXTENDED && MULTIPROCESSOR */


#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
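/*
 * pmap_direct_mapped_phys:
 *
 *	Return the direct-mapped VA for "pa" when it lies inside managed
 *	physical memory (and, with ARM_MMU_EXTENDED, below pmap_directlimit);
 *	otherwise return the caller-supplied fallback "va".  *ok_p reports
 *	whether the translation succeeded.
 */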
vaddr_t
pmap_direct_mapped_phys(paddr_t pa, bool *ok_p, vaddr_t va)
{
	bool ok = false;
	if (physical_start <= pa && pa < physical_end) {
#ifdef KERNEL_BASE_VOFFSET
		const vaddr_t newva = pa + KERNEL_BASE_VOFFSET;
#else
		const vaddr_t newva = KERNEL_BASE + pa - physical_start;
#endif
#ifdef ARM_MMU_EXTENDED
		if (newva >= KERNEL_BASE && newva < pmap_directlimit) {
#endif
			va = newva;
			ok = true;
#ifdef ARM_MMU_EXTENDED
		}
#endif
	}
	KASSERT(ok_p);
	*ok_p = ok;
	return va;
}

vaddr_t
pmap_map_poolpage(paddr_t pa)
{
	bool ok __diagused;
	vaddr_t va = pmap_direct_mapped_phys(pa, &ok, 0);
	KASSERTMSG(ok, "pa %#lx not direct mappable", pa);
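	/*
	 * With a virtually-indexed cache and no ARM_MMU_EXTENDED support,
	 * the new direct-map alias may conflict with other cacheable
	 * mappings of the page, so let pmap_vac_me_harder() fix up the
	 * cache state for this physical page.
	 */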
#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
	if (arm_cache_prefer_mask != 0) {
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
		pmap_acquire_page_lock(md);
		pmap_vac_me_harder(md, pa, pmap_kernel(), va);
		pmap_release_page_lock(md);
	}
#endif
	return va;
}

paddr_t
pmap_unmap_poolpage(vaddr_t va)
{
	KASSERT(va >= KERNEL_BASE);
#ifdef PMAP_CACHE_VIVT
	cpu_idcache_wbinv_range(va, PAGE_SIZE);
#endif
#if defined(KERNEL_BASE_VOFFSET)
	return va - KERNEL_BASE_VOFFSET;
#else
	return va - KERNEL_BASE + physical_start;
#endif
}
#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */