/*	$NetBSD: pmap_tlb.c,v 1.18 2016/07/23 20:06:25 matt Exp $	*/

/*-
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas at 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.18 2016/07/23 20:06:25 matt Exp $");

/*
 * Manages address spaces in a TLB.
 *
 * Normally there is a 1:1 mapping between a TLB and a CPU.  However, some
 * implementations may share a TLB between multiple CPUs (really CPU thread
 * contexts).  This requires the TLB abstraction to be separated from the
 * CPU abstraction.  It also requires that the TLB be locked while doing
 * TLB activities.
 *
 * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
 * that have a valid ASID.
 *
 * We allocate ASIDs in increasing order until we have exhausted the supply,
 * then reinitialize the ASID space, and start allocating again at 1.  When
 * allocating from the ASID bitmap, we skip any ASID whose corresponding bit
 * is already set in the bitmap.  Eventually this causes the ASID bitmap to
 * fill and, when completely filled, a reinitialization of the ASID space.
 *
 * To reinitialize the ASID space, the ASID bitmap is reset and then the ASIDs
 * of non-kernel TLB entries get recorded in the ASID bitmap.  If the entries
 * in the TLB consume more than half of the ASID space, all ASIDs are
 * invalidated, the ASID bitmap is recleared, and the list of pmaps is
 * emptied.  Otherwise (the normal case), any ASID present in the TLB (even
 * those which are no longer used by a pmap) will remain active (allocated)
 * and all other ASIDs will be freed.  If the size of the TLB is much smaller
 * than the ASID space, this algorithm completely avoids TLB invalidation.
 *
 * For multiprocessors, we also have to deal with TLB invalidation requests
 * from other CPUs, some of which are handled by reinitializing the ASID
 * space.  Whereas above we keep the ASIDs of those pmaps which have active
 * TLB entries, this type of reinitialization preserves the ASIDs of any
 * "onproc" user pmap and frees all other ASIDs.  We must do this since we
 * can't change the current ASID.
 *
 * Each pmap has two bitmaps: pm_active and pm_onproc.  Each bit in pm_active
 * indicates whether that pmap has an allocated ASID for a CPU.  Each bit in
 * pm_onproc indicates that the pmap's ASID is active (equal to the ASID in
 * COP 0 register EntryHi) on a CPU.  The bit number comes from the CPU's
 * cpu_index().  Even though these bitmaps contain the bits for all CPUs,
 * the bits belonging to the CPUs sharing a TLB can only be manipulated
 * while holding that TLB's lock.  Atomic ops must be used to update them
 * since multiple CPUs may be changing different sets of bits at the same
 * time but these sets never overlap.
 *
 * When a change to the local TLB may require a change in the TLBs of other
 * CPUs, we try to avoid sending an IPI if at all possible.  For instance, if
 * we are updating a PTE and that PTE previously was invalid and therefore
 * couldn't support an active mapping, there's no need for an IPI since there
 * can't be a TLB entry to invalidate.  The other case is when we change a
 * PTE to be modified: we just update the local TLB.  If another TLB has a
 * stale entry, a TLB MOD exception will be raised and that will cause the
 * local TLB to be updated.
 *
 * We never need to update a non-local TLB if the pmap doesn't have a valid
 * ASID for that TLB.  If it does have a valid ASID but isn't currently
 * "onproc", we simply reset its ASID for that TLB and then when it goes
 * "onproc" it will allocate a new ASID and any existing TLB entries will be
 * orphaned.  Only in the case that the pmap has an "onproc" ASID do we
 * actually have to send an IPI.
 *
 * Once we have determined we must send an IPI to shoot down a TLB, we need
 * to send it to one of the CPUs that share that TLB.  We choose the lowest
 * numbered CPU that has the pmap's ASID "onproc".  In reality, any CPU
 * sharing that TLB would do, but interrupting an active CPU seems best.
 *
 * A TLB might have multiple shootdowns active concurrently.  The shootdown
 * logic compresses these into a few cases:
 *	0) nobody needs to have its TLB entries invalidated
 *	1) one ASID needs to have its TLB entries invalidated
 *	2) more than one ASID needs to have its TLB entries invalidated
 *	3) the kernel needs to have its TLB entries invalidated
 *	4) the kernel and one or more ASIDs need their TLB entries invalidated.
 *
 * And for each case we do:
 *	0) nothing,
 *	1) if that ASID is still "onproc", we invalidate the TLB entries for
 *	   that single ASID.  If not, just reset the pmap's ASID to invalidate
 *	   and let it allocate a new ASID the next time it goes "onproc",
 *	2) we reinitialize the ASID space (preserving any "onproc" ASIDs) and
 *	   invalidate all non-wired non-global TLB entries,
 *	3) we invalidate all of the non-wired global TLB entries,
 *	4) we reinitialize the ASID space (again preserving any "onproc"
 *	   ASIDs) and invalidate all non-wired TLB entries.
 *
 * As you can see, shootdowns are not concerned with addresses, just address
 * spaces.  Since the number of TLB entries is usually quite small, this
 * avoids a lot of overhead for not much gain.
 */
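
/*
 * A concrete (purely illustrative) instance of the reinitialization policy
 * above: with 8-bit ASIDs (ti_asid_max == 255) and a hypothetical 64-entry
 * TLB, tlb_record_asids() can find at most 64 in-use ASIDs, well under the
 * ti_asid_max / 2 threshold, so the common-case reinitialization never
 * needs to invalidate the TLB.
 */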

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kernel.h>			/* for cold */
#include <sys/cpu.h>

#include <uvm/uvm.h>

static kmutex_t pmap_tlb0_lock __cacheline_aligned;

#define	IFCONSTANT(x)	(__builtin_constant_p((x)) ? (x) : 0)

struct pmap_tlb_info pmap_tlb0_info = {
	.ti_name = "tlb0",
	.ti_asid_hint = KERNEL_PID + 1,
#ifdef PMAP_TLB_NUM_PIDS
	.ti_asid_max = IFCONSTANT(PMAP_TLB_NUM_PIDS - 1),
	.ti_asids_free = IFCONSTANT(PMAP_TLB_NUM_PIDS - (1 + KERNEL_PID)),
#endif
	.ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1,
#ifdef PMAP_TLB_WIRED_UPAGES
	.ti_wired = PMAP_TLB_WIRED_UPAGES,
#endif
	.ti_lock = &pmap_tlb0_lock,
	.ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb0_info.ti_pais),
#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
	.ti_tlbinvop = TLBINV_NOBODY,
#endif
};

#undef IFCONSTANT

#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
struct pmap_tlb_info *pmap_tlbs[PMAP_TLB_MAX] = {
	[0] = &pmap_tlb0_info,
};
u_int pmap_ntlbs = 1;
#endif

#define	__BITMAP_SET(bm, n) \
	((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0]))))
#define	__BITMAP_CLR(bm, n) \
	((bm)[(n) / (8*sizeof(bm[0]))] &= ~(1LU << ((n) % (8*sizeof(bm[0])))))
#define	__BITMAP_ISSET_P(bm, n) \
	(((bm)[(n) / (8*sizeof(bm[0]))] & (1LU << ((n) % (8*sizeof(bm[0]))))) != 0)

#define	TLBINFO_ASID_MARK_UNUSED(ti, asid) \
	__BITMAP_CLR((ti)->ti_asid_bitmap, (asid))
#define	TLBINFO_ASID_MARK_USED(ti, asid) \
	__BITMAP_SET((ti)->ti_asid_bitmap, (asid))
#define	TLBINFO_ASID_INUSE_P(ti, asid) \
	__BITMAP_ISSET_P((ti)->ti_asid_bitmap, (asid))
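
/*
 * A minimal sketch (kept out of the build) of how the bitmap macros above
 * address a given ASID: with 64-bit words, ASID 70 lives in word
 * 70 / 64 == 1 at bit 70 % 64 == 6.  The function name below is
 * hypothetical, for illustration only.
 */
#if 0
static void
asid_bitmap_example(struct pmap_tlb_info *ti)
{
	const tlb_asid_t asid = 70;

	TLBINFO_ASID_MARK_USED(ti, asid);	/* sets bit 6 of word 1 */
	KASSERT(TLBINFO_ASID_INUSE_P(ti, asid));
	TLBINFO_ASID_MARK_UNUSED(ti, asid);	/* clears bit 6 of word 1 */
	KASSERT(!TLBINFO_ASID_INUSE_P(ti, asid));
}
#endif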

#ifdef MULTIPROCESSOR
__unused static inline bool
pmap_tlb_intersecting_active_p(pmap_t pm, struct pmap_tlb_info *ti)
{
#if PMAP_TLB_MAX == 1
	return !kcpuset_iszero(pm->pm_active);
#else
	return kcpuset_intersecting_p(pm->pm_active, ti->ti_kcpuset);
#endif
}

static inline bool
pmap_tlb_intersecting_onproc_p(pmap_t pm, struct pmap_tlb_info *ti)
{
#if PMAP_TLB_MAX == 1
	return !kcpuset_iszero(pm->pm_onproc);
#else
	return kcpuset_intersecting_p(pm->pm_onproc, ti->ti_kcpuset);
#endif
}
#endif

static void
pmap_tlb_pai_check(struct pmap_tlb_info *ti, bool locked_p)
{
#ifdef DIAGNOSTIC
	struct pmap_asid_info *pai;
	if (!locked_p)
		TLBINFO_LOCK(ti);
	LIST_FOREACH(pai, &ti->ti_pais, pai_link) {
		KASSERT(pai != NULL);
		KASSERT(PAI_PMAP(pai, ti) != pmap_kernel());
		KASSERT(pai->pai_asid > KERNEL_PID);
		KASSERTMSG(pai->pai_asid <= ti->ti_asid_max,
		    "pm %p asid %#x", PAI_PMAP(pai, ti), pai->pai_asid);
		KASSERTMSG(TLBINFO_ASID_INUSE_P(ti, pai->pai_asid),
		    "pm %p asid %u", PAI_PMAP(pai, ti), pai->pai_asid);
#ifdef MULTIPROCESSOR
		KASSERT(pmap_tlb_intersecting_active_p(PAI_PMAP(pai, ti), ti));
#endif
	}
	if (!locked_p)
		TLBINFO_UNLOCK(ti);
#endif
}

static void
pmap_tlb_pai_reset(struct pmap_tlb_info *ti, struct pmap_asid_info *pai,
	struct pmap *pm)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "(ti=%p, pai=%p, pm=%p): asid %u",
	    ti, pai, pm, pai->pai_asid);

	/*
	 * We must have an ASID but it must not be onproc (on a processor).
	 */
	KASSERT(pai->pai_asid > KERNEL_PID);
	KASSERT(pai->pai_asid <= ti->ti_asid_max);
#if defined(MULTIPROCESSOR)
	KASSERT(pmap_tlb_intersecting_active_p(pm, ti));
	KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
#endif
	LIST_REMOVE(pai, pai_link);
#ifdef DIAGNOSTIC
	pai->pai_link.le_prev = NULL;	/* tagged as unlinked */
#endif
	/*
	 * If the platform has a cheap way to flush ASIDs then free the ASID
	 * back into the pool.  On multiprocessor systems, we will flush the
	 * ASID from the TLB when it's allocated.  That way we know the flush
	 * was always done in the correct TLB space.  On uniprocessor systems,
	 * just do the flush now since we know that it has been used.  This has
	 * a bit less overhead.  Either way, this will mean that we will only
	 * need to flush all ASIDs if all ASIDs are in use and we need to
	 * allocate a new one.
	 */
	if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
#ifndef MULTIPROCESSOR
		tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
#endif
		if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
			TLBINFO_ASID_MARK_UNUSED(ti, pai->pai_asid);
			ti->ti_asids_free++;
		}
	}
	/*
	 * Note that we don't mark the ASID as not in use in the TLB's ASID
	 * bitmap (thus it can't be allocated until the ASID space is exhausted
	 * and therefore reinitialized).  We don't want to flush the TLB for
	 * entries belonging to this ASID so we will let natural TLB entry
	 * replacement flush them out of the TLB.  Any new entries for this
	 * pmap will need a new ASID allocated.
	 */
	pai->pai_asid = 0;

#if defined(MULTIPROCESSOR)
	/*
	 * The bits in pm_active belonging to this TLB can only be changed
	 * while this TLB's lock is held.
	 */
#if PMAP_TLB_MAX == 1
	kcpuset_zero(pm->pm_active);
#else
	kcpuset_remove(pm->pm_active, ti->ti_kcpuset);
#endif
	KASSERT(!pmap_tlb_intersecting_active_p(pm, ti));
#endif /* MULTIPROCESSOR */

	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti)
{
#if defined(MULTIPROCESSOR)
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_desired,
	    EVCNT_TYPE_MISC, NULL,
	    ti->ti_name, "icache syncs desired");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_asts,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
	    ti->ti_name, "icache sync asts");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_all,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
	    ti->ti_name, "icache full syncs");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_pages,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
	    ti->ti_name, "icache pages synced");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_duplicate,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
	    ti->ti_name, "icache dup pages skipped");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_deferred,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
	    ti->ti_name, "icache pages deferred");
#endif /* MULTIPROCESSOR */
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_asid_reinits,
	    EVCNT_TYPE_MISC, NULL,
	    ti->ti_name, "asid pool reinit");
}

void
pmap_tlb_info_init(struct pmap_tlb_info *ti)
{
#if defined(MULTIPROCESSOR)
#if PMAP_TLB_MAX == 1
	KASSERT(ti == &pmap_tlb0_info);
#else
	if (ti != &pmap_tlb0_info) {
		KASSERT(pmap_ntlbs < PMAP_TLB_MAX);

		KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);

		ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		ti->ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1;
		ti->ti_asid_hint = KERNEL_PID + 1;
		ti->ti_asid_max = pmap_tlbs[0]->ti_asid_max;
		ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
		ti->ti_tlbinvop = TLBINV_NOBODY;
		ti->ti_victim = NULL;
		kcpuset_create(&ti->ti_kcpuset, true);
		ti->ti_index = pmap_ntlbs++;
		ti->ti_wired = 0;
		pmap_tlbs[ti->ti_index] = ti;
		snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u",
		    ti->ti_index);
		pmap_tlb_info_evcnt_attach(ti);
		return;
	}
#endif
#endif /* MULTIPROCESSOR */
	KASSERT(ti == &pmap_tlb0_info);
	KASSERT(ti->ti_lock == &pmap_tlb0_lock);
	//printf("ti_lock %p ", ti->ti_lock);
	mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
	kcpuset_create(&ti->ti_kcpuset, true);
	kcpuset_set(ti->ti_kcpuset, cpu_index(curcpu()));
#endif
	//printf("asid ");
	if (ti->ti_asid_max == 0) {
		ti->ti_asid_max = pmap_md_tlb_asid_max();
		ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
	}

	KASSERT(ti->ti_asid_max < sizeof(ti->ti_asid_bitmap)*8);
}

#if defined(MULTIPROCESSOR)
void
pmap_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
	KASSERT(!CPU_IS_PRIMARY(ci));
	KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	KASSERT(cold);

	TLBINFO_LOCK(ti);
#if PMAP_TLB_MAX > 1
	kcpuset_set(ti->ti_kcpuset, cpu_index(ci));
	cpu_set_tlb_info(ci, ti);
#endif

	/*
	 * Do any MD tlb info init.
	 */
	pmap_md_tlb_info_attach(ti, ci);

	/*
	 * The kernel pmap uses the kcpuset_running set so it's always
	 * up-to-date.
	 */
	TLBINFO_UNLOCK(ti);
}
#endif /* MULTIPROCESSOR */

#ifdef DIAGNOSTIC
static size_t
pmap_tlb_asid_count(struct pmap_tlb_info *ti)
{
	size_t count = 0;
	for (tlb_asid_t asid = 1; asid <= ti->ti_asid_max; asid++) {
		count += TLBINFO_ASID_INUSE_P(ti, asid);
	}
	return count;
}
#endif

static void
pmap_tlb_asid_reinitialize(struct pmap_tlb_info *ti, enum tlb_invalidate_op op)
{
	const size_t asid_bitmap_words =
	    ti->ti_asid_max / (8 * sizeof(ti->ti_asid_bitmap[0]));

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "(ti=%p, op=%u)", ti, op, 0, 0);

	pmap_tlb_pai_check(ti, true);

	ti->ti_evcnt_asid_reinits.ev_count++;

	/*
	 * First, clear the ASID bitmap (except for ASID 0 which belongs
	 * to the kernel).
	 */
	ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
	ti->ti_asid_hint = KERNEL_PID + 1;
	ti->ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1;
	for (size_t word = 1; word <= asid_bitmap_words; word++) {
		ti->ti_asid_bitmap[word] = 0;
	}

	switch (op) {
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	case TLBINV_ALL:
		tlb_invalidate_all();
		break;
	case TLBINV_ALLUSER:
		tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
		break;
#endif /* MULTIPROCESSOR && PMAP_TLB_NEED_SHOOTDOWN */
	case TLBINV_NOBODY: {
		/*
		 * If we are just reclaiming ASIDs in the TLB, let's go find
		 * what ASIDs are in use in the TLB.  Since this is a
		 * semi-expensive operation, we don't want to do it too often.
		 * So if more than half of the ASIDs are in use, we don't have
		 * enough free ASIDs so invalidate the TLB entries with ASIDs
		 * and clear the ASID bitmap.  That will force everyone to
		 * allocate a new ASID.
		 */
#if !defined(MULTIPROCESSOR) || defined(PMAP_TLB_NEED_SHOOTDOWN)
		pmap_tlb_asid_check();
		const u_int asids_found = tlb_record_asids(ti->ti_asid_bitmap,
		    ti->ti_asid_max);
		pmap_tlb_asid_check();
		KASSERT(asids_found == pmap_tlb_asid_count(ti));
		if (__predict_false(asids_found >= ti->ti_asid_max / 2)) {
			tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
#else /* MULTIPROCESSOR && !PMAP_TLB_NEED_SHOOTDOWN */
			/*
			 * For those systems (PowerPC) that don't require
			 * cross cpu TLB shootdowns, we have to invalidate the
			 * entire TLB because we can't record the ASIDs in use
			 * on the other CPUs.  This is hopefully cheaper than
			 * trying to use an IPI to record all the ASIDs on all
			 * the CPUs (which would be a synchronization
			 * nightmare).
			 */
			tlb_invalidate_all();
#endif /* MULTIPROCESSOR && !PMAP_TLB_NEED_SHOOTDOWN */
			ti->ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1;
			for (size_t word = 1;
			     word <= asid_bitmap_words;
			     word++) {
				ti->ti_asid_bitmap[word] = 0;
			}
			ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
#if !defined(MULTIPROCESSOR) || defined(PMAP_TLB_NEED_SHOOTDOWN)
		} else {
			ti->ti_asids_free -= asids_found;
		}
#endif /* !MULTIPROCESSOR || PMAP_TLB_NEED_SHOOTDOWN */
		KASSERTMSG(ti->ti_asids_free <= ti->ti_asid_max, "%u",
		    ti->ti_asids_free);
		break;
	}
	default:
		panic("%s: unexpected op %d", __func__, op);
	}

	/*
	 * Now go through the active ASIDs.  If the ASID is on a processor or
	 * we aren't invalidating all ASIDs and the TLB has an entry owned by
	 * that ASID, mark it as in use.  Otherwise release the ASID.
	 */
	struct pmap_asid_info *pai, *next;
	for (pai = LIST_FIRST(&ti->ti_pais); pai != NULL; pai = next) {
		struct pmap * const pm = PAI_PMAP(pai, ti);
		next = LIST_NEXT(pai, pai_link);
		KASSERT(pm != pmap_kernel());
		KASSERT(pai->pai_asid > KERNEL_PID);
#if defined(MULTIPROCESSOR)
		if (pmap_tlb_intersecting_onproc_p(pm, ti)) {
			if (!TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
				TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
				ti->ti_asids_free--;
			}
			continue;
		}
#endif /* MULTIPROCESSOR */
		if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
			KASSERT(op == TLBINV_NOBODY);
		} else {
			pmap_tlb_pai_reset(ti, pai, pm);
		}
	}
#ifdef DIAGNOSTIC
	size_t free_count __diagused = ti->ti_asid_max - pmap_tlb_asid_count(ti);
	KASSERTMSG(free_count == ti->ti_asids_free,
	    "bitmap error: %zu != %u", free_count, ti->ti_asids_free);
#endif
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
#if PMAP_TLB_MAX == 1
#error shootdown not required for single TLB systems
#endif
void
pmap_tlb_shootdown_process(void)
{
	struct cpu_info * const ci = curcpu();
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
#ifdef DIAGNOSTIC
	struct pmap * const pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
#endif

	KASSERT(cpu_intr_p());
	KASSERTMSG(ci->ci_cpl >= IPL_SCHED,
	    "%s: cpl (%d) < IPL_SCHED (%d)",
	    __func__, ci->ci_cpl, IPL_SCHED);

	TLBINFO_LOCK(ti);

	switch (ti->ti_tlbinvop) {
	case TLBINV_ONE: {
		/*
		 * We only need to invalidate one user ASID.
		 */
		struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti);
		KASSERT(ti->ti_victim != pmap_kernel());
		if (pmap_tlb_intersecting_onproc_p(ti->ti_victim, ti)) {
			/*
			 * The victim is an active pmap so we will just
			 * invalidate its TLB entries.
			 */
			KASSERT(pai->pai_asid > KERNEL_PID);
			pmap_tlb_asid_check();
			tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
			pmap_tlb_asid_check();
		} else if (pai->pai_asid) {
			/*
			 * The victim is no longer an active pmap for this TLB.
			 * So simply clear its ASID and when pmap_activate is
			 * next called for this pmap, it will allocate a new
			 * ASID.
			 */
			KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
			pmap_tlb_pai_reset(ti, pai, PAI_PMAP(pai, ti));
		}
		break;
	}
	case TLBINV_ALLUSER:
		/*
		 * Flush all user TLB entries.
		 */
		pmap_tlb_asid_reinitialize(ti, TLBINV_ALLUSER);
		break;
	case TLBINV_ALLKERNEL:
		/*
		 * We need to invalidate all global TLB entries.
		 */
		pmap_tlb_asid_check();
		tlb_invalidate_globals();
		pmap_tlb_asid_check();
		break;
	case TLBINV_ALL:
		/*
		 * Flush all the TLB entries (user and kernel).
		 */
		pmap_tlb_asid_reinitialize(ti, TLBINV_ALL);
		break;
	case TLBINV_NOBODY:
		/*
		 * Might be spurious or another SMT CPU sharing this TLB
		 * could have already done the work.
		 */
		break;
	}

	/*
	 * Indicate we are done with this shootdown event.
	 */
	ti->ti_victim = NULL;
	ti->ti_tlbinvop = TLBINV_NOBODY;
	TLBINFO_UNLOCK(ti);
}

/*
 * This state machine could be encoded into an array of integers but since all
 * the values fit in 3 bits, the 5 entry "table" fits in a 16 bit value which
 * can be loaded in a single instruction.
 */
#define	TLBINV_MAP(op, nobody, one, alluser, allkernel, all)	\
	((((   (nobody) << 3*TLBINV_NOBODY)			\
	 | (      (one) << 3*TLBINV_ONE)			\
	 | (  (alluser) << 3*TLBINV_ALLUSER)			\
	 | ((allkernel) << 3*TLBINV_ALLKERNEL)			\
	 | (      (all) << 3*TLBINV_ALL)) >> 3*(op)) & 7)

#define	TLBINV_USER_MAP(op)	\
	TLBINV_MAP(op, TLBINV_ONE, TLBINV_ALLUSER, TLBINV_ALLUSER,	\
	    TLBINV_ALL, TLBINV_ALL)

#define	TLBINV_KERNEL_MAP(op)	\
	TLBINV_MAP(op, TLBINV_ALLKERNEL, TLBINV_ALL, TLBINV_ALL,	\
	    TLBINV_ALLKERNEL, TLBINV_ALL)
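
/*
 * A sketch of how the upgrade maps compose (kept out of the build; it
 * assumes the enum ordering TLBINV_NOBODY == 0, TLBINV_ONE == 1,
 * TLBINV_ALLUSER == 2, TLBINV_ALLKERNEL == 3, TLBINV_ALL == 4): a user
 * shootdown arriving while none is pending yields TLBINV_ONE, a second
 * user shootdown widens it to TLBINV_ALLUSER, and a kernel shootdown on
 * top of a user one widens it to TLBINV_ALL.
 */
#if 0
__CTASSERT(TLBINV_USER_MAP(TLBINV_NOBODY) == TLBINV_ONE);
__CTASSERT(TLBINV_USER_MAP(TLBINV_ONE) == TLBINV_ALLUSER);
__CTASSERT(TLBINV_KERNEL_MAP(TLBINV_ONE) == TLBINV_ALL);
#endif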

bool
pmap_tlb_shootdown_bystanders(pmap_t pm)
{
	/*
	 * We don't need to deal with our own TLB.
	 */

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	kcpuset_t *pm_active;
	kcpuset_clone(&pm_active, pm->pm_active);
	kcpuset_remove(pm_active, cpu_tlb_info(curcpu())->ti_kcpuset);
	const bool kernel_p = (pm == pmap_kernel());
	bool ipi_sent = false;

	/*
	 * If pm_active gets more bits set, then it's after all our changes
	 * have been made so they will already be cognizant of them.
	 */

	for (size_t i = 0; !kcpuset_iszero(pm_active); i++) {
		KASSERT(i < pmap_ntlbs);
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
		KASSERT(tlbinfo_index(ti) == i);
		/*
		 * Skip this TLB if there are no active mappings for it.
		 */
		if (!kcpuset_intersecting_p(pm_active, ti->ti_kcpuset))
			continue;
		struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
		kcpuset_remove(pm_active, ti->ti_kcpuset);
		TLBINFO_LOCK(ti);
		cpuid_t j = kcpuset_ffs_intersecting(pm->pm_onproc,
		    ti->ti_kcpuset);
		// post decrement since ffs returns bit + 1 or 0 if no bit
		if (j-- > 0) {
			if (kernel_p) {
				ti->ti_tlbinvop =
				    TLBINV_KERNEL_MAP(ti->ti_tlbinvop);
				ti->ti_victim = NULL;
			} else {
				KASSERT(pai->pai_asid);
				if (__predict_false(ti->ti_victim == pm)) {
					KASSERT(ti->ti_tlbinvop == TLBINV_ONE);
					/*
					 * We still need to invalidate this one
					 * ASID so there's nothing to change.
					 */
				} else {
					ti->ti_tlbinvop =
					    TLBINV_USER_MAP(ti->ti_tlbinvop);
					if (ti->ti_tlbinvop == TLBINV_ONE)
						ti->ti_victim = pm;
					else
						ti->ti_victim = NULL;
				}
			}
			TLBINFO_UNLOCK(ti);
			/*
			 * Now we can send out the shootdown IPIs to a CPU
			 * that shares this TLB and is currently using this
			 * pmap.  That CPU will process the IPI and do all
			 * the work.  Any other CPUs sharing that TLB will
			 * take advantage of that work.  pm_onproc might
			 * change now that we have released the lock but we
			 * can tolerate spurious shootdowns.
			 */
			cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN);
			ipi_sent = true;
			continue;
		}
		if (pmap_tlb_intersecting_active_p(pm, ti)) {
			/*
			 * If this pmap has an ASID assigned but it's not
			 * currently running, nuke its ASID.  Next time the
			 * pmap is activated, it will allocate a new ASID.
			 * And best of all, we avoid an IPI.
			 */
			KASSERT(!kernel_p);
			pmap_tlb_pai_reset(ti, pai, pm);
			//ti->ti_evcnt_lazy_shots.ev_count++;
		}
		TLBINFO_UNLOCK(ti);
	}

	kcpuset_destroy(pm_active);

	UVMHIST_LOG(maphist, " <-- done (ipi_sent=%d)", ipi_sent, 0, 0, 0);

	return ipi_sent;
}
#endif /* MULTIPROCESSOR && PMAP_TLB_NEED_SHOOTDOWN */

#ifndef PMAP_TLB_HWPAGEWALKER
int
pmap_tlb_update_addr(pmap_t pm, vaddr_t va, pt_entry_t pte, u_int flags)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	int rv = -1;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,
	    " (pm=%p va=%#"PRIxVADDR", pte=%#"PRIxPTE" flags=%#x)",
	    pm, va, pte_value(pte), flags);

	KASSERT(kpreempt_disabled());

	KASSERTMSG(pte_valid_p(pte), "va %#"PRIxVADDR" %#"PRIxPTE,
	    va, pte_value(pte));

	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
		pmap_tlb_asid_check();
		rv = tlb_update_addr(va, pai->pai_asid, pte,
		    (flags & PMAP_TLB_INSERT) != 0);
		pmap_tlb_asid_check();
		UVMHIST_LOG(maphist,
		    " %d <-- tlb_update_addr(%#"PRIxVADDR", %#x, %#"PRIxPTE", ...)",
		    rv, va, pai->pai_asid, pte_value(pte));
		KASSERTMSG((flags & PMAP_TLB_INSERT) == 0 || rv == 1,
		    "pmap %p (asid %u) va %#"PRIxVADDR" pte %#"PRIxPTE" rv %d",
		    pm, pai->pai_asid, va, pte_value(pte), rv);
	}
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	if (flags & PMAP_TLB_NEED_IPI)
		pm->pm_shootdown_pending = 1;
#endif
	TLBINFO_UNLOCK(ti);

	UVMHIST_LOG(maphist, " <-- done (rv=%d)", rv, 0, 0, 0);

	return rv;
}
#endif /* !PMAP_TLB_HWPAGEWALKER */

void
pmap_tlb_invalidate_addr(pmap_t pm, vaddr_t va)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, " (pm=%p va=%#"PRIxVADDR") ti=%p asid=%#x",
	    pm, va, ti, pai->pai_asid);

	KASSERT(kpreempt_disabled());

	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
		pmap_tlb_asid_check();
		UVMHIST_LOG(maphist, " invalidating %#"PRIxVADDR" asid %#x",
		    va, pai->pai_asid, 0, 0);
		tlb_invalidate_addr(va, pai->pai_asid);
		pmap_tlb_asid_check();
	}
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	pm->pm_shootdown_pending = 1;
#endif
	TLBINFO_UNLOCK(ti);
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

static inline void
pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm,
	struct pmap_asid_info *pai)
{
	/*
	 * We shouldn't have an ASID assigned, and thus must be neither
	 * onproc nor active.
	 */
	KASSERT(pm != pmap_kernel());
	KASSERT(pai->pai_asid == 0);
	KASSERT(pai->pai_link.le_prev == NULL);
#if defined(MULTIPROCESSOR)
	KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
	KASSERT(!pmap_tlb_intersecting_active_p(pm, ti));
#endif
	KASSERT(ti->ti_asids_free > 0);
	KASSERT(ti->ti_asid_hint > KERNEL_PID);

	/*
	 * If the last ASID allocated was the maximum ASID, then the
	 * hint will be out of range.  Reset the hint to the first
	 * available ASID.
	 */
	if (PMAP_TLB_FLUSH_ASID_ON_RESET
	    && ti->ti_asid_hint > ti->ti_asid_max) {
		ti->ti_asid_hint = KERNEL_PID + 1;
	}
	KASSERTMSG(ti->ti_asid_hint <= ti->ti_asid_max, "hint %u",
	    ti->ti_asid_hint);

	/*
	 * Let's see if the hinted ASID is free.  If not, search for
	 * a new one.
	 */
	if (__predict_true(TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint))) {
		const size_t nbpw __diagused = 8*sizeof(ti->ti_asid_bitmap[0]);
		size_t i;
		u_long bits;
		for (i = 0; (bits = ~ti->ti_asid_bitmap[i]) == 0; i++) {
			KASSERT(i < __arraycount(ti->ti_asid_bitmap) - 1);
		}
		/*
		 * ffs wants to find the first bit set while we want
		 * to find the first bit cleared.
		 */
		const u_int n = __builtin_ffsl(bits) - 1;
		KASSERTMSG((bits << (nbpw - (n+1))) == (1ul << (nbpw-1)),
		    "n %u bits %#lx", n, bits);
		KASSERT(n < nbpw);
		ti->ti_asid_hint = n + i * nbpw;
	}
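
	/*
	 * Worked example of the search above (illustrative numbers only):
	 * with 64-bit bitmap words and ti_asid_bitmap[0] == 0x3f (ASIDs 0-5
	 * in use), the loop stops at i == 0 with bits == ~0x3f,
	 * __builtin_ffsl(bits) returns 7, so n == 6 and the new hint is
	 * 6 + 0 * 64 == 6, the lowest free ASID.
	 */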

	KASSERT(ti->ti_asid_hint > KERNEL_PID);
	KASSERT(ti->ti_asid_hint <= ti->ti_asid_max);
	KASSERTMSG(PMAP_TLB_FLUSH_ASID_ON_RESET
	    || TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint - 1),
	    "hint %u bitmap %p", ti->ti_asid_hint, ti->ti_asid_bitmap);
	KASSERTMSG(!TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint),
	    "hint %u bitmap %p", ti->ti_asid_hint, ti->ti_asid_bitmap);

	/*
	 * The hint contains our next ASID so take it and advance the hint.
	 * Mark it as used and insert the pai into the list of active asids.
	 * There is also one less asid free in this TLB.
	 */
	KASSERT(ti->ti_asid_hint > KERNEL_PID);
	pai->pai_asid = ti->ti_asid_hint++;
#ifdef MULTIPROCESSOR
	if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
		/*
		 * Clean the new ASID from the TLB.
		 */
		tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
	}
#endif
	TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
	LIST_INSERT_HEAD(&ti->ti_pais, pai, pai_link);
	ti->ti_asids_free--;

#if defined(MULTIPROCESSOR)
	/*
	 * Mark that we now have an active ASID for all CPUs sharing this TLB.
	 * The bits in pm_active belonging to this TLB can only be changed
	 * while this TLB's lock is held.
	 */
#if PMAP_TLB_MAX == 1
	kcpuset_copy(pm->pm_active, kcpuset_running);
#else
	kcpuset_merge(pm->pm_active, ti->ti_kcpuset);
#endif
#endif
}

/*
 * Acquire a TLB address space tag (called ASID or TLBPID) and return it.
 * The ASID might already have been acquired.
 */
void
pmap_tlb_asid_acquire(pmap_t pm, struct lwp *l)
{
	struct cpu_info * const ci = l->l_cpu;
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "(pm=%p, l=%p, ti=%p)", pm, l, ti, 0);

	KASSERT(kpreempt_disabled());

	/*
	 * The kernel uses a fixed ASID and thus doesn't need to acquire one.
	 */
	if (pm == pmap_kernel()) {
		UVMHIST_LOG(maphist, " <-- done (kernel)", 0, 0, 0, 0);
		return;
	}

	TLBINFO_LOCK(ti);
	KASSERT(pai->pai_asid <= KERNEL_PID || pai->pai_link.le_prev != NULL);
	KASSERT(pai->pai_asid > KERNEL_PID || pai->pai_link.le_prev == NULL);
	pmap_tlb_pai_check(ti, true);
	if (__predict_false(!PMAP_PAI_ASIDVALID_P(pai, ti))) {
		/*
		 * If we've run out of ASIDs, reinitialize the ASID space.
		 */
		if (__predict_false(tlbinfo_noasids_p(ti))) {
			KASSERT(l == curlwp);
			UVMHIST_LOG(maphist, " asid reinit", 0, 0, 0, 0);
			pmap_tlb_asid_reinitialize(ti, TLBINV_NOBODY);
			KASSERT(!tlbinfo_noasids_p(ti));
		}

		/*
		 * Get an ASID.
		 */
		pmap_tlb_asid_alloc(ti, pm, pai);
		UVMHIST_LOG(maphist, "allocated asid %#x", pai->pai_asid, 0, 0, 0);
	}
	pmap_tlb_pai_check(ti, true);
#if defined(MULTIPROCESSOR)
	KASSERT(kcpuset_isset(pm->pm_active, cpu_index(ci)));
#endif

	if (l == curlwp) {
#if defined(MULTIPROCESSOR)
		/*
		 * The bits in pm_onproc belonging to this TLB can only
		 * be changed while this TLB's lock is held unless atomic
		 * operations are used.
		 */
		KASSERT(pm != pmap_kernel());
		kcpuset_atomic_set(pm->pm_onproc, cpu_index(ci));
#endif
		ci->ci_pmap_asid_cur = pai->pai_asid;
		UVMHIST_LOG(maphist, "setting asid to %#x", pai->pai_asid, 0, 0, 0);
		tlb_set_asid(pai->pai_asid);
		pmap_tlb_asid_check();
	} else {
		printf("%s: l (%p) != curlwp %p\n", __func__, l, curlwp);
	}
	TLBINFO_UNLOCK(ti);
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_tlb_asid_deactivate(pmap_t pm)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(kpreempt_disabled());
#if defined(MULTIPROCESSOR)
	/*
	 * The kernel pmap is always onproc and active and must never have
	 * those bits cleared.  If pmap_remove_all was called, it has already
	 * deactivated the pmap and thus onproc will be 0 so there's nothing
	 * to do.
	 */
	if (pm != pmap_kernel() && !kcpuset_iszero(pm->pm_onproc)) {
		struct cpu_info * const ci = curcpu();
		KASSERT(!cpu_intr_p());
		KASSERTMSG(kcpuset_isset(pm->pm_onproc, cpu_index(ci)),
		    "%s: pmap %p onproc %p doesn't include cpu %d (%p)",
		    __func__, pm, pm->pm_onproc, cpu_index(ci), ci);
		/*
		 * The bits in pm_onproc that belong to this TLB can
		 * be changed while this TLB's lock is not held as long
		 * as we use atomic ops.
		 */
		kcpuset_atomic_clear(pm->pm_onproc, cpu_index(ci));
	}
#endif
	curcpu()->ci_pmap_asid_cur = KERNEL_PID;
	UVMHIST_LOG(maphist, " <-- done (pm=%p)", pm, 0, 0, 0);
	tlb_set_asid(KERNEL_PID);
	pmap_tlb_pai_check(cpu_tlb_info(curcpu()), false);
#if defined(DEBUG)
	pmap_tlb_asid_check();
#endif
}

void
pmap_tlb_asid_release_all(struct pmap *pm)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "(pm=%p)", pm, 0, 0, 0);

	KASSERT(pm != pmap_kernel());
#if defined(MULTIPROCESSOR)
	//KASSERT(!kcpuset_iszero(pm->pm_onproc)); // XXX
	struct cpu_info * const ci __diagused = curcpu();
	KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(ci)));
#if PMAP_TLB_MAX > 1
	for (u_int i = 0; !kcpuset_iszero(pm->pm_active); i++) {
		KASSERT(i < pmap_ntlbs);
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
#else
		struct pmap_tlb_info * const ti = &pmap_tlb0_info;
#endif
		struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
		TLBINFO_LOCK(ti);
		if (PMAP_PAI_ASIDVALID_P(pai, ti)) {
			/*
			 * This pmap should not be in use by any other cpu so
			 * we can just reset and be happy.
			 */
			if (ti->ti_victim == pm)
				ti->ti_victim = NULL;
			pmap_tlb_pai_reset(ti, pai, pm);
		}
		KASSERT(pai->pai_link.le_prev == NULL);
		TLBINFO_UNLOCK(ti);
#if PMAP_TLB_MAX > 1
	}
#endif
#ifdef DIAGNOSTIC
	for (size_t i = 0; i < (PMAP_TLB_MAX > 1 ? pmap_ntlbs : 1); i++) {
		KASSERTMSG(pm->pm_pai[i].pai_asid == 0,
		    "pm %p i %zu asid %u",
		    pm, i, pm->pm_pai[i].pai_asid);
	}
#endif
#else
	/*
	 * Handle the case of a UP kernel which only has, at most, one TLB.
	 * If the pmap has an ASID allocated, free it.
	 */
	struct pmap_tlb_info * const ti = &pmap_tlb0_info;
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	TLBINFO_LOCK(ti);
	if (pai->pai_asid > KERNEL_PID) {
		if (curcpu()->ci_pmap_asid_cur == pai->pai_asid) {
			tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
		} else {
			pmap_tlb_pai_reset(ti, pai, pm);
		}
	}
	TLBINFO_UNLOCK(ti);
#endif /* MULTIPROCESSOR */
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_tlb_asid_check(void)
{
#ifdef DEBUG
	kpreempt_disable();
	const tlb_asid_t asid __debugused = tlb_get_asid();
	KDASSERTMSG(asid == curcpu()->ci_pmap_asid_cur,
	    "%s: asid (%#x) != current asid (%#x)",
	    __func__, asid, curcpu()->ci_pmap_asid_cur);
	kpreempt_enable();
#endif
}

#ifdef DEBUG
void
pmap_tlb_check(pmap_t pm, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || pai->pai_asid > KERNEL_PID)
		tlb_walk(pm, func);
	TLBINFO_UNLOCK(ti);
}
#endif /* DEBUG */