xref: /freebsd/sys/powerpc/aim/slb.c (revision 2f513db7)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/vmparam.h>
#include <machine/trap.h>

#include "mmu_oea64.h"

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
int n_slbs = 64;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

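/*
 * User SLB entries are tracked in a radix trie of struct slbtnode, indexed
 * by 4-bit slices of the ESID.  ua_level is the node's depth (leaves at
 * level 0), ua_base is the ESID prefix shared by everything under the node,
 * and ua_alloc is a bitmap of which of the 16 slots in the union are in use:
 * child pointers for interior nodes, SLB entries for leaves.
 */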
struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so 8 levels, with the leaf being at level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    |  esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210|  bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
 */
#define UAD_ROOT_LEVEL  8
#define UAD_LEAF_LEVEL  0

static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}

/*
 * The ua_base field should have 0 bits after the first 4*(level+1)
 * bits; i.e. only the high-order bits that select the node's position
 * in the tree may be set.
 */
#define uad_baseok(ua)                          \
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}
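
/*
 * Worked example (illustrative values only): for esid 0x123456789 at
 * level 2, esid2idx() returns (0x123456789 >> 8) & 0xF = 0x7, while
 * esid2base() masks off the low 12 bits, giving 0x123456000.
 */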

/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	powerpc_lwsync();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
static struct slbtnode*
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	powerpc_lwsync();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}

uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/*
	 * Figure out if this is a large-page mapping.
	 */
	if (hw_direct_map && va > DMAP_BASE_ADDRESS && va < DMAP_MAX_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, this assumes that
		 * all physical memory is mapped with large pages.
		 */

		if (mem_valid(DMAP_TO_PHYS(va), 0) == 0)
			slbv |= SLBV_L;
	} else if (moea64_large_page_size != 0 &&
	    va >= (vm_offset_t)vm_page_array &&
	    va <= (uintptr_t)(&vm_page_array[vm_page_array_size]))
		slbv |= SLBV_L;

	return (slbv);
}

struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		/*
		 * The following accesses are implicitly ordered under the POWER
		 * ISA by load dependencies (the store ordering is provided by
		 * the powerpc_lwsync() calls elsewhere) and so are run without
		 * barriers.
		 */
		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	return (NULL);
}

uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
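			/*
			 * Order the slbv store before publishing the valid
			 * slbe, so a lockless reader never sees a valid
			 * entry with a stale slbv.
			 */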
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}

void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
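			/*
			 * The leaf node itself is left in place; tree nodes
			 * are only reclaimed wholesale by slb_free_tree().
			 */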
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}

static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(root != NULL, ("unhandled NULL case"));
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(aim.slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

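	/* The cache is full: evict a pseudo-random entry, but never the user slot */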
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	KASSERT(i != USER_SLB_SLOT,
	    ("Filling user SLB slot with a kernel mapping"));
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}

void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < n_slbs) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
		i = mftb() % n_slbs;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}

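/*
 * Backing allocator for the SLB zones when not all of physical memory is
 * real-mode addressable: take pages from below platform_real_maxaddr() so
 * that the SLB spill paths, which may run with translation disabled, can
 * still reach these structures.
 */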
static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    u_int8_t *flags, int wait)
{
	static vm_offset_t realmax = 0;
	void *va;
	vm_page_t m;

	if (realmax == 0)
		realmax = platform_real_maxaddr();

	*flags = UMA_SLAB_PRIV;
	m = vm_page_alloc_contig_domain(NULL, 0, domain,
	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
	    1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (NULL);

	if (hw_direct_map)
		va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	else {
		va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
	}

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);

	return (va);
}

static void
slb_zone_init(void *dummy)
{
	uint32_t allocf_flags;

	allocf_flags = 0;
	if (platform_real_maxaddr() != VM_MAX_ADDRESS)
		allocf_flags = UMA_ZONE_CONTIG;

	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    allocf_flags | UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache",
	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, allocf_flags | UMA_ZONE_VM);

	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
	}
}

struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}

/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(aim.slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
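	/* Look for a free slot, again using the user slot as a full flag */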
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}

int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	if (pm->pm_slb == NULL)
		return (-1);

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}
637