/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.2 2011/01/18 01:02:52 matt Exp $");

#include <sys/param.h>
#include <sys/kcore.h>
#include <sys/buf.h>

#include <uvm/uvm_extern.h>

#include <machine/pmap.h>

/*
 * Allocation size of a pmap: struct pmap ends with a variable-length
 * array of per-CPU ASID state, so on MULTIPROCESSOR kernels allocate
 * room for MAXCPUS pm_pai entries.
 */
#ifdef MULTIPROCESSOR
#define	PMAP_SIZE	offsetof(struct pmap, pm_pai[MAXCPUS])
#else
#define	PMAP_SIZE	sizeof(struct pmap)
#endif

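/*
 * Segment tables are handed out and linked a page at a time (see
 * pmap_bootstrap below), so a segment table must be exactly one page.
 */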
CTASSERT(sizeof(struct pmap_segtab) == NBPG);

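/*
 * Synchronize the instruction cache with the data cache after a process
 * writes into its own text (e.g. breakpoints or generated code): write
 * the affected data cache lines back to memory, then invalidate the
 * corresponding, now stale, instruction cache lines.
 */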
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap * const pmap = p->p_vmspace->vm_map.pmap;
	vsize_t off = va & PAGE_MASK;	/* sub-page offset of the start */

	kpreempt_disable();
	for (const vaddr_t eva = va + len; va < eva; off = 0) {
		/* Stop at the end of the current page or of the range. */
		const vaddr_t segeva = min(eva, va - off + PAGE_SIZE);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL) {
			va = segeva;
			continue;
		}
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry) || !pte_exec_p(pt_entry)) {
			va = segeva;
			continue;
		}
		kpreempt_enable();
		dcache_wb(pte_to_paddr(pt_entry) + off, segeva - va);
		icache_inv(pte_to_paddr(pt_entry) + off, segeva - va);
		kpreempt_disable();
		va = segeva;
	}
	kpreempt_enable();
}

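/*
 * Write the page's data cache lines back to memory and invalidate its
 * instruction cache lines, making newly written text visible to
 * instruction fetch.
 */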
void
pmap_md_page_syncicache(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	dcache_wb_page(pa);
	icache_inv_page(pa);
}

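/*
 * On booke, physical memory is mapped virtual == physical outside the
 * kernel virtual-address window, so the direct-map translations below
 * are the identity.
 */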
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
	return (vaddr_t) pa;
}

bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
	return va < VM_MIN_KERNEL_ADDRESS || VM_MAX_KERNEL_ADDRESS <= va;
}

paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
	return (paddr_t) va;
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	startkernel/endkernel bound the kernel image; avail describes
 *	the usable physical memory segments.
 */
void
pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
	const phys_ram_seg_t *avail, size_t cnt)
{
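	/*
	 * Hand each physical memory segment to UVM, expressed as page
	 * frame numbers; the printf echoes the arguments for boot-time
	 * debugging.
	 */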
	for (size_t i = 0; i < cnt; i++) {
		printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
		uvm_page_physload(
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
	}

	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */

	/*
	 * Compute the number of pages kmem_map will have.
	 */
	kmeminit_nkmempages();

	/*
	 * Figure out how many PTEs are necessary to map the kernel.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

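	/*
	 * Each PTE page maps one NBSEG-sized segment of KVA, so sum the
	 * major consumers of kernel virtual space and round up to whole
	 * segments to get the number of PTE pages needed.
	 */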
	vsize_t nsegtabs = pmap_round_seg(VM_PHYS_SIZE
	    + (ubc_nwins << ubc_winshift)
	    + bufsz
	    + 16 * NCARGS
	    + pager_map_size
	    + maxproc * USPACE
#ifdef SYSVSHM
	    + NBPG * shminfo.shmall
#endif
	    + NBPG * nkmempages) / NBSEG;

	/*
	 * Initialize `FYI' variables.  Note we're relying on
	 * the fact that BSEARCH sorts the vm_physmem[] array
	 * for us.  Must do this before uvm_pageboot_alloc()
	 * can be called.
	 */
	pmap_limits.avail_start = vm_physmem[0].start << PGSHIFT;
	pmap_limits.avail_end = vm_physmem[vm_nphysseg - 1].end << PGSHIFT;
	const vsize_t max_nsegtabs =
	    (pmap_round_seg(VM_MAX_KERNEL_ADDRESS)
		- pmap_trunc_seg(VM_MIN_KERNEL_ADDRESS)) / NBSEG;
	if (nsegtabs >= max_nsegtabs) {
		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
		nsegtabs = max_nsegtabs;
	} else {
		pmap_limits.virtual_end = VM_MIN_KERNEL_ADDRESS
		    + nsegtabs * NBSEG;
	}

	pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);

	/*
	 * Now actually allocate the kernel PTE array (must be done
	 * after virtual_end is initialized).
	 */
	vaddr_t segtabs =
	    uvm_pageboot_alloc(NBPG * nsegtabs + sizeof(struct pmap_segtab));

	/*
	 * Initialize the kernel's two-level page table.  This wastes only
	 * an extra page for the segment table and allows the user/kernel
	 * access to be common.
	 */
	struct pmap_segtab * const stp = (void *)segtabs;
	segtabs += round_page(sizeof(struct pmap_segtab));
	pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG) {
		*ptp++ = (void *)segtabs;	/* one PTE page per kernel segment */
	}
	pmap_kernel()->pm_segtab = stp;
	curcpu()->ci_pmap_kern_segtab = stp;
	printf(" kern_segtab=%p", stp);

#if 0
	nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
	segtabs = uvm_pageboot_alloc(NBPG * nsegtabs);
	ptp = stp->seg_tab;
	pt_entry_t pt_entry = PTE_M|PTE_xX|PTE_xR;
	pt_entry_t *ptep = (void *)segtabs;
	printf("%s: allocated %lu page table pages for mapping %u pages\n",
	    __func__, nsegtabs, physmem);
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG, ptp++) {
		*ptp = ptep;
		for (u_int j = 0; j < NPTEPG; j++, ptep++) {
			*ptep = pt_entry;
			pt_entry += NBPG;
		}
		printf(" [%u]=%p (%#x)", i, *ptp, **ptp);
		pt_entry |= PTE_xW;
		pt_entry &= ~PTE_xX;
	}

	/*
	 * Now make everything before the kernel inaccessible.
	 */
	for (u_int i = 0; i < startkernel; i += NBPG) {
		stp->seg_tab[i >> SEGSHIFT][(i & SEGOFSET) >> PAGE_SHIFT] = 0;
	}
#endif

	/*
	 * Initialize the pools.
	 */
	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator, IPL_NONE);

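	/* Run with the kernel's address-space ID (0). */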
	tlb_set_asid(0);
}

struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
	/*
	 * Any managed page works for us.
	 */
	return uvm_pagealloc(NULL, 0, NULL, flags);
}

void
pmap_zero_page(paddr_t pa)
{
//	printf("%s(%#lx): calling dcache_zero_page(%#lx)\n", __func__, pa, pa);
	dcache_zero_page(pa);
}

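/*
 * Copy a page one cache line at a time: prefetch the next source line,
 * establish the destination line with dcba so it is not fetched from
 * memory, then move 32 bytes (eight GPRs) per lmw/stmw pair.  This
 * assumes the D-cache line size is a multiple of 32 bytes.
 */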
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const paddr_t end = src + PAGE_SIZE;

	while (src < end) {
		__asm(
			"dcbt	%2,%0"	"\n\t"	/* touch next src cacheline */
			"dcba	0,%1"	"\n\t"	/* allocate dst cacheline without fetching it */
		    :: "b"(src), "b"(dst), "b"(line_size));
		for (u_int i = 0;
		     i < line_size;
		     src += 32, dst += 32, i += 32) {
			__asm(
				"lmw	24,0(%0)" "\n\t"	/* load r24..r31 from src */
				"stmw	24,0(%1)"		/* store r24..r31 to dst */
			    :: "b"(src), "b"(dst)
			    : "r24", "r25", "r26", "r27",
			      "r28", "r29", "r30", "r31");
		}
	}
}

void
pmap_md_init(void)
{

	/* nothing for now */
}

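/*
 * A virtual address is an I/O address if it lies beyond the end of
 * physical memory yet outside the kernel virtual-address window.
 */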
bool
pmap_md_io_vaddr_p(vaddr_t va)
{
	return va >= pmap_limits.avail_end
	    && !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
}