/*	$NetBSD: pmap_subr.c,v 1.7 2002/11/13 21:08:50 matt Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"
#include "opt_altivec.h"
#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/device.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
#ifdef PPC_MPC6XX
#include <powerpc/mpc6xx/vmparam.h>
#ifdef ALTIVEC
#include <powerpc/altivec.h>
#endif
#endif
#include <powerpc/psl.h>

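/*
 * MFMSR/MTMSR wrap the mfmsr and mtmsr instructions.  MTMSR brackets the
 * MSR write with sync/isync so the MMU state change is context-synchronized
 * before execution continues.
 */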
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	__asm __volatile("sync; mtmsr %0; isync" :: "r"(psl))

#ifdef PMAPCOUNTERS
struct evcnt pmap_evcnt_zeroed_pages =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
	"pages zeroed");
struct evcnt pmap_evcnt_copied_pages =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
	"pages copied");
struct evcnt pmap_evcnt_idlezeroed_pages =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
	"pages idle zeroed");
#ifdef PPC_MPC6XX
extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
#endif
#endif /* PMAPCOUNTERS */

/*
 * This file uses a sick & twisted method to deal with the common pmap
 * operations of zeroing a page, copying a page, and syncing a page
 * with the instruction cache.
 *
 * When a PowerPC CPU takes an exception (interrupt or trap), that
 * exception is handled with the MMU off.  The handler has to explicitly
 * re-enable the MMU before continuing, and the previous MMU state is
 * restored when the exception handler returns.
 *
 * Therefore, disabling the MMU here cannot interfere with exception
 * handling, so it's safe for us to turn the MMU off and operate on
 * physical addresses directly, without having to map any pages via a
 * BAT or into a page table.
 *
 * It's also safe to do this regardless of IPL.
 *
 * However, while relocation is off we MUST NOT access the kernel stack
 * in any manner, since it will probably no longer be mapped.  That means
 * no function calls while relocation is off.  The AltiVec routines have
 * to handle the MSR fiddling themselves so they can save things on the
 * stack.
 */
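
/*
 * For illustration only, the pattern the routines below follow is
 * roughly:
 *
 *	msr = MFMSR();			save the current MSR
 *	MTMSR(msr & ~PSL_DR);		turn data relocation off
 *	... operate on physical addresses, no function calls ...
 *	MTMSR(msr);			restore the saved MSR
 *
 * (pmap_syncicache() and pmap_pageidlezero() clear PSL_IR as well.)
 */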

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(paddr_t pa)
{
	size_t linewidth;
	register_t msr;

#if defined(PPC_MPC6XX)
	{
		/*
		 * If we are zeroing this page, we must clear the EXEC-ness
		 * of this page since the page contents will have changed.
		 */
		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
		KDASSERT(pg != NULL);
		KDASSERT(LIST_EMPTY(&pg->mdpage.mdpg_pvoh));
#ifdef PMAPCOUNTERS
		if (pg->mdpage.mdpg_attrs & PTE_EXEC) {
			pmap_evcnt_exec_uncached_zero_page.ev_count++;
		}
#endif
		pg->mdpage.mdpg_attrs &= ~PTE_EXEC;
	}
#endif
#ifdef PMAPCOUNTERS
	pmap_evcnt_zeroed_pages.ev_count++;
#endif
#ifdef ALTIVEC
	if (pmap_use_altivec) {
		vzeropage(pa);
		return;
	}
#endif

	/*
	 * Turn off data relocation (DMMU off).
	 */
#ifdef PPC_MPC6XX
	if (pa >= SEGMENT_LENGTH) {
#endif
		msr = MFMSR();
		MTMSR(msr & ~PSL_DR);
#ifdef PPC_MPC6XX
	}
#endif

	/*
	 * Zero the page.  Since DR is off, the address is assumed to be
	 * valid, but we know that UVM will never pass us an uncacheable
	 * page.  Don't use dcbz if we don't know the cache line width.
	 */
	if ((linewidth = curcpu()->ci_ci.dcache_line_size) == 0) {
		long *dp = (long *)pa;
		long * const ep = dp + NBPG/sizeof(dp[0]);
		do {
			dp[0] = 0; dp[1] = 0; dp[2] = 0; dp[3] = 0;
			dp[4] = 0; dp[5] = 0; dp[6] = 0; dp[7] = 0;
		} while ((dp += 8) < ep);
	} else {
		size_t i = 0;
		do {
			__asm ("dcbz %0,%1" :: "b"(pa), "r"(i)); i += linewidth;
			__asm ("dcbz %0,%1" :: "b"(pa), "r"(i)); i += linewidth;
		} while (i < NBPG);
	}

	/*
	 * Restore data relocation (DMMU on).
	 */
#ifdef PPC_MPC6XX
	if (pa >= SEGMENT_LENGTH)
#endif
		MTMSR(msr);
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const register_t *sp;
	register_t *dp;
	register_t msr;
	size_t i;

#if defined(PPC_MPC6XX)
	{
		/*
		 * If we are copying to the destination page, we must clear
		 * the EXEC-ness of this page since the page contents have
		 * changed.
		 */
		struct vm_page *pg = PHYS_TO_VM_PAGE(dst);
		KDASSERT(pg != NULL);
		KDASSERT(LIST_EMPTY(&pg->mdpage.mdpg_pvoh));
#ifdef PMAPCOUNTERS
		if (pg->mdpage.mdpg_attrs & PTE_EXEC) {
			pmap_evcnt_exec_uncached_copy_page.ev_count++;
		}
#endif
		pg->mdpage.mdpg_attrs &= ~PTE_EXEC;
	}
#endif
#ifdef PMAPCOUNTERS
	pmap_evcnt_copied_pages.ev_count++;
#endif
#ifdef ALTIVEC
	if (pmap_use_altivec) {
		vcopypage(dst, src);
		return;
	}
#endif

#ifdef PPC_MPC6XX
	if (src < SEGMENT_LENGTH && dst < SEGMENT_LENGTH) {
		/*
		 * Copy the page (memcpy is optimized, right? :)
		 */
		memcpy((void *) dst, (void *) src, NBPG);
		return;
	}
#endif

	/*
	 * Turn off data relocation (DMMU off).
	 */
	msr = MFMSR();
	MTMSR(msr & ~PSL_DR);

	/*
	 * Copy the page.  Don't use memcpy as we can't refer to the
	 * kernel stack at this point.
	 */
	sp = (const register_t *) src;
	dp = (register_t *) dst;
	for (i = 0; i < NBPG/sizeof(dp[0]); i += 8, dp += 8, sp += 8) {
		dp[0] = sp[0]; dp[1] = sp[1]; dp[2] = sp[2]; dp[3] = sp[3];
		dp[4] = sp[4]; dp[5] = sp[5]; dp[6] = sp[6]; dp[7] = sp[7];
	}

	/*
	 * Restore data relocation (DMMU on).
	 */
	MTMSR(msr);
}

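/*
 * Bring the instruction cache into sync with the contents of the given
 * physical range: write any dirty data cache lines back to memory, then
 * invalidate the corresponding instruction cache lines.
 */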
void
pmap_syncicache(paddr_t pa, psize_t len)
{
#ifdef MULTIPROCESSOR
	__syncicache((void *)pa, len);
#else
	const size_t linewidth = curcpu()->ci_ci.icache_line_size;
	register_t msr;
	size_t i;

#ifdef PPC_MPC6XX
	if (pa + len <= SEGMENT_LENGTH) {
		__syncicache((void *)pa, len);
		return;
	}
#endif

	/*
	 * Turn off instruction and data relocation (MMU off).
	 */
	msr = MFMSR();
	MTMSR(msr & ~(PSL_IR|PSL_DR));

	/*
	 * Make sure to start on a cache line boundary: round pa down to
	 * a line boundary and grow len to compensate.
	 */
	len += pa - (pa & ~(linewidth - 1));
	pa &= ~(linewidth - 1);

	/*
	 * Write out the data cache
	 */
	i = 0;
	do {
		__asm ("dcbst %0,%1" :: "b"(pa), "r"(i)); i += linewidth;
	} while (i < len);

	/*
	 * Wait for it to finish
	 */
	__asm __volatile("sync");

	/*
	 * Now invalidate the instruction cache.
	 */
	i = 0;
	do {
		__asm ("icbi %0,%1" :: "b"(pa), "r"(i)); i += linewidth;
	} while (i < len);

	/*
	 * Restore relocation (MMU on).  (this will do the required
	 * sync and isync).
	 */
	MTMSR(msr);
#endif	/* !MULTIPROCESSOR */
}

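/*
 * Zero the given physical page from the idle loop.  Returns FALSE
 * (possibly leaving the page partially zeroed) as soon as a process
 * becomes runnable, TRUE if the whole page was zeroed.
 */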
boolean_t
pmap_pageidlezero(paddr_t pa)
{
	register_t msr;
	register_t *dp = (register_t *) pa;
	boolean_t rv = TRUE;
	int i;

#ifdef PPC_MPC6XX
	if (pa < SEGMENT_LENGTH) {
		for (i = 0; i < NBPG / sizeof(dp[0]); i++) {
			if (sched_whichqs != 0)
				return FALSE;
			*dp++ = 0;
		}
#ifdef PMAPCOUNTERS
		pmap_evcnt_idlezeroed_pages.ev_count++;
#endif
		return TRUE;
	}
#endif

	/*
	 * Turn off instruction and data relocation (MMU off).
	 */
	msr = MFMSR();
	MTMSR(msr & ~(PSL_IR|PSL_DR));

	/*
	 * Zero the page until a process becomes runnable.
	 */
	for (i = 0; i < NBPG / sizeof(dp[0]); i++) {
		if (sched_whichqs != 0) {
			rv = FALSE;
			break;
		}
		*dp++ = 0;
	}

	/*
	 * Restore relocation (MMU on).
	 */
	MTMSR(msr);
#ifdef PMAPCOUNTERS
	if (rv)
		pmap_evcnt_idlezeroed_pages.ev_count++;
#endif
	return rv;
}
354