xref: /netbsd/sys/arch/hpc/stand/hpcboot/sh3/sh_mmu.cpp (revision bf9ec67e)
/*	$NetBSD: sh_mmu.cpp,v 1.3 2002/02/11 17:08:57 uch Exp $	*/

/*-
 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sh3/sh_arch.h>
#include <sh3/sh_mmu.h>

#include <sh3/cpu/sh3.h>
#include <sh3/cpu/sh4.h>

//
// Get the physical address from the memory-mapped TLB.
// SH3 version. SH4 can't use this method because its address/data arrays
// must be accessed from P2.
//
paddr_t
MemoryManager_SHMMU::searchPage(vaddr_t vaddr)
{
	u_int32_t vpn, idx, s, dum, aae, dae, entry_idx, asid;
	paddr_t paddr = ~0;
	int way, kmode;

	vpn = vaddr & SH3_PAGE_MASK;
	// Windows CE uses VPN-only index-mode.
	idx = vaddr & SH3_MMU_VPN_MASK;

	kmode = SetKMode(1);
	// Get current ASID
	asid = _reg_read_4(SH3_PTEH) & SH3_PTEH_ASID_MASK;

	// To avoid another TLB access, disable external interrupts.
	s = suspendIntr();

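	// Repeat until the translation for vaddr shows up in the TLB.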
	do {
		// load target address page to TLB
		dum = _reg_read_4(vaddr);
		_reg_write_4(vaddr, dum);

		for (way = 0; way < SH3_MMU_WAY; way++) {
			entry_idx = idx | (way << SH3_MMU_WAY_SHIFT);
			// inquire MMU address array.
			aae = _reg_read_4(SH3_MMUAA | entry_idx);

			if (!(aae & SH3_MMU_D_VALID) ||
			    ((aae & SH3_MMUAA_D_ASID_MASK) != asid) ||
			    (((aae | idx) & SH3_PAGE_MASK) != vpn))
				continue;

			// entry found.
			// inquire MMU data array to get its physical address.
			dae = _reg_read_4(SH3_MMUDA | entry_idx);
			paddr = (dae & SH3_PAGE_MASK) | (vaddr & ~SH3_PAGE_MASK);
			break;
		}
	} while (paddr == ~0);

	resumeIntr(s);
	SetKMode(kmode);

	return paddr;
}

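//
// Dump the cache configuration (CCR): enable state, index/RAM mode, and
// write-through vs. write-back policy, for SH3 and SH4.
//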
void
MemoryManager_SHMMU::CacheDump()
{
	static const char *able[] = {"dis", "en" };
	int write_through_p0_u0_p3;
	int write_through_p1;
	u_int32_t r;
	int kmode;

	DPRINTF_SETUP();

	kmode = SetKMode(1);
	switch (SHArchitecture::cpu_type()) {
	default:
		DPRINTF((TEXT("unknown architecture.\n")));
		SetKMode(kmode);
		return;
	case 3:
		r = _reg_read_4(SH3_CCR);
		DPRINTF((TEXT("cache %Sabled"),
		    able[(r & SH3_CCR_CE ? 1 : 0)]));
		if (r & SH3_CCR_RA)
			DPRINTF((TEXT(" ram-mode")));

		write_through_p0_u0_p3 = r & SH3_CCR_WT;
		write_through_p1 = !(r & SH3_CCR_CB);
		break;
	case 4:
		r = _reg_read_4(SH4_CCR);
		DPRINTF((TEXT("I-cache %Sabled"),
		    able[(r & SH4_CCR_ICE) ? 1 : 0]));
		if (r & SH4_CCR_IIX)
			DPRINTF((TEXT(" index-mode ")));
		DPRINTF((TEXT(" D-cache %Sabled"),
		    able[(r & SH4_CCR_OCE) ? 1 : 0]));
		if (r & SH4_CCR_OIX)
			DPRINTF((TEXT(" index-mode")));
		if (r & SH4_CCR_ORA)
			DPRINTF((TEXT(" ram-mode")));

		write_through_p0_u0_p3 = r & SH4_CCR_WT;
		write_through_p1 = !(r & SH4_CCR_CB);
		break;
	}
	DPRINTF((TEXT(".")));

	// Write-through/back
	DPRINTF((TEXT(" P0, U0, P3 write-%S P1 write-%S\n"),
	    write_through_p0_u0_p3 ? "through" : "back",
	    write_through_p1 ? "through" : "back"));

	SetKMode(kmode);
}
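//
// Dump MMU configuration (MMUCR) and, on SH3, the contents of the TLB.
// The SH4 ITLB/UTLB dump below is provided as sample code only.
//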
void
MemoryManager_SHMMU::MMUDump()
{
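// Print '|' if bit(s) 'c' are set in 'x', '.' otherwise.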
#define ON(x, c)	((x) & (c) ? '|' : '.')
	u_int32_t r, e, a;
	int i, kmode;

	DPRINTF_SETUP();

	kmode = SetKMode(1);
	DPRINTF((TEXT("MMU:\n")));
	switch (SHArchitecture::cpu_type()) {
	default:
		DPRINTF((TEXT("unknown architecture.\n")));
		SetKMode(kmode);
		return;
	case 3:
		r = _reg_read_4(SH3_MMUCR);
		if (!(r & SH3_MMUCR_AT))
			goto disabled;

		// MMU configuration.
		DPRINTF((TEXT("%s index-mode, %s virtual storage mode\n"),
		    r & SH3_MMUCR_IX
		    ? TEXT("ASID + VPN") : TEXT("VPN only"),
		    r & SH3_MMUCR_SV ? TEXT("single") : TEXT("multiple")));

		// Dump TLB.
		DPRINTF((TEXT("---TLB---\n")));
		DPRINTF((TEXT("   VPN    ASID    PFN     VDCG PR SZ\n")));
		for (i = 0; i < SH3_MMU_WAY; i++) {
			DPRINTF((TEXT(" [way %d]\n"), i));
			for (e = 0; e < SH3_MMU_ENTRY; e++) {
				// address/data array common offset.
				a = (e << SH3_MMU_VPN_SHIFT) |
				    (i << SH3_MMU_WAY_SHIFT);

				r = _reg_read_4(SH3_MMUAA | a);
				DPRINTF((TEXT("0x%08x %3d"),
				    r & SH3_MMUAA_D_VPN_MASK,
				    r & SH3_MMUAA_D_ASID_MASK));
				r = _reg_read_4(SH3_MMUDA | a);
				DPRINTF((TEXT(" 0x%08x %c%c%c%c  %d %dK\n"),
				    r & SH3_MMUDA_D_PPN_MASK,
				    ON(r, SH3_MMUDA_D_V),
				    ON(r, SH3_MMUDA_D_D),
				    ON(r, SH3_MMUDA_D_C),
				    ON(r, SH3_MMUDA_D_SH),
				    (r & SH3_MMUDA_D_PR_MASK) >>
				    SH3_MMUDA_D_PR_SHIFT,
				    r & SH3_MMUDA_D_SZ ? 4 : 1));
			}
		}

		break;
	case 4:
		r = _reg_read_4(SH4_MMUCR);
		if (!(r & SH4_MMUCR_AT))
			goto disabled;
		DPRINTF((TEXT("%s virtual storage mode,"),
		    r & SH3_MMUCR_SV ? TEXT("single") : TEXT("multiple")));
		DPRINTF((TEXT(" SQ access: (privilege%S)"),
		    r & SH4_MMUCR_SQMD ? "" : "/user"));
		DPRINTF((TEXT("\n")));
#if sample_code
		//
		// Code that accesses the memory-mapped TLB must run from P2,
		// so this is sample code only.
		//
		// Dump ITLB
		DPRINTF((TEXT("---ITLB---\n")));
		for (i = 0; i < 4; i++) {
			e = i << SH4_ITLB_E_SHIFT;
			r = _reg_read_4(SH4_ITLB_AA | e);
			DPRINTF((TEXT("%08x %3d _%c"),
			    r & SH4_ITLB_AA_VPN_MASK,
			    r & SH4_ITLB_AA_ASID_MASK,
			    ON(r, SH4_ITLB_AA_V)));
			r = _reg_read_4(SH4_ITLB_DA1 | e);
			DPRINTF((TEXT(" %08x %c%c_%c_ %1d"),
			    r & SH4_ITLB_DA1_PPN_MASK,
			    ON(r, SH4_ITLB_DA1_V),
			    ON(r, SH4_ITLB_DA1_C),
			    ON(r, SH4_ITLB_DA1_SH),
			    (r & SH4_ITLB_DA1_PR) >> SH4_UTLB_DA1_PR_SHIFT
			    ));
			r = _reg_read_4(SH4_ITLB_DA2 | e);
			DPRINTF((TEXT(" %c%d\n"),
			    ON(r, SH4_ITLB_DA2_TC),
			    r & SH4_ITLB_DA2_SA_MASK));
		}
		// Dump UTLB
		DPRINTF((TEXT("---UTLB---\n")));
		for (i = 0; i < 64; i++) {
			e = i << SH4_UTLB_E_SHIFT;
			r = _reg_read_4(SH4_UTLB_AA | e);
			DPRINTF((TEXT("%08x %3d %c%c"),
			    r & SH4_UTLB_AA_VPN_MASK,
			    r & SH4_UTLB_AA_ASID_MASK,
			    ON(r, SH4_UTLB_AA_D),
			    ON(r, SH4_UTLB_AA_V)));
			r = _reg_read_4(SH4_UTLB_DA1 | e);
			DPRINTF((TEXT(" %08x %c%c%c%c%c %1d"),
			    r & SH4_UTLB_DA1_PPN_MASK,
			    ON(r, SH4_UTLB_DA1_V),
			    ON(r, SH4_UTLB_DA1_C),
			    ON(r, SH4_UTLB_DA1_D),
			    ON(r, SH4_UTLB_DA1_SH),
			    ON(r, SH4_UTLB_DA1_WT),
			    (r & SH4_UTLB_DA1_PR_MASK) >> SH4_UTLB_DA1_PR_SHIFT
			    ));
			r = _reg_read_4(SH4_UTLB_DA2 | e);
			DPRINTF((TEXT(" %c%d\n"),
			    ON(r, SH4_UTLB_DA2_TC),
			    r & SH4_UTLB_DA2_SA_MASK));
		}
#endif //sample_code
		break;
	}

	SetKMode(kmode);
	return;

 disabled:
	DPRINTF((TEXT("disabled.\n")));
	SetKMode(kmode);
#undef ON
}