xref: /netbsd/sys/arch/sh3/sh3/mmu_sh3.c (revision 6550d01e)
1 /*	$NetBSD: mmu_sh3.c,v 1.14 2008/04/28 20:23:35 martin Exp $	*/
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by UCHIYAMA Yasushi.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: mmu_sh3.c,v 1.14 2008/04/28 20:23:35 martin Exp $");
34 
35 #include <sys/param.h>
36 
37 #include <sh3/pte.h>
38 #include <sh3/mmu.h>
39 #include <sh3/mmu_sh3.h>
40 
41 
42 void
43 sh3_mmu_start()
44 {
45 
46 	/* Zero clear all TLB entries */
47 	sh3_tlb_invalidate_all();
48 
49 	/* Set current ASID to 0 - kernel */
50 	sh_tlb_set_asid(0);
51 
52 	/* Flush TLB (TF) and enable address translation (AT) */
53 	_reg_write_4(SH3_MMUCR, SH3_MMUCR_TF | SH3_MMUCR_AT);
54 }
55 
56 
57 void
58 sh3_tlb_invalidate_all()
59 {
60 	uint32_t idx, a;
61 	int i, way;
62 
63 	for (i = 0; i < SH3_MMU_ENTRY; ++i) {
64 		idx = i << SH3_MMU_VPN_SHIFT;
65 
66 		for (way = 0; way < SH3_MMU_WAY; ++way) {
67 			a = idx | (way << SH3_MMU_WAY_SHIFT);
68 
69 			_reg_write_4(SH3_MMUAA | a, 0);
70 			_reg_write_4(SH3_MMUDA | a, 0);
71 		}
72 	}
73 }
74 
75 
76 void
77 sh3_tlb_invalidate_asid(int asid)
78 {
79 	uint32_t idx, aa;
80 	int i, way;
81 
82 	for (i = 0; i < SH3_MMU_ENTRY; ++i) {
83 		idx = i << SH3_MMU_VPN_SHIFT;
84 
85 		for (way = 0; way < SH3_MMU_WAY; ++way) {
86 			aa = SH3_MMUAA | idx | (way << SH3_MMU_WAY_SHIFT);
87 
88 			if ((_reg_read_4(aa) & SH3_MMUAA_D_ASID_MASK) == asid)
89 				_reg_write_4(aa, 0);
90 		}
91 	}
92 }
93 
94 
95 void
96 sh3_tlb_invalidate_addr(int asid, vaddr_t va)
97 {
98 	uint32_t match, idx, aa, entry;
99 	int way;
100 
101 	/* What we are looking for in the address array */
102 	match = (va & SH3_MMUAA_D_VPN_MASK_4K) | asid;
103 
104 	/* Where in the address array this VA is located - bits [16:12] */
105 	idx = va & SH3_MMU_VPN_MASK;
106 
107 	/* Check each way - bits [9:8] */
108 	for (way = 0; way < SH3_MMU_WAY; ++way) {
109 		aa = SH3_MMUAA | idx | (way << SH3_MMU_WAY_SHIFT);
110 
111 		entry = _reg_read_4(aa)
112 		    & (SH3_MMUAA_D_VPN_MASK_4K | SH3_MMUAA_D_ASID_MASK);
113 
114 		if (entry == match) {
115 			_reg_write_4(aa, 0);
116 			break;
117 		}
118 	}
119 }
120 
121 
/*
 * sh3_tlb_update(asid, va, pte):
 *	Install the translation va -> pte for "asid" directly into the
 *	TLB arrays, or invalidate any existing entry if the new PTE is
 *	not valid.  Reuses a matching entry or an invalid way when
 *	possible; otherwise evicts a way chosen round-robin.
 */
void
sh3_tlb_update(int asid, vaddr_t va, uint32_t pte)
{
	/*
	 * Round-robin victim counter used when all ways hold valid
	 * entries; persists across calls (always in [0, SH3_MMU_WAY)).
	 */
	static unsigned int rc = 0;

	uint32_t match, idx, a, entry;
	uint32_t freea, matcha;
	uint32_t newa, newd;
	int way;
	int s;

	KDASSERT(asid < 256 && (pte & ~PGOFSET) != 0 && va != 0);


	/* An invalid PTE just means: drop any existing translation. */
	if ((pte & PG_V) == 0) {
		sh3_tlb_invalidate_addr(asid, va);
		return;
	}


	/*
	 * Simple approach is to invalidate + ldtlb, but ldtlb uses
	 * MMUCR.RC to select the way to overwrite, and RC is only
	 * meaningful immediately after TLB exception, so ldtlb here
	 * would update some random way, e.g. a valid way even if
	 * there is an invalid way we could use instead.
	 *
	 * Nano-optimization: as invalidatation needs to loop over
	 * ways anyway, just loop over all of them, noting if there's
	 * either an existing entry for this VA that we can update or
	 * an invalid way we can use.
	 */

	/* What we are looking for in the address array */
	match = (va & SH3_MMUAA_D_VPN_MASK_4K) | asid;

	/* Where in the address array this VA is located - bits [16:12] */
	idx = va & SH3_MMU_VPN_MASK;

	/* New address-array and data-array words for the entry */
	newa = match | SH3_MMU_D_VALID;
	newd = pte & PG_HW_BITS;

	/* ~0 is the "not found" sentinel; it tests negative as (int) below */
	matcha = freea = ~0;


	/* Keep TLB-miss handling out while we probe and rewrite the arrays */
	s = splhigh();

	/* Check each way - bits [9:8] */
	for (way = 0; way < SH3_MMU_WAY; ++way) {
		a = idx | (way << SH3_MMU_WAY_SHIFT);

		entry = _reg_read_4(SH3_MMUAA | a);

		/* Remember the last invalid way seen as a free slot */
		if ((entry & SH3_MMU_D_VALID) == 0)
			freea = a;

		entry &= (SH3_MMUAA_D_VPN_MASK_4K | SH3_MMUAA_D_ASID_MASK);
		if (entry == match) {
			matcha = a;
			break;
		}
	}

	if ((int)matcha >= 0)	/* there's an existing entry, update it */
		a = matcha;
	else if ((int)freea >= 0) /* there's an invalid way, overwrite it */
		a = freea;
	else {			/* no match, all ways are valid */
		a = idx | (rc << SH3_MMU_WAY_SHIFT);
		rc = (rc + 1) % SH3_MMU_WAY;
	}

	/* Write the address array first, then the matching data array word */
	_reg_write_4(SH3_MMUAA | a, newa);
	_reg_write_4(SH3_MMUDA | a, newd);

	splx(s);
}
199