/*	$NetBSD: cache_sh4.c,v 1.7 2002/11/08 14:58:25 tsutsui Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <sh3/cache.h>
#include <sh3/cache_sh4.h>

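/*
 * Round up/truncate an address to a 32-byte boundary, the SH-4 cache
 * line size used throughout this file.
 */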
#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)

void sh4_icache_sync_all(void);
void sh4_icache_sync_range(vaddr_t, vsize_t);
void sh4_icache_sync_range_index(vaddr_t, vsize_t);
void sh4_dcache_wbinv_all(void);
void sh4_dcache_wbinv_range(vaddr_t, vsize_t);
void sh4_dcache_wbinv_range_index(vaddr_t, vsize_t);
void sh4_dcache_inv_range(vaddr_t, vsize_t);
void sh4_dcache_wb_range(vaddr_t, vsize_t);

/* must be inlined. */
extern __inline__ void cache_sh4_op_line_32(vaddr_t, vaddr_t, u_int32_t,
    u_int32_t);
extern __inline__ void cache_sh4_op_8lines_32(vaddr_t, vaddr_t, u_int32_t,
    u_int32_t);

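/*
 * sh4_cache_config:
 *
 *	Configure the SH-4 cache control register, record the resulting
 *	cache parameters, and install the SH-4 cache operation vector.
 */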
void
sh4_cache_config()
{
	u_int32_t r;

	/*
	 * For now: P0, U0, P3 write-through; P1 write-through.
	 * XXX will be obsoleted.
	 */
	sh4_icache_sync_all();
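	/*
	 * CCR is written from the uncached P2 segment; RUN_P2/RUN_P1
	 * switch execution there and back.  0x0000090b is
	 * ICI|ICE|OCI|WT|OCE: enable and invalidate both caches,
	 * write-through for P0/U0/P3, and CB=0 so P1 is write-through
	 * as well.
	 */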
	RUN_P2;
	_reg_write_4(SH4_CCR, 0x0000090b);
	RUN_P1;

	r = _reg_read_4(SH4_CCR);

	sh_cache_unified = 0;
	sh_cache_enable_icache = (r & SH4_CCR_ICE);
	sh_cache_enable_dcache = (r & SH4_CCR_OCE);
	sh_cache_ways = 1;
	sh_cache_line_size = 32;
	sh_cache_write_through_p0_u0_p3 = (r & SH4_CCR_WT);
	sh_cache_write_through_p1 = !(r & SH4_CCR_CB);
	sh_cache_write_through = sh_cache_write_through_p0_u0_p3 &&
	    sh_cache_write_through_p1;
	sh_cache_ram_mode = (r & SH4_CCR_ORA);
	sh_cache_index_mode_icache = (r & SH4_CCR_IIX);
	sh_cache_index_mode_dcache = (r & SH4_CCR_OIX);

	sh_cache_size_dcache = SH4_DCACHE_SIZE;
	if (sh_cache_ram_mode)
		sh_cache_size_dcache /= 2;
	sh_cache_size_icache = SH4_ICACHE_SIZE;

	sh_cache_ops._icache_sync_all		= sh4_icache_sync_all;
	sh_cache_ops._icache_sync_range		= sh4_icache_sync_range;
	sh_cache_ops._icache_sync_range_index	= sh4_icache_sync_range_index;

	sh_cache_ops._dcache_wbinv_all		= sh4_dcache_wbinv_all;
	sh_cache_ops._dcache_wbinv_range	= sh4_dcache_wbinv_range;
	sh_cache_ops._dcache_wbinv_range_index	= sh4_dcache_wbinv_range_index;
	sh_cache_ops._dcache_inv_range		= sh4_dcache_inv_range;
	sh_cache_ops._dcache_wb_range		= sh4_dcache_wb_range;
}

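/*
 * The index operations below work on the SH-4 memory-mapped cache address
 * arrays (SH4_CCIA for the instruction cache, SH4_CCDA for the operand
 * cache), so their callers bracket them with RUN_P2/RUN_P1 to execute from
 * the uncached P2 segment.  Clearing the V bit of an entry invalidates it;
 * the D-cache routines clear U and V together to write back and invalidate.
 * Address-array entries are 32 bytes apart, which is why
 * cache_sh4_op_8lines_32 steps through the array with a stride of eight
 * u_int32_t words per line.
 */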
/*
 * cache_sh4_op_line_32: (index-operation)
 *
 *	Clear the specified bits on a single 32-byte cache line.
 *
 */
void
cache_sh4_op_line_32(vaddr_t va, vaddr_t base, u_int32_t mask, u_int32_t bits)
{
	vaddr_t cca;

	cca = base | (va & mask);
	_reg_bclr_4(cca, bits);
}

/*
 * cache_sh4_op_8lines_32: (index-operation)
 *
 *	Clear the specified bits on 8 32-byte cache lines.
 *
 */
void
cache_sh4_op_8lines_32(vaddr_t va, vaddr_t base, u_int32_t mask, u_int32_t bits)
{
	__volatile__ u_int32_t *cca = (__volatile__ u_int32_t *)
	    (base | (va & mask));

	cca[ 0] &= ~bits;
	cca[ 8] &= ~bits;
	cca[16] &= ~bits;
	cca[24] &= ~bits;
	cca[32] &= ~bits;
	cca[40] &= ~bits;
	cca[48] &= ~bits;
	cca[56] &= ~bits;
}

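/*
 * sh4_icache_sync_all:
 *
 *	Write back and invalidate the entire D-cache first, so that newly
 *	written instructions reach memory, then invalidate every I-cache
 *	entry through the address array.
 */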
void
sh4_icache_sync_all()
{
	vaddr_t va = 0;
	vaddr_t eva = SH4_ICACHE_SIZE;

	sh4_dcache_wbinv_all();

	RUN_P2;
	while (va < eva) {
		cache_sh4_op_8lines_32(va, SH4_CCIA, CCIA_ENTRY_MASK, CCIA_V);
		va += 32 * 8;
	}
	RUN_P1;
}

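/*
 * sh4_icache_sync_range:
 *
 *	Write back and invalidate the D-cache over the range, then
 *	invalidate matching I-cache lines with associative address-array
 *	writes: with the A bit set, the supplied tag is compared and V is
 *	cleared only on a hit.
 */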
void
sh4_icache_sync_range(vaddr_t va, vsize_t sz)
{
	vaddr_t ccia;
	vaddr_t eva = round_line(va + sz);
	va = trunc_line(va);

	sh4_dcache_wbinv_range(va, (eva - va));

	RUN_P2;
	while (va < eva) {
		/* CCR.IIX has no effect on this entry specification */
		ccia = SH4_CCIA | CCIA_A | (va & CCIA_ENTRY_MASK);
		_reg_write_4(ccia, va & CCIA_TAGADDR_MASK); /* V = 0 */
		va += 32;
	}
	RUN_P1;
}

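/*
 * sh4_icache_sync_range_index:
 *
 *	Like sh4_icache_sync_range, but invalidates by cache index:
 *	whatever line currently occupies each index covered by the range
 *	is dropped, eight lines at a time where possible.
 */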
void
sh4_icache_sync_range_index(vaddr_t va, vsize_t sz)
{
	vaddr_t eva = round_line(va + sz);
	va = trunc_line(va);

	sh4_dcache_wbinv_range_index(va, eva - va);

	RUN_P2;
	while ((eva - va) >= (8 * 32)) {
		cache_sh4_op_8lines_32(va, SH4_CCIA, CCIA_ENTRY_MASK, CCIA_V);
		va += 32 * 8;
	}

	while (va < eva) {
		cache_sh4_op_line_32(va, SH4_CCIA, CCIA_ENTRY_MASK, CCIA_V);
		va += 32;
	}
	RUN_P1;
}

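/*
 * sh4_dcache_wbinv_all:
 *
 *	Write back and invalidate the entire D-cache by clearing U and V
 *	on every address-array entry.
 */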
void
sh4_dcache_wbinv_all()
{
	vaddr_t va = 0;
	vaddr_t eva = SH4_DCACHE_SIZE;

	RUN_P2;
	while (va < eva) {
		cache_sh4_op_8lines_32(va, SH4_CCDA, CCDA_ENTRY_MASK,
		    (CCDA_U | CCDA_V));
		va += 32 * 8;
	}
	RUN_P1;
}

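/*
 * sh4_dcache_wbinv_range:
 *
 *	Write back and invalidate by virtual address using "ocbp" (operand
 *	cache block purge), which writes a dirty line back and invalidates
 *	it; no P2 switch is needed for the cache instructions.
 */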
void
sh4_dcache_wbinv_range(vaddr_t va, vsize_t sz)
{
	vaddr_t eva = round_line(va + sz);
	va = trunc_line(va);

	while (va < eva) {
		__asm__ __volatile__("ocbp @%0" : : "r"(va));
		va += 32;
	}
}

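/*
 * sh4_dcache_wbinv_range_index:
 *
 *	Write back and invalidate by cache index, eight 32-byte lines at a
 *	time where possible, then one line at a time for the remainder.
 */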
void
sh4_dcache_wbinv_range_index(vaddr_t va, vsize_t sz)
{
	vaddr_t eva = round_line(va + sz);
	va = trunc_line(va);

	RUN_P2;
	while ((eva - va) >= (8 * 32)) {
		cache_sh4_op_8lines_32(va, SH4_CCDA, CCDA_ENTRY_MASK,
		    (CCDA_U | CCDA_V));
		va += 32 * 8;
	}

	while (va < eva) {
		cache_sh4_op_line_32(va, SH4_CCDA, CCDA_ENTRY_MASK,
		    (CCDA_U | CCDA_V));
		va += 32;
	}
	RUN_P1;
}

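/*
 * sh4_dcache_inv_range:
 *
 *	Invalidate by virtual address using "ocbi" (operand cache block
 *	invalidate); dirty data in the range is discarded, not written back.
 */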
void
sh4_dcache_inv_range(vaddr_t va, vsize_t sz)
{
	vaddr_t eva = round_line(va + sz);
	va = trunc_line(va);

	while (va < eva) {
		__asm__ __volatile__("ocbi @%0" : : "r"(va));
		va += 32;
	}
}

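/*
 * sh4_dcache_wb_range:
 *
 *	Write back by virtual address using "ocbwb" (operand cache block
 *	write-back); the lines remain valid in the cache.
 */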
void
sh4_dcache_wb_range(vaddr_t va, vsize_t sz)
{
	vaddr_t eva = round_line(va + sz);
	va = trunc_line(va);

	while (va < eva) {
		__asm__ __volatile__("ocbwb @%0" : : "r"(va));
		va += 32;
	}
}