/*	$NetBSD: cache_ls2.c,v 1.3 2009/08/11 00:34:29 matt Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cache_ls2.c,v 1.3 2009/08/11 00:34:29 matt Exp $");

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_ls2.h>
#include <mips/locore.h>

/*
 * Cache operations for Loongson2-style caches:
 *
 *	- 4-way set-associative, 32 bytes/line
 *	- Write-back
 *	- Primary is virtually indexed, physically tagged
 *	- Secondary is physically indexed, physically tagged
 */
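/*
 * All of the cache lines handled here are 32 bytes, so round the end of a
 * range up, and truncate its start down, to 32-byte line boundaries.
 */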
#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)

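/*
 * The CACHE instruction used by the cache_op_ls2_*() macros is a MIPS-III
 * (and later) instruction, so have the assembler accept MIPS-III code no
 * matter what ISA level the rest of the kernel is built for.
 */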
__asm(".set mips3");

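/*
 * Sync the primary I-cache over [va, va + size): write dirty primary
 * D-cache lines back to memory and invalidate the I-cache at the
 * corresponding indices so later instruction fetches see the new code.
 */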
void
ls2_icache_sync_range(vaddr_t va, vsize_t size)
{
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	if (va + mips_picache_size <= eva) {
		ls2_icache_sync_all();
		return;
	}

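	/*
	 * Operate on eight lines at a time while at least eight full lines
	 * remain, then finish the tail one line at a time.
	 */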
	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
		cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
		cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV);
	}

	__asm volatile("sync");
}

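/*
 * As ls2_icache_sync_range(), but indexed: the caller's address may not be
 * mapped, so only its cache-index bits are used, recast as a KSEG0 address.
 */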
void
ls2_icache_sync_range_index(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */

	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);
	eva = round_line(va + size);
	va = trunc_line(va);

	if (va + mips_picache_way_size < eva) {
		va = MIPS_PHYS_TO_KSEG0(0);
		eva = va + mips_picache_way_size;
	}

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
		cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
		cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV);
	}

	__asm volatile("sync");
}

void
ls2_icache_sync_all(void)
{
	ls2_icache_sync_range_index(0, mips_picache_way_size);
}

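/*
 * Invalidate primary D-cache lines over [va, va + size) without writing
 * them back; any dirty data in the range is discarded.
 */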
void
ls2_pdcache_inv_range(vaddr_t va, vsize_t size)
{
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_INV);
	}

	__asm volatile("sync");
}

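/*
 * Write back and invalidate primary D-cache lines over [va, va + size)
 * by virtual address.
 */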
void
ls2_pdcache_wbinv_range(vaddr_t va, vsize_t size)
{
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
	}

	__asm volatile("sync");
}

void
ls2_pdcache_wb_range(vaddr_t va, vsize_t size)
{
	/*
	 * Alas, can't writeback without invalidating...
	 */
	ls2_pdcache_wbinv_range(va, size);
}

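/*
 * Indexed write-back-invalidate of the primary D-cache, for addresses that
 * may not be mapped; all four ways are cleaned at each index.
 */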
void
ls2_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_pdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	if (va + mips_pdcache_way_size < eva) {
		va = MIPS_PHYS_TO_KSEG0(0);
		eva = va + mips_pdcache_way_size;
	}

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
	}

	__asm volatile("sync");
}

void
ls2_pdcache_wbinv_all(void)
{
	ls2_pdcache_wbinv_range_index(0, mips_pdcache_way_size);
}

/*
 * Cache operations for secondary caches:
 *
 *	- 4-way set-associative, 32 bytes/line
 *	- Write-back
 *	- Physically indexed, physically tagged
 */
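/*
 * Invalidate the primary D-cache and secondary cache lines over
 * [va, va + size) without writing them back.
 */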
void
ls2_sdcache_inv_range(vaddr_t va, vsize_t size)
{
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV);
		cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_INV);
		cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_INV);
	}

	__asm volatile("sync");
}

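/*
 * Write back and invalidate the primary D-cache and secondary cache lines
 * over [va, va + size).
 */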
void
ls2_sdcache_wbinv_range(vaddr_t va, vsize_t size)
{
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
		cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_WB_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
		cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_WB_INV);
	}

	__asm volatile("sync");
}

void
ls2_sdcache_wb_range(vaddr_t va, vsize_t size)
{
	/*
	 * Alas, can't writeback without invalidating...
	 */
	ls2_sdcache_wbinv_range(va, size);
}

void
ls2_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_sdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	if (va + mips_sdcache_way_size < eva) {
		va = MIPS_PHYS_TO_KSEG0(0);
		eva = va + mips_sdcache_way_size;
	}

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
		cache_op_ls2_8line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
		cache_op_ls2_line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
	}

	__asm volatile("sync");
}

void
ls2_sdcache_wbinv_all(void)
{
	ls2_sdcache_wbinv_range_index(0, mips_sdcache_way_size);
}