/*	$NetBSD: cache_r10k.c,v 1.8 2016/07/13 21:25:15 macallan Exp $	*/

/*-
 * Copyright (c) 2003 Takao Shinohara.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>

#include <mips/cpuregs.h>
#include <mips/cache.h>
#include <mips/cache_r4k.h>
#include <mips/cache_r10k.h>

/*
 * Cache operations for R10000-style caches:
 *
 *	2-way, write-back
 *	primary cache: virtual index/physical tag
 *	secondary cache: physical index/physical tag
 */

__asm(".set mips3");

#define	round_line(x)	(((x) + 64 - 1) & ~(64 - 1))
#define	trunc_line(x)	((x) & ~(64 - 1))
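
/*
 * A note on the Index loops below: the R10000 primary caches are 2-way
 * set associative, and each Index op is issued twice per line, once at
 * "va" and once at "va + 1" (the low address bit selecting the cache
 * way for Index operations), before advancing by a full line: 64 bytes
 * for the primary I-cache, 32 bytes for the primary D-cache.
 */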

void
r10k_icache_sync_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_picache_way_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 63;
	}
}

void
r10k_icache_sync_range(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 64;
	}
}

void
r10k_icache_sync_range_index(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
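	/*
	 * For example, with a 32KB, 2-way primary I-cache (16KB per
	 * way, so mci_picache_way_mask == 0x3fff), a va such as
	 * 0x00402a40 masks down to 0x2a40, and MIPS_PHYS_TO_KSEG0()
	 * turns that offset into an always-accessible kernel address
	 * that selects the same cache index.
	 */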
	va = MIPS_PHYS_TO_KSEG0(orig_va & mci->mci_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 63;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)	(((x) + 32 - 1) & ~(32 - 1))
#define	trunc_line(x)	((x) & ~(32 - 1))

void
r10k_pdcache_wbinv_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_pdcache_way_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 31;
	}
}

void
r10k_pdcache_wbinv_range(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r10k_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_pdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 31;
	}
}

void
r10k_pdcache_inv_range(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r10k_pdcache_wb_range(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		/* R10000 does not support HitWriteBack operation */
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)	(((x) + mci->mci_sdcache_line_size - 1) & ~(mci->mci_sdcache_line_size - 1))
#define	trunc_line(x)	((x) & ~(mci->mci_sdcache_line_size - 1))
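
/*
 * The secondary cache routines read the line size from mips_cache_info
 * at run time rather than hard-coding it: the R10000's external
 * (secondary) cache block size is a system configuration option
 * (64 or 128 bytes), so round_line()/trunc_line() above and the loops
 * below must use mci_sdcache_line_size.
 */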

void
r10k_sdcache_wbinv_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_sdcache_way_size;
	vsize_t line_size = mci->mci_sdcache_line_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size - 1;
	}
}

void
r10k_sdcache_wbinv_range(register_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

void
r10k_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva;
	vsize_t line_size = mci->mci_sdcache_line_size;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_sdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size - 1;
	}
}

void
r10k_sdcache_inv_range(register_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}

void
r10k_sdcache_wb_range(register_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		/* R10000 does not support HitWriteBack operation */
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

#undef round_line
#undef trunc_line