/*	$NetBSD: cache_r4k.c,v 1.6 2001/11/23 06:21:50 tsutsui Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_r4k.h>

/*
 * Cache operations for R4000/R4400-style caches:
 *
 *	- Direct-mapped
 *	- Write-back
 *	- Virtually indexed, physically tagged
 *
 * XXX Does not handle split secondary caches.
 */

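/*
 * The *_16 variants below assume 16-byte primary cache lines.
 * round_line()/trunc_line() grow a virtual address range outward to
 * 16-byte line boundaries so that every line the range touches is
 * covered.
 */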
#define	round_line(x)		(((x) + 15) & ~15)
#define	trunc_line(x)		((x) & ~15)

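/*
 * The CACHE instruction used by the cache_r4k_op_*() and
 * cache_op_r4k_line() macros is a MIPS III instruction, so let the
 * assembler accept MIPS III opcodes in this file.
 */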
__asm(".set mips3");

void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

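	/*
	 * Write back (and invalidate) the primary data cache first so
	 * that any newly stored instructions are visible to instruction
	 * fetch, then index-invalidate the entire I-cache.
	 */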
	mips_dcache_wbinv_all();

	__asm __volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}

void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");

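	/*
	 * Invalidate 32 lines (512 bytes) per iteration while the
	 * remaining range is large enough, then finish the tail one
	 * 16-byte line at a time.
	 */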
	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}

void
r4k_pdcache_wbinv_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

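	/*
	 * The primary data cache is direct-mapped, so stepping through
	 * KSEG0 addresses 0 .. mips_pdcache_size - 1 visits every cache
	 * index exactly once.
	 */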
	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}
}

void
r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

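	/*
	 * Hit-Invalidate only: matching lines are discarded without
	 * being written back, so any dirty data in the range is lost.
	 */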
	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

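	/*
	 * Hit-Writeback only: dirty lines in the range are written back
	 * but stay valid in the cache.
	 */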
	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}
}

#undef round_line
#undef trunc_line

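/*
 * The *_32 variants are identical in structure to the *_16 variants
 * above, but assume 32-byte cache lines; redefine the alignment
 * macros accordingly.
 */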
#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)

void
r4k_icache_sync_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

	mips_dcache_wbinv_all();

	__asm __volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}
}

void
r4k_icache_sync_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

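	/*
	 * CACHE_R4K_SD targets the secondary cache; index writeback-
	 * invalidate every 32-byte line in it.
	 */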
	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

#undef round_line
#undef trunc_line

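/*
 * Secondary-cache variants for 128-byte cache lines.
 */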
#define	round_line(x)		(((x) + 127) & ~127)
#define	trunc_line(x)		((x) & ~127)

void
r4k_sdcache_wbinv_all_128(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}
}

void
r4k_sdcache_wbinv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_inv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 128;
	}
}

void
r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 128;
	}
}

#undef round_line
#undef trunc_line

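/*
 * Generic secondary-cache variants that use the line size discovered
 * at run time (mips_sdcache_line_size), operating on one line per
 * iteration instead of 32.
 */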
#define	round_line(x)		(((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
#define	trunc_line(x)		((x) & ~(mips_sdcache_line_size - 1))

void
r4k_sdcache_wbinv_all_generic(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;
	int line_size = mips_sdcache_line_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva;
	int line_size = mips_sdcache_line_size;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += line_size;
	}
}

#undef round_line
#undef trunc_line