/*	$NetBSD: cache_r4k.c,v 1.7 2002/11/07 23:03:21 cgd Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_r4k.h>

/*
 * Cache operations for R4000/R4400-style caches:
 *
 *	- Direct-mapped
 *	- Write-back
 *	- Virtually indexed, physically tagged
 *
 * XXX Does not handle split secondary caches.
 */

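/*
 * round_line()/trunc_line() round an address up/down to a cache line
 * boundary for the line size currently in effect (16 bytes here; the
 * macros are redefined below for the larger line sizes).  For example,
 * with 16-byte lines, round_line(0x1005) == 0x1010 and
 * trunc_line(0x1005) == 0x1000.
 */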
#define	round_line(x)		(((x) + 15) & ~15)
#define	trunc_line(x)		((x) & ~15)

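/*
 * The cache_r4k_op_*() macros from <mips/cache_r4k.h> expand to the
 * CACHE instruction, which is a MIPS III (R4000) instruction, so ask
 * the assembler to accept MIPS III opcodes here regardless of the
 * ISA level the rest of the kernel is built for.
 */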
__asm(".set mips3");

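/*
 * Sync the entire primary instruction cache (16-byte lines): write
 * back and invalidate the primary data cache so any newly written
 * instructions reach memory, then invalidate every I-cache line by
 * index through KSEG0, 32 lines (512 bytes) per iteration.
 */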
void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

	mips_dcache_wbinv_all();

	__asm __volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}

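/*
 * Sync a range of the primary instruction cache: write back the
 * corresponding data cache lines, then hit-invalidate the I-cache
 * lines covering [va, va + size), 32 lines at a time with a
 * line-at-a-time tail.
 */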
void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

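/*
 * Index-based variant of the above, used when the given virtual
 * address may no longer be mapped: the cache index bits are taken
 * from the address and applied to an equivalent KSEG0 address.
 */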
void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(orig_va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}

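/*
 * Write back and invalidate the entire primary data cache by index,
 * walking it through KSEG0.
 */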
void
r4k_pdcache_wbinv_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}
}

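/*
 * Write back and invalidate the primary data cache lines covering
 * [va, va + size) with hit operations.
 */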
void
r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}
}

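/*
 * Index-based write-back-and-invalidate of the primary data cache
 * for a range whose virtual address may no longer be mapped.
 */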
void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

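/*
 * Invalidate, without writing back, the primary data cache lines
 * covering [va, va + size).
 */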
void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

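/*
 * Write back, without invalidating, the primary data cache lines
 * covering [va, va + size).
 */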
void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}
}

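/*
 * The same primary cache operations follow for caches with 32-byte
 * lines; they mirror the 16-byte versions above.
 */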
#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)

void
r4k_icache_sync_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

	mips_dcache_wbinv_all();

	__asm __volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}
}

void
r4k_icache_sync_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

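/*
 * Secondary cache operations for caches with 32-byte lines.  These
 * follow the same pattern as the primary data cache routines, but
 * operate on the secondary cache (CACHE_R4K_SD).
 */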
void
r4k_sdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

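/*
 * Secondary cache operations for caches with 128-byte lines.
 */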
#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 127) & ~127)
#define	trunc_line(x)		((x) & ~127)

void
r4k_sdcache_wbinv_all_128(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}
}

void
r4k_sdcache_wbinv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_inv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 128;
	}
}

void
r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 128;
	}
}

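/*
 * Generic secondary cache operations that work one line at a time
 * using the run-time line size in mips_sdcache_line_size, rather
 * than the fixed 32-lines-per-iteration loops above.
 */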
#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
#define	trunc_line(x)		((x) & ~(mips_sdcache_line_size - 1))

void
r4k_sdcache_wbinv_all_generic(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;
	int line_size = mips_sdcache_line_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva;
	int line_size = mips_sdcache_line_size;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += line_size;
	}
}

#undef round_line
#undef trunc_line