/*	$NetBSD: cache_sh3.c,v 1.6 2002/05/10 15:28:45 uch Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <sh3/cache.h>
#include <sh3/cache_sh3.h>

#define	round_line(x)		(((x) + 15) & ~15)
#define	trunc_line(x)		((x) & ~15)
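
/*
 * For example (illustrative): round_line(0x1004) == 0x1010 and
 * trunc_line(0x100c) == 0x1000, since cache lines are 16 bytes.
 */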

void sh3_cache_wbinv_all(void);
void sh3_cache_wbinv_range(vaddr_t, vsize_t);
void sh3_cache_wbinv_range_index(vaddr_t, vsize_t);
void sh3_cache_panic(vaddr_t, vsize_t);
void sh3_cache_nop(vaddr_t, vsize_t);

int sh_cache_way_size;
int sh_cache_way_shift;
int sh_cache_entry_mask;

static __inline__ void cache_sh3_op_line_16_nway(int, vaddr_t, u_int32_t);
static __inline__ void cache_sh3_op_8lines_16_nway(int, vaddr_t, u_int32_t);

void
sh3_cache_config()
{
	size_t cache_size;
	u_int32_t r;

	/* Determine cache size */
	switch (cpu_product) {
	default:
		/* FALLTHROUGH */
	case CPU_PRODUCT_7708:
		/* FALLTHROUGH */
	case CPU_PRODUCT_7708S:
		/* FALLTHROUGH */
	case CPU_PRODUCT_7708R:
		cache_size = 8 * 1024;
		break;
	case CPU_PRODUCT_7709:
		cache_size = 8 * 1024;
		break;
	case CPU_PRODUCT_7709A:
		cache_size = 16 * 1024;
		break;
	}

	r = _reg_read_4(SH3_CCR);

	sh_cache_unified = 1;
	sh_cache_enable_unified = (r & SH3_CCR_CE);
	sh_cache_line_size = 16;
	sh_cache_write_through_p0_u0_p3 = r & SH3_CCR_WT;
	sh_cache_write_through_p1 = !(r & SH3_CCR_CB);
	sh_cache_write_through = sh_cache_write_through_p0_u0_p3 &&
	    sh_cache_write_through_p1;

	sh_cache_ram_mode = r & SH3_CCR_RA;
	if (sh_cache_ram_mode) {
		/*
		 * In RAM mode, ways 2 and 3 are used as on-chip RAM.
		 */
		sh_cache_ways = 2;
		sh_cache_size_unified = cache_size / 2;
	} else {
		sh_cache_ways = 4;
		sh_cache_size_unified = cache_size;
	}

	/* way size: enough to step through every entry in one way */
	sh_cache_way_size = sh_cache_size_unified / 4/*way*/;
	/* mask for extracting the entry-select bits from an address */
	sh_cache_entry_mask = (sh_cache_way_size - 1) & ~15/*line-mask*/;
	/* shift for way selection (16KB/8KB) */
	sh_cache_way_shift =
	    /* entry bits */
	    ffs(sh_cache_size_unified / (4/*way*/ * 16/*line-size*/)) - 1
	    /* line bits */
	    + 4;
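
	/*
	 * Worked example (illustrative): a 16KB cache (SH7709A, RAM
	 * mode off) gives sh_cache_way_size = 4096, sh_cache_entry_mask
	 * = 0x0ff0 and sh_cache_way_shift = ffs(256) - 1 + 4 = 12; an
	 * 8KB cache gives 2048, 0x07f0 and 11.
	 */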

	sh_cache_ops._icache_sync_all		= sh3_cache_wbinv_all;
	sh_cache_ops._icache_sync_range		= sh3_cache_wbinv_range;
	sh_cache_ops._icache_sync_range_index	= sh3_cache_wbinv_range_index;
	sh_cache_ops._dcache_wbinv_all		= sh3_cache_wbinv_all;
	sh_cache_ops._dcache_wbinv_range	= sh3_cache_wbinv_range;
	sh_cache_ops._dcache_wbinv_range_index	= sh3_cache_wbinv_range_index;
	/* SH3 can't invalidate without write-back */
	sh_cache_ops._dcache_inv_range		= sh3_cache_panic;
	if (sh_cache_write_through) {
		sh_cache_ops._dcache_wb_range		= sh3_cache_nop;
	} else {
		/* SH3 can't write-back without invalidate */
		sh_cache_ops._dcache_wb_range		= sh3_cache_wbinv_range;
	}
}
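
/*
 * A minimal usage sketch (illustrative; the sh_cache_ops switch is
 * declared in <sh3/cache.h>): once sh3_cache_config() has filled in
 * the table, machine-independent callers reach these routines
 * indirectly, e.g.
 *
 *	(*sh_cache_ops._dcache_wbinv_range)(va, sz);
 *
 * so the same call site works for both the SH3 and SH4 cache code.
 */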

/*
 * cache_sh3_op_line_16_nway: (index-operation)
 *
 *	Clear the specified bits on a single 16-byte cache line, in
 *	each of n ways.
 */
static __inline__ void
cache_sh3_op_line_16_nway(int n, vaddr_t va, u_int32_t bits)
{
	vaddr_t cca;
	int way;

	/* extract entry # */
	va &= sh_cache_entry_mask;

	/* operate on each way */
	for (way = 0; way < n; way++) {
		/* build the cache-address-array address for this way */
		cca = (SH3_CCA | way << sh_cache_way_shift | va);
		_reg_write_4(cca, _reg_read_4(cca) & ~bits);
	}
}
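
/*
 * For example (illustrative, assuming SH3_CCA is the base of the
 * cache address array): with sh_cache_way_shift == 12, way 1 and
 * entry offset 0x230 give cca == SH3_CCA | 0x1230; clearing CCA_U
 * and CCA_V at that address writes the line back (if dirty) and
 * invalidates it.
 */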

/*
 * cache_sh3_op_8lines_16_nway: (index-operation)
 *
 *	Clear the specified bits on eight 16-byte cache lines, in
 *	each of n ways.
 */
static __inline__ void
cache_sh3_op_8lines_16_nway(int n, vaddr_t va, u_int32_t bits)
{
	__volatile__ u_int32_t *cca;
	int way;

	/* extract entry # */
	va &= sh_cache_entry_mask;

	/* operate on each way */
	for (way = 0; way < n; way++) {
		cca = (__volatile__ u_int32_t *)
		    (SH3_CCA | way << sh_cache_way_shift | va);
		/* each step of 4 words advances one 16-byte line */
		cca[ 0] &= ~bits;
		cca[ 4] &= ~bits;
		cca[ 8] &= ~bits;
		cca[12] &= ~bits;
		cca[16] &= ~bits;
		cca[20] &= ~bits;
		cca[24] &= ~bits;
		cca[28] &= ~bits;
	}
}

void
sh3_cache_wbinv_all()
{
	vaddr_t va;

	/* walk every entry index, 8 lines at a time, in all ways */
	for (va = 0; va < sh_cache_way_size; va += 16 * 8)
		cache_sh3_op_8lines_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
}
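
/*
 * Illustrative cost: with sh_cache_way_size == 4096 the loop above
 * makes 4096 / 128 == 32 passes, each clearing 8 lines in every way,
 * i.e. all 1024 lines of a 16KB 4-way cache.
 */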

void
sh3_cache_wbinv_range_index(vaddr_t va, vsize_t sz)
{
	vaddr_t eva = round_line(va + sz);

	va = trunc_line(va);

	/* purge 8 lines at a time while the range allows it ... */
	while ((eva - va) >= (8 * 16)) {
		cache_sh3_op_8lines_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
		va += 16 * 8;
	}

	/* ... then mop up the remainder line by line */
	while (va < eva) {
		cache_sh3_op_line_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
		va += 16;
	}
}
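
/*
 * Worked example (illustrative): va == 0x1234, sz == 0x20 gives
 * trunc_line(va) == 0x1230 and eva == round_line(0x1254) == 0x1260;
 * the 0x30-byte span is under 8 lines, so three lines are purged
 * one at a time.
 */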

void
sh3_cache_wbinv_range(vaddr_t va, vsize_t sz)
{
	vaddr_t eva = round_line(va + sz);
	vaddr_t cca;

	va = trunc_line(va);

	while (va < eva) {
		cca = SH3_CCA | CCA_A | (va & sh_cache_entry_mask);
		/*
		 * Associative write of the virtual tag address: the
		 * MMU translates it to a physical tag, which the
		 * address array compares against the entry's tag.
		 * U and V are implicitly written as 0, so a matching
		 * line is written back (if dirty) and invalidated;
		 * unlike the index operations above, a miss leaves
		 * the entry untouched.
		 */
		_reg_write_4(cca, va & CCA_TAGADDR_MASK);
		va += 16;
	}
}

void
sh3_cache_panic(vaddr_t va, vsize_t size)
{

	panic("SH3 can't invalidate without write-back");
}

void
sh3_cache_nop(vaddr_t va, vsize_t sz)
{
	/* NO-OP */
}