/*	$NetBSD: cache_ls2.c,v 1.5 2016/07/11 16:15:36 matt Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cache_ls2.c,v 1.5 2016/07/11 16:15:36 matt Exp $");

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_ls2.h>
#include <mips/locore.h>

/*
 * Cache operations for Loongson2-style caches:
 *
 *	- 4-way set-associative, 32 bytes/line
 *	- Write-back
 *	- Primary is virtually indexed, physically tagged
 *	- Secondary is physically indexed, physically tagged
 */

#define	round_line(x)	(((x) + 31) & ~31)
#define	trunc_line(x)	((x) & ~31)
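
/*
 * Example: for va = 0x1005, round_line(va) is 0x1020 (the start of the
 * next 32-byte line) and trunc_line(va) is 0x1000 (the start of the
 * line containing va), so [trunc_line(va), round_line(va + size))
 * covers every line the range touches.
 */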

__asm(".set mips3");

void
ls2_icache_sync_range(register_t va, vsize_t size)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	if (va + mci->mci_picache_size <= eva) {
		ls2_icache_sync_all();
		return;
	}

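	/*
	 * Write the affected lines back from the primary D-cache (so
	 * instruction fetches will see the new contents), then
	 * index-invalidate the corresponding I-cache lines.  The first
	 * loop handles eight 32-byte lines per iteration; the second
	 * mops up any remainder.
	 */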
	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
		cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
		cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV);
	}

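	/*
	 * Here, as in the rest of this file, a SYNC ensures the cache
	 * operations above have completed before we return.
	 */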
	__asm volatile("sync");
}

void
ls2_icache_sync_range_index(vaddr_t va, vsize_t size)
{
	vaddr_t eva;
	struct mips_cache_info * const mci = &mips_cache_info;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */

	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_picache_way_mask);
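	/*
	 * E.g. with 16KB cache ways, mci_picache_way_mask keeps only
	 * the low 14 bits of va, so the KSEG0 address built here
	 * selects the same cache sets as the original address.
	 */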
	eva = round_line(va + size);
	va = trunc_line(va);

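	/*
	 * If the range spans at least a full cache way, every index is
	 * covered anyway, so just walk one whole way from the start of
	 * KSEG0.  The *_range_index routines below use the same trick.
	 */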
	if (va + mci->mci_picache_way_size < eva) {
		va = MIPS_PHYS_TO_KSEG0(0);
		eva = va + mci->mci_picache_way_size;
	}

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
		cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
		cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV);
	}

	__asm volatile("sync");
}

void
ls2_icache_sync_all(void)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	ls2_icache_sync_range_index(0, mci->mci_picache_way_size);
}
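
/*
 * Usage sketch (not part of this file): during cache configuration the
 * port-specific code is expected to hook these routines into the
 * mips_cache_ops switch, along the lines of
 *
 *	mips_cache_ops.mco_icache_sync_all = ls2_icache_sync_all;
 *	mips_cache_ops.mco_icache_sync_range = ls2_icache_sync_range;
 *	mips_cache_ops.mco_icache_sync_range_index =
 *	    ls2_icache_sync_range_index;
 *
 * after which MI code reaches them via mips_icache_sync_range() etc.
 */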
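/*
 * Invalidate a range of the primary D-cache without writing dirty
 * lines back: any modified data in the range is discarded.  This is
 * the operation to use when memory has been updated behind the
 * cache's back, e.g. by incoming DMA.
 */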
void
ls2_pdcache_inv_range(register_t va, vsize_t size)
{
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_INV);
	}

	__asm volatile("sync");
}

void
ls2_pdcache_wbinv_range(register_t va, vsize_t size)
{
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
	}

	__asm volatile("sync");
}

void
ls2_pdcache_wb_range(register_t va, vsize_t size)
{
	/*
	 * Alas, we can't write back without invalidating: these caches
	 * have no Hit_Writeback operation, only Hit_Writeback_Invalidate,
	 * so the extra invalidation just costs the caller refills.
	 */
	ls2_pdcache_wbinv_range(va, size);
}
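
/*
 * Usage note (a sketch, not code from this file): the MI DMA sync code
 * reaches these through the mips_dcache_*_range() wrappers declared in
 * <mips/cache.h>, roughly:
 *
 *	BUS_DMASYNC_PREWRITE -> mips_dcache_wb_range()
 *	BUS_DMASYNC_PREREAD  -> mips_dcache_inv_range()
 */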

void
ls2_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	vaddr_t eva;
	struct mips_cache_info * const mci = &mips_cache_info;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_pdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	if (va + mci->mci_pdcache_way_size < eva) {
		va = MIPS_PHYS_TO_KSEG0(0);
		eva = va + mci->mci_pdcache_way_size;
	}

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
	}

	__asm volatile("sync");
}

void
ls2_pdcache_wbinv_all(void)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	ls2_pdcache_wbinv_range_index(0, mci->mci_pdcache_way_size);
}

/*
 * Cache operations for the secondary cache:
 *
 *	- 4-way set-associative
 *	- Write-back
 *	- Physically indexed, physically tagged
 */
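
/*
 * Note that each secondary routine pushes the primary D-cache first;
 * otherwise a dirty primary line could later be written back into a
 * secondary line that was just invalidated.
 */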

void
ls2_sdcache_inv_range(register_t va, vsize_t size)
{
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV);
		cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_INV);
		cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_INV);
	}

	__asm volatile("sync");
}

void
ls2_sdcache_wbinv_range(register_t va, vsize_t size)
{
	const vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
		cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_WB_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
		cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_WB_INV);
	}

	__asm volatile("sync");
}

void
ls2_sdcache_wb_range(register_t va, vsize_t size)
{
	/*
	 * Alas, we can't write back without invalidating here either;
	 * see ls2_pdcache_wb_range() above.
	 */
	ls2_sdcache_wbinv_range(va, size);
}

void
ls2_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	vaddr_t eva;
	struct mips_cache_info * const mci = &mips_cache_info;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_sdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	if (va + mci->mci_sdcache_way_size < eva) {
		va = MIPS_PHYS_TO_KSEG0(0);
		eva = va + mci->mci_sdcache_way_size;
	}

	for (; va + 8 * 32 <= eva; va += 8 * 32) {
		cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
		cache_op_ls2_8line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
		cache_op_ls2_line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
	}

	__asm volatile("sync");
}

void
ls2_sdcache_wbinv_all(void)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	ls2_sdcache_wbinv_range_index(0, mci->mci_sdcache_way_size);
}