/* $NetBSD: cache_mipsNN.c,v 1.16 2016/07/11 16:15:36 matt Exp $ */

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: cache_mipsNN.c,v 1.16 2016/07/11 16:15:36 matt Exp $");
40
41 #include <sys/param.h>
42
43 #include <mips/locore.h>
44 #include <mips/cache.h>
45 #include <mips/cache_r4k.h>
46 #include <mips/cache_mipsNN.h>
47 #include <mips/mipsNN.h>
48
49 #include <uvm/uvm_extern.h>
50
51 #define round_line(x,n) (((x) + (n) - 1) & -(n))
52 #define trunc_line(x,n) ((x) & -(n))
53
/*
 * Per-CPU cache initialization hook for MIPS32/MIPS64 (mipsNN) CPUs.
 *
 * config/config1 are the CP0 Config/Config1 register values; this
 * variant needs no extra setup, so both are deliberately ignored.
 */
void
mipsNN_cache_init(uint32_t config, uint32_t config1)
{
	/* Nothing to do; silence unused-parameter warnings. */
	(void)config;
	(void)config1;
}
59
60 void
mipsNN_picache_sync_all(void)61 mipsNN_picache_sync_all(void)
62 {
63 struct mips_cache_info * const mci = &mips_cache_info;
64
65 /*
66 * Since we're hitting the whole thing, we don't have to
67 * worry about the N different "ways".
68 */
69 mips_intern_dcache_sync_all();
70 mips_intern_icache_sync_range_index(MIPS_KSEG0_START,
71 mci->mci_picache_size);
72 }
73
74 void
mipsNN_pdcache_wbinv_all(void)75 mipsNN_pdcache_wbinv_all(void)
76 {
77 struct mips_cache_info * const mci = &mips_cache_info;
78
79 /*
80 * Since we're hitting the whole thing, we don't have to
81 * worry about the N different "ways".
82 */
83 mips_intern_pdcache_wbinv_range_index(MIPS_KSEG0_START,
84 mci->mci_pdcache_size);
85 }
86
87 void
mipsNN_sdcache_wbinv_all(void)88 mipsNN_sdcache_wbinv_all(void)
89 {
90 struct mips_cache_info * const mci = &mips_cache_info;
91
92 /*
93 * Since we're hitting the whole thing, we don't have to
94 * worry about the N different "ways".
95 */
96 mips_intern_sdcache_wbinv_range_index(MIPS_KSEG0_START,
97 mci->mci_sdcache_size);
98 }
99
/*
 * Make the primary I-cache coherent with memory for [va, va + size)
 * using address (Hit) operations.
 *
 * The D-cache is written back first so that the subsequent I-cache
 * invalidate causes up-to-date instructions to be fetched; the order
 * of these two calls is significant.
 */
void
mipsNN_picache_sync_range(register_t va, vsize_t size)
{

	mips_intern_dcache_sync_range(va, size);
	mips_intern_icache_sync_range(va, size);
}
107
108 void
mipsNN_picache_sync_range_index(vaddr_t va,vsize_t size)109 mipsNN_picache_sync_range_index(vaddr_t va, vsize_t size)
110 {
111 struct mips_cache_info * const mci = &mips_cache_info;
112 const size_t ways = mci->mci_picache_ways;
113 const size_t line_size = mci->mci_picache_line_size;
114 const size_t way_size = mci->mci_picache_way_size;
115 const size_t way_mask = way_size - 1;
116 vaddr_t eva;
117
118 /*
119 * Since we're doing Index ops, we expect to not be able
120 * to access the address we've been given. So, get the
121 * bits that determine the cache index, and make a KSEG0
122 * address out of them.
123 */
124 va = MIPS_PHYS_TO_KSEG0(va & way_mask);
125
126 eva = round_line(va + size, line_size);
127 va = trunc_line(va, line_size);
128 size = eva - va;
129
130 /*
131 * If we are going to flush more than is in a way (or the stride
132 * need for that way), we are flushing everything.
133 */
134 if (size >= way_size) {
135 mipsNN_picache_sync_all();
136 return;
137 }
138
139 for (size_t way = 0; way < ways; way++) {
140 mips_intern_dcache_sync_range_index(va, size);
141 mips_intern_icache_sync_range_index(va, size);
142 va += way_size;
143 eva += way_size;
144 }
145 }
146
147 void
mipsNN_pdcache_wbinv_range_index(vaddr_t va,vsize_t size)148 mipsNN_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
149 {
150 struct mips_cache_info * const mci = &mips_cache_info;
151 const size_t ways = mci->mci_pdcache_ways;
152 const size_t line_size = mci->mci_pdcache_line_size;
153 const vaddr_t way_size = mci->mci_pdcache_way_size;
154 const vaddr_t way_mask = way_size - 1;
155 vaddr_t eva;
156
157 /*
158 * Since we're doing Index ops, we expect to not be able
159 * to access the address we've been given. So, get the
160 * bits that determine the cache index, and make a KSEG0
161 * address out of them.
162 */
163 va = MIPS_PHYS_TO_KSEG0(va & way_mask);
164 eva = round_line(va + size, line_size);
165 va = trunc_line(va, line_size);
166 size = eva - va;
167
168 /*
169 * If we are going to flush more than is in a way, we are flushing
170 * everything.
171 */
172 if (size >= way_size) {
173 mips_intern_pdcache_wbinv_range_index(MIPS_KSEG0_START,
174 mci->mci_pdcache_size);
175 return;
176 }
177
178 /*
179 * Invalidate each way. If the address range wraps past the end of
180 * the way, we will be invalidating in two ways but eventually things
181 * work out since the last way will wrap into the first way.
182 */
183 for (size_t way = 0; way < ways; way++) {
184 mips_intern_pdcache_wbinv_range_index(va, size);
185 va += way_size;
186 eva += way_size;
187 }
188 }
189
190 void
mipsNN_sdcache_wbinv_range_index(vaddr_t va,vsize_t size)191 mipsNN_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
192 {
193 struct mips_cache_info * const mci = &mips_cache_info;
194 const size_t ways = mci->mci_sdcache_ways;
195 const size_t line_size = mci->mci_sdcache_line_size;
196 const vaddr_t way_size = mci->mci_sdcache_way_size;
197 const vaddr_t way_mask = way_size - 1;
198 vaddr_t eva;
199
200 /*
201 * Since we're doing Index ops, we expect to not be able
202 * to access the address we've been given. So, get the
203 * bits that determine the cache index, and make a KSEG0
204 * address out of them.
205 */
206 va = MIPS_PHYS_TO_KSEG0(va & way_mask);
207 eva = round_line(va + size, line_size);
208 va = trunc_line(va, line_size);
209 size = eva - va;
210
211 /*
212 * If we are going to flush more than is in a way, we are flushing
213 * everything.
214 */
215 if (size >= way_size) {
216 mips_intern_sdcache_wbinv_range_index(MIPS_KSEG0_START,
217 mci->mci_sdcache_size);
218 return;
219 }
220
221 /*
222 * Invalidate each way. If the address range wraps past the end of
223 * the way, we will be invalidating in two ways but eventually things
224 * work out since the last way will wrap into the first way.
225 */
226 for (size_t way = 0; way < ways; way++) {
227 mips_intern_sdcache_wbinv_range_index(va, size);
228 va += way_size;
229 eva += way_size;
230 }
231 }
232