1 /*
2 * Copyright (c) 1992, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This software was developed by the Computer Systems Engineering group
6 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
7 * contributed to Berkeley.
8 *
9 * All advertising materials mentioning features or use of this software
10 * must display the following acknowledgement:
11 * This product includes software developed by the University of
12 * California, Lawrence Berkeley Laboratory.
13 *
14 * %sccs.include.redist.c%
15 *
16 * @(#)cache.c 8.2 (Berkeley) 10/30/93
17 *
18 * from: $Header: cache.c,v 1.12 93/10/31 05:27:47 torek Exp $ (LBL)
19 */
20
21 /*
22 * Cache routines.
23 *
24 * TODO:
25 * - rework range flush
26 */
27
28 #include <sys/param.h>
29
30 #include <machine/ctlreg.h>
31 #include <machine/pte.h>
32
33 #include <sparc/sparc/asm.h>
34 #include <sparc/sparc/cache.h>
35
/* Type of virtual-address cache present — presumably set during bootstrap; TODO confirm against machdep code. */
enum vactype vactype;
/* Counters (cs_ncxflush, cs_nsgflush, cs_npgflush, cs_nraflush) bumped by the flush routines below. */
struct cachestats cachestats;
38
39 /*
40 * Enable the cache.
41 * We need to clear out the valid bits first.
42 */
43 void
cache_enable()44 cache_enable()
45 {
46 register u_int i, lim, ls, ts;
47
48 ls = cacheinfo.c_linesize;
49 ts = cacheinfo.c_totalsize;
50 for (i = AC_CACHETAGS, lim = i + ts; i < lim; i += ls)
51 sta(i, ASI_CONTROL, 0);
52
53 stba(AC_SYSENABLE, ASI_CONTROL,
54 lduba(AC_SYSENABLE, ASI_CONTROL) | SYSEN_CACHE);
55 cacheinfo.c_enabled = 1;
56
57 printf("%d byte (%d/line) write-through %cw flush cache enabled\n",
58 ts, ls, cacheinfo.c_hwflush ? 'h' : 's');
59 }
60
61
62 /*
63 * Flush the current context from the cache.
64 *
65 * This is done by writing to each cache line in the `flush context'
66 * address space (or, for hardware flush, once to each page in the
67 * hardware flush space, for all cache pages).
68 */
69 void
cache_flush_context()70 cache_flush_context()
71 {
72 register char *p;
73 register int i, ls;
74
75 cachestats.cs_ncxflush++;
76 p = (char *)0; /* addresses 0..cacheinfo.c_totalsize will do fine */
77 if (cacheinfo.c_hwflush) {
78 ls = NBPG;
79 i = cacheinfo.c_totalsize >> PGSHIFT;
80 for (; --i >= 0; p += ls)
81 sta(p, ASI_HWFLUSHCTX, 0);
82 } else {
83 ls = cacheinfo.c_linesize;
84 i = cacheinfo.c_totalsize >> cacheinfo.c_l2linesize;
85 for (; --i >= 0; p += ls)
86 sta(p, ASI_FLUSHCTX, 0);
87 }
88 }
89
90 /*
91 * Flush the given virtual segment from the cache.
92 *
93 * This is also done by writing to each cache line, except that
94 * now the addresses must include the virtual segment number, and
95 * we use the `flush segment' space.
96 *
97 * Again, for hardware, we just write each page (in hw-flush space).
98 */
99 void
cache_flush_segment(vseg)100 cache_flush_segment(vseg)
101 register int vseg;
102 {
103 register int i, ls;
104 register char *p;
105
106 cachestats.cs_nsgflush++;
107 p = (char *)VSTOVA(vseg); /* seg..seg+sz rather than 0..sz */
108 if (cacheinfo.c_hwflush) {
109 ls = NBPG;
110 i = cacheinfo.c_totalsize >> PGSHIFT;
111 for (; --i >= 0; p += ls)
112 sta(p, ASI_HWFLUSHSEG, 0);
113 } else {
114 ls = cacheinfo.c_linesize;
115 i = cacheinfo.c_totalsize >> cacheinfo.c_l2linesize;
116 for (; --i >= 0; p += ls)
117 sta(p, ASI_FLUSHSEG, 0);
118 }
119 }
120
121 /*
122 * Flush the given virtual page from the cache.
123 * (va is the actual address, and must be aligned on a page boundary.)
124 * Again we write to each cache line.
125 */
126 void
cache_flush_page(va)127 cache_flush_page(va)
128 int va;
129 {
130 register int i, ls;
131 register char *p;
132
133 cachestats.cs_npgflush++;
134 p = (char *)va;
135 if (cacheinfo.c_hwflush)
136 sta(p, ASI_HWFLUSHPG, 0);
137 else {
138 ls = cacheinfo.c_linesize;
139 i = NBPG >> cacheinfo.c_l2linesize;
140 for (; --i >= 0; p += ls)
141 sta(p, ASI_FLUSHPG, 0);
142 }
143 }
144
/*
 * Flush a range of virtual addresses (in the current context).
 * The first byte is at (base&~PGOFSET) and the last one is just
 * before byte (base+len).
 *
 * We choose the best of (context,segment,page) here.
 */
void
cache_flush(base, len)
	caddr_t base;
	register u_int len;
{
	register int i, ls, baseoff;
	register char *p;

	/*
	 * Figure out how much must be flushed.
	 *
	 * If we need to do 16 pages, we can do a segment in the same
	 * number of loop iterations.  We can also do the context.  If
	 * we would need to do two segments, do the whole context.
	 * This might not be ideal (e.g., fsck likes to do 65536-byte
	 * reads, which might not necessarily be aligned).
	 *
	 * We could try to be sneaky here and use the direct mapping
	 * to avoid flushing things `below' the start and `above' the
	 * ending address (rather than rounding to whole pages and
	 * segments), but I did not want to debug that now and it is
	 * not clear it would help much.
	 *
	 * (XXX the magic number 16 is now wrong, must review policy)
	 */
	baseoff = (int)base & PGOFSET;
	/* Number of whole pages spanned by [base, base+len). */
	i = (baseoff + len + PGOFSET) >> PGSHIFT;

	cachestats.cs_nraflush++;
#ifdef notyet
	cachestats.cs_ra[min(i, MAXCACHERANGE)]++;
#endif

	if (i <= 15) {
		/* cache_flush_page, for i pages */
		/*
		 * baseoff holds exactly base's in-page offset bits, so
		 * base & ~baseoff == base & ~PGOFSET: round down to the
		 * enclosing page boundary.
		 */
		p = (char *)((int)base & ~baseoff);
		if (cacheinfo.c_hwflush) {
			for (; --i >= 0; p += NBPG)
				sta(p, ASI_HWFLUSHPG, 0);
		} else {
			ls = cacheinfo.c_linesize;
			/* Turn the page count into a line count. */
			i <<= PGSHIFT - cacheinfo.c_l2linesize;
			for (; --i >= 0; p += ls)
				sta(p, ASI_FLUSHPG, 0);
		}
		return;
	}
	/* Too many pages: count segments, then flush segment or context. */
	baseoff = (u_int)base & SGOFSET;
	i = (baseoff + len + SGOFSET) >> SGSHIFT;
	if (i == 1)
		cache_flush_segment(VA_VSEG(base));
	else
		cache_flush_context();
}
206