/*
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)cache.c	7.6 (Berkeley) 05/05/93
 *
 * from: $Header: cache.c,v 1.9 93/05/05 09:16:17 torek Exp $ (LBL)
 */

/*
 * Cache routines.
 *
 * TODO:
 *	- rework range flush
 */

#include <sys/param.h>

#include <machine/ctlreg.h>
#include <machine/pte.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/cache.h>

enum vactype vactype;
struct cachestats cachestats;

/*
 * Enable the cache.
 * We need to clear out the valid bits first.
 */
void
cache_enable()
{
	register int i, lim, ls;

	i = AC_CACHETAGS;
	lim = i + cacheinfo.c_totalsize;
	ls = cacheinfo.c_linesize;
	for (; i < lim; i += ls)
		sta(i, ASI_CONTROL, 0);

	stba(AC_SYSENABLE, ASI_CONTROL,
	    lduba(AC_SYSENABLE, ASI_CONTROL) | SYSEN_CACHE);
	cacheinfo.c_enabled = 1;
	printf("cache enabled\n");
}


/*
 * Flush the current context from the cache.
 *
 * This is done by writing to each cache line in the `flush context'
 * address space (or, for hardware flush, once to each page in the
 * hardware flush space, for all cache pages).
 */
void
cache_flush_context()
{
	register char *p;
	register int i, ls;

	cachestats.cs_ncxflush++;
	p = (char *)0;	/* addresses 0..cacheinfo.c_totalsize will do fine */
	if (cacheinfo.c_hwflush) {
		ls = NBPG;
		i = cacheinfo.c_totalsize >> PGSHIFT;
		for (; --i >= 0; p += ls)
			sta(p, ASI_HWFLUSHCTX, 0);
	} else {
		ls = cacheinfo.c_linesize;
		i = cacheinfo.c_totalsize >> cacheinfo.c_l2linesize;
		for (; --i >= 0; p += ls)
			sta(p, ASI_FLUSHCTX, 0);
	}
}

/*
 * Flush the given virtual segment from the cache.
 *
 * This is also done by writing to each cache line, except that
 * now the addresses must include the virtual segment number, and
 * we use the `flush segment' space.
 *
 * Again, for hardware, we just write each page (in hw-flush space).
 */
void
cache_flush_segment(vseg)
	register int vseg;
{
	register int i, ls;
	register char *p;

	cachestats.cs_nsgflush++;
	p = (char *)VSTOVA(vseg);	/* seg..seg+sz rather than 0..sz */
	if (cacheinfo.c_hwflush) {
		ls = NBPG;
		i = cacheinfo.c_totalsize >> PGSHIFT;
		for (; --i >= 0; p += ls)
			sta(p, ASI_HWFLUSHSEG, 0);
	} else {
		ls = cacheinfo.c_linesize;
		i = cacheinfo.c_totalsize >> cacheinfo.c_l2linesize;
		for (; --i >= 0; p += ls)
			sta(p, ASI_FLUSHSEG, 0);
	}
}
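
/*
 * The software-flush loops above and below all share one shape:
 * walk the flush region in steps of `c_linesize', doing one sta()
 * per cache line in the appropriate flush space.  The factored-out
 * form is sketched here for illustration only: the real routines
 * must open-code each case because the ASI is an immediate field
 * in the SPARC `sta' instruction, so the asm.h sta() macro cannot
 * take a run-time `asi' argument the way this sketch pretends.
 * `cache_flush_lines' is a made-up name, not part of this file.
 */
#ifdef notdef
static void
cache_flush_lines(p, nbytes, asi)
	register char *p;
	register int nbytes, asi;	/* asi: hypothetical run-time ASI */
{
	register int ls = cacheinfo.c_linesize;
	register int i = nbytes >> cacheinfo.c_l2linesize;

	for (; --i >= 0; p += ls)
		sta(p, asi, 0);	/* would not assemble: asi must be constant */
}
#endif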

/*
 * Flush the given virtual page from the cache.
 * (va is the actual address, and must be aligned on a page boundary.)
 * Again we write to each cache line.
 */
void
cache_flush_page(va)
	int va;
{
	register int i, ls;
	register char *p;

	cachestats.cs_npgflush++;
	p = (char *)va;
	if (cacheinfo.c_hwflush)
		sta(p, ASI_HWFLUSHPG, 0);
	else {
		ls = cacheinfo.c_linesize;
		i = NBPG >> cacheinfo.c_l2linesize;
		for (; --i >= 0; p += ls)
			sta(p, ASI_FLUSHPG, 0);
	}
}

/*
 * Flush a range of virtual addresses (in the current context).
 * The first byte is at (base&~PGOFSET) and the last one is just
 * before byte (base+len).
 *
 * We choose the best of (context,segment,page) here.
 */
void
cache_flush(base, len)
	caddr_t base;
	register u_int len;
{
	register int i, ls, baseoff;
	register char *p;

	/*
	 * Figure out how much must be flushed.
	 *
	 * If we need to do 16 pages, we can do a segment in the same
	 * number of loop iterations.  We can also do the context.  If
	 * we would need to do two segments, do the whole context.
	 * This might not be ideal (e.g., fsck likes to do 65536-byte
	 * reads, which might not necessarily be aligned).
	 *
	 * We could try to be sneaky here and use the direct mapping
	 * to avoid flushing things `below' the start and `above' the
	 * ending address (rather than rounding to whole pages and
	 * segments), but I did not want to debug that now and it is
	 * not clear it would help much.
	 *
	 * (XXX the magic number 16 is now wrong, must review policy)
	 */
	baseoff = (int)base & PGOFSET;
	i = (baseoff + len + PGOFSET) >> PGSHIFT;

	cachestats.cs_nraflush++;
#ifdef notyet
	cachestats.cs_ra[min(i, MAXCACHERANGE)]++;
#endif

	if (i <= 15) {
		/* cache_flush_page, for i pages */
		p = (char *)((int)base & ~baseoff);
		if (cacheinfo.c_hwflush) {
			for (; --i >= 0; p += NBPG)
				sta(p, ASI_HWFLUSHPG, 0);
		} else {
			ls = cacheinfo.c_linesize;
			i <<= PGSHIFT - cacheinfo.c_l2linesize;
			for (; --i >= 0; p += ls)
				sta(p, ASI_FLUSHPG, 0);
		}
		return;
	}
	baseoff = (u_int)base & SGOFSET;
	i = (baseoff + len + SGOFSET) >> SGSHIFT;
	if (i == 1)
		cache_flush_segment(VA_VSEG(base));
	else
		cache_flush_context();
}
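
/*
 * Illustrative usage sketch only (a hypothetical caller, not part
 * of this file): because this is a virtually addressed cache, the
 * pmap layer must flush any cached lines for a mapping *before*
 * changing or removing that mapping; afterwards the stale lines
 * would no longer be reachable through these flush spaces.  The
 * names `example_remove_range' and `remove_ptes' are made up for
 * illustration.
 */
#ifdef notdef
void
example_remove_range(va, len)
	caddr_t va;
	register u_int len;
{
	if (cacheinfo.c_enabled)
		cache_flush(va, len);	/* picks page/segment/context flush */
	remove_ptes(va, len);		/* hypothetical: invalidate the PTEs */
}
#endif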