/*
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)cache.c	7.5 (Berkeley) 04/27/93
 *
 * from: $Header: cache.c,v 1.8 93/04/27 14:33:36 torek Exp $ (LBL)
 */

/*
 * Cache routines.
 *
 * The cache here is flushed by writing to special "flush" alternate
 * address spaces (ASI_FLUSHCTX / ASI_FLUSHSEG / ASI_FLUSHPG), one
 * store per cache line; geometry (total size, line size, log2 line
 * size, hardware-flush capability) comes from the global `cacheinfo'
 * structure, which is presumably filled in elsewhere during autoconf
 * (declared in sparc/sparc/cache.h — not visible here).
 *
 * TODO:
 *	- fill in hardware assist for context and segment flush
 *	- rework range flush
 */

#include <sys/param.h>

#include <machine/ctlreg.h>
#include <machine/pte.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/cache.h>

enum vactype vactype;		/* cache flavor; set elsewhere — see cache.h */
struct cachestats cachestats;	/* counters bumped by the flush routines below */

/*
 * Enable the cache.
 * We need to clear out the valid bits first.
 */
void
cache_enable()
{
	register int i, lim, ls;

	/*
	 * Write a zero tag for every cache line (stepping by line size
	 * through the AC_CACHETAGS control space) so no line is bogusly
	 * marked valid when the cache comes on.
	 */
	i = AC_CACHETAGS;
	lim = i + cacheinfo.c_totalsize;
	ls = cacheinfo.c_linesize;
	for (; i < lim; i += ls)
		sta(i, ASI_CONTROL, 0);

	/* Read-modify-write the system enable register to set the cache bit. */
	stba(AC_SYSENABLE, ASI_CONTROL,
	    lduba(AC_SYSENABLE, ASI_CONTROL) | SYSEN_CACHE);
	cacheinfo.c_enabled = 1;
	printf("cache enabled\n");
}


/*
 * Flush the current context from the cache.
 *
 * This is done by writing to each cache line in the `flush context'
 * address space.
 */
void
cache_flush_context()
{
	register int i, ls;
	register char *p;

	cachestats.cs_ncxflush++;
	ls = cacheinfo.c_linesize;
	/* i = number of cache lines: totalsize / linesize (linesize = 1 << l2linesize) */
	i = cacheinfo.c_totalsize >> cacheinfo.c_l2linesize;
	for (p = 0; --i >= 0; p += ls)
		sta(p, ASI_FLUSHCTX, 0);
}

/*
 * Flush the given virtual segment from the cache.
 *
 * This is also done by writing to each cache line, except that
 * now the addresses must include the virtual segment number, and
 * we use the `flush segment' space.
 */
void
cache_flush_segment(vseg)
	register int vseg;
{
	register int i, ls;
	register char *p;

	cachestats.cs_nsgflush++;
	ls = cacheinfo.c_linesize;
	/* one store per cache line, starting at the segment's base VA */
	i = cacheinfo.c_totalsize >> cacheinfo.c_l2linesize;
	for (p = (char *)VSTOVA(vseg); --i >= 0; p += ls)
		sta(p, ASI_FLUSHSEG, 0);
}

/*
 * Flush the given virtual page from the cache.
 * (va is the actual address, and must be aligned on a page boundary.)
 * Again we write to each cache line.
 */
void
cache_flush_page(va)
	int va;
{
	register int i, ls;
	register char *p;

	cachestats.cs_npgflush++;
	p = (char *)va;
	if (cacheinfo.c_hwflush)
		/* hardware assist: a single store flushes the whole page */
		sta(p, ASI_HWFLUSHPG, 0);
	else {
		/* otherwise, one store per cache line within the page */
		ls = cacheinfo.c_linesize;
		i = NBPG >> cacheinfo.c_l2linesize;
		for (; --i >= 0; p += ls)
			sta(p, ASI_FLUSHPG, 0);
	}
}

/*
 * Flush a range of virtual addresses (in the current context).
 * The first byte is at (base&~PGOFSET) and the last one is just
 * before byte (base+len).
 *
 * We choose the best of (context,segment,page) here.
 */
void
cache_flush(base, len)
	caddr_t base;
	register u_int len;
{
	register int i, ls, baseoff;
	register char *p;

	/*
	 * Figure out how much must be flushed.
	 *
	 * If we need to do 16 pages, we can do a segment in the same
	 * number of loop iterations.  We can also do the context.  If
	 * we would need to do two segments, do the whole context.
	 * This might not be ideal (e.g., fsck likes to do 65536-byte
	 * reads, which might not necessarily be aligned).
	 *
	 * We could try to be sneaky here and use the direct mapping
	 * to avoid flushing things `below' the start and `above' the
	 * ending address (rather than rounding to whole pages and
	 * segments), but I did not want to debug that now and it is
	 * not clear it would help much.
	 *
	 * (XXX the magic number 16 is now wrong, must review policy)
	 */
	baseoff = (int)base & PGOFSET;
	/* i = number of whole pages spanned by [base, base+len) */
	i = (baseoff + len + PGOFSET) >> PGSHIFT;

	cachestats.cs_nraflush++;
#ifdef notyet
	cachestats.cs_ra[min(i, MAXCACHERANGE)]++;
#endif

	ls = cacheinfo.c_linesize;
	if (i <= 15) {
		/* cache_flush_page, for i pages */
		/* base & ~baseoff == base & ~PGOFSET: round down to page */
		p = (char *)((int)base & ~baseoff);
		if (cacheinfo.c_hwflush) {
			for (; --i >= 0; p += NBPG)
				sta(p, ASI_HWFLUSHPG, 0);
		} else {
			/* convert page count to cache-line count */
			i <<= PGSHIFT - cacheinfo.c_l2linesize;
			for (; --i >= 0; p += ls)
				sta(p, ASI_FLUSHPG, 0);
		}
		return;
	}
	baseoff = (u_int)base & SGOFSET;
	/* i = number of whole segments spanned by the range */
	i = (baseoff + len + SGOFSET) >> SGSHIFT;
	if (i == 1) {
		/* cache_flush_segment */
		/* base & ~baseoff == base & ~SGOFSET: round down to segment */
		p = (char *)((int)base & ~baseoff);
		i = cacheinfo.c_totalsize >> cacheinfo.c_l2linesize;
		for (; --i >= 0; p += ls)
			sta(p, ASI_FLUSHSEG, 0);
		return;
	}
	/* cache_flush_context */
	i = cacheinfo.c_totalsize >> cacheinfo.c_l2linesize;
	for (p = 0; --i >= 0; p += ls)
		sta(p, ASI_FLUSHCTX, 0);
}