/*	$NetBSD: cache.h,v 1.25 2002/01/25 17:40:45 pk Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cache.h	8.1 (Berkeley) 6/11/93
 */

#ifndef SPARC_CACHE_H
#define SPARC_CACHE_H

#if defined(_KERNEL_OPT)
#include "opt_sparc_arch.h"
#endif

/*
 * Sun-4 and Sun-4c virtual address cache.
 *
 * Sun-4 virtual caches come in two flavors, write-through (Sun-4c)
 * and write-back (Sun-4).  The write-back caches are much faster
 * but require a bit more care.
 *
 */
enum vactype { VAC_UNKNOWN, VAC_NONE, VAC_WRITETHROUGH, VAC_WRITEBACK };

/*
 * Cache tags can be written in control space, and must be set to 0
 * (or invalid anyway) before turning on the cache.  The tags are
 * addressed as an array of 32-bit structures of the form:
 *
 *	struct cache_tag {
 *		u_int	:7,		(unused; must be zero)
 *			ct_cid:3,	(context ID)
 *			ct_w:1,		(write flag from PTE)
 *			ct_s:1,		(supervisor flag from PTE)
 *			ct_v:1,		(set => cache entry is valid)
 *			:3,		(unused; must be zero)
 *			ct_tid:14,	(cache tag ID)
 *			:2;		(unused; must be zero)
 *	};
 *
 * (The SS2 has 16 MMU contexts, which makes `ct_cid' one bit wider.)
 *
 * The SPARCstation 1 cache sees virtual addresses as:
 *
 *	struct cache_va {
 *		u_int	:2,		(unused; probably copies of va_tid<13>)
 *			cva_tid:14,	(tag ID)
 *			cva_line:12,	(cache line number)
 *			cva_byte:4;	(byte in cache line)
 *	};
 *
 * (The SS2 cache is similar but has half as many lines, each twice as long.)
 *
 * Note that, because the 12-bit line ID is `wider' than the page offset,
 * it is possible to have one page map to two different cache lines.
 * This can happen whenever two different physical pages have the same bits
 * in the part of the virtual address that overlaps the cache line ID, i.e.,
 * bits <15:12>.  In order to prevent cache duplication, we have to
 * make sure that no one page has more than one virtual address where
 * (va1 & 0xf000) != (va2 & 0xf000).  (The cache hardware turns off ct_v
 * when a cache miss occurs on a write, i.e., if va1 is in the cache and
 * va2 is not, and you write to va2, va1 goes out of the cache.  If va1
 * is in the cache and va2 is not, reading va2 also causes va1 to become
 * uncached, and the [same] data is then read from main memory into the
 * cache.)
 *
 * The other alternative, of course, is to disable caching of aliased
 * pages.  (In a few cases this might be faster anyway, but we do it
 * only when forced.)
 *
 * The Sun4, since it has an 8K pagesize instead of 4K, needs to check
 * bits that are one position higher.
 */

/* Some more well-known values: */

#define	CACHE_ALIAS_DIST_SUN4	0x20000
#define	CACHE_ALIAS_DIST_SUN4C	0x10000

#define	CACHE_ALIAS_BITS_SUN4	0x1e000
#define	CACHE_ALIAS_BITS_SUN4C	0xf000

#define	CACHE_ALIAS_DIST_HS128k	0x20000
#define	CACHE_ALIAS_BITS_HS128k	0x1f000
#define	CACHE_ALIAS_DIST_HS256k	0x40000
#define	CACHE_ALIAS_BITS_HS256k	0x3f000

/*
 * Assuming a tag format where the least significant bits are the byte offset
 * into the cache line, and the next-most significant bits are the line id,
 * we can calculate the appropriate aliasing constants.  We also assume that
 * the linesize and total cache size are powers of 2.
 */
#define	GUESS_CACHE_ALIAS_BITS	((cpuinfo.cacheinfo.c_totalsize - 1) & ~PGOFSET)
#define	GUESS_CACHE_ALIAS_DIST	(cpuinfo.cacheinfo.c_totalsize)

extern int cache_alias_dist;	/* run-time value of CACHE_ALIAS_DIST */
extern int cache_alias_bits;	/* run-time value of CACHE_ALIAS_BITS */
extern u_long dvma_cachealign;

/* Optimize cache alias macros on single architecture kernels */
#if defined(SUN4) && !defined(SUN4C) && !defined(SUN4M)
#define	CACHE_ALIAS_DIST	CACHE_ALIAS_DIST_SUN4
#define	CACHE_ALIAS_BITS	CACHE_ALIAS_BITS_SUN4
#elif !defined(SUN4) && defined(SUN4C) && !defined(SUN4M)
#define	CACHE_ALIAS_DIST	CACHE_ALIAS_DIST_SUN4C
#define	CACHE_ALIAS_BITS	CACHE_ALIAS_BITS_SUN4C
#else
#define	CACHE_ALIAS_DIST	cache_alias_dist
#define	CACHE_ALIAS_BITS	cache_alias_bits
#endif

/*
 * True iff a1 and a2 are `bad' aliases (will cause cache duplication).
 */
#define	BADALIAS(a1, a2) (((int)(a1) ^ (int)(a2)) & CACHE_ALIAS_BITS)

/*
 * Routines for dealing with the cache.
 */
void	sun4_cache_enable __P((void));		/* turn it on */
void	ms1_cache_enable __P((void));		/* turn it on */
void	viking_cache_enable __P((void));	/* turn it on */
void	hypersparc_cache_enable __P((void));	/* turn it on */
void	swift_cache_enable __P((void));		/* turn it on */
void	cypress_cache_enable __P((void));	/* turn it on */
void	turbosparc_cache_enable __P((void));	/* turn it on */

void	sun4_vcache_flush_context __P((void));	/* flush current context */
void	sun4_vcache_flush_region __P((int));	/* flush region in cur ctx */
void	sun4_vcache_flush_segment __P((int, int));/* flush seg in cur ctx */
void	sun4_vcache_flush_page __P((int va));	/* flush page in cur ctx */
void	sun4_vcache_flush_page_hw __P((int va));/* flush page in cur ctx */
void	sun4_cache_flush __P((caddr_t, u_int));	/* flush region */

void	srmmu_vcache_flush_context __P((void));	/* flush current context */
void	srmmu_vcache_flush_region __P((int));	/* flush region in cur ctx */
void	srmmu_vcache_flush_segment __P((int, int));/* flush seg in cur ctx */
void	srmmu_vcache_flush_page __P((int va));	/* flush page in cur ctx */
void	srmmu_cache_flush __P((caddr_t, u_int));/* flush region */

void	ms1_cache_flush __P((caddr_t, u_int));
void	viking_cache_flush __P((caddr_t, u_int));
void	viking_pcache_flush_page __P((paddr_t, int));
void	srmmu_pcache_flush_line __P((int, int));
void	hypersparc_pure_vcache_flush __P((void));

void	ms1_cache_flush_all __P((void));
void	srmmu_cache_flush_all __P((void));
void	cypress_cache_flush_all __P((void));
void	hypersparc_cache_flush_all __P((void));

extern void sparc_noop __P((void));

/* No-op stand-ins for machines that need no flush of the given kind */
#define	noop_vcache_flush_context \
	(void (*)__P((void))) sparc_noop
#define	noop_vcache_flush_region \
	(void (*)__P((int))) sparc_noop
#define	noop_vcache_flush_segment \
	(void (*)__P((int,int))) sparc_noop
#define	noop_vcache_flush_page \
	(void (*)__P((int))) sparc_noop
#define	noop_cache_flush \
	(void (*)__P((caddr_t, u_int))) sparc_noop
#define	noop_pcache_flush_page \
	(void (*)__P((paddr_t, int))) sparc_noop
#define	noop_pure_vcache_flush \
	(void (*)__P((void))) sparc_noop
#define	noop_cache_flush_all \
	(void (*)__P((void))) sparc_noop

/*
 * The SMP versions of the cache flush functions.  These functions
 * send a "cache flush" message to each processor.
 */
void	smp_vcache_flush_context __P((void));	/* flush current context */
void	smp_vcache_flush_region __P((int));	/* flush region in cur ctx */
void	smp_vcache_flush_segment __P((int, int));/* flush seg in cur ctx */
void	smp_vcache_flush_page __P((int va));	/* flush page in cur ctx */
void	smp_cache_flush __P((caddr_t, u_int));	/* flush region */


/* Dispatch through the per-CPU function pointers in cpuinfo */
#define cache_flush_page(va)		cpuinfo.vcache_flush_page(va)
#define cache_flush_segment(vr,vs)	cpuinfo.vcache_flush_segment(vr,vs)
#define cache_flush_region(vr)		cpuinfo.vcache_flush_region(vr)
#define cache_flush_context()		cpuinfo.vcache_flush_context()

#define pcache_flush_page(pa,flag)	cpuinfo.pcache_flush_page(pa,flag)

/*
 * Cache control information.
 */
struct cacheinfo {
	int	c_totalsize;		/* total size, in bytes */
					/* if split, MAX(icache,dcache) */
	int	c_enabled;		/* true => cache is enabled */
	int	c_hwflush;		/* true => have hardware flush */
	int	c_linesize;		/* line size, in bytes */
	int	c_l2linesize;		/* log2(linesize) */
	int	c_nlines;		/* number of cache lines */
	int	c_physical;		/* true => cache has physical
					   address tags */
	int	c_associativity;	/* # of "buckets" in cache line */
	int	c_split;		/* true => cache is split */

	int	ic_totalsize;		/* instruction cache */
	int	ic_enabled;
	int	ic_linesize;
	int	ic_l2linesize;
	int	ic_nlines;
	int	ic_associativity;

	int	dc_totalsize;		/* data cache */
	int	dc_enabled;
	int	dc_linesize;
	int	dc_l2linesize;
	int	dc_nlines;
	int	dc_associativity;

	int	ec_totalsize;		/* external cache info */
	int	ec_enabled;
	int	ec_linesize;
	int	ec_l2linesize;
	int	ec_nlines;
	int	ec_associativity;

	enum vactype	c_vactype;
};

#define CACHEINFO cpuinfo.cacheinfo

/*
 * Cache control statistics.
 */
struct cachestats {
	int	cs_npgflush;		/* # page flushes */
	int	cs_nsgflush;		/* # seg flushes */
	int	cs_nrgflush;		/* # region flushes */
	int	cs_ncxflush;		/* # context flushes */
	int	cs_nraflush;		/* # range flushes */
#ifdef notyet
	int	cs_ra[65];		/* pages/range */
#endif
};
#endif /* SPARC_CACHE_H */