1 /*	$NetBSD: cache.h,v 1.35 2007/03/04 06:00:45 christos Exp $ */
2 
3 /*
4  * Copyright (c) 1996
5  * 	The President and Fellows of Harvard College. All rights reserved.
6  * Copyright (c) 1992, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  *
9  * This software was developed by the Computer Systems Engineering group
10  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11  * contributed to Berkeley.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by Aaron Brown and
24  *	Harvard University.
25  *	This product includes software developed by the University of
26  *	California, Berkeley and its contributors.
27  * 4. Neither the name of the University nor the names of its contributors
28  *    may be used to endorse or promote products derived from this software
29  *    without specific prior written permission.
30  *
31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  *
43  *	@(#)cache.h	8.1 (Berkeley) 6/11/93
44  */
45 
46 #ifndef SPARC_CACHE_H
47 #define SPARC_CACHE_H
48 
#if defined(_KERNEL_OPT)
/*
 * Kernel-config-generated header; presumably supplies the SUN4/SUN4C/
 * SUN4M/SUN4D architecture options tested below — only available (and
 * meaningful) in kernel builds.
 */
#include "opt_sparc_arch.h"
#endif
52 
53 /*
54  * Sun-4 and Sun-4c virtual address cache.
55  *
56  * Sun-4 virtual caches come in two flavors, write-through (Sun-4c)
57  * and write-back (Sun-4).  The write-back caches are much faster
58  * but require a bit more care.
59  *
60  */
61 enum vactype { VAC_UNKNOWN, VAC_NONE, VAC_WRITETHROUGH, VAC_WRITEBACK };
62 
63 /*
64  * Cache tags can be written in control space, and must be set to 0
65  * (or invalid anyway) before turning on the cache.  The tags are
66  * addressed as an array of 32-bit structures of the form:
67  *
68  *	struct cache_tag {
69  *		u_int	:7,		(unused; must be zero)
70  *			ct_cid:3,	(context ID)
71  *			ct_w:1,		(write flag from PTE)
72  *			ct_s:1,		(supervisor flag from PTE)
73  *			ct_v:1,		(set => cache entry is valid)
74  *			:3,		(unused; must be zero)
75  *			ct_tid:14,	(cache tag ID)
76  *			:2;		(unused; must be zero)
77  *	};
78  *
79  * (The SS2 has 16 MMU contexts, which makes `ct_cid' one bit wider.)
80  *
81  * The SPARCstation 1 cache sees virtual addresses as:
82  *
83  *	struct cache_va {
84  *		u_int	:2,		(unused; probably copies of va_tid<13>)
85  *			cva_tid:14,	(tag ID)
86  *			cva_line:12,	(cache line number)
87  *			cva_byte:4;	(byte in cache line)
88  *	};
89  *
90  * (The SS2 cache is similar but has half as many lines, each twice as long.)
91  *
92  * Note that, because the 12-bit line ID is `wider' than the page offset,
93  * it is possible to have one page map to two different cache lines.
94  * This can happen whenever two different physical pages have the same bits
95  * in the part of the virtual address that overlaps the cache line ID, i.e.,
 * bits <15:12>.  In order to prevent cache duplication, we have to
 * make sure that no one page is mapped at two virtual addresses va1, va2
 * for which (va1 & 0xf000) != (va2 & 0xf000).  (The cache hardware turns off ct_v
99  * when a cache miss occurs on a write, i.e., if va1 is in the cache and
100  * va2 is not, and you write to va2, va1 goes out of the cache.  If va1
101  * is in the cache and va2 is not, reading va2 also causes va1 to become
102  * uncached, and the [same] data is then read from main memory into the
103  * cache.)
104  *
105  * The other alternative, of course, is to disable caching of aliased
106  * pages.  (In a few cases this might be faster anyway, but we do it
107  * only when forced.)
108  *
109  * The Sun4, since it has an 8K pagesize instead of 4K, needs to check
110  * bits that are one position higher.
111  */
112 
/* Some more well-known values: */

/*
 * *_DIST is the cache's total span in bytes (cf. GUESS_CACHE_ALIAS_DIST
 * below, which uses c_totalsize); *_BITS are the virtual-address bits
 * above the page offset that select a cache line (cf. the bits <15:12>
 * discussion above).  The sun4 values sit one bit higher than the sun4c
 * ones because the sun4 has an 8K page size instead of 4K.
 */
#define	CACHE_ALIAS_DIST_SUN4	0x20000
#define	CACHE_ALIAS_DIST_SUN4C	0x10000

#define	CACHE_ALIAS_BITS_SUN4	0x1e000
#define	CACHE_ALIAS_BITS_SUN4C	0xf000

/*
 * HS128k/HS256k: presumably HyperSPARC modules with 128K/256K caches
 * (cf. the hypersparc_* routines below) — TODO confirm.
 */
#define CACHE_ALIAS_DIST_HS128k		0x20000
#define CACHE_ALIAS_BITS_HS128k		0x1f000
#define CACHE_ALIAS_DIST_HS256k		0x40000
#define CACHE_ALIAS_BITS_HS256k		0x3f000
125 
126 /*
127  * Assuming a tag format where the least significant bits are the byte offset
128  * into the cache line, and the next-most significant bits are the line id,
129  * we can calculate the appropriate aliasing constants. We also assume that
130  * the linesize and total cache size are powers of 2.
131  */
132 #define GUESS_CACHE_ALIAS_BITS		((cpuinfo.cacheinfo.c_totalsize - 1) & ~PGOFSET)
133 #define GUESS_CACHE_ALIAS_DIST		(cpuinfo.cacheinfo.c_totalsize)
134 
extern int cache_alias_dist;		/* run-time alias distance; CACHE_ALIAS_DIST falls back to this */
extern int cache_alias_bits;		/* run-time alias bits; CACHE_ALIAS_BITS falls back to this */
extern u_long dvma_cachealign;		/* presumably: cache alignment for DVMA mappings; defined elsewhere */
138 
/* Optimize cache alias macros on single architecture kernels */
#if defined(SUN4) && !defined(SUN4C) && !defined(SUN4M) && !defined(SUN4D)
/* sun4-only kernel: alias parameters are compile-time constants */
#define	CACHE_ALIAS_DIST	CACHE_ALIAS_DIST_SUN4
#define	CACHE_ALIAS_BITS	CACHE_ALIAS_BITS_SUN4
#elif !defined(SUN4) && defined(SUN4C) && !defined(SUN4M) && !defined(SUN4D)
/* sun4c-only kernel: likewise constants */
#define	CACHE_ALIAS_DIST	CACHE_ALIAS_DIST_SUN4C
#define	CACHE_ALIAS_BITS	CACHE_ALIAS_BITS_SUN4C
#else
/* multi-architecture (or sun4m/sun4d) kernel: resolved at run time */
#define	CACHE_ALIAS_DIST	cache_alias_dist
#define	CACHE_ALIAS_BITS	cache_alias_bits
#endif
150 
151 /*
152  * True iff a1 and a2 are `bad' aliases (will cause cache duplication).
153  */
154 #define	BADALIAS(a1, a2) (((int)(a1) ^ (int)(a2)) & CACHE_ALIAS_BITS)
155 
156 /*
157  * Routines for dealing with the cache.
158  */
159 void	sun4_cache_enable(void);
160 void	ms1_cache_enable(void);
161 void	viking_cache_enable(void);
162 void	hypersparc_cache_enable(void);
163 void	swift_cache_enable(void);
164 void	cypress_cache_enable(void);
165 void	turbosparc_cache_enable(void);
166 
/* sun4/sun4c VAC flushes, from coarsest (context) to finest (page) granularity */
void	sun4_vcache_flush_context(int);		/* flush current context */
void	sun4_vcache_flush_region(int, int);	/* flush region in cur ctx */
void	sun4_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void	sun4_vcache_flush_page(int va, int);	/* flush page in cur ctx */
void	sun4_vcache_flush_page_hw(int va, int);	/* flush page in cur ctx (hardware-assisted; cf. c_hwflush) */
void	sun4_cache_flush(void *, u_int);	/* flush range */
173 
/* SRMMU (sun4m-class) counterparts of the flushes above */
void	srmmu_vcache_flush_context(int);	/* flush current context */
void	srmmu_vcache_flush_region(int, int);	/* flush region in cur ctx */
void	srmmu_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void	srmmu_vcache_flush_page(int va, int);	/* flush page in cur ctx */
void	srmmu_vcache_flush_range(int, int, int);/* flush range in cur ctx */
void	srmmu_cache_flush(void *, u_int);	/* flush range */
180 
/* `Fast trap' versions for use in cross-call cache flushes on MP systems */
#if defined(MULTIPROCESSOR)
void	ft_srmmu_vcache_flush_context(int);	/* flush current context */
void	ft_srmmu_vcache_flush_region(int, int);	/* flush region in cur ctx */
void	ft_srmmu_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void	ft_srmmu_vcache_flush_page(int va, int);/* flush page in cur ctx */
void	ft_srmmu_vcache_flush_range(int, int, int);/* flush range in cur ctx */
#else
/*
 * On uniprocessor kernels the fast-trap entry points do not exist;
 * define the names to 0 — presumably so table slots that would hold
 * these function pointers can still be initialized (to NULL).
 */
#define ft_srmmu_vcache_flush_context	0
#define ft_srmmu_vcache_flush_region	0
#define ft_srmmu_vcache_flush_segment	0
#define ft_srmmu_vcache_flush_page	0
#define ft_srmmu_vcache_flush_range	0
#endif /* MULTIPROCESSOR */
195 
/* Module-specific range/page flushes */
void	ms1_cache_flush(void *, u_int);		/* flush range */
void	viking_cache_flush(void *, u_int);	/* flush range */
void	viking_pcache_flush_page(paddr_t, int);	/* flush physically-tagged page */
void	srmmu_pcache_flush_line(int, int);	/* flush one physical cache line */
void	hypersparc_pure_vcache_flush(void);

/* Whole-cache flushes */
void	ms1_cache_flush_all(void);
void	srmmu_cache_flush_all(void);
void	cypress_cache_flush_all(void);
void	hypersparc_cache_flush_all(void);
206 
extern void sparc_noop(void);	/* do-nothing function, defined elsewhere */

/*
 * No-op stand-ins for each cache-op slot: sparc_noop cast to the
 * appropriate function-pointer type, for configurations where that
 * kind of flush is unnecessary.  (NOTE(review): calling through a
 * function pointer of a different type is formally undefined C, but
 * this is a long-standing idiom on this port.)
 */
#define noop_vcache_flush_context	(void (*)(int))sparc_noop
#define noop_vcache_flush_region	(void (*)(int,int))sparc_noop
#define noop_vcache_flush_segment	(void (*)(int,int,int))sparc_noop
#define noop_vcache_flush_page		(void (*)(int,int))sparc_noop
#define noop_vcache_flush_range		(void (*)(int,int,int))sparc_noop
#define noop_cache_flush		(void (*)(void *,u_int))sparc_noop
#define noop_pcache_flush_page		(void (*)(paddr_t,int))sparc_noop
#define noop_pure_vcache_flush		(void (*)(void))sparc_noop
#define noop_cache_flush_all		(void (*)(void))sparc_noop
218 
219 /*
220  * The SMP versions of the cache flush functions. These functions
221  * send a "cache flush" message to each processor.
222  */
223 void	smp_vcache_flush_context(int);		/* flush current context */
224 void	smp_vcache_flush_region(int,int);	/* flush region in cur ctx */
225 void	smp_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
226 void	smp_vcache_flush_page(int va,int);	/* flush page in cur ctx */
227 
228 
/*
 * Generic cache-op entry points: each indirects through a per-CPU
 * function pointer in `cpuinfo', so callers automatically get the
 * implementation appropriate for the CPU module in use.
 */
#define cache_flush_page(va,ctx)	cpuinfo.vcache_flush_page(va,ctx)
#define cache_flush_segment(vr,vs,ctx)	cpuinfo.vcache_flush_segment(vr,vs,ctx)
#define cache_flush_region(vr,ctx)	cpuinfo.vcache_flush_region(vr,ctx)
#define cache_flush_context(ctx)	cpuinfo.vcache_flush_context(ctx)
#define cache_flush(va,len)		cpuinfo.cache_flush(va,len)

#define pcache_flush_page(pa,flag)	cpuinfo.pcache_flush_page(pa,flag)
236 
237 /*
238  * Cache control information.
239  */
240 struct cacheinfo {
241 	int	c_totalsize;		/* total size, in bytes */
242 					/* if split, MAX(icache,dcache) */
243 	int	c_enabled;		/* true => cache is enabled */
244 	int	c_hwflush;		/* true => have hardware flush */
245 	int	c_linesize;		/* line size, in bytes */
246 					/* if split, MIN(icache,dcache) */
247 	int	c_l2linesize;		/* log2(linesize) */
248 	int	c_nlines;		/* precomputed # of lines to flush */
249 	int	c_physical;		/* true => cache has physical
250 						   address tags */
251 	int 	c_associativity;	/* # of "buckets" in cache line */
252 	int 	c_split;		/* true => cache is split */
253 
254 	int 	ic_totalsize;		/* instruction cache */
255 	int 	ic_enabled;
256 	int 	ic_linesize;
257 	int 	ic_l2linesize;
258 	int 	ic_nlines;
259 	int 	ic_associativity;
260 
261 	int 	dc_totalsize;		/* data cache */
262 	int 	dc_enabled;
263 	int 	dc_linesize;
264 	int 	dc_l2linesize;
265 	int 	dc_nlines;
266 	int 	dc_associativity;
267 
268 	int	ec_totalsize;		/* external cache info */
269 	int 	ec_enabled;
270 	int	ec_linesize;
271 	int	ec_l2linesize;
272 	int 	ec_nlines;
273 	int 	ec_associativity;
274 
275 	enum vactype	c_vactype;
276 };
277 
/* Convenient shorthand for this CPU's cache description */
#define CACHEINFO cpuinfo.cacheinfo
279 
280 #endif /* SPARC_CACHE_H */
281