/*	$NetBSD: cpuvar.h,v 1.39 2001/12/04 00:05:06 darrenr Exp $ */

/*
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _sparc_cpuvar_h
#define _sparc_cpuvar_h

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"
#include "opt_sparc_arch.h"
#endif

#include <sys/device.h>
#include <sys/lock.h>
#include <sys/sched.h>

#include <sparc/include/reg.h>

#include <sparc/sparc/cache.h>	/* for cacheinfo */

/*
 * CPU/MMU module information.
 * There is one of these for each "mainline" CPU module we support.
 * The information contained in the structure is used only during
 * auto-configuration of the CPUs; some fields are copied into the
 * per-cpu data structure (cpu_info) for easy access during normal
 * operation.
 */
struct cpu_info;
struct module_info {
	int  cpu_type;
	enum vactype vactype;
	void (*cpu_match)(struct cpu_info *, struct module_info *, int);
	void (*getcacheinfo)(struct cpu_info *sc, int node);
	void (*hotfix)(struct cpu_info *);
	void (*mmu_enable)(void);
	void (*cache_enable)(void);
	int  ncontext;			/* max. # of contexts (that we use) */

	void (*get_syncflt)(void);
	int  (*get_asyncflt)(u_int *, u_int *);
	void (*sp_cache_flush)(caddr_t, u_int);
	void (*sp_vcache_flush_page)(int);
	void (*sp_vcache_flush_segment)(int, int);
	void (*sp_vcache_flush_region)(int);
	void (*sp_vcache_flush_context)(void);
	void (*pcache_flush_page)(paddr_t, int);
	void (*pure_vcache_flush)(void);
	void (*cache_flush_all)(void);
	void (*memerr)(unsigned, u_int, u_int, struct trapframe *);
	void (*zero_page)(paddr_t);
	void (*copy_page)(paddr_t, paddr_t);
};
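
/*
 * Illustrative sketch (not part of the original header): each CPU/MMU
 * support file is expected to supply one statically initialized
 * module_info and let CPU autoconfiguration invoke its hooks.  All the
 * mycpu_* names in the disabled fragment below are hypothetical
 * placeholders; the real module tables live in the machine-dependent
 * CPU code.
 */
#if 0
struct module_info module_mycpu = {
	CPUTYP_UNKNOWN,			/* cpu_type; set by the match hook */
	VAC_UNKNOWN,			/* vactype (enum vactype, cache.h) */
	mycpu_match,			/* cpu_match */
	mycpu_getcacheinfo,		/* getcacheinfo */
	mycpu_hotfix,			/* hotfix */
	mycpu_mmu_enable,		/* mmu_enable */
	mycpu_cache_enable,		/* cache_enable */
	0,				/* ncontext; filled in at match time */
	mycpu_get_syncflt,		/* get_syncflt (locore) */
	mycpu_get_asyncflt,		/* get_asyncflt (locore) */
	mycpu_cache_flush,		/* sp_cache_flush */
	mycpu_vcache_flush_page,	/* sp_vcache_flush_page */
	mycpu_vcache_flush_segment,	/* sp_vcache_flush_segment */
	mycpu_vcache_flush_region,	/* sp_vcache_flush_region */
	mycpu_vcache_flush_context,	/* sp_vcache_flush_context */
	mycpu_pcache_flush_page,	/* pcache_flush_page */
	mycpu_pure_vcache_flush,	/* pure_vcache_flush */
	mycpu_cache_flush_all,		/* cache_flush_all */
	mycpu_memerr,			/* memerr */
	mycpu_zero_page,		/* zero_page */
	mycpu_copy_page			/* copy_page */
};
#endif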

struct xpmsg {
	struct simplelock	lock;
	int tag;
#define	XPMSG_SAVEFPU			1
#define	XPMSG_PAUSECPU			2
#define	XPMSG_RESUMECPU			3
#define	XPMSG_FUNC			4
#define	XPMSG_DEMAP_TLB_PAGE		10
#define	XPMSG_DEMAP_TLB_SEGMENT		11
#define	XPMSG_DEMAP_TLB_REGION		12
#define	XPMSG_DEMAP_TLB_CONTEXT		13
#define	XPMSG_DEMAP_TLB_ALL		14
#define	XPMSG_VCACHE_FLUSH_PAGE		20
#define	XPMSG_VCACHE_FLUSH_SEGMENT	21
#define	XPMSG_VCACHE_FLUSH_REGION	22
#define	XPMSG_VCACHE_FLUSH_CONTEXT	23
#define	XPMSG_VCACHE_FLUSH_RANGE	24

	union {
		struct xpmsg_func {
			int	(*func)(int, int, int, int);
			int	arg0;
			int	arg1;
			int	arg2;
			int	arg3;
			int	retval;
		} xpmsg_func;
		struct xpmsg_flush_page {
			int	ctx;
			int	va;
		} xpmsg_flush_page;
		struct xpmsg_flush_segment {
			int	ctx;
			int	vr;
			int	vs;
		} xpmsg_flush_segment;
		struct xpmsg_flush_region {
			int	ctx;
			int	vr;
		} xpmsg_flush_region;
		struct xpmsg_flush_context {
			int	ctx;
		} xpmsg_flush_context;
		struct xpmsg_flush_range {
			int	ctx;
			caddr_t	va;
			int	size;
		} xpmsg_flush_range;
	} u;
};

/*
 * This must be locked around all message transactions to ensure only
 * one CPU is generating them.
 * XXX deal with different level priority IPI's.
 */
extern struct simplelock xpmsg_lock;

#define LOCK_XPMSG()	simple_lock(&xpmsg_lock);
#define UNLOCK_XPMSG()	simple_unlock(&xpmsg_lock);
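
/*
 * Illustrative sketch (not part of the original header) of the intended
 * locking order for one cross-call transaction: take xpmsg_lock so only
 * one CPU generates messages, own the target CPU's message area, fill it
 * in, then post the IPI.  The exact protocol lives in the MD cross-call
 * code; `cpi', `func' and the arguments below are hypothetical locals.
 */
#if 0
	LOCK_XPMSG();				/* one sender at a time */
	simple_lock(&cpi->msg.lock);		/* own the target's mailbox */
	cpi->msg.tag = XPMSG_FUNC;
	cpi->msg.u.xpmsg_func.func = func;
	cpi->msg.u.xpmsg_func.arg0 = arg0;
	cpi->msg.u.xpmsg_func.arg1 = arg1;
	cpi->msg.u.xpmsg_func.arg2 = arg2;
	cpi->msg.u.xpmsg_func.arg3 = arg3;
	raise_ipi_wait_and_unlock(cpi);		/* deliver; drops msg.lock */
	UNLOCK_XPMSG();
#endif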

/*
 * The cpuinfo structure. This structure maintains information about one
 * currently installed CPU (there may be several of these if the machine
 * supports multiple CPUs, as on some Sun4m architectures). The information
 * in this structure supersedes the old "cpumod", "mmumod", and similar
 * fields.
 */

struct cpu_info {
	struct schedstate_percpu ci_schedstate; /* scheduler state */

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * __volatile ci_self;

	int		node;		/* PROM node for this CPU */

	/* CPU information */
	char		*cpu_name;	/* CPU model */
	int		cpu_impl;	/* CPU implementation code */
	int		cpu_vers;	/* CPU version code */
	int		mmu_impl;	/* MMU implementation code */
	int		mmu_vers;	/* MMU version code */
	int		master;		/* 1 if this is bootup CPU */

	int		ci_cpuid;	/* CPU index (see cpus[] array) */
	int		mid;		/* Module ID for MP systems */
	int		mbus;		/* 1 if CPU is on MBus */
	int		mxcc;		/* 1 if a MBus-level MXCC is present */

	caddr_t		mailbox;	/* VA of CPU's mailbox */

	int		mmu_ncontext;	/* Number of contexts supported */
	int		mmu_nregion; 	/* Number of regions supported */
	int		mmu_nsegment;	/* [4/4c] Segments */
	int		mmu_npmeg;	/* [4/4c] Pmegs */
	int		sun4_mmu3l;	/* [4]: 3-level MMU present */
#if defined(SUN4_MMU3L)
#define HASSUN4_MMU3L	(cpuinfo.sun4_mmu3l)
#else
#define HASSUN4_MMU3L	(0)
#endif

	/* Context administration */
	int		*ctx_tbl;	/* [4m] SRMMU-edible context table */
	paddr_t		ctx_tbl_pa;	/* [4m] ctx table physical address */

/* XXX - of these, we currently use only cpu_type */
	int		arch;		/* Architecture: CPU_SUN4x */
	int		class;		/* Class: SuperSPARC, microSPARC... */
	int		classlvl;	/* Iteration in class: 1, 2, etc. */
	int		classsublvl;	/* stepping in class (version) */
	int		cpu_type;	/* Type: see CPUTYP_xxx below */

	int		hz;		/* Clock speed */

	/* Cache information */
	struct cacheinfo	cacheinfo;	/* see cache.h */

	/* FPU information */
	int		fpupresent;	/* true if FPU is present */
	int		fpuvers;	/* FPU revision */
	char		*fpu_name;	/* FPU model */
	char		fpu_namebuf[32];/* Buffer for FPU name, if necessary */

	/* various flags to work around anomalies in chips */
	int		flags;		/* see CPUFLG_xxx, below */

	/* Per processor counter register (sun4m only) */
	struct counter_4m	*counterreg_4m;

	/* Per processor interrupt mask register (sun4m only) */
	struct icr_pi		*intreg_4m;
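	/*
	 * raise_ipi() below posts a level-15 software interrupt in the
	 * target CPU's set-pending register; this is the delivery
	 * mechanism for the inter-processor messages defined above.
	 */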
#define raise_ipi(cpi)	do {				\
	(cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(15);	\
} while (0)

	/*
	 * The following pointers point to processes that are somehow
	 * associated with this CPU--running on it, using its FPU,
	 * etc.
	 */

	struct	proc	*ci_curproc;		/* CPU owner */
	struct	proc	*fpproc;		/* FPU owner */
	/* XXX */
	void		*ci_ddb_regs;		/* DDB regs */

	/*
	 * Idle PCB and interrupt stack.
	 */
	void		*eintstack;		/* End of interrupt stack */
#define INT_STACK_SIZE	(128 * 128)		/* 128 128-byte stack frames */
	struct	pcb	*idle_u;
	void		*redzone;		/* DEBUG: stack red zone */
#define REDSIZE		(8*96)			/* some room for bouncing */

	struct	pcb	*curpcb;		/* CPU's PCB & kernel stack */

	/*
	 * The following are function pointers to do interesting CPU-dependent
	 * things without having to do type-tests all the time
	 */

	/* bootup things: access to physical memory */
	u_int	(*read_physmem)(u_int addr, int space);
	void	(*write_physmem)(u_int addr, u_int data);
	void	(*cache_tablewalks)(void);
	void	(*mmu_enable)(void);
	void	(*hotfix)(struct cpu_info *);

	/* locore defined: */
	void	(*get_syncflt)(void);		/* Not C-callable */
	int	(*get_asyncflt)(u_int *, u_int *);

	/* Synchronous Fault Status; temporary storage */
	struct {
		int	sfsr;
		int	sfva;
	} syncfltdump;

	/*
	 * Cache handling functions.
	 * Most cache flush functions come in two flavours: one that
	 * acts only on the CPU it executes on, and another that
	 * uses inter-processor signals to flush the cache on
	 * all processor modules.
	 */
	void	(*cache_enable)(void);
	void	(*cache_flush)(caddr_t, u_int);
	void	(*sp_cache_flush)(caddr_t, u_int);
	void	(*vcache_flush_page)(int);
	void	(*sp_vcache_flush_page)(int);
	void	(*vcache_flush_segment)(int, int);
	void	(*sp_vcache_flush_segment)(int, int);
	void	(*vcache_flush_region)(int);
	void	(*sp_vcache_flush_region)(int);
	void	(*vcache_flush_context)(void);
	void	(*sp_vcache_flush_context)(void);

	void	(*pcache_flush_page)(paddr_t, int);
	void	(*pure_vcache_flush)(void);
	void	(*cache_flush_all)(void);

	/* Support for hardware-assisted page clear/copy */
	void	(*zero_page)(paddr_t);
	void	(*copy_page)(paddr_t, paddr_t);

#if 0
	/* hardware-assisted block operation routines */
	void		(*hwbcopy)(const void *from, void *to, size_t len);
	void		(*hwbzero)(void *buf, size_t len);

	/* routine to clear mbus-sbus buffers */
	void		(*mbusflush)(void);
#endif

	/*
	 * Memory error handler; parity errors, unhandled NMIs and other
	 * unrecoverable faults end up here.
	 */
	void		(*memerr)(unsigned, u_int, u_int, struct trapframe *);

	/* Inter-processor message area */
	struct xpmsg msg;

#if defined(DIAGNOSTIC) || defined(LOCKDEBUG)
	u_long ci_spin_locks;		/* # of spin locks held */
	u_long ci_simple_locks;		/* # of simple locks held */
#endif
};

/*
 * CPU architectures
 */
#define CPUARCH_UNKNOWN		0
#define CPUARCH_SUN4		1
#define CPUARCH_SUN4C		2
#define CPUARCH_SUN4M		3
#define CPUARCH_SUN4D		4
#define CPUARCH_SUN4U		5

/*
 * CPU classes
 */
#define CPUCLS_UNKNOWN		0

#if defined(SUN4)
#define CPUCLS_SUN4		1
#endif

#if defined(SUN4C)
#define CPUCLS_SUN4C		5
#endif

#if defined(SUN4M)
#define CPUCLS_MICROSPARC	10	/* MicroSPARC-II */
#define CPUCLS_SUPERSPARC	11	/* Generic SuperSPARC */
#define CPUCLS_HYPERSPARC	12	/* Ross HyperSPARC RT620 */
#endif

/*
 * CPU types. Each of these should uniquely identify one platform/type of
 * system, e.g. "MBus-based 75 MHz SuperSPARC-II with ECache" is
 * CPUTYP_SS2_MBUS_MXCC. The general form is
 * 	CPUTYP_proctype_bustype_cachetype_etc_etc
 *
 * XXX: This is far from complete/comprehensive
 * XXX: ADD SUN4, SUN4C TYPES
 */
#define CPUTYP_UNKNOWN		0

#define CPUTYP_4_100		1	/* Sun4/100 */
#define CPUTYP_4_200		2	/* Sun4/200 */
#define CPUTYP_4_300		3	/* Sun4/300 */
#define CPUTYP_4_400		4	/* Sun4/400 */

#define CPUTYP_SLC		10	/* SPARCstation SLC */
#define CPUTYP_ELC		11	/* SPARCstation ELC */
#define CPUTYP_IPX		12	/* SPARCstation IPX */
#define CPUTYP_IPC		13	/* SPARCstation IPC */
#define CPUTYP_1		14	/* SPARCstation 1 */
#define CPUTYP_1P		15	/* SPARCstation 1+ */
#define CPUTYP_2		16	/* SPARCstation 2 */

/* We classify the Sun4m's by feature, not by model (XXX: do same for 4/4c) */
#define CPUTYP_SS2_MBUS_MXCC	20	/* SuperSPARC-II, Mbus, MXCC (SS20) */
#define CPUTYP_SS1_MBUS_MXCC	21	/* SuperSPARC-I, Mbus, MXCC (SS10) */
#define CPUTYP_SS2_MBUS_NOMXCC	22	/* SuperSPARC-II, on MBus w/o MXCC */
#define CPUTYP_SS1_MBUS_NOMXCC	23	/* SuperSPARC-I, on MBus w/o MXCC */
#define CPUTYP_MS2		24	/* MicroSPARC-2 */
#define CPUTYP_MS1		25	/* MicroSPARC-1 */
#define CPUTYP_HS_MBUS		26	/* MBus-based HyperSPARC */
#define CPUTYP_CYPRESS		27	/* MBus-based Cypress */

/*
 * CPU flags
 */
#define CPUFLG_CACHEPAGETABLES	0x1	/* caching pagetables OK on Sun4m */
#define CPUFLG_CACHEIOMMUTABLES	0x2	/* caching IOMMU translations OK */
#define CPUFLG_CACHEDVMA	0x4	/* DVMA goes through cache */
#define CPUFLG_SUN4CACHEBUG	0x8	/* trap page can't be cached */
#define CPUFLG_CACHE_MANDATORY	0x10	/* if cache is on, don't use
					   uncached access */
#define CPUFLG_PAUSED		0x2000	/* CPU is paused */
#define CPUFLG_GOTMSG		0x4000	/* CPU got an IPI */
#define CPUFLG_READY		0x8000	/* CPU available for IPI */


#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)	cii = 0; ci = cpus[cii], cii < ncpu; cii++
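
/*
 * MI code walks the CPUs with the usual NetBSD idiom (illustrative sketch;
 * `cii', `ci' and do_something() are the caller's own names):
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci))
 *		do_something(ci);
 */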

/*
 * Useful macros.
 */
#define CPU_READY(cpi)	((cpi) == NULL || cpuinfo.mid == (cpi)->mid || \
			    ((cpi)->flags & CPUFLG_READY) == 0)
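
/*
 * Note the sense of CPU_READY(): it evaluates true for CPUs an IPI sender
 * should skip, i.e. an empty slot, the sending CPU itself, or a CPU that
 * has not yet set CPUFLG_READY.  Illustrative use in a broadcast loop:
 *
 *	if (CPU_READY(cpi))
 *		continue;
 */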

/*
 * Related function prototypes
 */
void getcpuinfo (struct cpu_info *sc, int node);
void mmu_install_tables (struct cpu_info *);
void pmap_alloc_cpu (struct cpu_info *);
void pmap_globalize_boot_cpu (struct cpu_info *);
#if defined(MULTIPROCESSOR)
void raise_ipi_wait_and_unlock (struct cpu_info *);
void cross_call (int (*)(int, int, int, int), int, int, int, int, int);
#endif

extern struct cpu_info **cpus;

#define cpuinfo	(*(struct cpu_info *)CPUINFO_VA)
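
/*
 * `cpuinfo' always refers to the cpu_info of the CPU executing the code,
 * through the per-CPU alias mapping at CPUINFO_VA; the globally visible
 * address of that same structure is recovered through the self pointer
 * (this is what the curcpu() macro mentioned above relies on).
 * Minimal illustrative sketch:
 *
 *	struct cpu_info *ci = cpuinfo.ci_self;	(global VA of this CPU)
 *	int myid = cpuinfo.ci_cpuid;		(index into the cpus[] array)
 */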

#endif	/* _sparc_cpuvar_h */