xref: /netbsd/sys/arch/sparc/sparc/cpuvar.h (revision 6550d01e)
1 /*	$NetBSD: cpuvar.h,v 1.86 2011/01/27 06:24:59 mrg Exp $ */
2 
3 /*
4  *  Copyright (c) 1996 The NetBSD Foundation, Inc.
5  *  All rights reserved.
6  *
7  *  This code is derived from software contributed to The NetBSD Foundation
8  *  by Paul Kranenburg.
9  *
10  *  Redistribution and use in source and binary forms, with or without
11  *  modification, are permitted provided that the following conditions
12  *  are met:
13  *  1. Redistributions of source code must retain the above copyright
14  *     notice, this list of conditions and the following disclaimer.
15  *  2. Redistributions in binary form must reproduce the above copyright
16  *     notice, this list of conditions and the following disclaimer in the
17  *     documentation and/or other materials provided with the distribution.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  *  ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  *  TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  *  PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  *  BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifndef _sparc_cpuvar_h
33 #define _sparc_cpuvar_h
34 
35 #if defined(_KERNEL_OPT)
36 #include "opt_multiprocessor.h"
37 #include "opt_lockdebug.h"
38 #include "opt_ddb.h"
39 #include "opt_sparc_arch.h"
40 #include "opt_modular.h"
41 #endif
42 
43 #include <sys/device.h>
44 #include <sys/lock.h>
45 #include <sys/cpu_data.h>
46 
47 #include <sparc/include/reg.h>
48 #include <sparc/sparc/cache.h>	/* for cacheinfo */
49 
50 /*
51  * CPU/MMU module information.
52  * There is one of these for each "mainline" CPU module we support.
53  * The information contained in the structure is used only during
54  * auto-configuration of the CPUs; some fields are copied into the
55  * per-cpu data structure (cpu_info) for easy access during normal
56  * operation.
57  */
58 struct cpu_info;
59 struct module_info {
60 	int  cpu_type;			/* CPUTYP_xxx; see below */
61 	enum vactype vactype;		/* cache type; see <sparc/sparc/cache.h> */
	/* Hooks invoked while auto-configuring a CPU (see comment above) */
62 	void (*cpu_match)(struct cpu_info *, struct module_info *, int);
63 	void (*getcacheinfo)(struct cpu_info *sc, int node);
64 	void (*hotfix)(struct cpu_info *);
65 	void (*mmu_enable)(void);
66 	void (*cache_enable)(void);
67 	int  (*getmid)(void);		/* Get MID of current CPU */
68 	int  ncontext;			/* max. # of contexts (that we use) */
69 
	/*
	 * Fault and cache handlers; these are copied into the per-CPU
	 * cpu_info at configuration time.  The `sp_' variants act on the
	 * executing CPU only and the `ft_' variants are fast-trap
	 * handlers -- see the matching members of struct cpu_info below.
	 */
70 	void (*get_syncflt)(void);
71 	int  (*get_asyncflt)(u_int *, u_int *);
72 	void (*cache_flush)(void *, u_int);
73 	void (*sp_vcache_flush_page)(int, int);
74 	void (*ft_vcache_flush_page)(int, int);
75 	void (*sp_vcache_flush_segment)(int, int, int);
76 	void (*ft_vcache_flush_segment)(int, int, int);
77 	void (*sp_vcache_flush_region)(int, int);
78 	void (*ft_vcache_flush_region)(int, int);
79 	void (*sp_vcache_flush_context)(int);
80 	void (*ft_vcache_flush_context)(int);
81 	void (*sp_vcache_flush_range)(int, int, int);
82 	void (*ft_vcache_flush_range)(int, int, int);
83 	void (*pcache_flush_page)(paddr_t, int);
84 	void (*pure_vcache_flush)(void);
85 	void (*cache_flush_all)(void);
86 	void (*memerr)(unsigned, u_int, u_int, struct trapframe *);
87 	void (*zero_page)(paddr_t);
88 	void (*copy_page)(paddr_t, paddr_t);
89 };
90 
91 /*
92  * Message structure for Inter Processor Communication in MP systems
93  */
94 struct xpmsg {
95 	volatile int tag;		/* message type (XPMSG*, below) */
96 #define	XPMSG15_PAUSECPU	1	/* pause the target CPU (cf. msg_lev15) */
97 #define	XPMSG_FUNC		4	/* call u.xpmsg_func.func */
98 #define	XPMSG_FTRP		5	/* call u.xpmsg_func.trap (fast trap) */
99 
100 	volatile union {
101 		/*
102 		 * Cross call: ask to run (*func)(arg0,arg1,arg2)
103 		 * or (*trap)(arg0,arg1,arg2). `trap' should be the
104 		 * address of a `fast trap' handler that executes in
105 		 * the trap window (see locore.s).
106 		 */
107 		struct xpmsg_func {
108 			void	(*func)(int, int, int);
109 			void	(*trap)(int, int, int);
110 			int	arg0;
111 			int	arg1;
112 			int	arg2;
113 		} xpmsg_func;
114 	} u;
	/*
	 * NOTE(review): presumably handshake flags updated by the target
	 * CPU as it picks up and finishes the message -- confirm against
	 * the xcall implementation.
	 */
115 	volatile int	received;
116 	volatile int	complete;
117 };
118 
119 /*
120  * The cpuinfo structure. This structure maintains information about one
121  * currently installed CPU (there may be several of these if the machine
122  * supports multiple CPUs, as on some Sun4m architectures). The information
123  * in this structure supersedes the old "cpumod", "mmumod", and similar
124  * fields.
125  */
126 
127 struct cpu_info {
128 	struct cpu_data ci_data;	/* MI per-cpu data */
129 
130 	/*
131 	 * Primary Inter-processor message area.  Keep this aligned
132 	 * to a cache line boundary if possible, as the structure
133 	 * itself is one (normal 32 byte) cache-line.
134 	 */
135 	struct xpmsg	msg __aligned(32);
136 
137 	/* Scheduler flags */
138 	int	ci_want_ast;
139 	int	ci_want_resched;
140 
141 	/*
142 	 * SPARC cpu_info structures live at two VAs: one global
143 	 * VA (so each CPU can access any other CPU's cpu_info)
144 	 * and an alias VA CPUINFO_VA which is the same on each
145 	 * CPU and maps to that CPU's cpu_info.  Since the alias
146 	 * CPUINFO_VA is how we locate our cpu_info, we have to
147 	 * self-reference the global VA so that we can return it
148 	 * in the curcpu() macro.
149 	 */
150 	struct cpu_info * volatile ci_self;
151 
152 	int		ci_cpuid;	/* CPU index (see cpus[] array) */
153 
154 	/* Context administration */
155 	int		*ctx_tbl;	/* [4m] SRMMU-edible context table */
156 	paddr_t		ctx_tbl_pa;	/* [4m] ctx table physical address */
157 
158 	/* Cache information */
159 	struct cacheinfo	cacheinfo;	/* see cache.h */
160 
161 	/* various flags to workaround anomalies in chips */
162 	volatile int	flags;		/* see CPUFLG_xxx, below */
163 
164 	/* Per processor counter register (sun4m only) */
165 	volatile struct counter_4m	*counterreg_4m;
166 
167 	/* Per processor interrupt mask register (sun4m only) */
168 	volatile struct icr_pi	*intreg_4m;
169 	/*
170 	 * Send an IPI to (cpi).  For Ross cpus we need to read
171 	 * the pending register to avoid a hardware bug.
172 	 */
173 #define raise_ipi(cpi,lvl)	do {			\
174 	volatile int x;					\
175 	(cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl);	\
176 	x = (cpi)->intreg_4m->pi_pend;			\
177 } while (0)
178 
179 	int		sun4_mmu3l;	/* [4]: 3-level MMU present */
180 #if defined(SUN4_MMU3L)
181 #define HASSUN4_MMU3L	(cpuinfo.sun4_mmu3l)
182 #else
183 #define HASSUN4_MMU3L	(0)
184 #endif
185 	int		ci_idepth;		/* Interrupt depth */
186 
187 	/*
188 	 * The following pointers point to processes that are somehow
189 	 * associated with this CPU--running on it, using its FPU,
190 	 * etc.
191 	 */
192 	struct	lwp	*ci_curlwp;		/* CPU owner */
193 	struct	lwp 	*fplwp;			/* FPU owner */
194 
195 	int		ci_mtx_count;		/* MI mutex(9) bookkeeping */
196 	int		ci_mtx_oldspl;		/* MI mutex(9) bookkeeping */
197 
198 	/*
199 	 * Idle PCB and Interrupt stack;
200 	 */
201 	void		*eintstack;		/* End of interrupt stack */
202 #define INT_STACK_SIZE	(128 * 128)		/* 128 128-byte stack frames */
203 	void		*redzone;		/* DEBUG: stack red zone */
204 #define REDSIZE		(8*96)			/* some room for bouncing */
205 
206 	struct	pcb	*curpcb;		/* CPU's PCB & kernel stack */
207 
208 	/* locore defined: */
209 	void	(*get_syncflt)(void);		/* Not C-callable */
210 	int	(*get_asyncflt)(u_int *, u_int *);
211 
212 	/* Synchronous Fault Status; temporary storage */
213 	struct {
214 		int	sfsr;
215 		int	sfva;
216 	} syncfltdump;
217 
218 	/*
219 	 * Cache handling functions.
220 	 * Most cache flush function come in two flavours: one that
221 	 * acts only on the CPU it executes on, and another that
222 	 * uses inter-processor signals to flush the cache on
223 	 * all processor modules.
224 	 * The `ft_' versions are fast trap cache flush handlers.
225 	 */
226 	void	(*cache_flush)(void *, u_int);
227 	void	(*vcache_flush_page)(int, int);
228 	void	(*sp_vcache_flush_page)(int, int);
229 	void	(*ft_vcache_flush_page)(int, int);
230 	void	(*vcache_flush_segment)(int, int, int);
231 	void	(*sp_vcache_flush_segment)(int, int, int);
232 	void	(*ft_vcache_flush_segment)(int, int, int);
233 	void	(*vcache_flush_region)(int, int);
234 	void	(*sp_vcache_flush_region)(int, int);
235 	void	(*ft_vcache_flush_region)(int, int);
236 	void	(*vcache_flush_context)(int);
237 	void	(*sp_vcache_flush_context)(int);
238 	void	(*ft_vcache_flush_context)(int);
239 
240 	/* These are helpers for (*cache_flush)() */
241 	void	(*sp_vcache_flush_range)(int, int, int);
242 	void	(*ft_vcache_flush_range)(int, int, int);
243 
244 	void	(*pcache_flush_page)(paddr_t, int);
245 	void	(*pure_vcache_flush)(void);
246 	void	(*cache_flush_all)(void);
247 
248 	/* Support for hardware-assisted page clear/copy */
249 	void	(*zero_page)(paddr_t);
250 	void	(*copy_page)(paddr_t, paddr_t);
251 
252 	/* Virtual addresses for use in pmap copy_page/zero_page */
253 	void *	vpage[2];
254 	int	*vpage_pte[2];		/* pte location of vpage[] */
255 
256 	void	(*cache_enable)(void);
257 
258 	int	cpu_type;	/* Type: see CPUTYP_xxx below */
259 
260 	/* Inter-processor message area (high priority but used infrequently) */
261 	struct xpmsg	msg_lev15;
262 
263 	/* CPU information */
264 	int		node;		/* PROM node for this CPU */
265 	int		mid;		/* Module ID for MP systems */
266 	int		mbus;		/* 1 if CPU is on MBus */
267 	int		mxcc;		/* 1 if a MBus-level MXCC is present */
268 	const char	*cpu_longname;	/* CPU model */
269 	int		cpu_impl;	/* CPU implementation code */
270 	int		cpu_vers;	/* CPU version code */
271 	int		mmu_impl;	/* MMU implementation code */
272 	int		mmu_vers;	/* MMU version code */
273 	int		master;		/* 1 if this is bootup CPU */
274 
275 	vaddr_t		mailbox;	/* VA of CPU's mailbox */
276 
277 	int		mmu_ncontext;	/* Number of contexts supported */
278 	int		mmu_nregion; 	/* Number of regions supported */
279 	int		mmu_nsegment;	/* [4/4c] Segments */
280 	int		mmu_npmeg;	/* [4/4c] Pmegs */
281 
282 /* XXX - we currently don't actually use the following */
283 	int		arch;		/* Architecture: CPU_SUN4x */
284 	int		class;		/* Class: SuperSPARC, microSPARC... */
285 	int		classlvl;	/* Iteration in class: 1, 2, etc. */
286 	int		classsublvl;	/* stepping in class (version) */
287 
288 	int		hz;		/* Clock speed */
289 
290 	/* FPU information */
291 	int		fpupresent;	/* true if FPU is present */
292 	int		fpuvers;	/* FPU revision */
293 	const char	*fpu_name;	/* FPU model */
294 	char		fpu_namebuf[32];/* Buffer for FPU name, if necessary */
295 
296 	/* XXX */
297 	volatile void	*ci_ddb_regs;		/* DDB regs */
298 
299 	/*
300 	 * The following are function pointers to do interesting CPU-dependent
301 	 * things without having to do type-tests all the time
302 	 */
303 
304 	/* bootup things: access to physical memory */
305 	u_int	(*read_physmem)(u_int addr, int space);
306 	void	(*write_physmem)(u_int addr, u_int data);
307 	void	(*cache_tablewalks)(void);
308 	void	(*mmu_enable)(void);
309 	void	(*hotfix)(struct cpu_info *);
310 
311 
312 #if 0
313 	/* hardware-assisted block operation routines */
314 	void		(*hwbcopy)(const void *from, void *to, size_t len);
315 	void		(*hwbzero)(void *buf, size_t len);
316 
317 	/* routine to clear mbus-sbus buffers */
318 	void		(*mbusflush)(void);
319 #endif
320 
321 	/*
322 	 * Memory error handler; parity errors, unhandled NMIs and other
323 	 * unrecoverable faults end up here.
324 	 */
325 	void		(*memerr)(unsigned, u_int, u_int, struct trapframe *);
326 	void		(*idlespin)(struct cpu_info *);
327 	/* Module Control Registers */
328 	/*bus_space_handle_t*/ long ci_mbusport;
329 	/*bus_space_handle_t*/ long ci_mxccregs;
330 
331 	u_int	ci_tt;			/* Last trap (if tracing) */
332 
333 	/*
334 	 * Start/End VA's of this cpu_info region; we upload the other pages
335 	 * in this region that aren't part of the cpu_info to uvm.
336 	 */
337 	vaddr_t	ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;
338 
	/* Event counters; see evcnt(9).  Names indicate what is counted. */
339 	struct evcnt ci_savefpstate;
340 	struct evcnt ci_savefpstate_null;
341 	struct evcnt ci_xpmsg_mutex_fail;
342 	struct evcnt ci_xpmsg_mutex_fail_call;
343 	struct evcnt ci_intrcnt[16];
344 	struct evcnt ci_sintrcnt[16];
345 };
346 
347 /*
348  * CPU architectures
349  * (stored in cpu_info `arch'; NOTE(review): that field's comment says
350  * "CPU_SUN4x" but the constants defined here are CPUARCH_* -- confirm)
351  */
352 #define CPUARCH_UNKNOWN		0
353 #define CPUARCH_SUN4		1
354 #define CPUARCH_SUN4C		2
355 #define CPUARCH_SUN4M		3
356 #define	CPUARCH_SUN4D		4
357 #define CPUARCH_SUN4U		5
358 
359 /*
360  * CPU classes (stored in cpu_info `class')
361  */
362 #define CPUCLS_UNKNOWN		0
363 
364 #if defined(SUN4)
365 #define CPUCLS_SUN4		1
366 #endif
367 
368 #if defined(SUN4C)
369 #define CPUCLS_SUN4C		5
370 #endif
371 
372 #if defined(SUN4M) || defined(SUN4D)
373 #define CPUCLS_MICROSPARC	10	/* MicroSPARC-II */
374 #define CPUCLS_SUPERSPARC	11	/* Generic SuperSPARC */
375 #define CPUCLS_HYPERSPARC	12	/* Ross HyperSPARC RT620 */
376 #endif
377 
378 /*
379  * CPU types. Each of these should uniquely identify one platform/type of
380  * system, i.e. "MBus-based 75 MHz SuperSPARC-II with ECache" is
381  * CPUTYP_SS2_MBUS_MXCC. The general form is
382  * 	CPUTYP_proctype_bustype_cachetype_etc_etc
383  *
384  * XXX: This is far from complete/comprehensive
385  * XXX: ADD SUN4, SUN4C TYPES
386  */
387 #define CPUTYP_UNKNOWN		0
388 
389 #define CPUTYP_4_100		1 	/* Sun4/100 */
390 #define CPUTYP_4_200		2	/* Sun4/200 */
391 #define CPUTYP_4_300		3	/* Sun4/300 */
392 #define CPUTYP_4_400		4	/* Sun4/400 */
393 
394 #define CPUTYP_SLC		10	/* SPARCstation SLC */
395 #define CPUTYP_ELC		11	/* SPARCstation ELC */
396 #define CPUTYP_IPX		12	/* SPARCstation IPX */
397 #define CPUTYP_IPC		13	/* SPARCstation IPC */
398 #define CPUTYP_1		14	/* SPARCstation 1 */
399 #define CPUTYP_1P		15	/* SPARCstation 1+ */
400 #define CPUTYP_2		16	/* SPARCstation 2 */
401 
402 /* We classify the Sun4m's by feature, not by model (XXX: do same for 4/4c) */
403 #define	CPUTYP_SS2_MBUS_MXCC	20 	/* SuperSPARC-II, Mbus, MXCC (SS20) */
404 #define CPUTYP_SS1_MBUS_MXCC	21	/* SuperSPARC-I, Mbus, MXCC (SS10) */
405 #define CPUTYP_SS2_MBUS_NOMXCC	22	/* SuperSPARC-II, on MBus w/o MXCC */
406 #define CPUTYP_SS1_MBUS_NOMXCC	23	/* SuperSPARC-I, on MBus w/o MXCC */
407 #define CPUTYP_MS2		24	/* MicroSPARC-2 */
408 #define CPUTYP_MS1		25 	/* MicroSPARC-1 */
409 #define CPUTYP_HS_MBUS		26	/* MBus-based HyperSPARC */
410 #define CPUTYP_CYPRESS		27	/* MBus-based Cypress */
411 
412 /*
413  * CPU flags (bits for the cpu_info `flags' member)
414  */
415 #define CPUFLG_CACHEPAGETABLES	0x1	/* caching pagetables OK on Sun4m */
416 #define CPUFLG_CACHEIOMMUTABLES	0x2	/* caching IOMMU translations OK */
417 #define CPUFLG_CACHEDVMA	0x4	/* DVMA goes through cache */
418 #define CPUFLG_SUN4CACHEBUG	0x8	/* trap page can't be cached */
419 #define CPUFLG_CACHE_MANDATORY	0x10	/* if cache is on, don't use
420 					   uncached access */
421 					/* bits 0x20-0x800 currently unused */
422 #define CPUFLG_HATCHED		0x1000	/* CPU is alive */
423 #define CPUFLG_PAUSED		0x2000	/* CPU is paused */
424 #define CPUFLG_GOTMSG		0x4000	/* CPU got an lev13 IPI */
425 #define CPUFLG_READY		0x8000	/* CPU available for IPI */
424 
#define CPU_INFO_ITERATOR		int
/*
 * Provide two forms of CPU_INFO_FOREACH.  One fast one for non-modular
 * non-SMP kernels, and the other for everyone else.  Both work in the
 * non-SMP case, just involving an extra indirection through cpus[0] for
 * the portable version.
 */
#if defined(MULTIPROCESSOR) || defined(MODULAR) || defined(_MODULE)
/*
 * Test the index bound BEFORE dereferencing cpus[cii]: the previous
 * ordering read cpus[sparc_ncpus] (one element past the end) on the
 * final iteration test.  As before, iteration also stops early at the
 * first slot that is NULL or has no interrupt stack set up yet.
 */
#define	CPU_INFO_FOREACH(cii, cp)	cii = 0; cii < sparc_ncpus && (cp = cpus[cii]) != NULL && cp->eintstack != NULL; cii++
#else
/* Uniprocessor, non-modular: exactly one CPU, reachable via curcpu() */
#define CPU_INFO_FOREACH(cii, cp)	cii = 0, cp = curcpu(); cp != NULL; cp = NULL
#endif
437 
438 /*
439  * Useful macros.
440  */
	/*
	 * True when (cpi) is not a valid IPI target: it is absent (NULL),
	 * it is the calling CPU itself (same module ID as `cpuinfo', the
	 * per-CPU alias -- see below), or it has not set CPUFLG_READY.
	 */
441 #define CPU_NOTREADY(cpi)	((cpi) == NULL || cpuinfo.mid == (cpi)->mid || \
442 				    ((cpi)->flags & CPUFLG_READY) == 0)
443 
444 /*
445  * Related function prototypes
446  */
447 void getcpuinfo (struct cpu_info *sc, int node);	/* fill *sc from PROM `node' */
448 void mmu_install_tables (struct cpu_info *);
449 void pmap_alloc_cpu (struct cpu_info *);
450 
451 #define	CPUSET_ALL	0xffffffffU	/* xcall to all configured CPUs */
452 
453 #if defined(MULTIPROCESSOR)
454 void cpu_init_system(void);
	/*
	 * Cross-call support: xcall(func, trap, arg0, arg1, arg2, cpuset)
	 * runs func(arg0, arg1, arg2) on the CPUs named by `cpuset';
	 * a non-NULL `trap' is a fast-trap handler used instead
	 * (see struct xpmsg above).
	 */
455 typedef void (*xcall_func_t)(int, int, int);
456 typedef void (*xcall_trap_t)(int, int, int);
457 void xcall(xcall_func_t, xcall_trap_t, int, int, int, u_int);
458 /* from intr.c */
459 void xcallintr(void *);
460 /* Shorthand */
461 #define XCALL0(f,cpuset)		\
462 	xcall((xcall_func_t)f, NULL, 0, 0, 0, cpuset)
463 #define XCALL1(f,a1,cpuset)		\
464 	xcall((xcall_func_t)f, NULL, (int)a1, 0, 0, cpuset)
465 #define XCALL2(f,a1,a2,cpuset)		\
466 	xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, 0, cpuset)
467 #define XCALL3(f,a1,a2,a3,cpuset)	\
468 	xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, (int)a3, cpuset)
469 
	/* FXCALLn variants additionally pass a fast-trap handler `tf' */
470 #define FXCALL0(f,tf,cpuset)		\
471 	xcall((xcall_func_t)f, (xcall_trap_t)tf, 0, 0, 0, cpuset)
472 #define FXCALL1(f,tf,a1,cpuset)		\
473 	xcall((xcall_func_t)f, (xcall_trap_t)tf, (int)a1, 0, 0, cpuset)
474 #define FXCALL2(f,tf,a1,a2,cpuset)	\
475 	xcall((xcall_func_t)f, (xcall_trap_t)tf, (int)a1, (int)a2, 0, cpuset)
476 #define FXCALL3(f,tf,a1,a2,a3,cpuset)	\
477 	xcall((xcall_func_t)f, (xcall_trap_t)tf, (int)a1, (int)a2, (int)a3, cpuset)
478 #else
	/* Uniprocessor kernels: cross-calls compile away to nothing */
479 #define XCALL0(f,cpuset)		/**/
480 #define XCALL1(f,a1,cpuset)		/**/
481 #define XCALL2(f,a1,a2,cpuset)		/**/
482 #define XCALL3(f,a1,a2,a3,cpuset)	/**/
483 #define FXCALL0(f,tf,cpuset)		/**/
484 #define FXCALL1(f,tf,a1,cpuset)		/**/
485 #define FXCALL2(f,tf,a1,a2,cpuset)	/**/
486 #define FXCALL3(f,tf,a1,a2,a3,cpuset)	/**/
487 #endif /* MULTIPROCESSOR */
488 
489 extern int bootmid;			/* Module ID of boot CPU */
	/*
	 * Map a module ID to a CPU number.  NOTE(review): the -8 implies
	 * MIDs start at 8 (MBus), with MID 0 meaning "no MID" -- confirm.
	 */
490 #define CPU_MID2CPUNO(mid)		((mid) != 0 ? (mid) - 8 : 0)
491 
492 extern struct cpu_info *cpus[];		/* indexed by ci_cpuid */
493 #ifdef MULTIPROCESSOR
494 extern u_int cpu_ready_mask;		/* the set of CPUs marked as READY */
495 #endif
496 
	/*
	 * The calling CPU's own cpu_info, via the per-CPU alias mapping at
	 * CPUINFO_VA (see the ci_self comment in struct cpu_info above).
	 */
497 #define cpuinfo	(*(struct cpu_info *)CPUINFO_VA)
498 
499 
500 #endif	/* _sparc_cpuvar_h */
501