/*	$OpenBSD: cpu.h,v 1.118 2017/06/11 03:35:30 visa Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell and Rick Macklem.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 *	from: @(#)cpu.h	8.4 (Berkeley) 1/4/94
 */

#ifndef _MIPS64_CPU_H_
#define	_MIPS64_CPU_H_

#ifndef _LOCORE

/*
 * MIPS32-style segment definitions.
 * They only cover the first 512MB of physical addresses.
 */
#define	CKSEG0_BASE		0xffffffff80000000UL
#define	CKSEG1_BASE		0xffffffffa0000000UL
#define	CKSSEG_BASE		0xffffffffc0000000UL
#define	CKSEG3_BASE		0xffffffffe0000000UL
#define	CKSEG_SIZE		0x0000000020000000UL

#define	CKSEG0_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	CKSEG1_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	PHYS_TO_CKSEG0(x)	((u_long)(x) | CKSEG0_BASE)
#define	PHYS_TO_CKSEG1(x)	((u_long)(x) | CKSEG1_BASE)
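
/*
 * Illustrative example (the physical address below is hypothetical):
 * a boot-time console driver could access a device ROM at physical
 * address 0x1fc00000 through the uncached CKSEG1 window:
 *
 *	volatile uint8_t *rom =
 *	    (volatile uint8_t *)PHYS_TO_CKSEG1(0x1fc00000UL);
 */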

/*
 * MIPS64-style segment definitions.
 * These allow for 36 bits of addressable physical memory, thus 64GB.
 */

/*
 * Cache Coherency Attributes.
 */
/* r8k only */
#define	CCA_NC_COPROCESSOR	0UL	/* uncached, coprocessor ordered */
/* common to r4, r5k, r8k and r1xk */
#define	CCA_NC			2UL	/* uncached, write-around */
#define	CCA_NONCOHERENT		3UL	/* cached, non-coherent, write-back */
/* r8k, r1xk only */
#define	CCA_COHERENT_EXCL	4UL	/* cached, coherent, exclusive */
#define	CCA_COHERENT_EXCLWRITE	5UL	/* cached, coherent, exclusive write */
/* r4k only */
#define	CCA_COHERENT_UPDWRITE	6UL	/* cached, coherent, update on write */
/* r1xk only */
#define	CCA_NC_ACCELERATED	7UL	/* uncached accelerated */

#ifdef TGT_COHERENT
#define	CCA_CACHED		CCA_COHERENT_EXCLWRITE
#else
#define	CCA_CACHED		CCA_NONCOHERENT
#endif

/*
 * Uncached spaces.
 * R1x000 processors use bits 58:57 of uncached virtual addresses (CCA_NC)
 * to select different spaces. Unfortunately, other processors need these
 * bits to be zero, so uncached addresses have to be decided at runtime.
 */
#define	SP_HUB			0UL	/* Hub space */
#define	SP_IO			1UL	/* I/O space */
#define	SP_SPECIAL		2UL	/* Memory Special space */
#define	SP_NC			3UL	/* Memory Uncached space */

#define	XKSSSEG_BASE		0x4000000000000000UL
#define	XKPHYS_BASE		0x8000000000000000UL
#define	XKSSEG_BASE		0xc000000000000000UL

#define	XKPHYS_TO_PHYS(x)	((paddr_t)(x) & 0x0000000fffffffffUL)
#define	PHYS_TO_XKPHYS(x,c)	((paddr_t)(x) | XKPHYS_BASE | ((c) << 59))
#define	PHYS_TO_XKPHYS_UNCACHED(x,s) \
	(PHYS_TO_XKPHYS(x, CCA_NC) | ((s) << 57))
#define	IS_XKPHYS(va)		(((va) >> 62) == 2)
#define	XKPHYS_TO_CCA(x)	(((x) >> 59) & 0x07)
#define	XKPHYS_TO_SP(x)		(((x) >> 57) & 0x03)
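
/*
 * Illustrative example: XKPHYS addresses embed the CCA in bits 61:59,
 * so a physical address can be mapped cached or uncached without a TLB
 * entry, and round-tripped back (the address is hypothetical):
 *
 *	paddr_t pa = 0x20000000UL;
 *	vaddr_t va = PHYS_TO_XKPHYS(pa, CCA_CACHED);
 *	if (IS_XKPHYS(va))
 *		pa = XKPHYS_TO_PHYS(va);	(recovers 0x20000000)
 */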

#endif	/* _LOCORE */

/*
 * Exported definitions unique to mips cpu support.
 */

#if defined(_KERNEL) && !defined(_LOCORE)

#include <sys/device.h>
#include <machine/intr.h>
#include <sys/sched.h>

struct cpu_hwinfo {
	uint32_t	c0prid;
	uint32_t	c1prid;
	uint32_t	clock;	/* Hz */
	uint32_t	tlbsize;
	uint		type;
	uint32_t	l2size;
};

/*
 * Cache memory configuration. One struct per cache.
 */
struct cache_info {
	uint		size;		/* total cache size */
	uint		linesize;	/* line size */
	uint		setsize;	/* set size */
	uint		sets;		/* number of sets */
};

struct cpu_info {
	struct device	*ci_dev;	/* our device */
	struct cpu_info	*ci_self;	/* pointer to this structure */
	struct cpu_info	*ci_next;	/* next cpu */
	struct proc	*ci_curproc;
	struct user	*ci_curprocpaddr;
	struct proc	*ci_fpuproc;	/* pointer to last proc to use FP */
	uint32_t	 ci_delayconst;
	struct cpu_hwinfo
			ci_hw;

#if defined(MULTIPROCESSOR)
	struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM];
#endif

	/* cache information and pending flush state */
	uint		ci_cacheconfiguration;
	uint64_t	ci_cachepending_l1i;
	struct cache_info
			ci_l1inst,
			ci_l1data,
			ci_l2,
			ci_l3;

	/* function pointers for the cache handling routines */
	void		(*ci_SyncCache)(struct cpu_info *);
	void		(*ci_InvalidateICache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_InvalidateICachePage)(struct cpu_info *, vaddr_t);
	void		(*ci_SyncICache)(struct cpu_info *);
	void		(*ci_SyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCache)(struct cpu_info *, vaddr_t, size_t);
	void		(*ci_HitInvalidateDCache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_IOSyncDCache)(struct cpu_info *, vaddr_t, size_t,
			    int);

	struct schedstate_percpu
			ci_schedstate;
	int		ci_want_resched;	/* need_resched() invoked */
	cpuid_t		ci_cpuid;		/* our CPU ID */
	uint32_t	ci_randseed;		/* per cpu random seed */
	int		ci_ipl;			/* software IPL */
	uint32_t	ci_softpending;		/* pending soft interrupts */
	int		ci_clock_started;
	u_int32_t	ci_cpu_counter_last;	/* last compare value loaded */
	u_int32_t	ci_cpu_counter_interval; /* # of counter ticks/tick */

	u_int32_t	ci_pendingticks;

#ifdef TGT_ORIGIN
	u_int16_t	ci_nasid;
	u_int16_t	ci_slice;
#endif

	struct pmap	*ci_curpmap;
	uint		ci_intrdepth;		/* interrupt depth */
#ifdef MULTIPROCESSOR
	u_long		ci_flags;		/* flags; see below */
	struct intrhand	ci_ipiih;
#endif
	volatile int    ci_ddb;
#define	CI_DDB_RUNNING		0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

#ifdef DIAGNOSTIC
	int	ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
#endif
};

#define	CPUF_PRIMARY	0x01		/* CPU is primary CPU */
#define	CPUF_PRESENT	0x02		/* CPU is present */
#define	CPUF_RUNNING	0x04		/* CPU is running */

extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;
#define CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
					    ci != NULL; ci = ci->ci_next)

#define CPU_INFO_UNIT(ci)               ((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)
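
/*
 * Illustrative example: walking the list of attached CPUs with the
 * iterator macros defined above (the output format is hypothetical):
 *
 *	struct cpu_info *ci;
 *	CPU_INFO_ITERATOR cii;
 *
 *	CPU_INFO_FOREACH(cii, ci)
 *		printf("cpu%lu: ipl %d\n", ci->ci_cpuid, ci->ci_ipl);
 */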

extern void (*cpu_idle_cycle_func)(void);
#define cpu_idle_cycle()		(*cpu_idle_cycle_func)()

#ifdef MULTIPROCESSOR
#define MAXCPUS				4
#define getcurcpu()			hw_getcurcpu()
#define setcurcpu(ci)			hw_setcurcpu(ci)
extern struct cpu_info *get_cpu_info(int);
#define curcpu() getcurcpu()
#define	CPU_IS_PRIMARY(ci)		((ci)->ci_flags & CPUF_PRIMARY)
#define cpu_number()			(curcpu()->ci_cpuid)

extern struct cpuset cpus_running;
void cpu_unidle(struct cpu_info *);
void cpu_boot_secondary_processors(void);
#define cpu_boot_secondary(ci)          hw_cpu_boot_secondary(ci)
#define cpu_hatch(ci)                   hw_cpu_hatch(ci)

vaddr_t alloc_contiguous_pages(size_t);

#define MIPS64_IPI_NOP		0x00000001
#define MIPS64_IPI_RENDEZVOUS	0x00000002
#define MIPS64_IPI_DDB		0x00000004
#define MIPS64_NIPIS		3	/* must not exceed 32 */

void	mips64_ipi_init(void);
void	mips64_send_ipi(unsigned int, unsigned int);
void	smp_rendezvous_cpus(unsigned long, void (*)(void *), void *arg);

#include <sys/mplock.h>
#else
#define MAXCPUS				1
#define curcpu()			(&cpu_info_primary)
#define	CPU_IS_PRIMARY(ci)		1
#define cpu_number()			0
#define cpu_unidle(ci)
#define get_cpu_info(i)			(&cpu_info_primary)
#endif

#define CPU_BUSY_CYCLE()	do {} while (0)

extern void (*md_startclock)(struct cpu_info *);
void	cp0_calibrate(struct cpu_info *);

#include <machine/frame.h>

/*
 * Arguments to hardclock encapsulate the previous machine state in
 * an opaque clockframe.
 */
#define	clockframe trapframe	/* Use normal trap frame */

#define	SR_KSU_USER		0x00000010
#define	CLKF_USERMODE(framep)	((framep)->sr & SR_KSU_USER)
#define	CLKF_PC(framep)		((framep)->pc)
#define	CLKF_INTR(framep)	(curcpu()->ci_intrdepth > 1)	/* XXX */

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)	((p)->p_md.md_regs->pc)
#define	PROC_STACK(p)	((p)->p_md.md_regs->sp)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	need_resched(ci) \
	do { \
		(ci)->ci_want_resched = 1; \
		if ((ci)->ci_curproc != NULL) \
			aston((ci)->ci_curproc); \
	} while(0)
#define	clear_resched(ci) 	(ci)->ci_want_resched = 0

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On MIPS designs, request an ast to send us
 * through trap, marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

/*
 * Notify the current process (p) that it has a signal pending; it will
 * be processed as soon as possible.
 */
#ifdef MULTIPROCESSOR
#define	signotify(p)		(aston(p), cpu_unidle((p)->p_cpu))
#else
#define	signotify(p)		aston(p)
#endif

#define	aston(p)		((p)->p_md.md_astpending = 1)

#ifdef CPU_R8000
#define	mips_sync()		__asm__ volatile ("lw $0, 0(%0)" :: \
				    "r" (PHYS_TO_XKPHYS(0, CCA_NC)) : "memory")
#else
#define	mips_sync()		__asm__ volatile ("sync" ::: "memory")
#endif
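
/*
 * Illustrative example: mips_sync() acts as a full memory barrier.  A
 * driver might use it to make sure a descriptor update is visible
 * before a doorbell register is written (names below are hypothetical):
 *
 *	desc->flags |= DESC_READY;
 *	mips_sync();
 *	*doorbell = 1;
 */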

#endif /* _KERNEL && !_LOCORE */

#ifdef _KERNEL
/*
 * Values for the code field in a break instruction.
 */
#define	BREAK_INSTR		0x0000000d
#define	BREAK_VAL_MASK		0x03ff0000
#define	BREAK_VAL_SHIFT		16
#define	BREAK_KDB_VAL		512
#define	BREAK_SSTEP_VAL		513
#define	BREAK_BRKPT_VAL		514
#define	BREAK_SOVER_VAL		515
#define	BREAK_DDB_VAL		516
#define	BREAK_FPUEMUL_VAL	517
#define	BREAK_KDB	(BREAK_INSTR | (BREAK_KDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SSTEP	(BREAK_INSTR | (BREAK_SSTEP_VAL << BREAK_VAL_SHIFT))
#define	BREAK_BRKPT	(BREAK_INSTR | (BREAK_BRKPT_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SOVER	(BREAK_INSTR | (BREAK_SOVER_VAL << BREAK_VAL_SHIFT))
#define	BREAK_DDB	(BREAK_INSTR | (BREAK_DDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_FPUEMUL	(BREAK_INSTR | (BREAK_FPUEMUL_VAL << BREAK_VAL_SHIFT))
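
/*
 * Illustrative example: a breakpoint trap handler can recover the code
 * field from the faulting instruction and dispatch on it (the insn
 * variable is assumed to hold the fetched break instruction):
 *
 *	u_int code = (insn & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT;
 *	switch (code) {
 *	case BREAK_SSTEP_VAL:
 *		...	(single-step breakpoint)
 *	case BREAK_BRKPT_VAL:
 *		...	(debugger breakpoint)
 *	}
 */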

#endif /* _KERNEL */

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_ALLOWAPERTURE	1	/* allow mmap of /dev/xf86 */
		/*		2	   formerly: keyboard reset */
#define	CPU_LIDSUSPEND		3	/* lid close causes a suspend */
#define CPU_LIDACTION		4	/* action caused by lid close */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES {			\
	{ 0, 0 },				\
	{ "allowaperture", CTLTYPE_INT },	\
	{ 0, 0 },				\
	{ "lidsuspend", CTLTYPE_INT },		\
	{ "lidaction", CTLTYPE_INT },		\
}
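
/*
 * Illustrative example: reading one of these machdep sysctl values from
 * userland (error handling omitted):
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_ALLOWAPERTURE };
 *	int value;
 *	size_t len = sizeof(value);
 *	sysctl(mib, 2, &value, &len, NULL, 0);
 */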

/*
 * MIPS CPU types (cp_imp).
 */
#define	MIPS_R2000	0x01	/* MIPS R2000 CPU		ISA I   */
#define	MIPS_R3000	0x02	/* MIPS R3000 CPU		ISA I   */
#define	MIPS_R6000	0x03	/* MIPS R6000 CPU		ISA II	*/
#define	MIPS_R4000	0x04	/* MIPS R4000/4400 CPU		ISA III	*/
#define	MIPS_R3LSI	0x05	/* LSI Logic R3000 derivative	ISA I	*/
#define	MIPS_R6000A	0x06	/* MIPS R6000A CPU		ISA II	*/
#define	MIPS_CN50XX	0x06	/* Cavium OCTEON CN50xx		MIPS64R2*/
#define	MIPS_R3IDT	0x07	/* IDT R3000 derivative		ISA I	*/
#define	MIPS_R10000	0x09	/* MIPS R10000/T5 CPU		ISA IV  */
#define	MIPS_R4200	0x0a	/* MIPS R4200 CPU (ICE)		ISA III */
#define	MIPS_R4300	0x0b	/* NEC VR4300 CPU		ISA III */
#define	MIPS_R4100	0x0c	/* NEC VR41xx CPU MIPS-16	ISA III */
#define	MIPS_R12000	0x0e	/* MIPS R12000			ISA IV  */
#define	MIPS_R14000	0x0f	/* MIPS R14000			ISA IV  */
#define	MIPS_R8000	0x10	/* MIPS R8000 Blackbird/TFP	ISA IV  */
#define	MIPS_R4600	0x20	/* PMCS R4600 Orion		ISA III */
#define	MIPS_R4700	0x21	/* PMCS R4700 Orion		ISA III */
#define	MIPS_R3TOSH	0x22	/* Toshiba R3000 based CPU	ISA I	*/
#define	MIPS_R5000	0x23	/* MIPS R5000 CPU		ISA IV  */
#define	MIPS_RM7000	0x27	/* PMCS RM7000 CPU		ISA IV  */
#define	MIPS_RM52X0	0x28	/* PMCS RM52X0 CPU		ISA IV  */
#define	MIPS_RM9000	0x34	/* PMCS RM9000 CPU		ISA IV  */
#define	MIPS_LOONGSON	0x42	/* STC LoongSon CPU		ISA III */
#define	MIPS_VR5400	0x54	/* NEC Vr5400 CPU		ISA IV+ */
#define	MIPS_LOONGSON2	0x63	/* STC LoongSon2/3 CPU		ISA III+ */
#define	MIPS_CN61XX	0x93	/* Cavium OCTEON II CN6[01]xx	MIPS64R2 */
#define	MIPS_CN71XX	0x96	/* Cavium OCTEON III CN7[01]xx	MIPS64R2 */
#define	MIPS_CN73XX	0x97	/* Cavium OCTEON III CN7[23]xx	MIPS64R2 */

/*
 * MIPS FPU types. Only the software emulation type is listed here; the
 * other values match the CPU types above.
 */
#define	MIPS_SOFT	0x00	/* Software emulation		ISA I   */


#if defined(_KERNEL) && !defined(_LOCORE)

extern register_t protosr;
extern int cpu_has_userlocal;

struct exec_package;
struct user;

void	tlb_asid_wrap(struct cpu_info *);
void	tlb_flush(int);
void	tlb_flush_addr(vaddr_t);
void	tlb_init(unsigned int);
int64_t	tlb_probe(vaddr_t);
void	tlb_set_gbase(vaddr_t, vsize_t);
void	tlb_set_page_mask(uint32_t);
void	tlb_set_pid(u_int);
void	tlb_set_wired(uint32_t);
int	tlb_update(vaddr_t, register_t);
void	tlb_update_indexed(vaddr_t, register_t, register_t, uint);

void	build_trampoline(vaddr_t, vaddr_t);
void	cpu_switchto_asm(struct proc *, struct proc *);
int	exec_md_map(struct proc *, struct exec_package *);
void	savectx(struct user *, int);

void	enable_fpu(struct proc *);
void	save_fpu(void);
int	fpe_branch_emulate(struct proc *, struct trapframe *, uint32_t,
	    vaddr_t);
void	MipsSaveCurFPState(struct proc *);
void	MipsSaveCurFPState16(struct proc *);
void	MipsSwitchFPState(struct proc *, struct trapframe *);
void	MipsSwitchFPState16(struct proc *, struct trapframe *);

int	guarded_read_1(paddr_t, uint8_t *);
int	guarded_read_2(paddr_t, uint16_t *);
int	guarded_read_4(paddr_t, uint32_t *);
int	guarded_write_4(paddr_t, uint32_t);
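
/*
 * Illustrative example, assuming a non-zero return value means the
 * access faulted: probing for a device register without taking a fatal
 * bus error (the address is hypothetical):
 *
 *	uint32_t id;
 *	if (guarded_read_4(0x1f000000UL, &id) != 0)
 *		return ENXIO;	(nothing responded at that address)
 */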

void	MipsFPTrap(struct trapframe *);
register_t MipsEmulateBranch(struct trapframe *, vaddr_t, uint32_t, uint32_t);

int	classify_insn(uint32_t);
#define	INSNCLASS_NEUTRAL	0
#define	INSNCLASS_CALL		1
#define	INSNCLASS_BRANCH	2

/*
 * R4000 end-of-page errata workaround routines
 */

extern int r4000_errata;
u_int	eop_page_check(paddr_t);
void	eop_tlb_flush_addr(struct pmap *, vaddr_t, u_long);
int	eop_tlb_miss_handler(struct trapframe *, struct cpu_info *,
	    struct proc *);
void	eop_cleanup(struct trapframe *, struct proc *);

/*
 * Low level access routines to CPU registers
 */

void	setsoftintr0(void);
void	clearsoftintr0(void);
void	setsoftintr1(void);
void	clearsoftintr1(void);
register_t enableintr(void);
register_t disableintr(void);
register_t getsr(void);
register_t setsr(register_t);

u_int	cp0_get_count(void);
register_t cp0_get_config(void);
uint32_t cp0_get_config_1(void);
uint32_t cp0_get_config_2(void);
uint32_t cp0_get_config_3(void);
uint32_t cp0_get_config_4(void);
uint32_t cp0_get_pagegrain(void);
register_t cp0_get_prid(void);
void	cp0_reset_cause(register_t);
void	cp0_set_compare(u_int);
void	cp0_set_config(register_t);
void	cp0_set_pagegrain(uint32_t);
void	cp0_set_trapbase(register_t);
u_int	cp1_get_prid(void);

static inline uint32_t
cp0_get_hwrena(void)
{
	uint32_t value;
	__asm__ volatile ("mfc0 %0, $7" : "=r" (value));
	return value;
}

static inline void
cp0_set_hwrena(uint32_t value)
{
	__asm__ volatile ("mtc0 %0, $7" : : "r" (value));
}

static inline void
cp0_set_userlocal(void *value)
{
	__asm__ volatile (
	"	.set	push\n"
	"	.set	mips64r2\n"
	"	dmtc0	%0, $4, 2\n"
	"	.set	pop\n"
	: : "r" (value));
}
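
/*
 * Illustrative example: on CPUs with UserLocal support, the kernel can
 * load a thread's TCB pointer into UserLocal and let userland fetch it
 * with rdhwr; HWREna bit 29 is the architected UserLocal enable (the
 * tcb variable below is hypothetical):
 *
 *	if (cpu_has_userlocal) {
 *		cp0_set_userlocal(tcb);
 *		cp0_set_hwrena(cp0_get_hwrena() | (1U << 29));
 *	}
 */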

/*
 * Cache routines (may be overridden)
 */

#ifndef	Mips_SyncCache
#define	Mips_SyncCache(ci) \
	((ci)->ci_SyncCache)(ci)
#endif
#ifndef	Mips_InvalidateICache
#define	Mips_InvalidateICache(ci, va, l) \
	((ci)->ci_InvalidateICache)(ci, va, l)
#endif
#ifndef	Mips_InvalidateICachePage
#define	Mips_InvalidateICachePage(ci, va) \
	((ci)->ci_InvalidateICachePage)(ci, va)
#endif
#ifndef	Mips_SyncICache
#define	Mips_SyncICache(ci) \
	((ci)->ci_SyncICache)(ci)
#endif
#ifndef	Mips_SyncDCachePage
#define	Mips_SyncDCachePage(ci, va, pa) \
	((ci)->ci_SyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCachePage
#define	Mips_HitSyncDCachePage(ci, va, pa) \
	((ci)->ci_HitSyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCache
#define	Mips_HitSyncDCache(ci, va, l) \
	((ci)->ci_HitSyncDCache)(ci, va, l)
#endif
#ifndef	Mips_HitInvalidateDCache
#define	Mips_HitInvalidateDCache(ci, va, l) \
	((ci)->ci_HitInvalidateDCache)(ci, va, l)
#endif
#ifndef	Mips_IOSyncDCache
#define	Mips_IOSyncDCache(ci, va, l, h) \
	((ci)->ci_IOSyncDCache)(ci, va, l, h)
#endif
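
/*
 * Illustrative example: after the kernel writes instructions into a
 * page (e.g. generated trampoline code), the data cache must be written
 * back and the instruction cache invalidated for that range before the
 * code is executed (va and len are assumed to describe the range):
 *
 *	Mips_HitSyncDCache(curcpu(), va, len);
 *	Mips_InvalidateICache(curcpu(), va, len);
 */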

#endif /* _KERNEL && !_LOCORE */
#endif /* !_MIPS64_CPU_H_ */