/*	$NetBSD: locore.h,v 1.7 2002/05/09 12:25:41 uch Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(SH3) && defined(SH4)
#define	MOV(x, r)	mov.l _L./**/x, r; mov.l @r, r
#define	REG_SYMBOL(x)	_L./**/x:	.long	_C_LABEL(__sh_/**/x)
#define	FUNC_SYMBOL(x)	_L./**/x:	.long	_C_LABEL(__sh_/**/x)
#elif defined(SH3)
#define	MOV(x, r)	mov.l _L./**/x, r
#define	REG_SYMBOL(x)	_L./**/x:	.long	SH3_/**/x
#define	FUNC_SYMBOL(x)	_L./**/x:	.long	_C_LABEL(sh3_/**/x)
#elif defined(SH4)
#define	MOV(x, r)	mov.l _L./**/x, r
#define	REG_SYMBOL(x)	_L./**/x:	.long	SH4_/**/x
#define	FUNC_SYMBOL(x)	_L./**/x:	.long	_C_LABEL(sh4_/**/x)
#endif /* SH3 && SH4 */
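
/*
 * Usage sketch (illustrative only; the name FOO below is hypothetical):
 *
 *	MOV(FOO, r0)		! r0 = CPU-specific value of FOO
 *	...
 *	REG_SYMBOL(FOO)		! emits the _L.FOO literal pool entry
 *
 * On a dual SH3+SH4 kernel, MOV() loads indirectly through a
 * __sh_-prefixed variable initialized at run time; on a single-CPU
 * kernel it loads the SH3_/SH4_ constant (or sh3_/sh4_ symbol)
 * directly, so r0 ends up with the same value either way.
 */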

/*
 * BANK1 r7 contains the kernel stack top address.
 * BANK1 r6 contains the current frame pointer (per process).
 */
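
/*
 * Background on the register banks: when SR.RB = 1 the CPU's r0-r7
 * are the BANK1 copies, and the r0_bank..r7_bank control-register
 * names then refer to the BANK0 copies, which still hold the
 * interrupted context's r0-r7.  Exceptions enter with SR.RB = 1,
 * so the entry macro below runs on BANK1.
 */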
/*
 * __EXCEPTION_ENTRY:
 *	+ set up the stack pointer
 *	+ save all registers to the frame (struct trapframe)
 *	+ set up the kernel stack
 *	+ change register bank from 1 to 0
 *	+ set BANK0 (r4, r5, r6) = (ssr, spc, ssp)
 */
#define	__EXCEPTION_ENTRY						;\
	/* Check kernel/user mode. */					;\
	mov	#0x40,	r3						;\
	swap.b	r3,	r3						;\
	stc	ssr,	r2						;\
	swap.w	r3,	r3	/* r3 = 0x40000000 */			;\
	mov	r2,	r0	/* r2 = r0 = SSR */			;\
	and	r3,	r0						;\
	tst	r0,	r0	/* if (SSR.MD == 0) T = 1 */		;\
	mov	r14,	r1						;\
	mov	r6,	r14	/* frame pointer */			;\
	bf/s	1f		/* T==0 ...Exception from kernel mode */;\
	 mov	r15,	r0						;\
	/* Exception from user mode */					;\
	mov	r7,	r15	/* change to kernel stack */		;\
1:									;\
	/* Save registers */						;\
	mov.l	r1,	@-r14	/* tf_r14 */				;\
	mov.l	r0,	@-r14	/* tf_r15 */				;\
	stc.l	r0_bank,@-r14	/* tf_r0  */				;\
	stc.l	r1_bank,@-r14	/* tf_r1  */				;\
	stc.l	r2_bank,@-r14	/* tf_r2  */				;\
	stc.l	r3_bank,@-r14	/* tf_r3  */				;\
	stc.l	r4_bank,@-r14	/* tf_r4  */				;\
	stc.l	r5_bank,@-r14	/* tf_r5  */				;\
	stc.l	r6_bank,@-r14	/* tf_r6  */				;\
	stc.l	r7_bank,@-r14	/* tf_r7  */				;\
	mov.l	r8,	@-r14	/* tf_r8  */				;\
	mov.l	r9,	@-r14	/* tf_r9  */				;\
	mov.l	r10,	@-r14	/* tf_r10 */				;\
	mov.l	r11,	@-r14	/* tf_r11 */				;\
	mov.l	r12,	@-r14	/* tf_r12 */				;\
	mov.l	r13,	@-r14	/* tf_r13 */				;\
	sts.l	pr,	@-r14	/* tf_pr  */				;\
	sts.l	mach,	@-r14	/* tf_mach*/				;\
	sts.l	macl,	@-r14	/* tf_macl*/				;\
	mov.l	r2,	@-r14	/* tf_ssr */				;\
	stc.l	spc,	@-r14	/* tf_spc */				;\
	add	#-8,	r14	/* skip tf_ubc, tf_expevt */		;\
	mov	r14,	r6	/* store frame pointer */		;\
	/* Change register bank to 0 */					;\
	shlr	r3		/* r3 = 0x20000000 */			;\
	stc	sr,	r1	/* r1 = SR */				;\
	not	r3,	r3						;\
	and	r1,	r3						;\
	ldc	r3,	sr	/* SR.RB = 0 */				;\
	/* Set up argument. r4 = ssr, r5 = spc */			;\
	stc	r2_bank,r4						;\
	stc	spc,	r5
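
/*
 * Usage sketch (illustrative, not a verbatim copy of the real
 * vector code): a handler built from these macros looks roughly
 * like
 *
 *	NENTRY(hypothetical_exception)
 *		__EXCEPTION_ENTRY	! r4 = SSR, r5 = SPC, r14 = frame
 *		...call the C-level handler with those arguments...
 *		__EXCEPTION_RETURN
 */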

/*
 * __EXCEPTION_RETURN:
 *	+ block exceptions
 *	+ restore all registers from the stack
 *	+ rte
 */
#define	__EXCEPTION_RETURN						;\
	mov	#0x10,	r0						;\
	swap.b	r0,	r0						;\
	swap.w	r0,	r0	/* r0 = 0x10000000 */			;\
	stc	sr,	r1						;\
	or	r0,	r1						;\
	ldc	r1,	sr	/* SR.BL = 1 */				;\
	stc	r6_bank,r0						;\
	mov	r0,	r14						;\
	add	#TF_SIZE, r0						;\
	ldc	r0,	r6_bank	/* roll up frame pointer */		;\
	add	#8,	r14	/* skip tf_expevt, tf_ubc */		;\
	mov.l	@r14+,	r0	/* tf_spc */				;\
	ldc	r0,	spc						;\
	mov.l	@r14+,	r0	/* tf_ssr */				;\
	ldc	r0,	ssr						;\
	lds.l	@r14+,	macl	/* tf_macl*/				;\
	lds.l	@r14+,	mach	/* tf_mach*/				;\
	lds.l	@r14+,	pr	/* tf_pr  */				;\
	mov.l	@r14+,	r13	/* tf_r13 */				;\
	mov.l	@r14+,	r12	/* tf_r12 */				;\
	mov.l	@r14+,	r11	/* tf_r11 */				;\
	mov.l	@r14+,	r10	/* tf_r10 */				;\
	mov.l	@r14+,	r9	/* tf_r9  */				;\
	mov.l	@r14+,	r8	/* tf_r8  */				;\
	mov.l	@r14+,	r7	/* tf_r7  */				;\
	mov.l	@r14+,	r6	/* tf_r6  */				;\
	mov.l	@r14+,	r5	/* tf_r5  */				;\
	mov.l	@r14+,	r4	/* tf_r4  */				;\
	mov.l	@r14+,	r3	/* tf_r3  */				;\
	mov.l	@r14+,	r2	/* tf_r2  */				;\
	mov.l	@r14+,	r1	/* tf_r1  */				;\
	mov.l	@r14+,	r0	/* tf_r0  */				;\
	mov.l	@r14+,	r15	/* tf_r15 */				;\
	mov.l	@r14+,	r14	/* tf_r14 */				;\
	rte								;\
	 nop
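
/*
 * For reference, the save/restore order above implies a trapframe
 * laid out as sketched below, in ascending address order (the
 * authoritative definition is struct trapframe in the machine's
 * frame header; this is only what the macros assume):
 *
 *	struct trapframe {
 *		int	tf_expevt;
 *		int	tf_ubc;
 *		int	tf_spc;
 *		int	tf_ssr;
 *		int	tf_macl;
 *		int	tf_mach;
 *		int	tf_pr;
 *		int	tf_r13, tf_r12, tf_r11, tf_r10, tf_r9, tf_r8;
 *		int	tf_r7, tf_r6, tf_r5, tf_r4, tf_r3, tf_r2, tf_r1, tf_r0;
 *		int	tf_r15;
 *		int	tf_r14;
 *	};
 */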

/*
 * Macros to disable and enable exceptions (including interrupts).
 * These modify SR.BL.
 */
#define	__0x10	#0x10
#define	__0x78	#0x78

#define	__EXCEPTION_BLOCK(Rn, Rm)					;\
	mov	__0x10,	Rn						;\
	swap.b	Rn,	Rn						;\
	swap.w	Rn,	Rn	/* Rn = 0x10000000 */			;\
	stc	sr,	Rm						;\
	or	Rn,	Rm						;\
	ldc	Rm,	sr	/* block exceptions */

#define	__EXCEPTION_UNBLOCK(Rn, Rm)					;\
	mov	__0x10,	Rn						;\
	swap.b	Rn,	Rn						;\
	swap.w	Rn,	Rn	/* Rn = 0x10000000 */			;\
	not	Rn,	Rn						;\
	stc	sr,	Rm						;\
	and	Rn,	Rm						;\
	ldc	Rm,	sr	/* unblock exceptions */
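
/*
 * Usage sketch (illustrative): the macros take two scratch
 * registers and are used as a matched pair around code that must
 * not be entered by an exception:
 *
 *	__EXCEPTION_BLOCK(r0, r1)
 *	... critical section ...
 *	__EXCEPTION_UNBLOCK(r0, r1)
 */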

/*
 * Macros to disable and enable interrupts.
 * These modify SR.I[0-3] (the interrupt mask field).
 */
#define	__INTR_MASK(Rn, Rm)						;\
	mov	__0x78,	Rn						;\
	shll	Rn		/* Rn = 0x000000f0 */			;\
	stc	sr,	Rm						;\
	or	Rn,	Rm						;\
	ldc	Rm,	sr	/* mask all interrupts */

#define	__INTR_UNMASK(Rn, Rm)						;\
	mov	__0x78,	Rn						;\
	shll	Rn		/* Rn = 0x000000f0 */			;\
	not	Rn,	Rn						;\
	stc	sr,	Rm						;\
	and	Rn,	Rm						;\
	ldc	Rm,	sr	/* unmask all interrupts */
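
/*
 * Note on the odd constant: `mov #imm8' sign-extends its immediate,
 * so 0xf0 cannot be loaded in one instruction; hence 0x78 << 1.
 * Bits 4-7 of SR form the IMASK field, and setting them all
 * (interrupt level 15) masks every maskable interrupt.
 */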

#ifndef _LOCORE
void sh3_switch_setup(struct proc *);
void sh4_switch_setup(struct proc *);
void sh3_switch_resume(struct proc *);
void sh4_switch_resume(struct proc *);
extern void (*__sh_switch_resume)(struct proc *);
#endif /* !_LOCORE */
207