xref: /netbsd/sys/arch/sh3/sh3/locore_subr.S (revision bf9ec67e)
1/*	$NetBSD: locore_subr.S,v 1.10 2002/05/09 12:28:08 uch Exp $	*/
2
3/*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *        This product includes software developed by the NetBSD
18 *        Foundation, Inc. and its contributors.
19 * 4. Neither the name of The NetBSD Foundation nor the names of its
20 *    contributors may be used to endorse or promote products derived
21 *    from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36#include "opt_cputype.h"
37#include "opt_ddb.h"
38#include "opt_kgdb.h"
39#include "assym.h"
40
41#include <sys/syscall.h>	/* SYS___sigreturn14, SYS_exit */
42#include <sh3/asm.h>
43#include <sh3/locore.h>
44#include <sh3/param.h>		/* UPAGES */
45#include <sh3/mmu_sh3.h>
46#include <sh3/mmu_sh4.h>
47
48	.text
49	.align 5	/* align cache line size (32B) */
50/*
51 * void cpu_switch(struct proc *):
52 *	Find a runnable process and switch to it.  Wait if necessary.
53 */
ENTRY(cpu_switch)
	/*
	 * r4 = current (old) proc.
	 * Save the old proc's callee context into its switchframe.
	 * _L.SF is the byte offset of p->p_md.md_pcb within struct proc;
	 * the pcb pointer loaded from there addresses the switchframe.
	 */
	/* Save current process's context to switchframe */
	mov.l	_L.SF,	r0
	mov.l	@(r0, r4), r1		/* r1 = old proc's switchframe */
	add	#SF_SIZE, r1		/* point past frame; push via @-r1 */
	stc.l	r7_bank,@-r1		/* kernel stack top */
	stc.l	sr,	@-r1		/* status register */
	stc.l	r6_bank,@-r1		/* current frame pointer */
	sts.l	pr,	@-r1		/* return address */
	mov.l	r8,	@-r1		/* callee-saved r8..r15 */
	mov.l	r9,	@-r1
	mov.l	r10,	@-r1
	mov.l	r11,	@-r1
	mov.l	r12,	@-r1
	mov.l	r13,	@-r1
	mov.l	r14,	@-r1
	mov.l	r15,	@-r1		/* r15 ends up at lowest address */

	/* Search next process. cpu_switch_search may or may not sleep. */
	mov.l	_L.cpu_switch_search, r0
	jsr	@r0
	 mov	r4,	r8	/* delay slot: save old proc across the call */

	/* Skip context switch if same process. */
	cmp/eq	r8,	r0	/* cpu_switch_search returned new proc in r0 */
	bt/s	1f
	 mov	r0,	r4	/* delay slot (always runs): r4 = new proc */

	/* Setup kernel stack */
	mov.l	_L.SF,	r0
	mov.l	@(r0, r4), r1		/* switch frame */
	mov.l	@(SF_R7_BANK, r1), r0	/* stack top */
	mov.l	@(SF_R6_BANK, r1), r2	/* current frame */
	mov.l	@(SF_R15, r1), r3	/* current stack */
	/* During kernel stack switching, all interrupts are disabled. */
	__EXCEPTION_BLOCK(r1, r5)
	/* switch to new kernel stack */
	ldc	r0,	r7_bank
	ldc	r2,	r6_bank
	mov	r3,	r15

	/*
	 * Wire the new proc's u-area into the TLB.  switch_resume is a
	 * CPU-dependent entry point (sh3_/sh4_switch_resume below),
	 * resolved through FUNC_SYMBOL/MOV.
	 */
	MOV	(switch_resume, r0)
	jsr	@r0
	 mov	r4,	r8	/* delay slot: save new proc across the call */
	mov	r8,	r4	/* recover new proc into r4 */
	__EXCEPTION_UNBLOCK(r0, r1)
	/* Now OK to use kernel stack. */

	/* Restore new process's context from switchframe */
1:	mov.l	_L.SF,	r0
	mov.l	@(r0, r4), r1
	add	#4,	r1		/* skip r15 slot: r15 already restored */
	mov.l	@r1+,	r14		/* pop in reverse of the save order */
	mov.l	@r1+,	r13
	mov.l	@r1+,	r12
	mov.l	@r1+,	r11
	mov.l	@r1+,	r10
	mov.l	@r1+,	r9
	mov.l	@r1+,	r8
	lds.l	@r1+,	pr
	add	#4,	r1		/* skip slot: r6_bank already restored */
	ldc.l	@r1+,	sr
	rts
	 nop
	.align	2
_L.SF:			.long	(P_MD + MD_PCB)
_L.cpu_switch_search:	.long	_C_LABEL(cpu_switch_search)
FUNC_SYMBOL(switch_resume)
123
#ifdef SH3
/*
 * void sh3_switch_resume(struct proc *p)
 *	Point curupte at the new proc's u-area PTE array
 *	(p->p_md.md_upte) for the SH3 TLB-miss handler.
 *	No TLB entries need to be flushed here: the u-area mapping is
 *	wired and never causes a modified/referenced fault, so u-area
 *	TLB faults only ever appear as TLB miss exceptions.
 *	If a "VPN match but not Valid" situation occurred, the SH3 would
 *	enter the generic exception handler instead of the TLB miss
 *	exception handler; NetBSD/sh3 does not handle that case, and the
 *	result would be a hard reset (the kernel stack would become
 *	inaccessible).
 */
NENTRY(sh3_switch_resume)
	mov.l	_L.UPTE, r0	/* offset of p->p_md.md_upte (P_MD + MD_UPTE) */
	add	r4,	r0	/* r0 = &p->p_md.md_upte[0] */
	mov.l	_L.curupte, r1
	mov.l	r0,	@r1	/* curupte = &p->p_md.md_upte[0] */
	rts
	 nop
	.align	2
_L.curupte:		.long	_C_LABEL(curupte)
#endif /* SH3 */
146
#ifdef SH4
/*
 * void sh4_switch_resume(struct proc *p)
 *	Wire the new proc's u-area into the UTLB.  First invalidate any
 *	TLB entry matching the kernel-stack VPN to prevent a TLB
 *	multiple hit, then write UPAGES {address, data} pairs from
 *	p->p_md.md_upte into the memory-mapped UTLB arrays.
 *	Runs with exceptions blocked (called from cpu_switch under
 *	__EXCEPTION_BLOCK).
 */
NENTRY(sh4_switch_resume)
	mov.l	_L.UPTE,r0
	add	r0,	r4	/* p->p_md.md_upte */
	mov	#UPAGES,r3	/* r3 = loop bound */
	mov	#1,	r2	/* r2 = loop counter, 1..UPAGES */
	mov.l	@r4,	r0	/* if (p->p_md.md_upte[0].addr == 0) return; */
	tst	r0,	r0
	bt	2f

	/* Save old ASID and set ASID to zero */
	xor	r0,	r0
	mov.l	_L.4_PTEH, r1
	mov.l	@r1,	r7	/* r7 = saved PTEH (holds ASID) */
	mov.l	r0,	@r1	/* PTEH = 0 */

	mov.l	_L.VPN_MASK, r6		/* r6 = 0xfffff000 */
	mov.l	_L.4_UTLB_AA_A, r5	/* r5 = UTLB address array, A bit set */

	/* TLB address array must be accessed via P2. Setup jump address. */
	mova	1f,	r0
	mov.l	_L.P2BASE, r1	/* 0xa0000000: P2 (uncached) mirror */
	or	r1,	r0
	jmp	@r0		/* run P2 */
	 nop

	/* Probe VPN match TLB entry and invalidate it. */
	.align	2		/* mova target must be 4byte alignment */
1:	mov.l	@(4, r4), r0	/* entry's data word */
	and	r6,	r0	/* keep VPN only */
	mov.l	r0,	@r5	/* associative write: clear D, V */

	/* Wire u-area TLB entry */
	/* Address array */
	mov.l	@r4+,	r0	/* addr */
	mov.l	@r4+,	r1	/* data */
	mov.l	r1,	@r0	/* *addr = data */

	/* Data array */
	mov.l	@r4+,	r0	/* addr */
	mov.l	@r4+,	r1	/* data */
	mov.l	r1,	@r0	/* *addr = data */
	cmp/eq	r2,	r3	/* done UPAGES entries? */
	bf/s	1b
	 add	#1,	r2	/* delay slot (always runs): r2++ */

	/* restore ASID */
	mov.l	_L.4_PTEH, r0
	mov.l	r7,	@r0	/* PTEH = saved value */
	mova	2f,	r0	/* jump back to a P1 (cached) address */
	jmp	@r0		/* run P1 */
	 nop
	.align	2
2:	rts			/* mova target must be 4byte alignment */
	 nop
	.align	2
_L.4_PTEH:		.long	SH4_PTEH
_L.4_UTLB_AA_A:		.long	(SH4_UTLB_AA | SH4_UTLB_A)
_L.4_ITLB_AA:		.long	SH4_ITLB_AA	/* NOTE(review): unreferenced in this function */
_L.VPN_MASK:		.long	0xfffff000
_L.P2BASE:		.long	0xa0000000
#endif /* SH4 */
_L.UPTE:		.long	(P_MD + MD_UPTE)	/* shared by SH3/SH4 resume */
215
216/*
217 * int _cpu_intr_raise(int s):
218 *	raise SR.IMASK to 's'. if current SR.IMASK is greater equal 's',
219 *	nothing to do. returns previous SR.IMASK.
220 */
NENTRY(_cpu_intr_raise)
	stc	sr,	r2	/* r2 = SR */
	mov	#0x78,	r1	/* build 0xf0 via shll; mov #imm8 sign-extends */
	mov	r2,	r0
	shll	r1		/* r1 = 0xf0 (SR.IMASK field) */
	and	r1,	r0	/* r0 = SR & 0xf0 = old IMASK (return value) */
	cmp/ge	r4,	r0	/* r0 >= r4 ? T = 1 */
	bt/s	1f		/* already masked >= 's': nothing to do */
	 not	r1,	r1	/* delay slot (runs either way): r1 = 0xffffff0f */
	and	r1,	r2	/* r2 = SR & ~0xf0 */
	or	r2,	r4	/* r4 = (SR & ~0xf0) | s */
	ldc	r4,	sr	/* SR = r4 (don't move to delay slot) */
1:	rts
	 nop	/* return (SR & 0xf0) already in r0 */
235
236/*
237 * int _cpu_intr_suspend(void):
238 *	Mask all external interrupt. Returns previous SR.IMASK.
239 */
NENTRY(_cpu_intr_suspend)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x78,	r1	/* build 0xf0 via shll; mov #imm8 sign-extends */
	shll	r1		/* r1 = 0x000000f0 (SR.IMASK field) */
	mov	r0,	r2	/* r2 = SR */
	or	r1,	r2	/* r2 |= 0x000000f0: mask all interrupt levels */
	ldc	r2,	sr	/* SR = r2 */
	rts
	 and	r1,	r0	/* delay slot: return old SR & 0x000000f0 */
249
250/*
251 * int _cpu_intr_resume(int s):
252 *	Set 's' to SR.IMASK. Returns previous SR.IMASK.
253 */
NENTRY(_cpu_intr_resume)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x78,	r2	/* build 0xf0 via shll; mov #imm8 sign-extends */
	shll	r2		/* r2 = 0x000000f0 (SR.IMASK field) */
	not	r2,	r1	/* r1 = 0xffffff0f */
	and	r0,	r1	/* r1 = (SR & ~0xf0) */
	or	r1,	r4	/* r4 = (SR & ~0xf0) | level */
	ldc	r4,	sr	/* SR = r4 (don't move to delay slot) */
	rts
	 and	r2,	r0	/* delay slot: return old (SR & 0xf0) */
264
265/*
266 * u_int32_t _cpu_exception_suspend(void):
267 *	Block exception (SR.BL). if external interrupt raise, pending interrupt.
268 *	if exception occur, jump to 0xa0000000 (hard reset).
269 */
NENTRY(_cpu_exception_suspend)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x10,	r1	/* build 0x10000000 (SR.BL) via byte/word swaps */
	swap.b	r1,	r1	/* r1 = 0x00001000 */
	mov	r0,	r2	/* r2 = r0 */
	swap.w	r1,	r1	/* r1 = 0x10000000 */
	or	r1,	r2	/* r2 |= 0x10000000: set SR.BL */
	ldc	r2,	sr	/* SR = r2 */
	rts
	 and	r1,	r0	/* delay slot: return old SR & 0x10000000 */
280
281/*
282 * void _cpu_exception_resume(u_int32_t s):
283 *	restore 's' exception mask. (SR.BL)
284 */
NENTRY(_cpu_exception_resume)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x10,	r1	/* build 0x10000000 (SR.BL) via byte/word swaps */
	swap.b	r1,	r1	/* r1 = 0x00001000 */
	swap.w	r1,	r1	/* r1 = 0x10000000 */
	not	r1,	r1	/* r1 = ~0x10000000 */
	and	r1,	r0	/* r0 = SR with BL cleared */
	or	r4,	r0	/* r0 |= old SR.BL (as saved by _cpu_exception_suspend) */
	ldc	r0,	sr	/* SR = r0 (don't move to delay slot) */
	rts
	 nop
296
297/*
298 * void _cpu_spin(u_int32_t count)
299 *	loop 'count' * 10 cycle.
300 * [...]
301 * add    IF ID EX MA WB
302 * nop       IF ID EX MA WB
303 * cmp/pl       IF ID EX MA WB -  -
304 * nop             IF ID EX MA -  -  WB
305 * bt                 IF ID EX .  .  MA WB
306 * nop                   IF ID -  -  EX MA WB
307 * nop                      IF -  -  ID EX MA WB
308 * nop                      -  -  -  IF ID EX MA WB
309 * add                                  IF ID EX MA WB
310 * nop                                     IF ID EX MA WB
311 * cmp/pl                                     IF ID EX MA WB -  -
312 * nop                                           IF ID EX MA -  - WB
313 * bt                                               IF ID EX .  . MA
314 * [...]
315 */
	.align 5	/* align cache line size (32B) */
	/* Busy-wait: r4 = iteration count; each pass is 10 cycles
	 * per the pipeline diagram above.  Sequence must not change. */
NENTRY(_cpu_spin)
1:	nop			/* 1 */
	nop			/* 2 */
	nop			/* 3 */
	add	#-1, r4		/* 4 */
	nop			/* 5 */
	cmp/pl	r4		/* 6: T = (r4 > 0) */
	nop			/* 7 */
	bt	1b		/* 8, 9, 10 */
	rts
	 nop
328
329/*
330 * proc_trapmpoline:
331 *	Call the service funciton with one argument specified by the r12 and r11
332 *	respectively. setted by cpu_fork().
333 */
NENTRY(proc_trampoline)
	jsr	@r12		/* (*r12)(arg); r12/r11 set up by cpu_fork() */
	 mov	r11,	r4	/* delay slot: first argument = r11 */
	__EXCEPTION_RETURN	/* return to user mode via exception frame */
	/* NOTREACHED */
339
340/*
341 * sigcode:
342 *	Signal trampoline. copied to top of user stack.
343 */
NENTRY(sigcode)
	mov	r15,	r0		/* r0 = sigframe (at top of user stack) */
	mov.l	@r0,	r4		/* r4 = sf_signum (first word of frame) */
	add	#SIGF_HANDLER, r0
	mov.l	@r0,	r0		/* r0 = sf_handler */
	jsr	@r0			/* (*sf_handler)(sf_signum) */
	 nop
	mov	r15,	r4
	add	#SIGF_SC, r4		/* r4 = &sigframe.sf_sc */
	mov.l	_L.SYS___sigreturn14, r0
	trapa	#0x80			/* enter kernel with args on stack */
	mov.l	_L.SYS_exit, r0
	trapa	#0x80			/* exit if sigreturn fails */
	.align	2
_L.SYS___sigreturn14:	.long	SYS___sigreturn14
_L.SYS_exit:		.long	SYS_exit
	.globl	_C_LABEL(esigcode)
_C_LABEL(esigcode):	/* end marker: [sigcode, esigcode) is the copied span */
362
363/*
364 * void savectx(struct pcb *pcb):
365 *	save struct switchframe.
366 */
ENTRY(savectx)
	/* r4 = pcb; store a switchframe in the same layout cpu_switch uses. */
	add	#SF_SIZE, r4		/* point past frame; push via @-r4 */
	stc.l	r7_bank,@-r4		/* kernel stack top */
	stc.l	sr,	@-r4		/* status register */
	stc.l	r6_bank,@-r4		/* current frame pointer */
	sts.l	pr,	@-r4		/* return address */
	mov.l	r8,	@-r4		/* callee-saved r8..r15 */
	mov.l	r9,	@-r4
	mov.l	r10,	@-r4
	mov.l	r11,	@-r4
	mov.l	r12,	@-r4
	mov.l	r13,	@-r4
	mov.l	r14,	@-r4
	mov.l	r15,	@-r4
	rts
	 nop
383
384#if defined(DDB) || defined(KGDB)
385/*
386 * int setjmp(label_t *):
387 */
ENTRY(setjmp)
	/* r4 = label_t; save r8..r15 and pr (9 words), return 0. */
	add	#4*9,	r4	/* point past the 9 slots; push via @-r4 */
	mov.l	r8,	@-r4
	mov.l	r9,	@-r4
	mov.l	r10,	@-r4
	mov.l	r11,	@-r4
	mov.l	r12,	@-r4
	mov.l	r13,	@-r4
	mov.l	r14,	@-r4
	mov.l	r15,	@-r4
	sts.l	pr,	@-r4	/* pr ends up first in the label_t */
	rts
	 xor	r0, r0		/* delay slot: return 0 on direct return */
401/*
402 * void longjmp(label_t *):
403 */
ENTRY(longjmp)
	/*
	 * r4 = label_t saved by setjmp; restore pr, r15..r8 in the
	 * reverse of setjmp's push order, then "return" to setjmp's
	 * caller via the restored pr.
	 * NOTE(review): r0 is not set here, so the apparent setjmp
	 * return value is whatever r0 held at the call to longjmp —
	 * callers presumably rely on it being nonzero; confirm.
	 */
	lds.l	@r4+,	pr
	mov.l	@r4+,	r15
	mov.l	@r4+,	r14
	mov.l	@r4+,	r13
	mov.l	@r4+,	r12
	mov.l	@r4+,	r11
	mov.l	@r4+,	r10
	mov.l	@r4+,	r9
	mov.l	@r4+,	r8
	rts
	 nop
416#endif /* DDB || KGDB */
417