/*	$NetBSD: ofw_irq.S,v 1.2 2002/10/14 22:32:52 bjh21 Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Low level irq and fiq handlers
 *
 * Created      : 27/09/94
 */

#include "opt_irqstats.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/irqhandler.h>

	.text
	.align	0

/*
 *
 * irq_entry
 *
 * Main entry point for the IRQ vector
 *
 * This function is called only on timer ticks, passed on to the
 * kernel from the OFW tick handler.
 *
 * For now, I am trying to re-use as much of the code from the
 * IOMD interrupt-handler as possible.  In time, I will strip this
 * down to something OFW-specific.
 *
 * Here's the original, IOMD-specific description:
 * This function reads the irq request bits in the IOMD registers
 * IRQRQA, IRQRQB and DMARQ.
 * It then calls an installed handler for each bit that is set.
 * The function stray_irqhandler is called if a handler is not defined
 * for a particular interrupt.
 * If an interrupt handler is found then it is called with r0 containing
 * the argument defined in the handler structure.  If the field ih_arg
 * is zero then a pointer to the IRQ frame on the stack is passed instead.
 */

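/*
 * For orientation, the dispatch done by the irqloop/irqchainloop code
 * further down corresponds roughly to the C sketch below.  This is
 * illustrative only: the handler field names mirror the IH_* offsets
 * pulled in from assym.h, "pending" stands for the bit mask held in r8,
 * "irqframe" for the frame pushed on the stack (sp + 8 in the asm), and
 * the 24-bit scan width comes from the (1 << 24) loop test.  The counter
 * at offset V_INTR in uvmexp and, under IRQSTATS, intrcnt[IH_NUM] are
 * bumped along the way and are omitted here.
 *
 *	for (bit = 0; bit < 24; bit++) {
 *		if ((pending & (1 << bit)) == 0)
 *			continue;
 *		ih = irqhandlers[bit];
 *		if (ih == NULL)
 *			stray_irqhandler(pending); // tail branch in the asm
 *		else {
 *			for (; ih != NULL; ih = ih->ih_next) {
 *				arg = ih->ih_arg ? ih->ih_arg : irqframe;
 *				if ((*ih->ih_func)(arg) == 1)
 *					break;	// handler serviced the irq
 *			}
 *		}
 *	}
 */
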
Ldisabled_mask:
	.word	_C_LABEL(disabled_mask)

Lcurrent_spl_level:
	.word	_C_LABEL(current_spl_level)

Lcurrent_intr_depth:
	.word	_C_LABEL(current_intr_depth)

Lspl_masks:
	.word	_C_LABEL(spl_masks)

Lofw_ticktmp:
	.word	_C_LABEL(ofw_ticktmp)

Lirq_entry:
	.word	irq_entry

Lofwirqstk:			/* hack */
	.word	ofwirqstk + 4096

/*
 * Register usage
 *
 *  r6  - Address of current handler
 *  r7  - Pointer to handler pointer list
 *  r8  - Current IRQ requests.
 *  r9  - Used to count through possible IRQ bits.
 *  r10 - Base address of IOMD
 */

ASENTRY_NP(irq_entry)
	/*
	 *  We come here following an OFW-handled timer tick.
	 *
	 *  We are in the SVC frame, and interrupts are disabled.
	 *  The state of the interrupted context is partially in
	 *  the registers and partially in the global storage area
	 *  labeled ofw_ticktmp.  ofw_ticktmp is filled in by the
	 *  tick callback that is invoked by OFW on the way out of
	 *  its interrupt handler.  ofw_ticktmp contains the following:
	 *
	 *      pc			// interrupted instruction
	 *      lr_usr
	 *      sp_usr
	 *      r1			// makes r1 available for scratch
	 *      r0			// makes r0 available for scratch
	 *      spsr_svc		// cpsr of interrupted context
	 *
	 *  The prologue of this routine must re-construct the
	 *  machine state that existed at the time OFW's interrupt-
	 *  handler fielded the interrupt.  That allows us to use
	 *  the rest of the code in this routine, and have it all
	 *  "just work."
	 */
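	/*
	 * For reference, the memory layout of ofw_ticktmp as it is filled
	 * in by dotickgrovelling() (later in this file), expressed as a C
	 * sketch (types assumed, field names illustrative):
	 *
	 *	struct {
	 *		u_int spsr;	// ofw_ticktmp[0]: psr of interrupted context
	 *		u_int r0;	// ofw_ticktmp[1]
	 *		u_int r1;	// ofw_ticktmp[2]
	 *		u_int sp_usr;	// ofw_ticktmp[3]
	 *		u_int lr_usr;	// ofw_ticktmp[4]
	 *		u_int pc;	// ofw_ticktmp[5]: interrupted instruction
	 *	};
	 *
	 * The list in the comment above names the same six words from the
	 * highest index downwards.
	 */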

	/*
	 * Switch to IRQ mode.
	 * First check the spsr in ofw_ticktmp to see what the FIQ bit should be.
	 *
	 * I need 2 scratch registers to do this.
	 * Fortunately, r0 and r1 are already saved in ofw_ticktmp.
	 * How convenient.
	 */
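	/*
	 * In C terms, the mode word assembled below is (a sketch; macro
	 * names as used in this file):
	 *
	 *	new_cpsr = I32_bit | PSR_IRQ32_MODE
	 *	    | (ofw_ticktmp[0] & F32_bit);
	 *
	 * i.e. keep IRQs disabled, switch to IRQ32 mode, and carry the
	 * FIQ-disable bit over from the interrupted context's psr.
	 */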
	ldr	r0, Lofw_ticktmp
	ldr	r0, [r0]
	and	r0, r0, #F32_bit
	mov	r1, #(I32_bit | PSR_IRQ32_MODE)
	orr	r1, r1, r0
	msr	cpsr_all, r1

	/* Now we're in IRQ mode. */
	/* Restore contents of ofw_ticktmp. */
	adr	r0, Lofwirqstk		/* Bummer!  Mitch hasn't left me a stack. */
	ldr	sp, [r0]		/* I'll use my own for now... */
	ldr	r0, Lofw_ticktmp	/* r0 now points to ofw_ticktmp[0] */
	ldr	r1, [r0], #(4*3)	/* skip over saved {r0, r1} */
	msr	spsr_all, r1		/* restore spsr */
	ldmia	r0, {sp, lr}^		/* restore user sp and lr */
	add	r0, r0, #(4*2)		/* previous instruction can't writeback */
					/* this one can't use banked registers */
	ldr	lr, [r0], #(-4*4)	/* restore pc; point r0 at ofw_ticktmp[1] */
	add	lr, lr, #4		/* pc += 4; will be decremented below */
	ldmia	r0, {r0, r1}		/* restore r0 and r1 */

	/* OK, the machine state should be identical now to that when */
	/* OFW fielded the interrupt.  So just fall through... */

	sub	lr, lr, #0x00000004	/* Adjust the lr */

	PUSHFRAMEINSVC			/* Push an interrupt frame */

	/*
	 *  Can't field this interrupt now if priority is _SPL_CLOCK
	 *  or higher.  For now, we'll just ignore the interrupt.
	 *  Soon, we will have to schedule it for later action.
	 */
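	/*
	 * In C terms the gate below is simply (a sketch; names as used
	 * in this file):
	 *
	 *	if (current_spl_level >= _SPL_CLOCK)
	 *		return;		// drop this tick for now
	 *	// otherwise fall through to ofwtakeint below
	 */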
	ldr	r0, Lcurrent_spl_level
	ldr	r0, [r0]
	cmp	r0, #_SPL_CLOCK
	blt	ofwtakeint

	PULLFRAMEFROMSVCANDEXIT
	movs	pc, lr			/* Exit */

	/*
	 *  Stuff a bit-mask into r8 indicating which interrupts
	 *  are pending.  In our case, that is just the timer0
	 *  interrupt:  (1 << IRQ_TIMER0).  The existing code will take
	 *  care of invoking that handler and the softint/ast stuff
	 *  which follows it.
	 */
ofwtakeint:
	mov	r8, #0x00000001		/* timer interrupt pending! */
	mov	r8, r8, lsl #IRQ_TIMER0

	/*
	 * Note that we have entered the IRQ handler.
	 * We are in SVC mode so we cannot use the processor mode
	 * to determine if we are in an IRQ.  Instead we will count
	 * each time the interrupt handler is nested.
	 */

	ldr	r0, Lcurrent_intr_depth
	ldr	r1, [r0]
	add	r1, r1, #1
	str	r1, [r0]

	/* Block the current requested interrupts */
	ldr	r1, Ldisabled_mask
	ldr	r0, [r1]
	stmfd	sp!, {r0}
	orr	r0, r0, r8

	/*
	 * We need to block all interrupts at or below the IPL of each
	 * asserted interrupt.
	 * This basically emulates hardware interrupt priority levels.
	 * It means we go through the interrupt mask and, for every
	 * asserted interrupt, mask out all other interrupts at the
	 * same or lower IPL.
	 * It would be nice to wait until the main loop, but we need to
	 * sort this out first so interrupts can be re-enabled.
	 *
	 * This would benefit from a special ffs type routine.
	 */
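	/*
	 * A C sketch of the level computation that follows (names as used
	 * in this file; "pending" is the mask in r8, "level" is kept in
	 * r9; at this point r0 already holds disabled_mask | pending):
	 *
	 *	level = _SPL_LEVELS - 1;
	 *	while ((pending & spl_masks[level]) == 0)
	 *		level--;
	 *	// level is the highest IPL at which a pending interrupt
	 *	// is still enabled, so block everything that is masked
	 *	// at the next level up:
	 *	disabled_mask = (disabled_mask | pending) | ~spl_masks[level + 1];
	 *	saved_spl = current_spl_level;	// pushed on the stack
	 *	current_spl_level = level + 1;
	 *	irq_setmasks();
	 */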

	mov	r9, #(_SPL_LEVELS - 1)
	ldr	r7, Lspl_masks

Lfind_highest_ipl:
	ldr	r2, [r7, r9, lsl #2]
	tst	r8, r2
	subeq	r9, r9, #1
	beq	Lfind_highest_ipl

	/* r9 = SPL level of highest priority interrupt */
	add	r9, r9, #1
	ldr	r2, [r7, r9, lsl #2]
	mvn	r2, r2
	orr	r0, r0, r2

	str	r0, [r1]

	ldr	r0, Lcurrent_spl_level
	ldr	r1, [r0]
	str	r9, [r0]
	stmfd	sp!, {r1}

	/* Update the irq masks */
	bl	_C_LABEL(irq_setmasks)

	mrs	r0, cpsr_all		/* Enable IRQ's */
	bic	r0, r0, #I32_bit
	msr	cpsr_all, r0

	ldr	r7, Lirqhandlers
	mov	r9, #0x00000001

irqloop:
	/* This would benefit from a special ffs type routine */
	tst	r8, r9			/* Is a bit set ? */
	beq	nextirq			/* No ? try next bit */

	ldr	r6, [r7]		/* Get address of first handler structure */

	teq	r6, #0x00000000		/* Do we have a handler */
	moveq	r0, r8			/* IRQ requests as arg 0 */
	beq	_C_LABEL(stray_irqhandler) /* call special handler */

	ldr	r0, Lcnt
	ldr	r1, [r0, #(V_INTR)]
	add	r1, r1, #0x00000001
	str	r1, [r0, #(V_INTR)]

/*
 * XXX: Should stats be accumulated for every interrupt routine called
 * or for every physical interrupt that is serviced?
 */

#ifdef IRQSTATS
	ldr	r0, Lintrcnt
	ldr	r1, [r6, #(IH_NUM)]

	add	r0, r0, r1, lsl #2
	ldr	r1, [r0]
	add	r1, r1, #0x00000001
	str	r1, [r0]
#endif	/* IRQSTATS */

irqchainloop:
	ldr	r0, [r6, #(IH_ARG)]	/* Get argument pointer */
	teq	r0, #0x00000000		/* If arg is zero pass stack frame */
	addeq	r0, sp, #8		/* ... stack frame */
	mov	lr, pc			/* return address */
	ldr	pc, [r6, #(IH_FUNC)]	/* Call handler */

	teq	r0, #0x00000001		/* Was the irq serviced ? */
	beq	irqdone

	ldr	r6, [r6, #(IH_NEXT)]
	teq	r6, #0x00000000
	bne	irqchainloop

irqdone:
nextirq:
	add	r7, r7, #0x00000004	/* update pointer to handlers */
	mov	r9, r9, lsl #1		/* move on to next bit */
	teq	r9, #(1 << 24)		/* done the last bit ? */
	bne	irqloop			/* no - loop back. */

	ldmfd	sp!, {r2}
	ldr	r1, Lcurrent_spl_level
	str	r2, [r1]

	/* Restore previous disabled mask */
	ldmfd	sp!, {r2}
	ldr	r1, Ldisabled_mask
	str	r2, [r1]
	bl	_C_LABEL(irq_setmasks)

	bl	_C_LABEL(dosoftints)	/* Handle the soft interrupts */

	/* Manage AST's. Maybe this should be done as a soft interrupt ? */
	ldr	r0, [sp]		/* Get the SPSR from stack */

	and	r0, r0, #(PSR_MODE)	/* Test for USR32 mode before the IRQ */
	teq	r0, #(PSR_USR32_MODE)
	ldreq	r0, Lastpending		/* Do we have an AST pending ? */
	ldreq	r1, [r0]
	teqeq	r1, #0x00000001

	beq	irqast			/* call the AST handler */

	/* Kill IRQ's in preparation for exit */
	mrs	r0, cpsr_all
	orr	r0, r0, #(I32_bit)
	msr	cpsr_all, r0

	/* Decrement the nest count */
	ldr	r0, Lcurrent_intr_depth
	ldr	r1, [r0]
	sub	r1, r1, #1
	str	r1, [r0]

	PULLFRAMEFROMSVCANDEXIT

	movs	pc, lr			/* Exit */

	/*
	 * Ok, there is a snag with the current intr depth ...
	 * If ast() calls mi_sleep() the current_intr_depth will not be
	 * decremented until the process is woken up.  This can result
	 * in the system believing it is still in the interrupt handler.
	 * If we are calling ast() then correct the current_intr_depth
	 * before the call.
	 */
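	/*
	 * In outline, irqast does the following (a sketch; the interrupt
	 * enable/disable pseudo-calls stand for the cpsr fiddling below):
	 *
	 *	astpending = 0;
	 *	disable_interrupts();	// so the decrement is atomic
	 *	current_intr_depth--;
	 *	restore_interrupts();
	 *	ast(frame);		// frame == sp
	 */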
irqast:
	mov	r1, #0x00000000		/* Clear ast_pending */
	str	r1, [r0]

	/* Kill IRQ's so we atomically decrement current_intr_depth */

	mrs	r2, cpsr_all
	orr	r3, r2, #(I32_bit)
	msr	cpsr_all, r3

	/* Decrement the nest count */

	ldr	r0, Lcurrent_intr_depth
	ldr	r1, [r0]
	sub	r1, r1, #1
	str	r1, [r0]

	/* Restore IRQ's */
	msr	cpsr_all, r2

	mov	r0, sp
	bl	_C_LABEL(ast)

	/* Kill IRQ's in preparation for exit */

	mrs	r0, cpsr_all
	orr	r0, r0, #(I32_bit)
	msr	cpsr_all, r0

	PULLFRAMEFROMSVCANDEXIT

	movs	pc, lr			/* Exit */


Lspl_mask:
	.word	_C_LABEL(spl_mask)	/* irq's allowed at current spl level */

Lcurrent_mask:
	.word	_C_LABEL(current_mask)	/* irq's that are usable */


ENTRY(irq_setmasks)
	/* Do nothing */
	mov	pc, lr


Lcnt:
	.word	_C_LABEL(uvmexp)

Lintrcnt:
	.word	_C_LABEL(intrcnt)


Lirqhandlers:
	.word	_C_LABEL(irqhandlers)	/* Pointer to array of irqhandlers */

Lastpending:
	.word	_C_LABEL(astpending)

	.text
	.global	_C_LABEL(dotickgrovelling)

/*
 *  Do magic to cause OFW to call our irq_entry
 *  routine when it returns from its tick-handling.
 *
 *  This consists of two sub-tasks:
 *    - save some machine state in ofw_ticktmp
 *    - punch some new machine state into the
 *      OFW-supplied frame
 *
 *  We are running in the IRQ frame, with
 *  interrupts disabled.
 *
 *  r0 - base of saved OFW interrupt frame, which
 *       has the following format:
 *
 *         pc			// interrupted instruction
 *         lr			// lr of interrupted context
 *         sp			// sp of interrupted context
 *         r12
 *         ...			// non-banked register values
 *         ...			//   of interrupted context
 *         r0
 *         spsr			// psr of interrupted context
 *
 */
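/*
 * Expressed as a C sketch (word offsets taken from the code below; the
 * struct and field names are illustrative only), the frame r0 points at
 * looks like:
 *
 *	struct ofwframe {
 *		u_int spsr;	// word 0:  psr of interrupted context
 *		u_int r[13];	// words 1-13:  r0-r12
 *		u_int sp;	// word 14: sp of interrupted context
 *		u_int lr;	// word 15: lr of interrupted context
 *		u_int pc;	// word 16: interrupted instruction
 *	};
 *
 * The list above names the same words from the highest offset downwards.
 */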

_C_LABEL(dotickgrovelling):
	/*assert((cpsr & PSR_MODE) == PSR_IRQ32_MODE);*/

	stmfd	sp!, {r1-r5}		/* scratch registers r1-r5 */

	/*
	 *  Sub-task 1:
	 *
	 *    Our irq_entry routine needs to re-construct
	 *    the state of the machine at the time OFW
	 *    fielded the interrupt, so that we can use
	 *    the rest of the standard interrupt-handling
	 *    code.  Specifically, irq_entry needs to get
	 *    at the following machine state:
	 *
	 *      pc              // interrupted instruction
	 *      lr_usr
	 *      sp_usr
	 *      r0-r12          // the non-banked registers
	 *                      //   at the time of interruption
	 *      spsr            // cpsr of interrupted context
	 *
	 *    The non-banked registers will be valid at the
	 *    time irq_entry is called, but the other values
	 *    will not be.  We must save them here, in the
	 *    ofw_ticktmp storage block.  We also save r0
	 *    and r1 so that we have some free registers
	 *    when it's time to do the re-construction.
	 *
	 *    Note that interrupts are not enabled before
	 *    irq_entry is entered, so we don't have to
	 *    worry about ofw_ticktmp getting clobbered.
	 */
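	/*
	 * In C terms, the stores below amount to (a sketch; "ofwframe"
	 * is the frame r0 points at, laid out as in the struct sketch
	 * above):
	 *
	 *	ofw_ticktmp[0] = ofwframe->spsr;
	 *	ofw_ticktmp[1] = ofwframe->r[0];
	 *	ofw_ticktmp[2] = ofwframe->r[1];
	 *	ofw_ticktmp[3] = sp_usr;
	 *	ofw_ticktmp[4] = lr_usr;
	 *	ofw_ticktmp[5] = ofwframe->pc;
	 */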
	ldr	r1, Lofw_ticktmp	/* r1 points to ofw_ticktmp[0] */

	ldr	r2, [r0, #0]		/* ofwframe[0] is spsr */
	stmia	r1!, {r2}		/* put it in ofw_ticktmp[0] */

	ldr	r2, [r0, #(4*1)]	/* ofwframe[1] is saved r0 */
	stmia	r1!, {r2}		/* put it in ofw_ticktmp[1] */

	ldr	r2, [r0, #(4*2)]	/* ofwframe[2] is saved r1 */
	stmia	r1!, {r2}		/* put it in ofw_ticktmp[2] */

	stmia	r1, {sp, lr}^		/* put {sp,lr}_usr in ofw_ticktmp[3,4]; */
					/* the user registers are still valid */
					/* because we haven't left IRQ mode */
	add	r1, r1, #(4*2)		/* previous instruction can't writeback */
					/* this one can't use banked registers */

	ldr	r2, [r0, #(4*16)]	/* ofwframe[16] is pc */
	stmia	r1!, {r2}		/* put it in ofw_ticktmp[5] */


	/*
	 *  Sub-task 2:
	 *
	 *    Diddle the OFW-supplied frame such that
	 *    control passes to irq_entry when OFW does
	 *    its return from interrupt.  There are 4
	 *    fields in that frame that we need to plug:
	 *
	 *        pc		// gets irq_entry
	 *        lr		// gets lr_svc
	 *        sp		// gets sp_svc
	 *        spsr		// gets (I32_bit | PSR_SVC32_MODE)
	 *
	 */
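	/*
	 * In C terms, the code below plugs the frame as follows (a
	 * sketch; sp_svc and lr_svc are fetched by briefly switching
	 * to SVC mode):
	 *
	 *	ofwframe->spsr = I32_bit | PSR_SVC32_MODE;
	 *	ofwframe->sp   = sp_svc;
	 *	ofwframe->lr   = lr_svc;
	 *	ofwframe->pc   = (u_int)irq_entry;
	 */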
	mov	r1, #(I32_bit | PSR_SVC32_MODE)
	str	r1, [r0, #0]		/* plug spsr */

	/* Sneak into SVC mode to get sp and lr */
	mrs	r3, cpsr_all
	bic	r3, r3, #(PSR_MODE)
	orr	r3, r3, #(PSR_SVC32_MODE)
	msr	cpsr_all, r3
	mov	r4, lr			/* snarf lr_svc */
	mov	r5, sp			/* snarf sp_svc */
	bic	r3, r3, #(PSR_MODE)
	orr	r3, r3, #(PSR_IRQ32_MODE)
	msr	cpsr_all, r3
	str	r5, [r0, #(4*14)]	/* plug sp */
	str	r4, [r0, #(4*15)]	/* plug lr */

	ldr	r1, Lirq_entry
	str	r1, [r0, #(4*16)]	/* plug pc */

	ldmfd	sp!, {r1-r5}
	mov	pc, lr


	.bss
	.align 0

_C_LABEL(ofw_ticktmp):
	.space	4 * 6	/* temporary storage for 6 words of machine state */

ofwirqstk:			/* hack */
	.space	4096

#ifdef IRQSTATS
/* These symbols are used by vmstat */

	.text
	.global	_C_LABEL(_intrnames)
_C_LABEL(_intrnames):
	.word	_C_LABEL(intrnames)

	.data

	.globl	_C_LABEL(intrnames), _C_LABEL(eintrnames), _C_LABEL(intrcnt), _C_LABEL(sintrcnt), _C_LABEL(eintrcnt)
_C_LABEL(intrnames):
	.asciz	"interrupt  0 "
	.asciz	"interrupt  1 "
	.asciz	"interrupt  2 "
	.asciz	"interrupt  3 "
	.asciz	"interrupt  4 "
	.asciz	"interrupt  5 "
	.asciz	"interrupt  6 "
	.asciz	"interrupt  7 "
	.asciz	"interrupt  8 "
	.asciz	"interrupt  9 "
	.asciz	"interrupt 10 "
	.asciz	"interrupt 11 "
	.asciz	"interrupt 12 "
	.asciz	"interrupt 13 "
	.asciz	"interrupt 14 "
	.asciz	"interrupt 15 "
	.asciz	"interrupt 16 "
	.asciz	"interrupt 17 "
	.asciz	"interrupt 18 "
	.asciz	"interrupt 19 "
	.asciz	"interrupt 20 "
	.asciz	"interrupt 21 "
	.asciz	"interrupt 22 "
	.asciz	"interrupt 23 "
	.asciz	"interrupt 24 "
	.asciz	"interrupt 25 "
	.asciz	"interrupt 26 "
	.asciz	"interrupt 27 "
	.asciz	"interrupt 28 "
	.asciz	"interrupt 29 "
	.asciz	"interrupt 30 "
	.asciz	"interrupt 31 "

_C_LABEL(sintrnames):
	.asciz	"softclock    "
	.asciz	"softnet      "
	.asciz	"softserial   "
	.asciz	"softintr  3  "
	.asciz	"softintr  4  "
	.asciz	"softintr  5  "
	.asciz	"softintr  6  "
	.asciz	"softintr  7  "
	.asciz	"softintr  8  "
	.asciz	"softintr  9  "
	.asciz	"softintr 10  "
	.asciz	"softintr 11  "
	.asciz	"softintr 12  "
	.asciz	"softintr 13  "
	.asciz	"softintr 14  "
	.asciz	"softintr 15  "
	.asciz	"softintr 16  "
	.asciz	"softintr 17  "
	.asciz	"softintr 18  "
	.asciz	"softintr 19  "
	.asciz	"softintr 20  "
	.asciz	"softintr 21  "
	.asciz	"softintr 22  "
	.asciz	"softintr 23  "
	.asciz	"softintr 24  "
	.asciz	"softintr 25  "
	.asciz	"softintr 26  "
	.asciz	"softintr 27  "
	.asciz	"softintr 28  "
	.asciz	"softintr 29  "
	.asciz	"softintr 30  "
	.asciz	"softintr 31  "
_C_LABEL(eintrnames):

	.bss
	.align	0
_C_LABEL(intrcnt):
	.space	32*4	/* XXX Should be linked to number of interrupts */

_C_LABEL(sintrcnt):
	.space	32*4	/* XXX Should be linked to number of interrupts */
_C_LABEL(eintrcnt):

#else	/* IRQSTATS */
/* Dummy entries to keep vmstat happy */

	.text
	.globl	_C_LABEL(intrnames), _C_LABEL(eintrnames), _C_LABEL(intrcnt), _C_LABEL(eintrcnt)
_C_LABEL(intrnames):
	.long	0
_C_LABEL(eintrnames):

_C_LABEL(intrcnt):
	.long	0
_C_LABEL(eintrcnt):
#endif	/* IRQSTATS */