/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>
#include <asm/spinlock_types.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
	.macro  get_ptl reg
	mfctl	%cr28,\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Load the stack
	 *          pointer from the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 */

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl   %cr30, %r1
	tophys  %r1,%r9		/* task_struct */
	LDREG	TASK_STACK(%r9),%r30
	ldo	PT_SZ_ALGN(%r30),%r30
	mtsp	%r0,%sr7	/* clear sr7 after kernel stack was set! */
	mtsp	%r16,%sr3
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space register contains part of the upper 32 bits of the
	 * fault address.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
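
	/* In other words (with a nonzero SPACEID_SHIFT): the low
	 * SPACEID_SHIFT bits of the space id carry fault-address bits
	 * 32..(32+SPACEID_SHIFT-1); they are deposited into the va just
	 * above bit 31 and cleared from the space id. */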

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0	/* nullify the mfctl for kernel space (spc == 0) */
	mfctl		%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel.

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	/* check against %r0, which is the same value as LINUX_GATEWAY_SPACE */
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
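
	/* Typical use, as in the dtlb miss handlers below:
	 *
	 *	space_check	spc,t0,dtlb_fault
	 *
	 * which branches to dtlb_fault when a user-space fault arrives
	 * with a space id other than the currently active one. */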

	/* Look up a PTE in a 2-level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru_safe	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	extru_safe	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
	copy		%r0,\pte
#endif
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
	extru_safe	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	.endm

	/* Look up a PTE in a 3-level scheme. */
	.macro		L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	copy		%r0,\pte
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	ldw,s		\index(\pgd),\pgd
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	shld		\pgd,PxD_VALUE_SHIFT,\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire page_table_lock and check page is present. */
	.macro		ptl_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_TLB_PTLOCK
98:	cmpib,COND(=),n	0,\spc,2f
	get_ptl		\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	b		\fault
	stw		\tmp1,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm
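
	/* Note on the acquire loop above: ldcw loads the lock word and
	 * atomically zeroes it in memory, so a loaded value of zero means
	 * another CPU holds the lock; we retry the ldcw until it returns
	 * non-zero.  The stw on the fault path releases the lock again
	 * with the value it was acquired with. */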

	/* Release page_table_lock without reloading lock address.
	   We use an ordered store to ensure all prior accesses are
	   performed prior to releasing the lock. */
	.macro		ptl_unlock0	spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
98:	ldi		__ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
	or,COND(=)	%r0,\spc,%r0
	stw,ma		\tmp2,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release page_table_lock. */
	.macro		ptl_unlock1	spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
98:	get_ptl		\tmp
	ptl_unlock0	\spc,\tmp,\tmp2
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_accessed	ptp,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0	/* skip the store if the bit is already set */
	STREG		\tmp,0(\ptp)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptp,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on the kernel's PAGE_SIZE)
	 * and a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
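
	/* Illustration: with 4 kB kernel pages (PAGE_SHIFT == 12) the PFN
	 * already matches the CPU TLB's 4k PFN and PAGE_ADD_SHIFT is 0;
	 * with 16 kB pages (PAGE_SHIFT == 14) PAGE_ADD_SHIFT is 2, i.e.
	 * each kernel PFN covers four 4k TLB PFNs. */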

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE.
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
	/* need to drop DMB bit, as it's used as SPECIAL flag */
	depi		0,_PAGE_SPECIAL_BIT,1,\pte
#endif
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
	/* need to drop DMB bit, as it's used as SPECIAL flag */
	depi		0,_PAGE_SPECIAL_BIT,1,\pte
#endif
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,=		\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX,
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0	/* nullify the sign-extend below unless \tmp is -1 */
	extrd,s		\pte,63,25,\pte
	.endm
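
	/* The net effect: when the extracted nibble is all ones (an
	 * 0xfXXXXXXX I/O address), \tmp is -1, the addi does not nullify,
	 * and the final extrd,s sign-extends the f's into the upper word;
	 * any other address leaves the pte unchanged. */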

	/* The alias region consists of a pair of 4 MB regions
	 * aligned to 8 MB. It is used to clear/copy/flush user pages
	 * using kernel virtual addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" tlb entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
	copy		\va,\tmp1
	depi_safe	0,31,TMPALIAS_SIZE_BITS+1,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to the type field and _PAGE_READ goes to the top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
	extrw,u,=	\va,31-TMPALIAS_SIZE_BITS,1,%r0
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte

	/* convert phys addr in \pte (from r23 or r26) to tlb insert format */
	SHRREG		\pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
	depi_safe	_PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
	.endm
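
	/* Typical use, as in the alias-check paths below:
	 *
	 *	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
	 *
	 * falls through with pte/prot set up for a TLB insert when va
	 * lies in the tmpalias region, and branches to dtlb_fault
	 * otherwise. */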


	/*
	 * Fault vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* The fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	mfctl	%cr30,%r1	/* task_struct */
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	bv	%r0(%r2)
	mtctl   %r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16		/* task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TASK_TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPTION */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* Preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. Otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPTION
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	ldw	TI_PRE_COUNT(%r1), %r19
	cmpib,<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	/* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
	ldil	L%intr_restore, %r2
	load32	preempt_schedule_irq, %r1
	bv	%r0(%r1)
	ldo	R%intr_restore(%r2), %r2
#else
	ldil	L%intr_restore, %r1
	BL	preempt_schedule_irq, %r2
	ldo	R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl           %isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with the W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left the ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n		intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG		PT_IASQ0(%r29), %r16
	LDREG		PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG           %r16, PT_IASQ0(%r29)
	STREG           %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0,t1
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0,t1
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0,t1
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0,t1
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0,t1
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0,t1
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc, fic, pdc, lpa, probe and
	 * probei instructions. The kernel no longer faults doing flushes.
	 * Use of lpa and probe instructions is rare. Given the issue
	 * with shadow registers, we defer everything to the "slow" path.
	 */
	b,n		nadtlb_fault

#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * The instruction miss is a little different, since we allow users
	 * to fault on the gateway page, which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0,t1
	rfir
	nop

naitlb_miss_20w:

	/*
	 * The instruction miss is a little different, since we allow users
	 * to fault on the gateway page, which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0,t1
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0,t1
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0,t1
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0,t1
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0,t1
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock0	spc,t0,t1
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	ptl_unlock0	spc,t0,t1
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock0	spc,t0,t1
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	mfctl	%cr30,%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */
	mfctl     %cr30, %r1
	STREG     %r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

syscall_restore:
	mfctl	%cr30,%r1

	/* Are we being ptraced? */
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	_TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	PRIV_USER,31,2,%r31	/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* Decide whether to reset the wide mode bit.
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero. */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if the old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	PRIV_USER,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have it all in one L1 cacheline.
	 */
	ldi	0, %arg3
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_64BIT
#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
#else
#define FTRACE_FRAME_SIZE FRAME_SIZE
#endif
ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ftrace_caller:
	.global ftrace_caller

	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
	STREG	%rp, -RP_OFFSET(%r3)

	/* Offset 0 is already allocated for %r1 */
	STREG	%r23, 2*REG_SZ(%r3)
	STREG	%r24, 3*REG_SZ(%r3)
	STREG	%r25, 4*REG_SZ(%r3)
	STREG	%r26, 5*REG_SZ(%r3)
	STREG	%r28, 6*REG_SZ(%r3)
	STREG	%r29, 7*REG_SZ(%r3)
#ifdef CONFIG_64BIT
	STREG	%r19, 8*REG_SZ(%r3)
	STREG	%r20, 9*REG_SZ(%r3)
	STREG	%r21, 10*REG_SZ(%r3)
	STREG	%r22, 11*REG_SZ(%r3)
	STREG	%r27, 12*REG_SZ(%r3)
	STREG	%r31, 13*REG_SZ(%r3)
	loadgp
	ldo	-16(%sp),%r29
#endif
	LDREG	0(%r3), %r25
	copy	%rp, %r26
	ldo	-8(%r25), %r25
	ldi	0, %r23		/* no pt_regs */
	b,l	ftrace_function_trampoline, %rp
	copy	%r3, %r24

	LDREG	-RP_OFFSET(%r3), %rp
	LDREG	2*REG_SZ(%r3), %r23
	LDREG	3*REG_SZ(%r3), %r24
	LDREG	4*REG_SZ(%r3), %r25
	LDREG	5*REG_SZ(%r3), %r26
	LDREG	6*REG_SZ(%r3), %r28
	LDREG	7*REG_SZ(%r3), %r29
#ifdef CONFIG_64BIT
	LDREG	8*REG_SZ(%r3), %r19
	LDREG	9*REG_SZ(%r3), %r20
	LDREG	10*REG_SZ(%r3), %r21
	LDREG	11*REG_SZ(%r3), %r22
	LDREG	12*REG_SZ(%r3), %r27
	LDREG	13*REG_SZ(%r3), %r31
#endif
	LDREG	1*REG_SZ(%r3), %r3

	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_caller)

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG   %r10, PT_GR10(%r1)
	STREG   %r11, PT_GR11(%r1)
	STREG   %r12, PT_GR12(%r1)
	STREG   %r13, PT_GR13(%r1)
	STREG   %r14, PT_GR14(%r1)
	STREG   %r15, PT_GR15(%r1)
	STREG   %r16, PT_GR16(%r1)
	STREG   %r17, PT_GR17(%r1)
	STREG   %r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG   PT_GR10(%r1),%r10
	LDREG   PT_GR11(%r1),%r11
	LDREG   PT_GR12(%r1),%r12
	LDREG   PT_GR13(%r1),%r13
	LDREG   PT_GR14(%r1),%r14
	LDREG   PT_GR15(%r1),%r15
	LDREG   PT_GR16(%r1),%r16
	LDREG   PT_GR17(%r1),%r17
	LDREG   PT_GR18(%r1),%r18
	LDREG   PT_GR19(%r1),%r19
	LDREG   PT_GR20(%r1),%r20
	LDREG   PT_GR21(%r1),%r21
	LDREG   PT_GR22(%r1),%r22
	LDREG   PT_GR23(%r1),%r23
	LDREG   PT_GR24(%r1),%r24
	LDREG   PT_GR25(%r1),%r25
	LDREG   PT_GR26(%r1),%r26
	LDREG   PT_GR27(%r1),%r27
	LDREG   PT_GR28(%r1),%r28
	LDREG   PT_GR29(%r1),%r29
	LDREG   PT_GR30(%r1),%r30
	LDREG   PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif	/* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to the new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non-access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non-access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
ENDPROC_CFI(set_register)