1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *    S390 low-level entry points.
4 *
5 *    Copyright IBM Corp. 1999, 2012
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
10 */
11
12#include <linux/init.h>
13#include <linux/linkage.h>
14#include <asm/alternative-asm.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/ctl_reg.h>
18#include <asm/dwarf.h>
19#include <asm/errno.h>
20#include <asm/ptrace.h>
21#include <asm/thread_info.h>
22#include <asm/asm-offsets.h>
23#include <asm/unistd.h>
24#include <asm/page.h>
25#include <asm/sigp.h>
26#include <asm/irq.h>
27#include <asm/vx-insn.h>
28#include <asm/setup.h>
29#include <asm/nmi.h>
30#include <asm/export.h>
31#include <asm/nospec-insn.h>
32
# Byte offsets of the individual saved general purpose registers
# inside the pt_regs gprs array (8 bytes per 64-bit register).
__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12    =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

# Kernel stack geometry: STACK_SIZE is derived from the page size and
# the thread size order.  STACK_INIT is the initial stack pointer
# offset within a stack: top of the stack minus one register save
# area (STACK_FRAME_OVERHEAD) and one pt_regs area.
STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

# Lowcore offset of the LPP save field, used by the ALTERNATIVE
# patched ".insn s,0xb2800000" (LPP) instructions below.
_LPP_OFFSET	= __LC_LPP
55
	/*
	 * Check whether the kernel stack pointer %r15 has run into the
	 * stack guard area.  If so, branch to stack_overflow with the
	 * lowcore save area offset \savearea in %r14.
	 * Note: lghi does not change the condition code, so the jz
	 * still acts on the result of the preceding tml.
	 */
	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
63
	/*
	 * With CONFIG_VMAP_STACK, verify that %r15 points into one of
	 * the known per-cpu kernel stacks: round the stack pointer down
	 * to its stack base (nill), re-apply the STACK_INIT bias (oill)
	 * and compare the result against each stack address stored in
	 * lowcore.  On a match continue at \oklabel, otherwise fall
	 * through to stack_overflow with \savearea in %r14.
	 * Without CONFIG_VMAP_STACK this is a plain branch to \oklabel.
	 */
	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm
85
	/*
	 * Store the TOD clock into \savearea.  The ALTERNATIVE patches
	 * the base STCK (opcode 0xb205) with the faster STCKF variant
	 * (opcode 0xb27c) when facility 25 (store-clock-fast) is
	 * installed.
	 */
	.macro STCK savearea
	ALTERNATIVE ".insn	s,0xb2050000,\savearea", \
		    ".insn	s,0xb27c0000,\savearea", 25
	.endm
90
91	/*
92	 * The TSTMSK macro generates a test-under-mask instruction by
93	 * calculating the memory offset for the specified mask value.
94	 * Mask value can be any constant.  The macro shifts the mask
95	 * value to calculate the memory offset for the test-under-mask
96	 * instruction.
97	 */
98	.macro TSTMSK addr, mask, size=8, bytepos=0
99		.if (\bytepos < \size) && (\mask >> 8)
100			.if (\mask & 0xff)
101				.error "Mask exceeds byte boundary"
102			.endif
103			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
104			.exitm
105		.endif
106		.ifeq \mask
107			.error "Mask must not be zero"
108		.endif
109		off = \size - \bytepos - 1
110		tm	off+\addr, \mask
111	.endm
112
	/*
	 * Branch prediction control macros.  The .long constants encode
	 * a PPA (perform-processor-assist) instruction — presumably the
	 * branch-prediction disable (0xb2e8c000) and enable (0xb2e8d000)
	 * functions; TODO confirm against the Principles of Operation.
	 * All four are patched via alternative 82 (spectre mitigation)
	 * and are no-ops when that facility handling is not active.
	 */

	# Disable branch prediction.
	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	# Enable branch prediction.
	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	# Re-enable branch prediction on kernel entry unless one of the
	# \tif_mask isolation bits is set in the flags at \tif_ptr.
	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	# On exit to user space: disable branch prediction if an
	# isolation bit is set, otherwise enable it.
	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm
131
	# Generate expoline branch thunks for the BR_EX %r14 and
	# BR_EX %r14,%r13 returns used below (spectre mitigation).
	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r13

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only in order to avoid that __bpon starts at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would take
	 * an arbitrary symbol name when disassembling this code.
	 * With the added nop in between the __bpon symbol is unique
	 * again.
	 */
	nop	0
146
# Enable branch prediction and return; callable helper around BPON.
ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)
152
153/*
154 * Scheduler resume function, called by switch_to
155 *  gpr2 = (task_struct *) prev
156 *  gpr3 = (task_struct *) next
157 * Returns:
158 *  gpr2 = prev
159 */
160ENTRY(__switch_to)
161	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
162	lghi	%r4,__TASK_stack
163	lghi	%r1,__TASK_thread
164	llill	%r5,STACK_INIT
165	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
166	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
167	agr	%r15,%r5			# end of kernel stack of next
168	stg	%r3,__LC_CURRENT		# store task struct of next
169	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
170	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
171	aghi	%r3,__TASK_pid
172	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
173	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
174	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
175	BR_EX	%r14
176ENDPROC(__switch_to)
177
#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 *
 * Enters SIE (guest execution) and returns the exit reason code in
 * %r2.  The region between .Lsie_gmap and .Lsie_done is a critical
 * section: the interrupt and machine check handlers recognize a PSW
 * inside it and redirect execution to sie_exit (see
 * .Lcleanup_sie_mcck/.Lcleanup_sie_int below).
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap			# no gmap -> keep current asce
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)				# run the guest
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie_mcck/.Lcleanup_sie_int
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3				# (%r2 is loaded below)
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	# faults on the rewind pad or at sie_exit report -EFAULT
	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
249
250/*
251 * SVC interrupt handler routine. System calls are synchronous events and
252 * are entered with interrupts disabled.
253 */
254
255ENTRY(system_call)
256	stpt	__LC_SYS_ENTER_TIMER
257	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
258	BPOFF
259	lghi	%r14,0
260.Lsysc_per:
261	lctlg	%c1,%c1,__LC_KERNEL_ASCE
262	lg	%r12,__LC_CURRENT
263	lg	%r15,__LC_KERNEL_STACK
264	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
265	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
266	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
267	# clear user controlled register to prevent speculative use
268	xgr	%r0,%r0
269	xgr	%r1,%r1
270	xgr	%r4,%r4
271	xgr	%r5,%r5
272	xgr	%r6,%r6
273	xgr	%r7,%r7
274	xgr	%r8,%r8
275	xgr	%r9,%r9
276	xgr	%r10,%r10
277	xgr	%r11,%r11
278	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
279	lgr	%r3,%r14
280	brasl	%r14,__do_syscall
281	lctlg	%c1,%c1,__LC_USER_ASCE
282	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
283	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
284	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
285	stpt	__LC_EXIT_TIMER
286	b	__LC_RETURN_LPSWE
287ENDPROC(system_call)
288
289#
290# a new process exits the kernel with ret_from_fork
291#
292ENTRY(ret_from_fork)
293	lgr	%r3,%r11
294	brasl	%r14,__ret_from_fork
295	lctlg	%c1,%c1,__LC_USER_ASCE
296	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
297	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
298	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
299	stpt	__LC_EXIT_TIMER
300	b	__LC_RETURN_LPSWE
301ENDPROC(ret_from_fork)
302
303/*
304 * Program check handler routine
305 */
306
307ENTRY(pgm_check_handler)
308	stpt	__LC_SYS_ENTER_TIMER
309	BPOFF
310	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
311	lg	%r12,__LC_CURRENT
312	lghi	%r10,0
313	lmg	%r8,%r9,__LC_PGM_OLD_PSW
314	tmhh	%r8,0x0001		# coming from user space?
315	jno	.Lpgm_skip_asce
316	lctlg	%c1,%c1,__LC_KERNEL_ASCE
317	j	3f			# -> fault in user space
318.Lpgm_skip_asce:
319#if IS_ENABLED(CONFIG_KVM)
320	# cleanup critical section for program checks in sie64a
321	lgr	%r14,%r9
322	larl	%r13,.Lsie_gmap
323	slgr	%r14,%r13
324	lghi	%r13,.Lsie_done - .Lsie_gmap
325	clgr	%r14,%r13
326	jhe	1f
327	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
328	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
329	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
330	larl	%r9,sie_exit			# skip forward to sie_exit
331	lghi	%r10,_PIF_GUEST_FAULT
332#endif
3331:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
334	jnz	2f			# -> enabled, can't be a double fault
335	tm	__LC_PGM_ILC+3,0x80	# check for per exception
336	jnz	.Lpgm_svcper		# -> single stepped svc
3372:	CHECK_STACK __LC_SAVE_AREA_SYNC
338	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
339	# CHECK_VMAP_STACK branches to stack_overflow or 4f
340	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3413:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
342	lg	%r15,__LC_KERNEL_STACK
3434:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
344	stg	%r10,__PT_FLAGS(%r11)
345	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
346	stmg	%r0,%r7,__PT_R0(%r11)
347	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
348	stmg	%r8,%r9,__PT_PSW(%r11)
349
350	# clear user controlled registers to prevent speculative use
351	xgr	%r0,%r0
352	xgr	%r1,%r1
353	xgr	%r3,%r3
354	xgr	%r4,%r4
355	xgr	%r5,%r5
356	xgr	%r6,%r6
357	xgr	%r7,%r7
358	lgr	%r2,%r11
359	brasl	%r14,__do_pgm_check
360	tmhh	%r8,0x0001		# returning to user space?
361	jno	.Lpgm_exit_kernel
362	lctlg	%c1,%c1,__LC_USER_ASCE
363	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
364	stpt	__LC_EXIT_TIMER
365.Lpgm_exit_kernel:
366	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
367	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
368	b	__LC_RETURN_LPSWE
369
370#
371# single stepped system call
372#
373.Lpgm_svcper:
374	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
375	larl	%r14,.Lsysc_per
376	stg	%r14,__LC_RETURN_PSW+8
377	lghi	%r14,1
378	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per
379ENDPROC(pgm_check_handler)
380
381/*
382 * Interrupt handler macro used for external and IO interrupts.
383 */
384.macro INT_HANDLER name,lc_old_psw,handler
385ENTRY(\name)
386	STCK	__LC_INT_CLOCK
387	stpt	__LC_SYS_ENTER_TIMER
388	BPOFF
389	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
390	lg	%r12,__LC_CURRENT
391	lmg	%r8,%r9,\lc_old_psw
392	tmhh	%r8,0x0001			# interrupting from user ?
393	jnz	1f
394#if IS_ENABLED(CONFIG_KVM)
395	lgr	%r14,%r9
396	larl	%r13,.Lsie_gmap
397	slgr	%r14,%r13
398	lghi	%r13,.Lsie_done - .Lsie_gmap
399	clgr	%r14,%r13
400	jhe	0f
401	brasl	%r14,.Lcleanup_sie_int
402#endif
4030:	CHECK_STACK __LC_SAVE_AREA_ASYNC
404	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
405	j	2f
4061:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
407	lctlg	%c1,%c1,__LC_KERNEL_ASCE
408	lg	%r15,__LC_KERNEL_STACK
4092:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
410	la	%r11,STACK_FRAME_OVERHEAD(%r15)
411	stmg	%r0,%r7,__PT_R0(%r11)
412	# clear user controlled registers to prevent speculative use
413	xgr	%r0,%r0
414	xgr	%r1,%r1
415	xgr	%r3,%r3
416	xgr	%r4,%r4
417	xgr	%r5,%r5
418	xgr	%r6,%r6
419	xgr	%r7,%r7
420	xgr	%r10,%r10
421	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
422	stmg	%r8,%r9,__PT_PSW(%r11)
423	tm	%r8,0x0001		# coming from user space?
424	jno	1f
425	lctlg	%c1,%c1,__LC_KERNEL_ASCE
4261:	lgr	%r2,%r11		# pass pointer to pt_regs
427	brasl	%r14,\handler
428	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
429	tmhh	%r8,0x0001		# returning to user ?
430	jno	2f
431	lctlg	%c1,%c1,__LC_USER_ASCE
432	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
433	stpt	__LC_EXIT_TIMER
4342:	lmg	%r0,%r15,__PT_R0(%r11)
435	b	__LC_RETURN_LPSWE
436ENDPROC(\name)
437.endm
438
# Instantiate the two asynchronous interrupt entry points.
INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
441
442/*
443 * Load idle PSW.
444 */
445ENTRY(psw_idle)
446	stg	%r14,(__SF_GPRS+8*8)(%r15)
447	stg	%r3,__SF_EMPTY(%r15)
448	larl	%r1,psw_idle_exit
449	stg	%r1,__SF_EMPTY+8(%r15)
450	larl	%r1,smp_cpu_mtid
451	llgf	%r1,0(%r1)
452	ltgr	%r1,%r1
453	jz	.Lpsw_idle_stcctm
454	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
455.Lpsw_idle_stcctm:
456	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
457	BPON
458	STCK	__CLOCK_IDLE_ENTER(%r2)
459	stpt	__TIMER_IDLE_ENTER(%r2)
460	lpswe	__SF_EMPTY(%r15)
461.globl psw_idle_exit
462psw_idle_exit:
463	BR_EX	%r14
464ENDPROC(psw_idle)
465
466/*
467 * Machine check handler routines
468 */
469ENTRY(mcck_int_handler)
470	STCK	__LC_MCCK_CLOCK
471	BPOFF
472	la	%r1,4095		# validate r1
473	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
474	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
475	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
476	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
477	lg	%r12,__LC_CURRENT
478	lmg	%r8,%r9,__LC_MCK_OLD_PSW
479	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
480	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
481	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
482	jno	.Lmcck_panic		# control registers invalid -> panic
483	la	%r14,4095
484	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
485	ptlb
486	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
487	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
488	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
489	jno	0f
490	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
491	jno	0f
492	.insn	 rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
4930:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
494	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
495	jo	0f
496	sr	%r14,%r14
4970:	sfpc	%r14
498	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
499	jo	0f
500	lghi	%r14,__LC_FPREGS_SAVE_AREA
501	ld	%f0,0(%r14)
502	ld	%f1,8(%r14)
503	ld	%f2,16(%r14)
504	ld	%f3,24(%r14)
505	ld	%f4,32(%r14)
506	ld	%f5,40(%r14)
507	ld	%f6,48(%r14)
508	ld	%f7,56(%r14)
509	ld	%f8,64(%r14)
510	ld	%f9,72(%r14)
511	ld	%f10,80(%r14)
512	ld	%f11,88(%r14)
513	ld	%f12,96(%r14)
514	ld	%f13,104(%r14)
515	ld	%f14,112(%r14)
516	ld	%f15,120(%r14)
517	j	1f
5180:	VLM	%v0,%v15,0,%r11
519	VLM	%v16,%v31,256,%r11
5201:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
521	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
522	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
523	jo	3f
524	la	%r14,__LC_SYS_ENTER_TIMER
525	clc	0(8,%r14),__LC_EXIT_TIMER
526	jl	1f
527	la	%r14,__LC_EXIT_TIMER
5281:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
529	jl	2f
530	la	%r14,__LC_LAST_UPDATE_TIMER
5312:	spt	0(%r14)
532	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
5333:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
534	jno	.Lmcck_panic
535	tmhh	%r8,0x0001		# interrupting from user ?
536	jnz	4f
537	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
538	jno	.Lmcck_panic
5394:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
540	tmhh	%r8,0x0001			# interrupting from user ?
541	jnz	.Lmcck_user
542#if IS_ENABLED(CONFIG_KVM)
543	lgr	%r14,%r9
544	larl	%r13,.Lsie_gmap
545	slgr	%r14,%r13
546	lghi	%r13,.Lsie_done - .Lsie_gmap
547	clgr	%r14,%r13
548	jhe	.Lmcck_stack
549	brasl	%r14,.Lcleanup_sie_mcck
550#endif
551	j	.Lmcck_stack
552.Lmcck_user:
553	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
554.Lmcck_stack:
555	lg	%r15,__LC_MCCK_STACK
556.Lmcck_skip:
557	la	%r11,STACK_FRAME_OVERHEAD(%r15)
558	stctg	%c1,%c1,__PT_CR1(%r11)
559	lctlg	%c1,%c1,__LC_KERNEL_ASCE
560	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
561	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
562	stmg	%r0,%r7,__PT_R0(%r11)
563	# clear user controlled registers to prevent speculative use
564	xgr	%r0,%r0
565	xgr	%r1,%r1
566	xgr	%r3,%r3
567	xgr	%r4,%r4
568	xgr	%r5,%r5
569	xgr	%r6,%r6
570	xgr	%r7,%r7
571	xgr	%r10,%r10
572	mvc	__PT_R8(64,%r11),0(%r14)
573	stmg	%r8,%r9,__PT_PSW(%r11)
574	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
575	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
576	lgr	%r2,%r11		# pass pointer to pt_regs
577	brasl	%r14,s390_do_machine_check
578	cghi	%r2,0
579	je	.Lmcck_return
580	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
581	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
582	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
583	la	%r11,STACK_FRAME_OVERHEAD(%r1)
584	lgr	%r15,%r1
585	brasl	%r14,s390_handle_mcck
586.Lmcck_return:
587	lctlg	%c1,%c1,__PT_CR1(%r11)
588	lmg	%r0,%r10,__PT_R0(%r11)
589	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
590	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
591	jno	0f
592	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
593	stpt	__LC_EXIT_TIMER
5940:	lmg	%r11,%r15,__PT_R11(%r11)
595	b	__LC_RETURN_MCCK_LPSWE
596
597.Lmcck_panic:
598	lg	%r15,__LC_NODAT_STACK
599	j	.Lmcck_skip
600ENDPROC(mcck_int_handler)
601
602#
603# PSW restart interrupt handler
604#
605ENTRY(restart_int_handler)
606	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
607	stg	%r15,__LC_SAVE_AREA_RESTART
608	lg	%r15,__LC_RESTART_STACK
609	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
610	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
611	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
612	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
613	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
614	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
615	lg	%r2,__LC_RESTART_DATA
616	lg	%r3,__LC_RESTART_SOURCE
617	ltgr	%r3,%r3				# test source cpu address
618	jm	1f				# negative -> skip source stop
6190:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
620	brc	10,0b				# wait for status stored
6211:	basr	%r14,%r1			# call function
622	stap	__SF_EMPTY(%r15)		# store cpu address
623	llgh	%r3,__SF_EMPTY(%r15)
6242:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
625	brc	2,2b
6263:	j	3b
627ENDPROC(restart_int_handler)
628
	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 * On entry %r14 holds the lowcore save area offset placed there by
 * CHECK_STACK / CHECK_VMAP_STACK.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)	# from the passed save area
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif
649
#if IS_ENABLED(CONFIG_KVM)
# Cleanup helpers called (via brasl) by the machine check and
# interrupt handlers when the old PSW lies inside the sie64a critical
# section.  They leave SIE, restore the kernel ASCE and return with
# %r9 pointing at sie_exit so the caller resumes there.
.Lcleanup_sie_mcck:
	# PSW in (.Lsie_entry, .Lsie_skip] means the mcck hit while the
	# guest was (about to be) running -> flag it for the guest
	larl	%r13,.Lsie_entry
	slgr	%r9,%r13
	larl	%r13,.Lsie_skip
	clgr	%r9,%r13
	jh	.Lcleanup_sie_int
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
.Lcleanup_sie_int:
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r13

#endif
	.section .rodata, "a"
# 64-bit system call table: one .quad entry per syscall, generated
# from the SYSCALL() lines in asm/syscall_table.h.
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

# 31-bit compat system call table, built from the same header with
# the compat (emu) entry points.
#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif
682