xref: /linux/arch/s390/kernel/entry.S (revision 0be3ff0c)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *    S390 low-level entry points.
4 *
5 *    Copyright IBM Corp. 1999, 2012
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/asm-extable.h>
14#include <asm/alternative-asm.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/dwarf.h>
18#include <asm/errno.h>
19#include <asm/ptrace.h>
20#include <asm/thread_info.h>
21#include <asm/asm-offsets.h>
22#include <asm/unistd.h>
23#include <asm/page.h>
24#include <asm/sigp.h>
25#include <asm/irq.h>
26#include <asm/vx-insn.h>
27#include <asm/setup.h>
28#include <asm/nmi.h>
29#include <asm/export.h>
30#include <asm/nospec-insn.h>
31
# Byte offsets of the individual saved general purpose registers inside
# the pt_regs gpr save array (one 8-byte slot per 64-bit register).
__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

# Kernel stack geometry: STACK_SIZE is derived from the page size and
# thread size order; STACK_INIT is the initial stack pointer offset,
# leaving room for a pt_regs area plus the standard frame overhead at
# the top of the stack.
STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

# Lowcore offset of the LPP save area, used by the "lpp" alternatives.
_LPP_OFFSET	= __LC_LPP
54
	# Store the breaking-event-address register to \address. The
	# instruction is patched in via ALTERNATIVE when facility 193 is
	# installed; otherwise this expands to nothing.
	.macro STBEAR address
	ALTERNATIVE "", ".insn	s,0xb2010000,\address", 193
	.endm

	# Load the breaking-event-address register from \address
	# (facility 193 only, nop otherwise).
	.macro LBEAR address
	ALTERNATIVE "", ".insn	s,0xb2000000,\address", 193
	.endm

	# Load PSW from \address: lpswey when facility 193 is installed,
	# otherwise branch to the \lpswe fallback code.
	.macro LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	# Copy the last-break address from the lowcore into the pt_regs
	# area addressed by \reg (facility 193 only, nop otherwise).
	.macro MBEAR reg
	ALTERNATIVE "", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm
70
	# Verify that %r15 is not inside the stack guard area; on overflow
	# load the save area address into %r14 and jump to stack_overflow.
	# Compiled out unless CONFIG_CHECK_STACK is enabled.
	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	# For vmap'ed stacks: round %r15 to its stack base and accept it
	# only if it matches one of the known per-cpu stacks; otherwise
	# jump to stack_overflow with the save area address in %r14.
	# Without CONFIG_VMAP_STACK this is an unconditional jump to
	# \oklabel. Clobbers %r14.
	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE	# mask off the in-stack offset
	oill	%r14,STACK_INIT			# normalize to the stack top
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm
100
	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant.  The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		# Recurse towards the byte that actually contains the mask;
		# a mask spanning more than one byte is rejected because tm
		# can only test a single byte.
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		# Big-endian: byte \bytepos (counted from the least
		# significant end) lives at offset size - bytepos - 1.
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
122
	# Disable branch prediction (opcode 0xb2e8, function code 12);
	# only active when alternative 82 (spectre mitigation) applies.
	.macro BPOFF
	ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	# Re-enable branch prediction (function code 13) — counterpart
	# of BPOFF, same alternative.
	.macro BPON
	ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	# On kernel entry: re-enable branch prediction unless one of the
	# isolation TIF bits in \tif_ptr is set for this task.
	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "", 82
	.endm

	# On kernel exit: depending on the isolation TIF bits, either
	# disable (fc 12) or enable (fc 13) branch prediction.
	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm
141
	/*
	 * The CHKSTG macro jumps to the provided label in case the
	 * machine check interruption code reports one of unrecoverable
	 * storage errors:
	 * - Storage error uncorrected
	 * - Storage key error uncorrected
	 * - Storage degradation with Failing-storage-address validity
	 */
	.macro CHKSTG errlabel
	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
	jnz	\errlabel
	# Storage degradation alone is tolerable; only fatal when the
	# failing-storage-address is also reported as valid.
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
	jz	.Loklabel\@
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
	jnz	\errlabel
.Loklabel\@:
	.endm
159
#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label in case the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 *
	 * Clobbers %r13 and %r14.
	 */
	.macro OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13			# %r14 = \reg - \start
	lghi	%r13,\end - \start
	clgr	%r14,%r13			# unsigned compare also catches \reg < \start
	jhe	\outside_label
	.endm

	# Leave SIE context: clear the "in SIE" bit in the control block,
	# restore the kernel address space and point %r9 at sie_exit so a
	# subsequent return resumes there. Clobbers %r9.
	.macro SIEEXIT
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif
187
	GEN_BR_THUNK %r14			# expected-return thunk for BR_EX %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only in order to avoid that __bpon starts at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would take
	 * an arbitrary symbol name when disassembling this code.
	 * With the added nop in between the __bpon symbol is unique
	 * again.
	 */
	nop	0

# void __bpon(void) - re-enable branch prediction (see BPON macro).
ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)
207
/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT			# %r5 = offset of initial sp in stack
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", "lpp _LPP_OFFSET", 40	# set program parameter if facility 40
	BR_EX	%r14
ENDPROC(__switch_to)
232
#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 *
 * Returns the SIE exit reason code in %r2 (0, or -EFAULT on a fault
 * inside the critical section, see .Lsie_fault).
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap			# no gmap -> keep current asce
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3				# (%r2 will hold the reason code)
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	# faults on the rewind landing pads or at sie_exit are reported
	# to the caller as -EFAULT via .Lsie_fault
	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
303
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 * On entry the user registers are still in __LC_SAVE_AREA_SYNC (%r8-%r15)
 * and in %r0-%r7; %r14 = 0 means "not single-stepped" (see .Lpgm_svcper,
 * which re-enters at .Lsysc_per with %r14 = 1).
 */

ENTRY(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0				# per flag = 0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r12,__LC_CURRENT
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14			# pass per flag to __do_syscall
	brasl	%r14,__do_syscall
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(system_call)
346
#
# a new process exits the kernel with ret_from_fork
# (return-to-user sequence mirrors the tail of system_call)
#
ENTRY(ret_from_fork)
	lgr	%r3,%r11			# pass pt_regs pointer
	brasl	%r14,__ret_from_fork
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)
361
/*
 * Program check handler routine
 *
 * On entry: old PSW in __LC_PGM_OLD_PSW, user gprs 8-15 saved to
 * __LC_SAVE_AREA_SYNC. %r10 carries the pt_regs flags (0 or
 * _PIF_GUEST_FAULT) into the common path.
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r10,0				# default: no pt_regs flags
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	SIEEXIT
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call: redirect to the svc handler entry with
# the per flag (%r14) set to 1 so __do_syscall knows about the PER event
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1			# per flag = 1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
ENDPROC(pgm_check_handler)
434
/*
 * Interrupt handler macro used for external and IO interrupts.
 * @name:       symbol name of the generated entry point
 * @lc_old_psw: lowcore location of the interruption old PSW
 * @handler:    C function called with a pt_regs pointer in %r2
 */
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	# interrupted inside the sie64a critical section -> clean up first
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	# tmhh, not tm: the PSW problem-state bit sits in the high halfword
	# of %r8; tm takes a storage operand and cannot test a register
	# (matches the other PSW tests in this file)
	tmhh	%r8,0x0001		# coming from user space?
	jno	1f
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
1:	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(\name)
.endm
492
# External and I/O interrupt entry points, instantiated from the
# INT_HANDLER template above.
INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
495
/*
 * Load idle PSW.
 *  %r2 = pointer to the per-cpu idle data (clock/timer/cycle fields)
 *  %r3 = idle PSW mask to load
 * Execution resumes at psw_idle_exit after the wakeup interrupt.
 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)		# PSW mask half of the idle PSW
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)		# address half: resume point
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm		# no multithreading -> skip stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	stckf	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)		# enter enabled wait
.globl psw_idle_exit
psw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)
519
/*
 * Machine check handler routines
 *
 * The hardware may deliver a machine check with partially invalid
 * register/PSW content; each piece of state is validated from the
 * lowcore save areas before use, and .Lmcck_panic is taken when the
 * interruption code says the state cannot be trusted.
 */
ENTRY(mcck_int_handler)
	stckf	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)		# validate bear
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	# cpu timer invalid: reconstruct a plausible value from the most
	# recent of sys-enter/exit/last-update timers
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	6f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,6f
	OUTSIDE	%r9,.Lsie_entry,.Lsie_skip,4f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST	# mcck hit while in SIE
	j	5f
4:	CHKSTG	.Lmcck_panic
5:	larl	%r14,.Lstosm_tmp
	stosm	0(%r14),0x04		# turn dat on, keep irqs off
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
	j	.Lmcck_stack
#endif
6:	CHKSTG	.Lmcck_panic
	larl	%r14,.Lstosm_tmp
	stosm	0(%r14),0x04		# turn dat on, keep irqs off
	tmhh	%r8,0x0001		# interrupting from user ?
	jz	.Lmcck_stack
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs on mcck stack
	stctg	%c1,%c1,__PT_CR1(%r11)		# remember interrupted asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)	# gprs 8-15 from the save area
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return		# nothing more to do
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	brasl	%r14,s390_handle_mcck
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)	# restore interrupted asce
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,.Lstop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f			# lost the race -> just stop below
	larl	%r7,.Lthis_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f			# skip stopping ourselves (done last)
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
ENDPROC(mcck_int_handler)
649
# Restart interrupt: run the function from __LC_RESTART_FN on the
# restart stack with __LC_RESTART_DATA as argument, optionally stopping
# the requesting (source) cpu first, then stop this cpu.
ENTRY(restart_int_handler)
	ALTERNATIVE "", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	la	%r15,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
0:	larl	%r15,.Lstosm_tmp
	stosm	0(%r15),0x04			# turn dat on, keep irqs off
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b				# busy -> retry
3:	j	3b				# stopped; never reached
ENDPROC(restart_int_handler)
679
	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 * Entered from CHECK_STACK / CHECK_VMAP_STACK with the lowcore save
 * area address in %r14 and the last-break value in %r10.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)	# gprs 8-15 from the save area
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif
700
	.section .data, "aw"
		.align	4
.Lstop_lock:	.long	0		# cas lock for the mcck CPU-stopper
.Lthis_cpu:	.short	0		# scratch for stap in mcck panic path
.Lstosm_tmp:	.byte	0		# scratch byte for stosm system-mask stores
	.section .rodata, "a"
# 64-bit syscall table: one .quad per entry, generated from the shared
# syscall table header via the SYSCALL() macro.
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

# 31-bit compat syscall table, built from the same header with the
# emulation entry points.
#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif
721