// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
#define unsafe_put_sigset_t	unsafe_put_compat_sigset
#define unsafe_get_sigset_t	unsafe_get_compat_sigset

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

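/*
 * On a 64-bit kernel the saved pt_regs slots are 64 bits wide, but a
 * 32-bit task expects 32-bit gregs in its mcontext, so each register
 * is narrowed and stored individually rather than bulk-copied.
 */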
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	for (i = 0; i <= PT_RESULT; i++) {
		/* Force the user to always see softe as 1 (interrupts enabled) */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		unsafe_put_user(val, &frame->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}

static __always_inline int
__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

#define unsafe_put_sigset_t(uset, set, label) do {			\
	sigset_t __user *__us = uset;					\
	const sigset_t *__s = set;					\
									\
	unsafe_copy_to_user(__us, __s, sizeof(*__us), label);		\
} while (0)

#define unsafe_get_sigset_t	unsafe_get_user_sigset

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

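/*
 * On a 32-bit kernel the pt_regs layout matches the user gregs layout
 * for the first GP_REGS_SIZE bytes, so bulk copies are sufficient.
 */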
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
	return 0;

failed:
	return 1;
}

static __always_inline
int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);

	/* copy from orig_r3 (the word after the MSR) up to the end */
	unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			      GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);

	return 0;

failed:
	return 1;
}
#endif

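/*
 * Goto-style wrappers so that the helpers above compose with the other
 * unsafe_*() accessors inside a single user_access_begin() block.
 */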
#define unsafe_save_general_regs(regs, frame, label) do {	\
	if (__unsafe_save_general_regs(regs, frame))		\
		goto label;					\
} while (0)

#define unsafe_restore_general_regs(regs, frame, label) do {	\
	if (__unsafe_restore_general_regs(regs, frame))		\
		goto label;					\
} while (0)

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
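
/*
 * Illustrative layout of the frame above (higher addresses first),
 * relative to the user stack pointer at delivery:
 *
 *	[ original user SP ]
 *	[ abigap (56 words) ]
 *	[ mctx_transact / sctx_transact ]	(TM kernels only)
 *	[ mctx ]
 *	[ sctx ]	<- frame
 *	[ __SIGNAL_FRAMESIZE gap ]
 *	[ new SP for the handler ]
 */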

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr)
		flush_altivec_to_thread(current);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
	if (current->thread.used_vsr && ctx_has_vsx_region)
		flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}

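/*
 * Note the two-step split: prepare_save_user_regs() above runs the
 * flush_*_to_thread() helpers before any user access window is opened,
 * so that __unsafe_save_user_regs() below only has to issue unsafe_*()
 * accesses between user_access_begin() and user_access_end().
 */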
static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
				   struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* save general registers */
	unsafe_save_general_regs(regs, frame, failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
			failed);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	/* We need to write 0 to the top 32 bits of the MSR in the tm frame
	 * so that we can check it on the restore to see if TM is active
	 */
	if (tm_frame)
		unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}

#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
	if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx))	\
		goto label;						\
} while (0)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static void prepare_save_tm_user_regs(void)
{
	WARN_ON(tm_suspend_disabled);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}

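/*
 * 'msr' below is the MSR value the caller sampled before
 * get_tm_stackpointer() reclaimed the transaction, so it still carries
 * the transactional FP/VEC/VSX and TS bits describing the live state.
 */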
static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	/* Save both sets of general registers */
	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
	unsafe_save_general_regs(regs, tm_frame, failed);

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		if (msr & MSR_VEC)
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.vr_state,
					    ELF_NVRREG * sizeof(vector128), failed);
		else
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.ckvr_state,
					    ELF_NVRREG * sizeof(vector128), failed);

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	unsafe_put_user(current->thread.ckvrsave,
			(u32 __user *)&frame->mc_vregs[32], failed);
	if (msr & MSR_VEC)
		unsafe_put_user(current->thread.vrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
	else
		unsafe_put_user(current->thread.ckvrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
#endif /* CONFIG_ALTIVEC */

	unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
	if (msr & MSR_FP)
		unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
	else
		unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
		if (msr & MSR_VSX)
			unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
		else
			unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in __unsafe_save_user_regs().
	 */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
#else
static void prepare_save_tm_user_regs(void) { }

static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	return 0;
}
#endif

#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr))	\
		goto label;						\
} while (0)

/*
 * Restore the current user register values from the user stack
 * (except for MSR).  'sig' is non-zero for a signal return, in which
 * case the saved little-endian mode is also restored; otherwise r2
 * (TLS) is preserved across the restore.
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;
	/*
	 * Restore the general registers, except for MSR and SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	unsafe_restore_general_regs(regs, sr, failed);
	set_trap_norestart(regs);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
				      ELF_NEVRREG * sizeof(u32), failed);
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	user_read_access_end();
	return 0;

failed:
	user_read_access_end();
	return 1;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	if (tm_suspend_disabled)
		return 1;
	/*
	 * Restore the general registers, except for MSR and SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See the comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;

	unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
	unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.ckvrsave,
			(u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the checkpointed VSX registers from the stack to
		 * a local buffer, then write this out to the thread_struct
		 */
		unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
				      ELF_NEVRREG * sizeof(u32), failed);
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	unsafe_get_user(current->thread.spefscr,
			(u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	user_read_access_end();

	if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
		return 1;

	unsafe_restore_general_regs(regs, tm_sr, failed);

#ifdef CONFIG_ALTIVEC
	/* restore altivec registers from the stack */
	if (msr & MSR_VEC)
		unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave,
			(u32 __user *)&tm_sr->mc_vregs[32], failed);
#endif /* CONFIG_ALTIVEC */

	unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	if (msr & MSR_VSX) {
		/*
		 * Restore the transactional VSX registers from the stack to
		 * a local buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	}
#endif /* CONFIG_VSX */

	/* Get the top half of the MSR from the user context */
	unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
	msr_hi <<= 32;

	user_read_access_end();

	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disable preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] is updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	preempt_enable();

	return 0;

failed:
	user_read_access_end();
	return 1;
}
#else
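/*
 * Without CONFIG_PPC_TRANSACTIONAL_MEM, MSR_TM_ACTIVE() is always false
 * at the call sites, so this stub is never reached; it only keeps the
 * callers free of #ifdefs.
 */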
static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	return 0;
}
#endif

#ifdef CONFIG_PPC64

#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		unsafe_put_user((unsigned long)&frame->uc_transact,
				&frame->uc.uc_link, failed);
		unsafe_put_user((unsigned long)tm_mctx,
				&frame->uc_transact.uc_regs, failed);
#endif
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, failed);
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
	}

	/* Use the vDSO sigreturn trampoline if mapped, else write one on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
				failed);
		unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
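		/*
		 * PPC_INST_ADDI encodes 'addi r0,0,0' (i.e. li r0,0), so
		 * adding the syscall number into the immediate field yields
		 * 'li r0,__NR_rt_sigreturn'.  The dcbst/icbi sequence below
		 * flushes the stored instructions from the data cache and
		 * invalidates the icache line so instruction fetch sees the
		 * fresh trampoline.
		 */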
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long)&frame->info;
	regs->gpr[5] = (unsigned long)&frame->uc;
	regs->gpr[6] = (unsigned long)frame;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_rt_signal32", frame);

	return 1;
}

/*
 * OK, we're invoking a handler
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
#ifdef CONFIG_PPC64
	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
	unsafe_put_user(ksig->sig, &sc->signal, failed);

	if (MSR_TM_ACTIVE(msr))
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	else
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);

	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	user_access_end();

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_signal32", frame);

	return 1;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		unsafe_get_user(cmcp, &ucp->uc_regs, failed);
		mcp = (struct mcontext __user *)(u64)cmcp;
	}
#else
	unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
	user_read_access_end();

	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
	unsafe_get_user(cmcp, &ucp->uc_regs, failed);

	user_read_access_end();

	if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
#endif

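/*
 * swapcontext: save the current user context into old_ctx (if non-NULL)
 * and resume from new_ctx (if non-NULL).  On 64-bit kernels ctx_size is
 * used to detect whether the user's ucontext includes the VSX region.
 */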
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it sets the MSR VSX bit but
	 * doesn't provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand.
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		prepare_save_user_regs(ctx_has_vsx_region);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;
		unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
		unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
		unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;

failed:
	user_write_access_end();
	return -EFAULT;
}

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended, as we can never be
	 * active in the kernel; if we somehow are, there is nothing better
	 * to do than go ahead and hit the Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're in a
			 * transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Clear the MSR[TS] bits because the ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad Thing at RFID.
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}

#ifdef CONFIG_PPC32
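/*
 * sys_debug_setcontext: a setcontext variant that additionally lets the
 * task request single-stepping or branch tracing via an array of
 * sig_dbg_op entries before its registers are replaced from ctx.
 */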
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/*
	 * We wait until here to actually install the values in the
	 * registers so if we fail in the above loop, it will not
	 * affect the contents of these registers.  After this point,
	 * failure is a problem, anyway, and it's very unlikely unless
	 * the user is really doing something wrong.
	 */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp = NULL;
	unsigned long long msr_hi = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

	mcp = (struct mcontext __user *)&sf->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
#endif
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else {
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		if (restore_user_regs(regs, sr, 1)) {
			signal_fault(current, regs, "sys_sigreturn", sr);

			force_sig(SIGSEGV);
			return 0;
		}
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	signal_fault(current, regs, "sys_sigreturn", sc);

	force_sig(SIGSEGV);
	return 0;
}