xref: /linux/arch/powerpc/kernel/signal_32.c (revision 0be3ff0c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
4  *
5  *  PowerPC version
6  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7  * Copyright (C) 2001 IBM
8  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
10  *
11  *  Derived from "arch/i386/kernel/signal.c"
12  *    Copyright (C) 1991, 1992 Linus Torvalds
13  *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
14  */
15 
16 #include <linux/sched.h>
17 #include <linux/mm.h>
18 #include <linux/smp.h>
19 #include <linux/kernel.h>
20 #include <linux/signal.h>
21 #include <linux/errno.h>
22 #include <linux/elf.h>
23 #include <linux/ptrace.h>
24 #include <linux/pagemap.h>
25 #include <linux/ratelimit.h>
26 #include <linux/syscalls.h>
27 #ifdef CONFIG_PPC64
28 #include <linux/compat.h>
29 #else
30 #include <linux/wait.h>
31 #include <linux/unistd.h>
32 #include <linux/stddef.h>
33 #include <linux/tty.h>
34 #include <linux/binfmts.h>
35 #endif
36 
37 #include <linux/uaccess.h>
38 #include <asm/cacheflush.h>
39 #include <asm/syscalls.h>
40 #include <asm/sigcontext.h>
41 #include <asm/vdso.h>
42 #include <asm/switch_to.h>
43 #include <asm/tm.h>
44 #include <asm/asm-prototypes.h>
45 #ifdef CONFIG_PPC64
46 #include "ppc32.h"
47 #include <asm/unistd.h>
48 #else
49 #include <asm/ucontext.h>
50 #endif
51 
52 #include "signal.h"
53 
54 
55 #ifdef CONFIG_PPC64
56 #define old_sigaction	old_sigaction32
57 #define sigcontext	sigcontext32
58 #define mcontext	mcontext32
59 #define ucontext	ucontext32
60 
61 /*
62  * Userspace code may pass a ucontext which doesn't include VSX added
63  * at the end.  We need to check for this case.
64  */
65 #define UCONTEXTSIZEWITHOUTVSX \
66 		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
67 
68 /*
69  * Returning 0 means we return to userspace via
70  * ret_from_except and thus restore all user
71  * registers from *regs.  This is what we need
72  * to do when a signal has been delivered.
73  */
74 
75 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
76 #undef __SIGNAL_FRAMESIZE
77 #define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
78 #undef ELF_NVRREG
79 #define ELF_NVRREG	ELF_NVRREG32
80 
81 /*
82  * Functions for flipping sigsets (thanks to brain dead generic
83  * implementation that makes things simple for little endian only)
84  */
85 #define unsafe_put_sigset_t	unsafe_put_compat_sigset
86 #define unsafe_get_sigset_t	unsafe_get_compat_sigset
87 
88 #define to_user_ptr(p)		ptr_to_compat(p)
89 #define from_user_ptr(p)	compat_ptr(p)
90 
/*
 * Copy the 64-bit pt_regs into the 32-bit mc_gregs array of the user
 * frame, truncating each register to 32 bits (val is an int).
 * PT_SOFTE is forced to 1 so userspace always sees soft interrupts
 * as enabled.  Caller must be inside a user access section.
 * Returns 0 on success, 1 if a user access faulted.
 */
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	for (i = 0; i <= PT_RESULT; i ++) {
		/* Force user to always see softe as 1 (interrupts enabled) */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		unsafe_put_user(val, &frame->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}
111 
/*
 * Copy the 32-bit mc_gregs array from the user frame back into the
 * 64-bit pt_regs.  MSR and SOFTE are deliberately skipped: userspace
 * must not set those directly (MSR is handled separately by callers).
 * Caller must be inside a user access section.
 * Returns 0 on success, 1 if a user access faulted.
 */
static __always_inline int
__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}
128 
129 #else /* CONFIG_PPC64 */
130 
131 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
132 
133 #define unsafe_put_sigset_t(uset, set, label) do {			\
134 	sigset_t __user *__us = uset	;				\
135 	const sigset_t *__s = set;					\
136 									\
137 	unsafe_copy_to_user(__us, __s, sizeof(*__us), label);		\
138 } while (0)
139 
140 #define unsafe_get_sigset_t	unsafe_get_user_sigset
141 
142 #define to_user_ptr(p)		((unsigned long)(p))
143 #define from_user_ptr(p)	((void __user *)(p))
144 
/*
 * 32-bit native case: pt_regs and the user mc_gregs have the same
 * layout, so a single bulk copy suffices.  Caller must be inside a
 * user access section.  Returns 0 on success, 1 on fault.
 */
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
	return 0;

failed:
	return 1;
}
154 
/*
 * 32-bit native case: bulk-copy the user registers back, but in two
 * pieces so that the MSR word is never written from userspace.
 * Caller must be inside a user access section.
 * Returns 0 on success, 1 on fault.
 */
static __always_inline
int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);

	/* copy from orig_r3 (the word after the MSR) up to the end */
	unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			      GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);

	return 0;

failed:
	return 1;
}
170 #endif
171 
/*
 * goto-label wrappers around the helpers above: jump to 'label' if the
 * underlying copy reported a user-access fault.
 */
#define unsafe_save_general_regs(regs, frame, label) do {	\
	if (__unsafe_save_general_regs(regs, frame))		\
		goto label;					\
} while (0)

#define unsafe_restore_general_regs(regs, frame, label) do {	\
	if (__unsafe_restore_general_regs(regs, frame))		\
		goto label;					\
} while (0)
181 
/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;	/* second context pair, used only   */
	struct mcontext	mctx_transact;	/* when a transaction was active    */
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
207 
/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;		/* 32-bit layout for compat tasks */
#else
	struct siginfo info;
#endif
	struct ucontext	uc;		/* ucontext32 on PPC64 (see #defines above) */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;	/* transactional state, linked via uc.uc_link */
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
235 
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 *
 * prepare_save_user_regs() flushes live FP/Altivec/VSX/SPE state out of
 * the CPU into current->thread before the copy-out to userspace; VSX is
 * only flushed when the target context has room for it
 * (@ctx_has_vsx_region).
 */
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr)
		flush_altivec_to_thread(current);
	/* Snapshot the live VRSAVE SPR into the thread struct */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
	if (current->thread.used_vsr && ctx_has_vsx_region)
		flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}
260 
/*
 * Copy the user register state (GPRs, FP, and any used Altivec/VSX/SPE
 * state) into @frame.  The MSR word written last encodes which optional
 * state is valid (MSR_VEC/MSR_VSX/MSR_SPE bits).  If @tm_frame is
 * non-NULL its MSR word is zeroed so sigreturn can tell TM was not
 * active.  Caller must be inside a user access section and must have
 * called prepare_save_user_regs() first.
 * Returns 0 on success, 1 on fault.
 */
static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
				   struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* save general registers */
	unsafe_save_general_regs(regs, frame, failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
			failed);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame)
		unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
337 
/* Jump to 'label' if saving the user register state faults. */
#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
	if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx))	\
		goto label;						\
} while (0)
342 
343 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static void prepare_save_tm_user_regs(void)
{
	/* Callers only take this path when MSR_TM_ACTIVE(msr); that should
	 * not happen when TM suspend is disabled, hence the WARN. */
	WARN_ON(tm_suspend_disabled);

	/* Snapshot the live VRSAVE SPR into the checkpointed slot */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
}
360 
/*
 * Copy both register sets to userspace for a task in a transaction:
 * the checkpointed state goes to @frame, the transactional (live)
 * state to @tm_frame.  @msr is the thread's MSR saved before
 * get_tm_stackpointer() changed it; its MSR_VEC/MSR_FP/MSR_VSX bits
 * select whether the transactional copies come from the live or the
 * checkpointed thread state.  Caller must be inside a user access
 * section.  Returns 0 on success, 1 on fault.
 */
static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	/* Save both sets of general registers */
	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
	unsafe_save_general_regs(regs, tm_frame, failed);

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);

	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		if (msr & MSR_VEC)
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.vr_state,
					    ELF_NVRREG * sizeof(vector128), failed);
		else
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.ckvr_state,
					    ELF_NVRREG * sizeof(vector128), failed);

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	unsafe_put_user(current->thread.ckvrsave,
			(u32 __user *)&frame->mc_vregs[32], failed);
	if (msr & MSR_VEC)
		unsafe_put_user(current->thread.vrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
	else
		unsafe_put_user(current->thread.ckvrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);

	unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
	if (msr & MSR_FP)
		unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
	else
		unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);

	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
		if (msr & MSR_VSX)
			unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
		else
			unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);

		msr |= MSR_VSX;
	}

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
438 #else
/* No transactional memory support in this config: nothing to prepare. */
static void prepare_save_tm_user_regs(void) { }
440 
/* No transactional memory support in this config: saving TM state is a
 * no-op that always succeeds. */
static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	return 0;
}
446 #endif
447 
/* Jump to 'label' if saving the transactional register state faults. */
#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr))	\
		goto label;						\
} while (0)
452 
/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 *
 * @sig is non-zero for a signal return: then the previous little-endian
 * mode bit is restored from the frame's MSR; for a non-signal restore
 * (swapcontext) r2 (the TLS pointer) is preserved across the copy.
 * Returns 0 on success, 1 on any user-access fault.
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	unsafe_restore_general_regs(regs, sr, failed);
	set_trap_norestart(regs);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instruction.
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

#ifdef CONFIG_SPE
	/*
	 * Force the process to reload the spe registers from
	 * current->thread when it next does spe instructions.
	 * Since this is user ABI, we must enforce the sizing.
	 */
	BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
	regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		unsafe_copy_from_user(&current->thread.spe, &sr->mc_vregs,
				      sizeof(current->thread.spe), failed);
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(&current->thread.spe, 0, sizeof(current->thread.spe));

	/* Always get SPEFSCR back */
	unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	user_read_access_end();
	return 0;

failed:
	user_read_access_end();
	return 1;
}
556 
557 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 *
 * @sr holds the checkpointed context, @tm_sr the transactional one.
 * Returns 0 on success, 1 on fault, disabled TM suspend, or an invalid
 * (reserved) TM state in the user-supplied MSR.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	unsigned long msr, msr_hi;
	int i;

	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;

	/* First pass: checkpointed state from 'sr' into ckpt_regs etc. */
	unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
	unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);

	/* Restore the previous little-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.ckvrsave,
			(u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);

	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}

	user_read_access_end();

	/* Second pass: transactional state from 'tm_sr' */
	if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
		return 1;

	unsafe_restore_general_regs(regs, tm_sr, failed);

	/* restore altivec registers from the stack */
	if (msr & MSR_VEC)
		unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave,
			(u32 __user *)&tm_sr->mc_vregs[32], failed);

	unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);

	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	}

	/* Get the top half of the MSR from the user context */
	unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
	msr_hi <<= 32;

	user_read_access_end();

	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] being updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
	}
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs_set_return_msr(regs, regs->msr | MSR_VEC);
	}

	preempt_enable();

	return 0;

failed:
	user_read_access_end();
	return 1;
}
709 #else
/* No transactional memory support in this config: restoring TM state is
 * a no-op that always succeeds. */
static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	return 0;
}
715 #endif
716 
717 #ifdef CONFIG_PPC64
718 
719 #define copy_siginfo_to_user	copy_siginfo_to_user32
720 
721 #endif /* CONFIG_PPC64 */
722 
/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 *
 * Writes the rt_sigframe (siginfo + ucontext(s) + trampoline) on the
 * user stack and points the registers at the handler.  Returns 0 on
 * success; 1 on failure, after reporting via signal_fault().
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
	/* Flush live register state to current->thread before copy-out */
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		/* Link the transactional ucontext from the primary one */
		unsafe_put_user((unsigned long)&frame->uc_transact,
				&frame->uc.uc_link, failed);
		unsafe_put_user((unsigned long)tm_mctx,
				&frame->uc_transact.uc_regs, failed);
#endif
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, failed);
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
	}

	/* Set up the sigreturn trampoline: in the vdso if available,
	 * otherwise li r0,__NR_rt_sigreturn; sc written into mc_pad
	 * (flushed to the instruction cache by the inline asm below). */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long)&frame->info;
	regs->gpr[5] = (unsigned long)&frame->uc;
	regs->gpr[6] = (unsigned long)frame;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_rt_signal32", frame);

	return 1;
}
822 
/*
 * OK, we're invoking a handler: set up a non-RT ("classic") signal
 * frame (sigcontext + mcontext + trampoline) on the user stack.
 * Returns 0 on success; 1 on failure, after reporting via
 * signal_fault().
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
#endif
	/* Flush live register state to current->thread before copy-out */
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
	/* The second word of the 64-bit sigmask lands in _unused[3] */
#ifdef CONFIG_PPC64
	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
	unsafe_put_user(ksig->sig, &sc->signal, failed);

	if (MSR_TM_ACTIVE(msr))
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	else
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);

	/* Set up the sigreturn trampoline: in the vdso if available,
	 * otherwise li r0,__NR_sigreturn; sc written into mc_pad
	 * (flushed to the instruction cache by the inline asm below). */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	user_access_end();

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_signal32", frame);

	return 1;
}
910 
/*
 * Restore the blocked-signal mask and register state from a user
 * ucontext.  @sig is passed through to restore_user_regs(): non-zero
 * for a real signal return.  Returns 0 on success or -EFAULT.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
	{
		/* uc_regs is a 32-bit compat pointer; widen it */
		u32 cmcp;

		unsafe_get_user(cmcp, &ucp->uc_regs, failed);
		mcp = (struct mcontext __user *)(u64)cmcp;
	}
#else
	unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
	user_read_access_end();

	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
942 
943 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Like do_setcontext(), but for a transactional return: fetches the
 * mcontext pointers from both the checkpointed (@ucp) and transactional
 * (@tm_ucp) ucontexts before handing off to restore_tm_user_regs().
 * Returns 0 on success or -EFAULT.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
	unsafe_get_user(cmcp, &ucp->uc_regs, failed);

	user_read_access_end();

	/* tm_ucp is read with a plain (self-checking) __get_user, outside
	 * the unsafe region above */
	if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
978 #endif
979 
/*
 * swapcontext syscall: optionally save the current context to @old_ctx,
 * then optionally switch to @new_ctx.  @ctx_size must be at least the
 * size of a ucontext; on PPC64 it additionally decides whether the
 * context has room for the VSX register region.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* Peek at the MSR so VSX demands can be validated below. */
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject a new context whose MSR sets the VSX bit but whose
	 * size does not provide room for the VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		prepare_save_user_regs(ctx_has_vsx_region);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;
		/* Save regs, the current blocked mask, and the mcontext ptr. */
		unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
		unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
		unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	/* Probe the whole new context up front, before regs are modified. */
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_readable((char __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0)) {
		force_exit_sig(SIGSEGV);
		return -EFAULT;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

failed:
	/* Fault inside the write window while saving old_ctx. */
	user_write_access_end();
	return -EFAULT;
}
1081 
/*
 * rt_sigreturn syscall: tear down the rt signal frame found above the
 * current stack pointer and restore the pre-signal context (with TM
 * recheckpoint handling when the saved MSR carries TM state).
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* The frame sits just above the back-chain + 16-byte pad. */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended as we can never be
	 * active in the kernel; if we somehow were active, there is
	 * nothing better to do than go ahead and hit a TM Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	/* A non-NULL uc_link means a transactional context was saved. */
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're
			 * transactional.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad Thing at RFID
		 */
		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	/* Malformed frame: log (ratelimited) and kill the task. */
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}
1182 
#ifdef CONFIG_PPC32
/*
 * debug_setcontext syscall (32-bit only): apply @ndbg debug operations
 * from @dbg (single-stepping / branch-tracing toggles), then restore
 * the context @ctx like setcontext would.  All ops are validated before
 * any MSR/DBCR0 state is committed.
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	/* First pass: validate every op, accumulating the new MSR/DBCR0. */
	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* Only drop MSR_DE if no other debug event
				 * remains armed in DBCR0/DBCR1. */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* Branch tracing is unsupported on BookE debug HW. */
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	/* Probe the whole context before modifying user regs. */
	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_readable((char __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
1281 
1282 /*
1283  * Do a signal return; undo the signal stack.
1284  */
1285 #ifdef CONFIG_PPC64
1286 COMPAT_SYSCALL_DEFINE0(sigreturn)
1287 #else
1288 SYSCALL_DEFINE0(sigreturn)
1289 #endif
1290 {
1291 	struct pt_regs *regs = current_pt_regs();
1292 	struct sigframe __user *sf;
1293 	struct sigcontext __user *sc;
1294 	struct sigcontext sigctx;
1295 	struct mcontext __user *sr;
1296 	sigset_t set;
1297 	struct mcontext __user *mcp;
1298 	struct mcontext __user *tm_mcp = NULL;
1299 	unsigned long long msr_hi = 0;
1300 
1301 	/* Always make any pending restarted system calls return -EINTR */
1302 	current->restart_block.fn = do_no_restart_syscall;
1303 
1304 	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1305 	sc = &sf->sctx;
1306 	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1307 		goto badframe;
1308 
1309 #ifdef CONFIG_PPC64
1310 	/*
1311 	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1312 	 * unused part of the signal stackframe
1313 	 */
1314 	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1315 #else
1316 	set.sig[0] = sigctx.oldmask;
1317 	set.sig[1] = sigctx._unused[3];
1318 #endif
1319 	set_current_blocked(&set);
1320 
1321 	mcp = (struct mcontext __user *)&sf->mctx;
1322 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1323 	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1324 	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1325 		goto badframe;
1326 #endif
1327 	if (MSR_TM_ACTIVE(msr_hi<<32)) {
1328 		if (!cpu_has_feature(CPU_FTR_TM))
1329 			goto badframe;
1330 		if (restore_tm_user_regs(regs, mcp, tm_mcp))
1331 			goto badframe;
1332 	} else {
1333 		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1334 		if (restore_user_regs(regs, sr, 1)) {
1335 			signal_fault(current, regs, "sys_sigreturn", sr);
1336 
1337 			force_sig(SIGSEGV);
1338 			return 0;
1339 		}
1340 	}
1341 
1342 	set_thread_flag(TIF_RESTOREALL);
1343 	return 0;
1344 
1345 badframe:
1346 	signal_fault(current, regs, "sys_sigreturn", sc);
1347 
1348 	force_sig(SIGSEGV);
1349 	return 0;
1350 }
1351