xref: /linux/arch/arm/kernel/ptrace.c (revision 44f57d78)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/arch/arm/kernel/ptrace.c
4  *
5  *  By Ross Biro 1/23/92
6  * edited by Linus Torvalds
7  * ARM modifications Copyright (C) 2000 Russell King
8  */
9 #include <linux/kernel.h>
10 #include <linux/sched/signal.h>
11 #include <linux/sched/task_stack.h>
12 #include <linux/mm.h>
13 #include <linux/elf.h>
14 #include <linux/smp.h>
15 #include <linux/ptrace.h>
16 #include <linux/user.h>
17 #include <linux/security.h>
18 #include <linux/init.h>
19 #include <linux/signal.h>
20 #include <linux/uaccess.h>
21 #include <linux/perf_event.h>
22 #include <linux/hw_breakpoint.h>
23 #include <linux/regset.h>
24 #include <linux/audit.h>
25 #include <linux/tracehook.h>
26 #include <linux/unistd.h>
27 
28 #include <asm/pgtable.h>
29 #include <asm/traps.h>
30 
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/syscalls.h>
33 
34 #define REG_PC	15
35 #define REG_PSR	16
/*
 * This does not yet catch signals sent when the child dies;
 * that would need to be handled in exit.c or in signal.c.
 */
40 
41 #if 0
42 /*
43  * Breakpoint SWI instruction: SWI &9F0001
44  */
45 #define BREAKINST_ARM	0xef9f0001
46 #define BREAKINST_THUMB	0xdf00		/* fill this in later */
47 #else
48 /*
49  * New breakpoints - use an undefined instruction.  The ARM architecture
50  * reference manual guarantees that the following instruction space
51  * will produce an undefined instruction exception on all CPUs:
52  *
53  *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
54  *  Thumb: 1101 1110 xxxx xxxx
55  */
56 #define BREAKINST_ARM	0xe7f001f0
57 #define BREAKINST_THUMB	0xde01
58 #endif
59 
60 struct pt_regs_offset {
61 	const char *name;
62 	int offset;
63 };
64 
65 #define REG_OFFSET_NAME(r) \
66 	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
67 #define REG_OFFSET_END {.name = NULL, .offset = 0}
68 
69 static const struct pt_regs_offset regoffset_table[] = {
70 	REG_OFFSET_NAME(r0),
71 	REG_OFFSET_NAME(r1),
72 	REG_OFFSET_NAME(r2),
73 	REG_OFFSET_NAME(r3),
74 	REG_OFFSET_NAME(r4),
75 	REG_OFFSET_NAME(r5),
76 	REG_OFFSET_NAME(r6),
77 	REG_OFFSET_NAME(r7),
78 	REG_OFFSET_NAME(r8),
79 	REG_OFFSET_NAME(r9),
80 	REG_OFFSET_NAME(r10),
81 	REG_OFFSET_NAME(fp),
82 	REG_OFFSET_NAME(ip),
83 	REG_OFFSET_NAME(sp),
84 	REG_OFFSET_NAME(lr),
85 	REG_OFFSET_NAME(pc),
86 	REG_OFFSET_NAME(cpsr),
87 	REG_OFFSET_NAME(ORIG_r0),
88 	REG_OFFSET_END,
89 };
90 
91 /**
92  * regs_query_register_offset() - query register offset from its name
93  * @name:	the name of a register
94  *
95  * regs_query_register_offset() returns the offset of a register in struct
96  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
97  */
98 int regs_query_register_offset(const char *name)
99 {
100 	const struct pt_regs_offset *roff;
101 	for (roff = regoffset_table; roff->name != NULL; roff++)
102 		if (!strcmp(roff->name, name))
103 			return roff->offset;
104 	return -EINVAL;
105 }
106 
107 /**
108  * regs_query_register_name() - query register name from its offset
109  * @offset:	the offset of a register in struct pt_regs.
110  *
111  * regs_query_register_name() returns the name of a register from its
112  * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
113  */
114 const char *regs_query_register_name(unsigned int offset)
115 {
116 	const struct pt_regs_offset *roff;
117 	for (roff = regoffset_table; roff->name != NULL; roff++)
118 		if (roff->offset == offset)
119 			return roff->name;
120 	return NULL;
121 }
122 
123 /**
124  * regs_within_kernel_stack() - check the address in the stack
125  * @regs:      pt_regs which contains kernel stack pointer.
126  * @addr:      address which is checked.
127  *
128  * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
129  * If @addr is within the kernel stack, it returns true. If not, returns false.
130  */
131 bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
132 {
133 	return ((addr & ~(THREAD_SIZE - 1))  ==
134 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
135 }
136 
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * Returns the @n th word above the kernel stack pointer recorded in
 * @regs. If that slot falls outside the kernel stack, returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *slot;

	slot = (unsigned long *)kernel_stack_pointer(regs) + n;
	if (!regs_within_kernel_stack(regs, (unsigned long)slot))
		return 0;

	return *slot;
}
155 
/*
 * this routine will get a word off of the processes privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	/* @offset is a word index into the saved user registers. */
	return task_pt_regs(task)->uregs[offset];
}
166 
167 /*
168  * this routine will put a word on the processes privileged stack.
169  * the offset is how far from the base addr as stored in the THREAD.
170  * this routine assumes that all the privileged stacks are in our
171  * data space.
172  */
173 static inline int
174 put_user_reg(struct task_struct *task, int offset, long data)
175 {
176 	struct pt_regs newregs, *regs = task_pt_regs(task);
177 	int ret = -EINVAL;
178 
179 	newregs = *regs;
180 	newregs.uregs[offset] = data;
181 
182 	if (valid_user_regs(&newregs)) {
183 		regs->uregs[offset] = data;
184 		ret = 0;
185 	}
186 
187 	return ret;
188 }
189 
/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do: ARM keeps no per-task ptrace state to tear down here. */
}
197 
/*
 * Handle hitting a breakpoint.
 * Delivers SIGTRAP/TRAP_BRKPT to @tsk with the faulting PC as the
 * signal address.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_BRKPT,
			(void __user *)instruction_pointer(regs), tsk);
}
206 
/* Undef-instruction hook callback: treat the instruction as a breakpoint. */
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;	/* handled: do not fall through to SIGILL */
}
212 
/* Matches the ARM breakpoint encoding (BREAKINST_ARM) with any condition
 * field, only when the CPU is not in Thumb state (PSR_T_BIT clear). */
static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

/* Matches the 16-bit Thumb breakpoint (BREAKINST_THUMB) in Thumb state. */
static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

/* Matches the 32-bit Thumb-2 breakpoint encoding in Thumb state. */
static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xf7f0a000,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};
236 
/* Register the ARM/Thumb/Thumb-2 breakpoint undef-instruction hooks. */
static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
246 
247 /*
248  * Read the word at offset "off" into the "struct user".  We
249  * actually access the pt_regs stored on the kernel stack.
250  */
251 static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
252 			    unsigned long __user *ret)
253 {
254 	unsigned long tmp;
255 
256 	if (off & 3)
257 		return -EIO;
258 
259 	tmp = 0;
260 	if (off == PT_TEXT_ADDR)
261 		tmp = tsk->mm->start_code;
262 	else if (off == PT_DATA_ADDR)
263 		tmp = tsk->mm->start_data;
264 	else if (off == PT_TEXT_END_ADDR)
265 		tmp = tsk->mm->end_code;
266 	else if (off < sizeof(struct pt_regs))
267 		tmp = get_user_reg(tsk, off >> 2);
268 	else if (off >= sizeof(struct user))
269 		return -EIO;
270 
271 	return put_user(tmp, ret);
272 }
273 
274 /*
275  * Write the word at offset "off" into "struct user".  We
276  * actually access the pt_regs stored on the kernel stack.
277  */
278 static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
279 			     unsigned long val)
280 {
281 	if (off & 3 || off >= sizeof(struct user))
282 		return -EIO;
283 
284 	if (off >= sizeof(struct pt_regs))
285 		return 0;
286 
287 	return put_user_reg(tsk, off >> 2, val);
288 }
289 
290 #ifdef CONFIG_IWMMXT
291 
/*
 * Get the child iWMMXt state.
 * Fails with -ENODATA if the task has never used iWMMXt; otherwise the
 * live register state is flushed to the thread struct and copied out.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}
305 
/*
 * Set the child iWMMXt state.
 * Fails with -EACCES if the task has never used iWMMXt; otherwise any
 * cached hardware state is discarded and the new state is copied in,
 * to be reloaded on the task's next iWMMXt use.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}
319 
320 #endif
321 
322 #ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 * Flushes the live Crunch register state to the thread struct, then
 * copies it out to userspace.  Returns 0 or -EFAULT.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}
334 
/*
 * Set the child Crunch state.
 * Discards any cached hardware state and copies the new state in from
 * userspace, to be reloaded on next use.  Returns 0 or -EFAULT.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
346 #endif
347 
348 #ifdef CONFIG_HAVE_HW_BREAKPOINT
349 /*
350  * Convert a virtual register number into an index for a thread_info
351  * breakpoint array. Breakpoints are identified using positive numbers
352  * whilst watchpoints are negative. The registers are laid out as pairs
353  * of (address, control), each pair mapping to a unique hw_breakpoint struct.
354  * Register 0 is reserved for describing resource information.
355  */
356 static int ptrace_hbp_num_to_idx(long num)
357 {
358 	if (num < 0)
359 		num = (ARM_MAX_BRP << 1) - num;
360 	return (num - 1) >> 1;
361 }
362 
363 /*
364  * Returns the virtual register number for the address of the
365  * breakpoint at index idx.
366  */
367 static long ptrace_hbp_idx_to_num(int idx)
368 {
369 	long mid = ARM_MAX_BRP << 1;
370 	long num = (idx << 1) + 1;
371 	return num > mid ? mid - num : num;
372 }
373 
/*
 * Handle hitting a HW-breakpoint.
 * Looks up which debug slot fired so the tracer can identify the
 * breakpoint by its virtual register number, then raises SIGTRAP with
 * the trigger address.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				     struct perf_sample_data *data,
				     struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;

	/* Find the slot this perf event occupies in the task's debug state. */
	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	/* 0 means "not found"; register 0 is the resource-info register. */
	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}
393 
/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	/* Clear the pointers only; the events still belong to the parent. */
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}
403 
404 /*
405  * Unregister breakpoints from this task and reset the pointers in
406  * the thread_struct.
407  */
408 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
409 {
410 	int i;
411 	struct thread_struct *t = &tsk->thread;
412 
413 	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
414 		if (t->debug.hbp[i]) {
415 			unregister_hw_breakpoint(t->debug.hbp[i]);
416 			t->debug.hbp[i] = NULL;
417 		}
418 	}
419 }
420 
421 static u32 ptrace_get_hbp_resource_info(void)
422 {
423 	u8 num_brps, num_wrps, debug_arch, wp_len;
424 	u32 reg = 0;
425 
426 	num_brps	= hw_breakpoint_slots(TYPE_INST);
427 	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
428 	debug_arch	= arch_get_debug_arch();
429 	wp_len		= arch_get_max_wp_len();
430 
431 	reg		|= debug_arch;
432 	reg		<<= 8;
433 	reg		|= wp_len;
434 	reg		<<= 8;
435 	reg		|= num_wrps;
436 	reg		<<= 8;
437 	reg		|= num_brps;
438 
439 	return reg;
440 }
441 
/*
 * Create a disabled user hw_breakpoint perf event of the given @type
 * for @tsk.  The event is registered with sane placeholder attributes;
 * ptrace_sethbpregs() fills in the real address/control values later.
 * Returns the event or an ERR_PTR on failure.
 */
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}
457 
/*
 * PTRACE_GETHBPREGS: read virtual debug register @num for @tsk into
 * *@data.  Register 0 is the resource-info register; odd numbers read
 * a breakpoint/watchpoint address, even numbers its control word.
 * Unprogrammed slots read as zero.
 */
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long  __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			/* Slot never programmed: report zero. */
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		/* Odd register numbers are addresses, even ones control. */
		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}
503 
/*
 * PTRACE_SETHBPREGS: write virtual debug register @num for @tsk from
 * *@data.  Positive numbers address breakpoints, negative numbers
 * watchpoints; odd numbers set the address, even numbers the control
 * word.  Creates the underlying perf event on first use of a slot.
 * Writing register 0 (resource info) is a silent no-op.
 */
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	/* Lazily create the perf event backing this slot. */
	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr	= user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		/* Reject e.g. execute type on a watchpoint register. */
		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len	= gen_len;
		attr.bp_type	= gen_type;
		attr.disabled	= !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
567 #endif
568 
569 /* regset get/set implementations */
570 
/*
 * Copy the target's saved general-purpose registers (the whole
 * struct pt_regs) out to the NT_PRSTATUS regset consumer.
 */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs,
				   0, sizeof(*regs));
}
582 
/*
 * Update the target's saved general-purpose registers from the regset.
 * The new values are staged in a local copy and validated with
 * valid_user_regs() before being committed, so a bad write never
 * leaves the task with an illegal register state.
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs = *task_pt_regs(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}
603 
/*
 * Copy the target's FPA floating-point state (struct user_fp layout)
 * out to the NT_PRFPREG regset consumer.
 */
static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &task_thread_info(target)->fpstate,
				   0, sizeof(struct user_fp));
}
613 
/*
 * Update the target's FPA floating-point state from the regset and
 * mark coprocessors 1 and 2 as used so the state is context-switched.
 */
static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	/* FPA occupies CP1/CP2; flag them in-use so fpstate is live. */
	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
		&thread->fpstate,
		0, sizeof(struct user_fp));
}
627 
628 #ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_fp is divided into three chunks:
 * 16 or 32 real VFP registers (d0-d15 or d0-31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct.  The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 *
 * The bounds-checking logic built into user_regset_copyout and friends
 * means that we can make a simple sequence of calls to map the relevant data
 * to/from the specified slice of the user regset structure.
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	/* Flush live hardware state into thread->vfpstate first. */
	vfp_sync_hwstate(thread);

	/* Chunk 1: the real VFP registers. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	/* Chunk 2: any registers the kernel doesn't have read as zero. */
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	/* Chunk 3: the FPSCR word. */
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}
682 
/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	/* Start from the current state so a partial write can't corrupt it. */
	vfp_sync_hwstate(thread);
	new_vfp = thread->vfpstate.hard;

	/* Chunk 1: the real VFP registers. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &new_vfp.fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	/* Chunk 2: skip over registers the kernel doesn't have. */
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				user_fpregs_offset + sizeof(new_vfp.fpregs),
				user_fpscr_offset);
	if (ret)
		return ret;

	/* Chunk 3: the FPSCR word. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	/* Commit only after every chunk copied in successfully. */
	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
727 #endif /* CONFIG_VFP */
728 
/* Indices into arm_regsets[] below. */
enum arm_regset {
	REGSET_GPR,	/* general-purpose registers (NT_PRSTATUS) */
	REGSET_FPR,	/* FPA floating point (NT_PRFPREG) */
#ifdef CONFIG_VFP
	REGSET_VFP,	/* VFP registers + FPSCR (NT_ARM_VFP) */
#endif
};
736 
/* Regsets exported through the user_regset core-dump/ptrace interface. */
static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};
773 
/* The single regset view exposed for all ARM tasks. */
static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};
778 
/* Every ARM task uses the same regset view; no per-task variation. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}
783 
/*
 * Dispatch ARM-specific ptrace requests for @child; anything not
 * handled here falls through to the generic ptrace_request().
 * @addr and @data are interpreted per-request (register offset/number,
 * user buffer pointer, or raw value).
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
		/* Single-word access into "struct user": addr is the offset. */
		case PTRACE_PEEKUSR:
			ret = ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = ptrace_write_user(child, addr, data);
			break;

		/* Whole-regset transfers via the user_regset machinery. */
		case PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_GPR,
						  0, sizeof(struct pt_regs),
						  datap);
			break;

		case PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_GPR,
						    0, sizeof(struct pt_regs),
						    datap);
			break;

		case PTRACE_GETFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_FPR,
						  0, sizeof(union fp_state),
						  datap);
			break;

		case PTRACE_SETFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_FPR,
						    0, sizeof(union fp_state),
						    datap);
			break;

#ifdef CONFIG_IWMMXT
		case PTRACE_GETWMMXREGS:
			ret = ptrace_getwmmxregs(child, datap);
			break;

		case PTRACE_SETWMMXREGS:
			ret = ptrace_setwmmxregs(child, datap);
			break;
#endif

		/* Report the TLS value (thread pointer). */
		case PTRACE_GET_THREAD_AREA:
			ret = put_user(task_thread_info(child)->tp_value[0],
				       datap);
			break;

		/* Let the tracer rewrite the pending syscall number. */
		case PTRACE_SET_SYSCALL:
			task_thread_info(child)->syscall = data;
			ret = 0;
			break;

#ifdef CONFIG_CRUNCH
		case PTRACE_GETCRUNCHREGS:
			ret = ptrace_getcrunchregs(child, datap);
			break;

		case PTRACE_SETCRUNCHREGS:
			ret = ptrace_setcrunchregs(child, datap);
			break;
#endif

#ifdef CONFIG_VFP
		case PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_VFP,
						  0, ARM_VFPREGS_SIZE,
						  datap);
			break;

		case PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_VFP,
						    0, ARM_VFPREGS_SIZE,
						    datap);
			break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/* For HBP requests, addr carries the virtual register number. */
		case PTRACE_GETHBPREGS:
			ret = ptrace_gethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
		case PTRACE_SETHBPREGS:
			ret = ptrace_sethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
#endif

		default:
			ret = ptrace_request(child, request, addr, data);
			break;
	}

	return ret;
}
891 
/* Direction flag passed to tracehook_report_syscall() and exposed to the
 * tracer through the (temporarily clobbered) IP register. */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
896 
/*
 * Notify the tracer of a syscall entry or exit.  The saved IP register
 * is temporarily repurposed to tell the tracer which direction this is,
 * then restored before returning.
 */
static void tracehook_report_syscall(struct pt_regs *regs,
				    enum ptrace_syscall_dir dir)
{
	unsigned long ip;

	/*
	 * IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, =1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		/* Tracer asked to abort: -1 makes the syscall fail. */
		current_thread_info()->syscall = -1;

	regs->ARM_ip = ip;
}
916 
/*
 * Syscall entry hook called from the assembly entry path.  Runs ptrace
 * reporting, seccomp, tracepoints and audit, in that order, and returns
 * the (possibly rewritten) syscall number, or -1 to skip the syscall.
 */
asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
{
	current_thread_info()->syscall = scno;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
	if (secure_computing(NULL) == -1)
		return -1;
#else
	/* XXX: remove this once OABI gets fixed */
	secure_computing_strict(current_thread_info()->syscall);
#endif

	/* Tracer or seccomp may have changed syscall. */
	scno = current_thread_info()->syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, scno);

	audit_syscall_entry(scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
			    regs->ARM_r3);

	return scno;
}
944 
/*
 * Syscall exit hook called from the assembly return path.  Runs audit,
 * the exit tracepoint and ptrace exit reporting against the final
 * register state.
 */
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	/*
	 * Audit the syscall before anything else, as a debugger may
	 * come in and change the current registers.
	 */
	audit_syscall_exit(regs);

	/*
	 * Note that we haven't updated the ->syscall field for the
	 * current thread. This isn't a problem because it will have
	 * been set on syscall entry and there hasn't been an opportunity
	 * for a PTRACE_SET_SYSCALL since then.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
965