xref: /linux/arch/sparc/kernel/sys_sparc_64.c (revision b80fa3cb)
1 // SPDX-License-Identifier: GPL-2.0
2 /* linux/arch/sparc64/kernel/sys_sparc.c
3  *
4  * This file contains various random system calls that
5  * have a non-standard calling sequence on the Linux/sparc
6  * platform.
7  */
8 
9 #include <linux/errno.h>
10 #include <linux/types.h>
11 #include <linux/sched/signal.h>
12 #include <linux/sched/mm.h>
13 #include <linux/sched/debug.h>
14 #include <linux/fs.h>
15 #include <linux/file.h>
16 #include <linux/mm.h>
17 #include <linux/sem.h>
18 #include <linux/msg.h>
19 #include <linux/shm.h>
20 #include <linux/stat.h>
21 #include <linux/mman.h>
22 #include <linux/utsname.h>
23 #include <linux/smp.h>
24 #include <linux/slab.h>
25 #include <linux/syscalls.h>
26 #include <linux/ipc.h>
27 #include <linux/personality.h>
28 #include <linux/random.h>
29 #include <linux/export.h>
30 #include <linux/context_tracking.h>
31 #include <linux/timex.h>
32 #include <linux/uaccess.h>
33 
34 #include <asm/utrap.h>
35 #include <asm/unistd.h>
36 
37 #include "entry.h"
38 #include "kernel.h"
39 #include "systbls.h"
40 
41 /* #define DEBUG_UNIMP_SYSCALL */
42 
43 SYSCALL_DEFINE0(getpagesize)
44 {
45 	return PAGE_SIZE;
46 }
47 
48 /* Does addr --> addr+len fall within 4GB of the VA-space hole or
49  * overflow past the end of the 64-bit address space?
50  */
51 static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
52 {
53 	unsigned long va_exclude_start, va_exclude_end;
54 
55 	va_exclude_start = VA_EXCLUDE_START;
56 	va_exclude_end   = VA_EXCLUDE_END;
57 
58 	if (unlikely(len >= va_exclude_start))
59 		return 1;
60 
61 	if (unlikely((addr + len) < addr))
62 		return 1;
63 
64 	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
65 		     ((addr + len) >= va_exclude_start &&
66 		      (addr + len) < va_exclude_end)))
67 		return 1;
68 
69 	return 0;
70 }
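/*
 * The three checks above reject, in order: a length too large to fit
 * below the excluded window at all, an addr + len that wraps past the
 * top of the address space, and a range whose start or end falls
 * inside [VA_EXCLUDE_START, VA_EXCLUDE_END).
 */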
71 
72 /* These functions differ from the default implementations in
73  * mm/mmap.c in two ways:
74  *
75  * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
76  *    for fixed such mappings we just validate what the user gave us.
77  * 2) For 64-bit tasks we avoid mapping anything within 4GB of
78  *    the spitfire/niagara VA-hole.
79  */
80 
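/*
 * COLOR_ALIGN() rounds the requested address up to an SHMLBA boundary
 * and then adds the file offset's sub-SHMLBA "colour", so that shared
 * mappings of the same page land on the same D-cache colour.
 * Illustrative example (assumed values: SHMLBA == 16 KB, 8 KB pages):
 * addr = 0x101000, pgoff = 3 gives base = 0x104000, off = 0x2000,
 * i.e. a colour-aligned hint of 0x106000.
 */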
81 static inline unsigned long COLOR_ALIGN(unsigned long addr,
82 					 unsigned long pgoff)
83 {
84 	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
85 	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
86 
87 	return base + off;
88 }
89 
90 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
91 {
92 	struct mm_struct *mm = current->mm;
93 	struct vm_area_struct * vma;
94 	unsigned long task_size = TASK_SIZE;
95 	int do_color_align;
96 	struct vm_unmapped_area_info info = {};
97 
98 	if (flags & MAP_FIXED) {
99 		/* We do not accept a shared mapping if it would violate
100 		 * cache aliasing constraints.
101 		 */
102 		if ((flags & MAP_SHARED) &&
103 		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
104 			return -EINVAL;
105 		return addr;
106 	}
107 
108 	if (test_thread_flag(TIF_32BIT))
109 		task_size = STACK_TOP32;
110 	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
111 		return -ENOMEM;
112 
113 	do_color_align = 0;
114 	if (filp || (flags & MAP_SHARED))
115 		do_color_align = 1;
116 
117 	if (addr) {
118 		if (do_color_align)
119 			addr = COLOR_ALIGN(addr, pgoff);
120 		else
121 			addr = PAGE_ALIGN(addr);
122 
123 		vma = find_vma(mm, addr);
124 		if (task_size - len >= addr &&
125 		    (!vma || addr + len <= vm_start_gap(vma)))
126 			return addr;
127 	}
128 
129 	info.length = len;
130 	info.low_limit = TASK_UNMAPPED_BASE;
131 	info.high_limit = min(task_size, VA_EXCLUDE_START);
132 	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
133 	info.align_offset = pgoff << PAGE_SHIFT;
134 	addr = vm_unmapped_area(&info);
135 
136 	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
137 		VM_BUG_ON(addr != -ENOMEM);
138 		info.low_limit = VA_EXCLUDE_END;
139 		info.high_limit = task_size;
140 		addr = vm_unmapped_area(&info);
141 	}
142 
143 	return addr;
144 }
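/*
 * Note on the search order above: the first vm_unmapped_area() pass is
 * limited to the region below the VA hole (high_limit capped at
 * VA_EXCLUDE_START); only if that fails, and the task's address space
 * actually extends beyond the hole, is a second pass run over the
 * region starting at VA_EXCLUDE_END.
 */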
145 
146 unsigned long
147 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
148 			  const unsigned long len, const unsigned long pgoff,
149 			  const unsigned long flags)
150 {
151 	struct vm_area_struct *vma;
152 	struct mm_struct *mm = current->mm;
153 	unsigned long task_size = STACK_TOP32;
154 	unsigned long addr = addr0;
155 	int do_color_align;
156 	struct vm_unmapped_area_info info = {};
157 
158 	/* This should only ever run for 32-bit processes.  */
159 	BUG_ON(!test_thread_flag(TIF_32BIT));
160 
161 	if (flags & MAP_FIXED) {
162 		/* We do not accept a shared mapping if it would violate
163 		 * cache aliasing constraints.
164 		 */
165 		if ((flags & MAP_SHARED) &&
166 		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
167 			return -EINVAL;
168 		return addr;
169 	}
170 
171 	if (unlikely(len > task_size))
172 		return -ENOMEM;
173 
174 	do_color_align = 0;
175 	if (filp || (flags & MAP_SHARED))
176 		do_color_align = 1;
177 
178 	/* requesting a specific address */
179 	if (addr) {
180 		if (do_color_align)
181 			addr = COLOR_ALIGN(addr, pgoff);
182 		else
183 			addr = PAGE_ALIGN(addr);
184 
185 		vma = find_vma(mm, addr);
186 		if (task_size - len >= addr &&
187 		    (!vma || addr + len <= vm_start_gap(vma)))
188 			return addr;
189 	}
190 
191 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
192 	info.length = len;
193 	info.low_limit = PAGE_SIZE;
194 	info.high_limit = mm->mmap_base;
195 	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
196 	info.align_offset = pgoff << PAGE_SHIFT;
197 	addr = vm_unmapped_area(&info);
198 
199 	/*
200 	 * A failed mmap() very likely causes application failure,
201 	 * so fall back to the bottom-up function here. This scenario
202 	 * can happen with large stack limits and large mmap()
203 	 * allocations.
204 	 */
205 	if (addr & ~PAGE_MASK) {
206 		VM_BUG_ON(addr != -ENOMEM);
207 		info.flags = 0;
208 		info.low_limit = TASK_UNMAPPED_BASE;
209 		info.high_limit = STACK_TOP32;
210 		addr = vm_unmapped_area(&info);
211 	}
212 
213 	return addr;
214 }
215 
216 /* Try to align the mapping as much as possible. */
217 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
218 {
219 	unsigned long align_goal, addr = -ENOMEM;
220 
221 	if (flags & MAP_FIXED) {
222 		/* Ok, don't mess with it. */
223 		return mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);
224 	}
225 	flags &= ~MAP_SHARED;
226 
227 	align_goal = PAGE_SIZE;
228 	if (len >= (4UL * 1024 * 1024))
229 		align_goal = (4UL * 1024 * 1024);
230 	else if (len >= (512UL * 1024))
231 		align_goal = (512UL * 1024);
232 	else if (len >= (64UL * 1024))
233 		align_goal = (64UL * 1024);
234 
235 	do {
236 		addr = mm_get_unmapped_area(current->mm, NULL, orig_addr,
237 					    len + (align_goal - PAGE_SIZE), pgoff, flags);
238 		if (!(addr & ~PAGE_MASK)) {
239 			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
240 			break;
241 		}
242 
243 		if (align_goal == (4UL * 1024 * 1024))
244 			align_goal = (512UL * 1024);
245 		else if (align_goal == (512UL * 1024))
246 			align_goal = (64UL * 1024);
247 		else
248 			align_goal = PAGE_SIZE;
249 	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
250 
251 	/* Mapping is smaller than 64K or larger areas could not
252 	 * be obtained.
253 	 */
254 	if (addr & ~PAGE_MASK)
255 		addr = mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);
256 
257 	return addr;
258 }
259 EXPORT_SYMBOL(get_fb_unmapped_area);
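/*
 * Example of the strategy above (illustrative numbers): for a 1 MB
 * request the alignment goal is 512 KB, so mm_get_unmapped_area() is
 * asked for len + (512 KB - PAGE_SIZE) bytes and the returned address
 * is rounded up to the next 512 KB boundary; the extra slack guarantees
 * the rounded-up mapping still fits inside the area that was found.
 */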
260 
261 /* Essentially the same as PowerPC.  */
262 static unsigned long mmap_rnd(void)
263 {
264 	unsigned long rnd = 0UL;
265 
266 	if (current->flags & PF_RANDOMIZE) {
267 		unsigned long val = get_random_long();
268 		if (test_thread_flag(TIF_32BIT))
269 			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
270 		else
271 			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
272 	}
273 	return rnd << PAGE_SHIFT;
274 }
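/*
 * With PF_RANDOMIZE set this produces a page-aligned random offset of
 * less than 8 MB (2^23 bytes) for 32-bit tasks and less than 1 GB
 * (2^30 bytes) for 64-bit tasks.
 */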
275 
276 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
277 {
278 	unsigned long random_factor = mmap_rnd();
279 	unsigned long gap;
280 
281 	/*
282 	 * Fall back to the standard layout if the personality
283 	 * bit is set, or if the expected stack growth is unlimited:
284 	 */
285 	gap = rlim_stack->rlim_cur;
286 	if (!test_thread_flag(TIF_32BIT) ||
287 	    (current->personality & ADDR_COMPAT_LAYOUT) ||
288 	    gap == RLIM_INFINITY ||
289 	    sysctl_legacy_va_layout) {
290 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
291 		clear_bit(MMF_TOPDOWN, &mm->flags);
292 	} else {
293 		/* We know it's 32-bit */
294 		unsigned long task_size = STACK_TOP32;
295 
296 		if (gap < 128 * 1024 * 1024)
297 			gap = 128 * 1024 * 1024;
298 		if (gap > (task_size / 6 * 5))
299 			gap = (task_size / 6 * 5);
300 
301 		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
302 		set_bit(MMF_TOPDOWN, &mm->flags);
303 	}
304 }
305 
306 /*
307  * sys_pipe() is the normal C calling standard for creating
308  * a pipe. It's not the way unix traditionally does this, though.
309  */
310 SYSCALL_DEFINE0(sparc_pipe)
311 {
312 	int fd[2];
313 	int error;
314 
315 	error = do_pipe_flags(fd, 0);
316 	if (error)
317 		goto out;
318 	current_pt_regs()->u_regs[UREG_I1] = fd[1];
319 	error = fd[0];
320 out:
321 	return error;
322 }
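/*
 * Note: both descriptors end up in registers on return: fd[0] is the
 * normal return value and fd[1] is placed in the second output
 * register via u_regs[UREG_I1] in the trap frame.
 */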
323 
324 /*
325  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
326  *
327  * This is really horribly ugly.
328  */
329 
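/*
 * The 'call' number selects the IPC family: values up to SEMTIMEDOP are
 * semaphore calls, values up to MSGCTL are message-queue calls, values
 * up to SHMCTL are shared-memory calls, and anything else is -ENOSYS.
 */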
330 SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
331 		unsigned long, third, void __user *, ptr, long, fifth)
332 {
333 	long err;
334 
335 	if (!IS_ENABLED(CONFIG_SYSVIPC))
336 		return -ENOSYS;
337 
338 	/* No need for backward compatibility. We can start fresh... */
339 	if (call <= SEMTIMEDOP) {
340 		switch (call) {
341 		case SEMOP:
342 			err = ksys_semtimedop(first, ptr,
343 					      (unsigned int)second, NULL);
344 			goto out;
345 		case SEMTIMEDOP:
346 			err = ksys_semtimedop(first, ptr, (unsigned int)second,
347 				(const struct __kernel_timespec __user *)
348 					      (unsigned long) fifth);
349 			goto out;
350 		case SEMGET:
351 			err = ksys_semget(first, (int)second, (int)third);
352 			goto out;
353 		case SEMCTL: {
354 			err = ksys_old_semctl(first, second,
355 					      (int)third | IPC_64,
356 					      (unsigned long) ptr);
357 			goto out;
358 		}
359 		default:
360 			err = -ENOSYS;
361 			goto out;
362 		}
363 	}
364 	if (call <= MSGCTL) {
365 		switch (call) {
366 		case MSGSND:
367 			err = ksys_msgsnd(first, ptr, (size_t)second,
368 					 (int)third);
369 			goto out;
370 		case MSGRCV:
371 			err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
372 					 (int)third);
373 			goto out;
374 		case MSGGET:
375 			err = ksys_msgget((key_t)first, (int)second);
376 			goto out;
377 		case MSGCTL:
378 			err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
379 			goto out;
380 		default:
381 			err = -ENOSYS;
382 			goto out;
383 		}
384 	}
385 	if (call <= SHMCTL) {
386 		switch (call) {
387 		case SHMAT: {
388 			ulong raddr;
389 			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
390 			if (!err) {
391 				if (put_user(raddr,
392 					     (ulong __user *) third))
393 					err = -EFAULT;
394 			}
395 			goto out;
396 		}
397 		case SHMDT:
398 			err = ksys_shmdt(ptr);
399 			goto out;
400 		case SHMGET:
401 			err = ksys_shmget(first, (size_t)second, (int)third);
402 			goto out;
403 		case SHMCTL:
404 			err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
405 			goto out;
406 		default:
407 			err = -ENOSYS;
408 			goto out;
409 		}
410 	} else {
411 		err = -ENOSYS;
412 	}
413 out:
414 	return err;
415 }
416 
417 SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
418 {
419 	long ret;
420 
421 	if (personality(current->personality) == PER_LINUX32 &&
422 	    personality(personality) == PER_LINUX)
423 		personality |= PER_LINUX32;
424 	ret = sys_personality(personality);
425 	if (personality(ret) == PER_LINUX32)
426 		ret &= ~PER_LINUX32;
427 
428 	return ret;
429 }
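/*
 * Note: a PER_LINUX32 task asking for PER_LINUX stays PER_LINUX32
 * internally, and the PER_LINUX32 marker is stripped from the previous
 * personality handed back, so compat tasks always see plain PER_LINUX.
 */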
430 
431 int sparc_mmap_check(unsigned long addr, unsigned long len)
432 {
433 	if (test_thread_flag(TIF_32BIT)) {
434 		if (len >= STACK_TOP32)
435 			return -EINVAL;
436 
437 		if (addr > STACK_TOP32 - len)
438 			return -EINVAL;
439 	} else {
440 		if (len >= VA_EXCLUDE_START)
441 			return -EINVAL;
442 
443 		if (invalid_64bit_range(addr, len))
444 			return -EINVAL;
445 	}
446 
447 	return 0;
448 }
449 
450 /* Linux version of mmap */
451 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
452 		unsigned long, prot, unsigned long, flags, unsigned long, fd,
453 		unsigned long, off)
454 {
455 	unsigned long retval = -EINVAL;
456 
457 	if ((off + PAGE_ALIGN(len)) < off)
458 		goto out;
459 	if (off & ~PAGE_MASK)
460 		goto out;
461 	retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
462 out:
463 	return retval;
464 }
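/*
 * Only two checks are needed before delegating to ksys_mmap_pgoff():
 * the byte offset must be page aligned, and off + PAGE_ALIGN(len) must
 * not wrap around the address space.
 */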
465 
466 SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
467 {
468 	if (invalid_64bit_range(addr, len))
469 		return -EINVAL;
470 
471 	return vm_munmap(addr, len);
472 }
473 
474 SYSCALL_DEFINE5(64_mremap, unsigned long, addr,	unsigned long, old_len,
475 		unsigned long, new_len, unsigned long, flags,
476 		unsigned long, new_addr)
477 {
478 	if (test_thread_flag(TIF_32BIT))
479 		return -EINVAL;
480 	return sys_mremap(addr, old_len, new_len, flags, new_addr);
481 }
482 
483 SYSCALL_DEFINE0(nis_syscall)
484 {
485 	static int count;
486 	struct pt_regs *regs = current_pt_regs();
487 
488 	/* Don't make the system unusable if someone gets stuck */
489 	if (count++ > 5)
490 		return -ENOSYS;
491 
492 	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
493 #ifdef DEBUG_UNIMP_SYSCALL
494 	show_regs (regs);
495 #endif
496 
497 	return -ENOSYS;
498 }
499 
500 /* #define DEBUG_SPARC_BREAKPOINT */
501 
502 asmlinkage void sparc_breakpoint(struct pt_regs *regs)
503 {
504 	enum ctx_state prev_state = exception_enter();
505 
506 	if (test_thread_flag(TIF_32BIT)) {
507 		regs->tpc &= 0xffffffff;
508 		regs->tnpc &= 0xffffffff;
509 	}
510 #ifdef DEBUG_SPARC_BREAKPOINT
511         printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
512 #endif
513 	force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc);
514 #ifdef DEBUG_SPARC_BREAKPOINT
515 	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
516 #endif
517 	exception_exit(prev_state);
518 }
519 
520 SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
521 {
522 	int nlen, err;
523 	char tmp[__NEW_UTS_LEN + 1];
524 
525 	if (len < 0)
526 		return -EINVAL;
527 
528 	down_read(&uts_sem);
529 
530 	nlen = strlen(utsname()->domainname) + 1;
531 	err = -EINVAL;
532 	if (nlen > len)
533 		goto out_unlock;
534 	memcpy(tmp, utsname()->domainname, nlen);
535 
536 	up_read(&uts_sem);
537 
538 	if (copy_to_user(name, tmp, nlen))
539 		return -EFAULT;
540 	return 0;
541 
542 out_unlock:
543 	up_read(&uts_sem);
544 	return err;
545 }
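/*
 * Note: the domain name is snapshotted into the on-stack buffer while
 * uts_sem is held, presumably so that copy_to_user(), which may fault
 * and sleep, runs only after the semaphore has been released.
 */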
546 
547 SYSCALL_DEFINE1(sparc_adjtimex, struct __kernel_timex __user *, txc_p)
548 {
549 	struct __kernel_timex txc;
550 	struct __kernel_old_timeval *tv = (void *)&txc.time;
551 	int ret;
552 
553 	/* Copy the user data space into the kernel copy
554 	 * structure. But bear in mind that the structures
555 	 * may change
556 	 */
557 	if (copy_from_user(&txc, txc_p, sizeof(txc)))
558 		return -EFAULT;
559 
560 	/*
561 	 * override for sparc64 specific timeval type: tv_usec
562 	 * is 32 bit wide instead of 64-bit in __kernel_timex
563 	 */
564 	txc.time.tv_usec = tv->tv_usec;
565 	ret = do_adjtimex(&txc);
566 	tv->tv_usec = txc.time.tv_usec;
567 
568 	return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
569 }
570 
571 SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,
572 		struct __kernel_timex __user *, txc_p)
573 {
574 	struct __kernel_timex txc;
575 	struct __kernel_old_timeval *tv = (void *)&txc.time;
576 	int ret;
577 
578 	if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) {
579 		pr_err_once("process %d (%s) attempted a POSIX timer syscall "
580 		    "while CONFIG_POSIX_TIMERS is not set\n",
581 		    current->pid, current->comm);
582 
583 		return -ENOSYS;
584 	}
585 
586 	/* Copy the user data space into the kernel copy
587 	 * structure. But bear in mind that the structures
588 	 * may change
589 	 */
590 	if (copy_from_user(&txc, txc_p, sizeof(txc)))
591 		return -EFAULT;
592 
593 	/*
594 	 * override for sparc64 specific timeval type: tv_usec
595 	 * is 32 bit wide instead of 64-bit in __kernel_timex
596 	 */
597 	txc.time.tv_usec = tv->tv_usec;
598 	ret = do_clock_adjtime(which_clock, &txc);
599 	tv->tv_usec = txc.time.tv_usec;
600 
601 	return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
602 }
603 
604 SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
605 		utrap_handler_t, new_p, utrap_handler_t, new_d,
606 		utrap_handler_t __user *, old_p,
607 		utrap_handler_t __user *, old_d)
608 {
609 	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
610 		return -EINVAL;
611 	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
612 		if (old_p) {
613 			if (!current_thread_info()->utraps) {
614 				if (put_user(NULL, old_p))
615 					return -EFAULT;
616 			} else {
617 				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
618 					return -EFAULT;
619 			}
620 		}
621 		if (old_d) {
622 			if (put_user(NULL, old_d))
623 				return -EFAULT;
624 		}
625 		return 0;
626 	}
627 	if (!current_thread_info()->utraps) {
628 		current_thread_info()->utraps =
629 			kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long),
630 				GFP_KERNEL);
631 		if (!current_thread_info()->utraps)
632 			return -ENOMEM;
633 		current_thread_info()->utraps[0] = 1;
634 	} else {
635 		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
636 		    current_thread_info()->utraps[0] > 1) {
637 			unsigned long *p = current_thread_info()->utraps;
638 
639 			current_thread_info()->utraps =
640 				kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1,
641 					      sizeof(long),
642 					      GFP_KERNEL);
643 			if (!current_thread_info()->utraps) {
644 				current_thread_info()->utraps = p;
645 				return -ENOMEM;
646 			}
647 			p[0]--;
648 			current_thread_info()->utraps[0] = 1;
649 			memcpy(current_thread_info()->utraps+1, p+1,
650 			       UT_TRAP_INSTRUCTION_31*sizeof(long));
651 		}
652 	}
653 	if (old_p) {
654 		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
655 			return -EFAULT;
656 	}
657 	if (old_d) {
658 		if (put_user(NULL, old_d))
659 			return -EFAULT;
660 	}
661 	current_thread_info()->utraps[type] = (long)new_p;
662 
663 	return 0;
664 }
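/*
 * Note on the code above: utraps[0] appears to serve as a reference
 * count for the table.  It starts at 1 when freshly allocated, and a
 * thread installing a different handler into a shared table
 * (utraps[0] > 1) first copies the table so the change stays private.
 */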
665 
666 SYSCALL_DEFINE1(memory_ordering, unsigned long, model)
667 {
668 	struct pt_regs *regs = current_pt_regs();
669 	if (model >= 3)
670 		return -EINVAL;
671 	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
672 	return 0;
673 }
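/*
 * 'model' is written into the memory-model field of TSTATE (bits
 * 14-15, i.e. PSTATE.MM): on SPARC V9 the defined models are TSO (0),
 * PSO (1) and RMO (2), which is why values >= 3 are rejected.
 */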
674 
675 SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
676 		struct sigaction __user *, oact, void __user *, restorer,
677 		size_t, sigsetsize)
678 {
679 	struct k_sigaction new_ka, old_ka;
680 	int ret;
681 
682 	/* XXX: Don't preclude handling different sized sigset_t's.  */
683 	if (sigsetsize != sizeof(sigset_t))
684 		return -EINVAL;
685 
686 	if (act) {
687 		new_ka.ka_restorer = restorer;
688 		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
689 			return -EFAULT;
690 	}
691 
692 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
693 
694 	if (!ret && oact) {
695 		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
696 			return -EFAULT;
697 	}
698 
699 	return ret;
700 }
701 
702 SYSCALL_DEFINE0(kern_features)
703 {
704 	return KERN_FEATURE_MIXED_MODE_STACK;
705 }
706