xref: /linux/arch/s390/mm/fault.c (revision 44f57d78)
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

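/*
 * __FAIL_ADDR_MASK extracts the page-aligned failing address from the
 * translation exception identification (TEID) in int_parm_long. The
 * other two constants belong to the pfault code below: the mask for
 * the external interrupt subcode and the reserved field of the pfault
 * parameter block.
 */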
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

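/*
 * Architecture-private fault codes, handed from do_exception() to
 * do_fault_error(). They are chosen above the range of the generic
 * VM_FAULT_* codes so that they cannot clash with values returned by
 * handle_mm_fault().
 */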
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

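/*
 * The fault type classifies which address space control element (ASCE)
 * was in use when the exception happened and thereby which set of
 * mappings - user, vdso, guest or kernel - has to be searched.
 */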
enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

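/*
 * When test_facility(75) reports the access-exception fetch/store
 * indication facility, TEID bits 52-53 (mask 0xc00) tell whether the
 * failing access was a fetch or a store; do_exception() compares them
 * against 0x400 to decide on FAULT_FLAG_WRITE.
 */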
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

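/*
 * Print the page table walk for the given address. The switch enters
 * at the topmost translation level described by the ASCE and falls
 * through one level at a time; invalid and large entries end the walk
 * early.
 */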
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK),
			current);
}

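/*
 * Look up an exception table fixup for a faulting kernel address,
 * checking the separate DMA exception table before the regular kernel
 * and module tables.
 */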
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault?  */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK),
			current);
}

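/*
 * A fault while fetching one of the two signal return trampolines is
 * turned back into the system call it encodes: 0x0a77 is "svc 119"
 * (sigreturn), 0x0aad is "svc 173" (rt_sigreturn). This keeps signal
 * return working when the stack is mapped without execute permission;
 * see the VM_EXEC check in do_fault_error().
 */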
static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		/* svc 119: sigreturn */
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		/* svc 173: rt_sigreturn */
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
					vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
		/* fallthrough */
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		/* fallthrough */
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address and
 * the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	/* The TEID store indication bits flag a write access. */
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/*
				 * FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released.
				 */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exceptions to this rule are aborted transactions; for
	 * these the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
					(regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

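/*
 * Parameter block for the diagnose 0x258 calls below. refdiagc holds
 * the diagnose code itself and reffcode selects the function: 0
 * establishes pfault handshaking (pfault_init()), 1 cancels it
 * (pfault_fini()). refgaddr points at the lowcore LPP word whose
 * contents serve as the token, i.e. the pid that pfault_interrupt()
 * extracts with LPP_PID_MASK.
 */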
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	/* If the diagnose faults, the fixup at 1: sets rc to 8. */
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

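/*
 * Tasks that went to sleep after an initial pfault interrupt are kept
 * on pfault_list, linked through thread.list, so that the completion
 * interrupt, which may arrive on any cpu, and cpu hot unplug can find
 * and wake them. pfault_lock protects the list and the per-task
 * pfault_wait state.
 */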
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080	/* completion bit in the interrupt subcode */

/*
 * The mechanism of our pfault code: if Linux is running as a guest and a
 * user space process accesses a page that the host has paged out, we get a
 * pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 *
 * The possible orderings are tracked in the per-task pfault_wait field:
 *   initial then completion interrupt:  0 -> 1 (task sleeps) -> 0 (woken up)
 *   completion then initial interrupt:  0 -> -1 -> 0 (task never sleeps)
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */