xref: /linux/arch/s390/mm/pfault.c (revision 46a923fd)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 1999, 2023
 */

#include <linux/cpuhotplug.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/asm-extable.h>
#include <asm/pfault.h>
#include <asm/diag.h>

#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000UL

/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
early_param("nopfault", nopfault);

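/*
 * Parameter block for DIAG 0x258 (pseudo page fault handshaking).
 * Rough field meanings, inferred from the two users below: refdiagc
 * holds the diagnose code (0x258), reffcode the function code
 * (0 = establish handshaking, 1 = cancel), refdwlen the block length
 * in doublewords, refversn the interface version, refgaddr the guest
 * address whose contents serve as the pfault token, and
 * refselmk/refcmpmk a select and compare mask applied to that token.
 */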
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
};

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1UL << 48,
	.refcmpmk = 1UL << 48,
	.reserved = __PF_RES_FIELD
};

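/*
 * Establish pfault handshaking with the host. rc is preset to
 * -EOPNOTSUPP; the diagnose overwrites it with its return code, while
 * an exception from an unavailable diagnose is caught by the exception
 * table entry and leaves the default value in place.
 */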
int __pfault_init(void)
{
	int rc = -EOPNOTSUPP;

	if (pfault_disable)
		return rc;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%[refbk],%[rc],0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b, 0b)
		: [rc] "+d" (rc)
		: [refbk] "a" (&pfault_init_refbk), "m" (pfault_init_refbk)
		: "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

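/*
 * Cancel pfault handshaking with the host (function code 1). Any return
 * code from the diagnose is ignored here; an exception from an
 * unavailable diagnose is caught by the exception table entry.
 */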
void __pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%[refbk],0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b, 0b)
		:
		: [refbk] "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk)
		: "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

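/* Completion-signal bit in the external interrupt subcode. */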
#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest and a
 * user space process accesses a page that the host has paged out, we get
 * a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later want to return to user space we
 * recognize the need_resched flag and then call schedule(). It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt
 * (-> the host signals that a page of a process has been paged in and the
 * process can continue to run). This interrupt can arrive on any cpu and,
 * since we have virtual cpus, may actually arrive before the interrupt that
 * signals that a page is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/*
			 * Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults.
			 */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/*
			 * Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts.
			 */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/*
			 * Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit.
			 */
			tsk->thread.pfault_wait = 0;
		} else {
			/*
			 * Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached.
			 */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/*
			 * Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block.
			 */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

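/*
 * CPU hotplug teardown callback (registered in pfault_irq_init() below):
 * wake up every task still waiting for a pfault completion interrupt,
 * presumably because a completion interrupt pending on the outgoing cpu
 * could otherwise be lost and leave the task sleeping forever.
 */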
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);