xref: /linux/arch/powerpc/kvm/book3s_xics.c (revision 1fd02f66)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2bc5ad3f3SBenjamin Herrenschmidt /*
3bc5ad3f3SBenjamin Herrenschmidt  * Copyright 2012 Michael Ellerman, IBM Corporation.
4bc5ad3f3SBenjamin Herrenschmidt  * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
5bc5ad3f3SBenjamin Herrenschmidt  */
6bc5ad3f3SBenjamin Herrenschmidt 
7bc5ad3f3SBenjamin Herrenschmidt #include <linux/kernel.h>
8bc5ad3f3SBenjamin Herrenschmidt #include <linux/kvm_host.h>
9bc5ad3f3SBenjamin Herrenschmidt #include <linux/err.h>
10bc5ad3f3SBenjamin Herrenschmidt #include <linux/gfp.h>
115975a2e0SPaul Mackerras #include <linux/anon_inodes.h>
12433c5c20SMichael Ellerman #include <linux/spinlock.h>
13dbf77fedSAneesh Kumar K.V #include <linux/debugfs.h>
147c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
15dbf77fedSAneesh Kumar K.V 
16bc5ad3f3SBenjamin Herrenschmidt #include <asm/kvm_book3s.h>
17bc5ad3f3SBenjamin Herrenschmidt #include <asm/kvm_ppc.h>
18bc5ad3f3SBenjamin Herrenschmidt #include <asm/hvcall.h>
19bc5ad3f3SBenjamin Herrenschmidt #include <asm/xics.h>
207bfa9ad5SPaul Mackerras #include <asm/time.h>
21bc5ad3f3SBenjamin Herrenschmidt 
22bc5ad3f3SBenjamin Herrenschmidt #include <linux/seq_file.h>
23bc5ad3f3SBenjamin Herrenschmidt 
24bc5ad3f3SBenjamin Herrenschmidt #include "book3s_xics.h"
25bc5ad3f3SBenjamin Herrenschmidt 
26bc5ad3f3SBenjamin Herrenschmidt #if 1
27bc5ad3f3SBenjamin Herrenschmidt #define XICS_DBG(fmt...) do { } while (0)
28bc5ad3f3SBenjamin Herrenschmidt #else
29bc5ad3f3SBenjamin Herrenschmidt #define XICS_DBG(fmt...) trace_printk(fmt)
30bc5ad3f3SBenjamin Herrenschmidt #endif
31bc5ad3f3SBenjamin Herrenschmidt 
32e7d26f28SBenjamin Herrenschmidt #define ENABLE_REALMODE	true
33e7d26f28SBenjamin Herrenschmidt #define DEBUG_REALMODE	false
34e7d26f28SBenjamin Herrenschmidt 
35bc5ad3f3SBenjamin Herrenschmidt /*
36bc5ad3f3SBenjamin Herrenschmidt  * LOCKING
37bc5ad3f3SBenjamin Herrenschmidt  * =======
38bc5ad3f3SBenjamin Herrenschmidt  *
3934cb7954SSuresh Warrier  * Each ICS has a spin lock protecting the information about the IRQ
404e33d1f0SGreg Kurz  * sources and avoiding simultaneous deliveries of the same interrupt.
41bc5ad3f3SBenjamin Herrenschmidt  *
42bc5ad3f3SBenjamin Herrenschmidt  * ICP operations are done via a single compare & swap transaction
43bc5ad3f3SBenjamin Herrenschmidt  * (most ICP state fits in the union kvmppc_icp_state)
44bc5ad3f3SBenjamin Herrenschmidt  */
45bc5ad3f3SBenjamin Herrenschmidt 
46bc5ad3f3SBenjamin Herrenschmidt /*
47bc5ad3f3SBenjamin Herrenschmidt  * TODO
48bc5ad3f3SBenjamin Herrenschmidt  * ====
49bc5ad3f3SBenjamin Herrenschmidt  *
50bc5ad3f3SBenjamin Herrenschmidt  * - To speed up resends, keep a bitmap of "resend" set bits in the
51bc5ad3f3SBenjamin Herrenschmidt  *   ICS
52bc5ad3f3SBenjamin Herrenschmidt  *
53bc5ad3f3SBenjamin Herrenschmidt  * - Speed up server# -> ICP lookup (array ? hash table ?)
54bc5ad3f3SBenjamin Herrenschmidt  *
55bc5ad3f3SBenjamin Herrenschmidt  * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
56bc5ad3f3SBenjamin Herrenschmidt  *   locks array to improve scalability
57bc5ad3f3SBenjamin Herrenschmidt  */
58bc5ad3f3SBenjamin Herrenschmidt 
59bc5ad3f3SBenjamin Herrenschmidt /* -- ICS routines -- */
60bc5ad3f3SBenjamin Herrenschmidt 
61bc5ad3f3SBenjamin Herrenschmidt static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
6221acd0e4SLi Zhong 			    u32 new_irq, bool check_resend);
63bc5ad3f3SBenjamin Herrenschmidt 
6425a2150bSPaul Mackerras /*
6525a2150bSPaul Mackerras  * Return value ideally indicates how the interrupt was handled, but no
6625a2150bSPaul Mackerras  * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
6725a2150bSPaul Mackerras  * so just return 0.
6825a2150bSPaul Mackerras  */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	/* Map the global irq number to its owning ICS and source index */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	/* Normalize the various KVM_INTERRUPT_* encodings down to 0/1 */
	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
	/*
	 * Take other values the same as 1, consistent with original code.
	 * maybe WARN here?
	 */

	if (!state->lsi && level == 0) /* noop for MSI */
		return 0;

	/*
	 * Lockless update of the P/Q state bits: sample pq_state, compute
	 * the new value, and retry until the cmpxchg lands on the value we
	 * sampled.  LSIs track the line level (P set while asserted); MSIs
	 * shift P into Q so a second edge is queued rather than lost.
	 */
	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					/* Setting already set LSI ... */
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0, this is the only case where we present */
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}
124bc5ad3f3SBenjamin Herrenschmidt 
ics_check_resend(struct kvmppc_xics * xics,struct kvmppc_ics * ics,struct kvmppc_icp * icp)125bc5ad3f3SBenjamin Herrenschmidt static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
126bc5ad3f3SBenjamin Herrenschmidt 			     struct kvmppc_icp *icp)
127bc5ad3f3SBenjamin Herrenschmidt {
128bc5ad3f3SBenjamin Herrenschmidt 	int i;
129bc5ad3f3SBenjamin Herrenschmidt 
130bc5ad3f3SBenjamin Herrenschmidt 	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
131bc5ad3f3SBenjamin Herrenschmidt 		struct ics_irq_state *state = &ics->irq_state[i];
13221acd0e4SLi Zhong 		if (state->resend) {
133bc5ad3f3SBenjamin Herrenschmidt 			XICS_DBG("resend %#x prio %#x\n", state->number,
134bc5ad3f3SBenjamin Herrenschmidt 				      state->priority);
13521acd0e4SLi Zhong 			icp_deliver_irq(xics, icp, state->number, true);
136bc5ad3f3SBenjamin Herrenschmidt 		}
13721acd0e4SLi Zhong 	}
138bc5ad3f3SBenjamin Herrenschmidt }
139bc5ad3f3SBenjamin Herrenschmidt 
write_xive(struct kvmppc_xics * xics,struct kvmppc_ics * ics,struct ics_irq_state * state,u32 server,u32 priority,u32 saved_priority)140d19bd862SPaul Mackerras static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
141d19bd862SPaul Mackerras 		       struct ics_irq_state *state,
142d19bd862SPaul Mackerras 		       u32 server, u32 priority, u32 saved_priority)
143d19bd862SPaul Mackerras {
144d19bd862SPaul Mackerras 	bool deliver;
14534cb7954SSuresh Warrier 	unsigned long flags;
146d19bd862SPaul Mackerras 
14734cb7954SSuresh Warrier 	local_irq_save(flags);
14834cb7954SSuresh Warrier 	arch_spin_lock(&ics->lock);
149d19bd862SPaul Mackerras 
150d19bd862SPaul Mackerras 	state->server = server;
151d19bd862SPaul Mackerras 	state->priority = priority;
152d19bd862SPaul Mackerras 	state->saved_priority = saved_priority;
153d19bd862SPaul Mackerras 	deliver = false;
154d19bd862SPaul Mackerras 	if ((state->masked_pending || state->resend) && priority != MASKED) {
155d19bd862SPaul Mackerras 		state->masked_pending = 0;
156bf5a71d5SLi Zhong 		state->resend = 0;
157d19bd862SPaul Mackerras 		deliver = true;
158d19bd862SPaul Mackerras 	}
159d19bd862SPaul Mackerras 
16034cb7954SSuresh Warrier 	arch_spin_unlock(&ics->lock);
16134cb7954SSuresh Warrier 	local_irq_restore(flags);
162d19bd862SPaul Mackerras 
163d19bd862SPaul Mackerras 	return deliver;
164d19bd862SPaul Mackerras }
165d19bd862SPaul Mackerras 
/*
 * Set the "XIVE" (server + priority) of an interrupt source, delivering
 * it immediately if the change made a pending interrupt deliverable.
 */
int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	struct kvmppc_icp *icp;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	icp = kvmppc_xics_find_server(kvm, server);
	if (!ics || !icp)
		return -EINVAL;

	state = &ics->irq_state[src];

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}
195bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Read back the server and priority of an interrupt source.  Taken
 * under the ICS lock so the pair is consistent.
 */
int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	unsigned long irq_flags;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;

	state = &ics->irq_state[src];

	local_irq_save(irq_flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(irq_flags);

	return 0;
}
221bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Re-enable an interrupt source by restoring its saved priority,
 * delivering it if it became pending while it was masked.
 */
int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct ics_irq_state *src_state;
	struct kvmppc_ics *ics;
	struct kvmppc_icp *icp;
	u16 src_idx;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src_idx);
	if (!ics)
		return -EINVAL;

	src_state = &ics->irq_state[src_idx];

	icp = kvmppc_xics_find_server(kvm, src_state->server);
	if (!icp)
		return -EINVAL;

	/* Restoring saved_priority as both current and saved un-masks it */
	if (write_xive(xics, ics, src_state, src_state->server,
		       src_state->saved_priority, src_state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}
248d19bd862SPaul Mackerras 
/*
 * Disable an interrupt source: set its priority to MASKED while
 * remembering the current priority so int_on can restore it.
 */
int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct ics_irq_state *src_state;
	struct kvmppc_ics *ics;
	u16 src_idx;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src_idx);
	if (!ics)
		return -EINVAL;

	src_state = &ics->irq_state[src_idx];
	write_xive(xics, ics, src_state, src_state->server, MASKED,
		   src_state->priority);

	return 0;
}
268d19bd862SPaul Mackerras 
269bc5ad3f3SBenjamin Herrenschmidt /* -- ICP routines, including hcalls -- */
270bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Attempt to switch the ICP atomically from @old to @new with a single
 * cmpxchg64 (the whole ICP state fits in one union), then raise the
 * external-interrupt output if the new state calls for it.  Returns
 * false when the ICP state changed under us; the caller must re-sample
 * and retry the whole transaction.
 */
static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_XPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL);
		/* Updating another vcpu's ICP: kick it so it notices */
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}
317bc5ad3f3SBenjamin Herrenschmidt 
icp_check_resend(struct kvmppc_xics * xics,struct kvmppc_icp * icp)318bc5ad3f3SBenjamin Herrenschmidt static void icp_check_resend(struct kvmppc_xics *xics,
319bc5ad3f3SBenjamin Herrenschmidt 			     struct kvmppc_icp *icp)
320bc5ad3f3SBenjamin Herrenschmidt {
321bc5ad3f3SBenjamin Herrenschmidt 	u32 icsid;
322bc5ad3f3SBenjamin Herrenschmidt 
323bc5ad3f3SBenjamin Herrenschmidt 	/* Order this load with the test for need_resend in the caller */
324bc5ad3f3SBenjamin Herrenschmidt 	smp_rmb();
325bc5ad3f3SBenjamin Herrenschmidt 	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
326bc5ad3f3SBenjamin Herrenschmidt 		struct kvmppc_ics *ics = xics->ics[icsid];
327bc5ad3f3SBenjamin Herrenschmidt 
328bc5ad3f3SBenjamin Herrenschmidt 		if (!test_and_clear_bit(icsid, icp->resend_map))
329bc5ad3f3SBenjamin Herrenschmidt 			continue;
330bc5ad3f3SBenjamin Herrenschmidt 		if (!ics)
331bc5ad3f3SBenjamin Herrenschmidt 			continue;
332bc5ad3f3SBenjamin Herrenschmidt 		ics_check_resend(xics, ics, icp);
333bc5ad3f3SBenjamin Herrenschmidt 	}
334bc5ad3f3SBenjamin Herrenschmidt }
335bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Try to make @irq the ICP's pending interrupt at @priority using the
 * lockless compare-and-swap transaction.  On success returns true and
 * stores any displaced interrupt source number in *reject (0 if none);
 * on failure returns false having set need_resend in the ICP so a later
 * CPPR change triggers a retry.
 */
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			/* A previously pending, less favored irq is displaced */
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}
376bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Deliver (or re-deliver) @new_irq to its target ICP, resolving the
 * target server from the source state when @icp is NULL or stale.
 * When @check_resend is true, only proceed if the source still has its
 * resend flag set (used by the resend scan to avoid duplicates).
 * Handles rejection of a less favored pending interrupt by looping back
 * and delivering the rejected one in its place.
 */
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* Resend path: someone else may have delivered it already */
	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = false;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		state->resend = 1;

		/*
		 * Make sure when checking resend, we don't miss the resend
		 * if resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = false;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}
512bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Lower the ICP's CPPR to @new_cppr in a single atomic transaction,
 * promoting a more-favored MFRR to a pending IPI where appropriate.
 * Shared by H_CPPR (priority lowering) and the CPPR-update half of
 * H_EOI.  Runs lockless: the update is retried via icp_try_update()
 * until the compare-and-swap on icp->state succeeds.
 */
static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}
585bc5ad3f3SBenjamin Herrenschmidt 
/*
 * H_XIRR hypercall: accept the highest-priority pending interrupt.
 *
 * Returns the 32-bit XIRR value — the pre-accept CPPR in the top byte
 * and the XISR (interrupt source, 0 if none pending) in the low 24
 * bits.  On accept, CPPR is raised to the accepted interrupt's
 * priority and XISR is cleared, all in one atomic transaction.
 */
static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}
618bc5ad3f3SBenjamin Herrenschmidt 
/*
 * H_IPI hypercall: set the MFRR of the target @server's ICP.
 *
 * If the new MFRR is more favored than the target's CPPR, an IPI
 * becomes pending there (possibly displacing — "rejecting" — a
 * less-favored pending interrupt, which is then redelivered).  If the
 * MFRR is made less favored, a resend check is triggered.  Returns
 * H_PARAMETER when @server does not exist, H_SUCCESS otherwise.
 */
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	/* "local" selects a cheaper update path in icp_try_update() */
	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject.  If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}
703bc5ad3f3SBenjamin Herrenschmidt 
kvmppc_h_ipoll(struct kvm_vcpu * vcpu,unsigned long server)7048e44ddc3SPaul Mackerras static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
7058e44ddc3SPaul Mackerras {
7068e44ddc3SPaul Mackerras 	union kvmppc_icp_state state;
7078e44ddc3SPaul Mackerras 	struct kvmppc_icp *icp;
7088e44ddc3SPaul Mackerras 
7098e44ddc3SPaul Mackerras 	icp = vcpu->arch.icp;
7108e44ddc3SPaul Mackerras 	if (icp->server_num != server) {
7118e44ddc3SPaul Mackerras 		icp = kvmppc_xics_find_server(vcpu->kvm, server);
7128e44ddc3SPaul Mackerras 		if (!icp)
7138e44ddc3SPaul Mackerras 			return H_PARAMETER;
7148e44ddc3SPaul Mackerras 	}
7155ee07612SChristian Borntraeger 	state = READ_ONCE(icp->state);
7168e44ddc3SPaul Mackerras 	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
7178e44ddc3SPaul Mackerras 	kvmppc_set_gpr(vcpu, 5, state.mfrr);
7188e44ddc3SPaul Mackerras 	return H_SUCCESS;
7198e44ddc3SPaul Mackerras }
7208e44ddc3SPaul Mackerras 
/*
 * H_CPPR hypercall: set this vcpu's CPPR to @cppr.
 *
 * Numerically larger CPPR = less favored = lowering priority, which is
 * delegated to icp_down_cppr().  Raising priority (Up_CPPR) may reject
 * the currently pending interrupt, which is then redelivered via
 * icp_deliver_irq().
 */
static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		/* A pending irq no more favored than the new CPPR is rejected */
		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}
776bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Perform the ICS side of an EOI for source @irq: update the source's
 * P/Q state and redeliver if the interrupt is still presented.
 * Returns H_PARAMETER if @irq maps to no known ICS, H_SUCCESS
 * otherwise.
 */
static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	/*
	 * ICS EOI handling: For LSI, if P bit is still set, we need to
	 * resend it.
	 *
	 * For MSI, we move Q bit into P (and clear Q). If it is set,
	 * resend it.
	 */

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ios_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	/* MSI shift (Q -> P) must be atomic vs. concurrent deliveries */
	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Still presented after the EOI: deliver it again */
	if (pq_new & PQ_PRESENTED)
		icp_deliver_irq(xics, icp, irq, false);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}
81617d48610SLi Zhong 
/*
 * H_EOI hypercall: end-of-interrupt for the source encoded in @xirr.
 *
 * Restores the CPPR from the top byte of @xirr (the Down_CPPR half)
 * and then, unless the source is the IPI (which has no ICS-side EOI),
 * performs the ICS EOI via ics_eoi().
 */
static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	return ics_eoi(vcpu, irq);
}
847bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Complete work that the real-mode XICS handlers deferred to virtual
 * mode.  rm_action is a bitmask of pending actions recorded in real
 * mode; each is executed here and its statistics counter bumped, then
 * the mask is cleared.  Always returns H_SUCCESS.
 */
int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);
874e7d26f28SBenjamin Herrenschmidt 
/*
 * Top-level virtual-mode dispatcher for XICS hypercalls.
 *
 * H_XIRR_X and H_IPOLL are handled here unconditionally (no real-mode
 * implementation exists for them).  For the rest, if the call already
 * ran in real mode (HV guests with real_mode enabled), only the
 * deferred completion work remains.  Unrecognized @req values fall
 * through the switch and return H_SUCCESS unchanged.
 */
int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
921bc5ad3f3SBenjamin Herrenschmidt 
922bc5ad3f3SBenjamin Herrenschmidt 
923bc5ad3f3SBenjamin Herrenschmidt /* -- Initialisation code etc. -- */
924bc5ad3f3SBenjamin Herrenschmidt 
xics_debugfs_irqmap(struct seq_file * m,struct kvmppc_passthru_irqmap * pimap)925af893c7dSSuresh Warrier static void xics_debugfs_irqmap(struct seq_file *m,
926af893c7dSSuresh Warrier 				struct kvmppc_passthru_irqmap *pimap)
927af893c7dSSuresh Warrier {
928af893c7dSSuresh Warrier 	int i;
929af893c7dSSuresh Warrier 
930af893c7dSSuresh Warrier 	if (!pimap)
931af893c7dSSuresh Warrier 		return;
932af893c7dSSuresh Warrier 	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
933af893c7dSSuresh Warrier 				pimap->n_mapped);
934af893c7dSSuresh Warrier 	for (i = 0; i < pimap->n_mapped; i++)  {
935af893c7dSSuresh Warrier 		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
936af893c7dSSuresh Warrier 			pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
937af893c7dSSuresh Warrier 	}
938af893c7dSSuresh Warrier }
939af893c7dSSuresh Warrier 
/*
 * debugfs show routine: dump per-vcpu ICP state (with aggregated
 * real-mode statistics) followed by the per-source state of every
 * ICS.  ICP state is sampled locklessly via READ_ONCE; each ICS is
 * walked under its arch spinlock with local interrupts disabled.
 */
static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid;
	unsigned long flags, i;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		/* Lockless snapshot of the whole ICP word */
		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
			t_rm_kick_vcpu, t_rm_check_resend,
			t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
			t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		/* ICS irq_state is protected by the arch spinlock, irqs off */
		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->pq_state,
				   irq->resend, irq->masked_pending);

		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}
1014bc5ad3f3SBenjamin Herrenschmidt 
/* Generates xics_debug_open() and xics_debug_fops around xics_debug_show(). */
DEFINE_SHOW_ATTRIBUTE(xics_debug);
1016bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Create the read-only "xics" debugfs file under the VM's debugfs
 * directory, backed by xics_debug_fops.
 */
static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	xics->dentry = debugfs_create_file("xics", 0444, xics->kvm->debugfs_dentry,
					   xics, &xics_debug_fops);

	pr_debug("%s: created\n", __func__);
}
1024bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Create (or find) the ICS covering @irq's source range.
 *
 * Serialized by kvm->lock; losing a race simply returns the ICS the
 * winner installed.  Returns NULL if allocation fails.  All sources
 * start fully MASKED.
 */
static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	/* Order irq_state initialization before publishing the ICS pointer */
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}
1061bc5ad3f3SBenjamin Herrenschmidt 
kvmppc_xics_create_icp(struct kvm_vcpu * vcpu,unsigned long server_num)1062936774cdSBenjamin Herrenschmidt static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
1063bc5ad3f3SBenjamin Herrenschmidt {
1064bc5ad3f3SBenjamin Herrenschmidt 	struct kvmppc_icp *icp;
1065bc5ad3f3SBenjamin Herrenschmidt 
1066bc5ad3f3SBenjamin Herrenschmidt 	if (!vcpu->kvm->arch.xics)
1067bc5ad3f3SBenjamin Herrenschmidt 		return -ENODEV;
1068bc5ad3f3SBenjamin Herrenschmidt 
1069bc5ad3f3SBenjamin Herrenschmidt 	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
1070bc5ad3f3SBenjamin Herrenschmidt 		return -EEXIST;
1071bc5ad3f3SBenjamin Herrenschmidt 
1072bc5ad3f3SBenjamin Herrenschmidt 	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
1073bc5ad3f3SBenjamin Herrenschmidt 	if (!icp)
1074bc5ad3f3SBenjamin Herrenschmidt 		return -ENOMEM;
1075bc5ad3f3SBenjamin Herrenschmidt 
1076bc5ad3f3SBenjamin Herrenschmidt 	icp->vcpu = vcpu;
1077bc5ad3f3SBenjamin Herrenschmidt 	icp->server_num = server_num;
1078bc5ad3f3SBenjamin Herrenschmidt 	icp->state.mfrr = MASKED;
1079bc5ad3f3SBenjamin Herrenschmidt 	icp->state.pending_pri = MASKED;
1080bc5ad3f3SBenjamin Herrenschmidt 	vcpu->arch.icp = icp;
1081bc5ad3f3SBenjamin Herrenschmidt 
1082bc5ad3f3SBenjamin Herrenschmidt 	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);
1083bc5ad3f3SBenjamin Herrenschmidt 
1084bc5ad3f3SBenjamin Herrenschmidt 	return 0;
1085bc5ad3f3SBenjamin Herrenschmidt }
1086bc5ad3f3SBenjamin Herrenschmidt 
kvmppc_xics_get_icp(struct kvm_vcpu * vcpu)10878b78645cSPaul Mackerras u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
10888b78645cSPaul Mackerras {
10898b78645cSPaul Mackerras 	struct kvmppc_icp *icp = vcpu->arch.icp;
10908b78645cSPaul Mackerras 	union kvmppc_icp_state state;
10918b78645cSPaul Mackerras 
10928b78645cSPaul Mackerras 	if (!icp)
10938b78645cSPaul Mackerras 		return 0;
10948b78645cSPaul Mackerras 	state = icp->state;
10958b78645cSPaul Mackerras 	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
10968b78645cSPaul Mackerras 		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
10978b78645cSPaul Mackerras 		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
10988b78645cSPaul Mackerras 		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
10998b78645cSPaul Mackerras }
11008b78645cSPaul Mackerras 
/*
 * One-reg set: load a packed ICP state image (the reverse of
 * kvmppc_xics_get_icp() above) into the vcpu's ICP, e.g. when
 * userspace restores migration state.
 *
 * Returns 0 on success, -ENOENT if the vcpu has no ICP or the VM has
 * no XICS, -EINVAL if the packed state is internally inconsistent.
 */
int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	/* Unpack the fields from the register image. */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		/* Nothing pending: pending_pri must be "none" (0xff). */
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		/* Pending IPI: priority is the MFRR and must beat CPPR. */
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		/*
		 * Pending external interrupt: must be more favoured than
		 * both MFRR and CPPR, and its source must exist.
		 */
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected.  We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter).  We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
		/* Retry if the ICP state changed under us. */
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}
11758b78645cSPaul Mackerras 
/*
 * KVM_DEV_XICS_GRP_SOURCES attribute read: copy the state of one
 * interrupt source (server, priority, mask and pending/PQ flags)
 * to the userspace u64 at @addr.
 *
 * Returns 0 on success, -ENOENT if the source does not exist,
 * -EFAULT if the copy-out fails.
 */
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	/* Snapshot the source state under the ICS lock, IRQs off. */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			/* Report the mask bit plus the pre-mask priority. */
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			/* For LSIs, "pending" is tracked via the P bit. */
			if (irqp->pq_state & PQ_PRESENTED)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;

		if (irqp->pq_state & PQ_PRESENTED)
			val |= KVM_XICS_PRESENTED;

		if (irqp->pq_state & PQ_QUEUED)
			val |= KVM_XICS_QUEUED;

		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	/* Copy out only after dropping the lock. */
	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}
12255975a2e0SPaul Mackerras 
/*
 * KVM_DEV_XICS_GRP_SOURCES attribute write: restore the state of one
 * interrupt source from the userspace u64 at @addr, creating the
 * containing ICS on demand.
 *
 * Returns 0 on success, -ENOENT for an out-of-range irq number,
 * -ENOMEM if the ICS cannot be allocated, -EFAULT if the copy-in
 * fails, -EINVAL if the target server does not exist.
 */
static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		/* First source in this block: create its ICS now. */
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	/* A non-masked source must target an existing server. */
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	/* Rewrite the source state atomically w.r.t. delivery paths. */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->pq_state = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE)
		irqp->lsi = 1;
	/* If PENDING, set P in case P is not saved because of old code */
	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		irqp->pq_state |= PQ_PRESENTED;
	if (val & KVM_XICS_QUEUED)
		irqp->pq_state |= PQ_QUEUED;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	/* Re-deliver a pending interrupt now that the state is restored. */
	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number, false);

	return 0;
}
12835975a2e0SPaul Mackerras 
/*
 * irqfd/irqchip line-level entry point: forward a line state change
 * for @irq to the XICS source layer.  Returns -ENODEV if the VM has
 * no XICS device.
 */
int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (xics)
		return ics_deliver_irq(xics, irq, level);

	return -ENODEV;
}
129325a2150bSPaul Mackerras 
xics_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)12945975a2e0SPaul Mackerras static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
12955975a2e0SPaul Mackerras {
12965975a2e0SPaul Mackerras 	struct kvmppc_xics *xics = dev->private;
12975975a2e0SPaul Mackerras 
12985975a2e0SPaul Mackerras 	switch (attr->group) {
12995975a2e0SPaul Mackerras 	case KVM_DEV_XICS_GRP_SOURCES:
13005975a2e0SPaul Mackerras 		return xics_set_source(xics, attr->attr, attr->addr);
13015975a2e0SPaul Mackerras 	}
13025975a2e0SPaul Mackerras 	return -ENXIO;
13035975a2e0SPaul Mackerras }
13045975a2e0SPaul Mackerras 
xics_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)13055975a2e0SPaul Mackerras static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
13065975a2e0SPaul Mackerras {
13075975a2e0SPaul Mackerras 	struct kvmppc_xics *xics = dev->private;
13085975a2e0SPaul Mackerras 
13095975a2e0SPaul Mackerras 	switch (attr->group) {
13105975a2e0SPaul Mackerras 	case KVM_DEV_XICS_GRP_SOURCES:
13115975a2e0SPaul Mackerras 		return xics_get_source(xics, attr->attr, attr->addr);
13125975a2e0SPaul Mackerras 	}
13135975a2e0SPaul Mackerras 	return -ENXIO;
13145975a2e0SPaul Mackerras }
13155975a2e0SPaul Mackerras 
xics_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)13165975a2e0SPaul Mackerras static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
13175975a2e0SPaul Mackerras {
13185975a2e0SPaul Mackerras 	switch (attr->group) {
13195975a2e0SPaul Mackerras 	case KVM_DEV_XICS_GRP_SOURCES:
13205975a2e0SPaul Mackerras 		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
13215975a2e0SPaul Mackerras 		    attr->attr < KVMPPC_XICS_NR_IRQS)
13225975a2e0SPaul Mackerras 			return 0;
1323bc5ad3f3SBenjamin Herrenschmidt 		break;
13245975a2e0SPaul Mackerras 	}
13255975a2e0SPaul Mackerras 	return -ENXIO;
1326bc5ad3f3SBenjamin Herrenschmidt }
1327bc5ad3f3SBenjamin Herrenschmidt 
/*
 * Called when device fd is closed. kvm->lock is held.
 */
static void kvmppc_xics_release(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	unsigned long i;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;

	pr_devel("Releasing xics device\n");

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device.  Therefore there can not be any of the device
	 * attribute set/get functions being executed concurrently,
	 * and similarly, the connect_vcpu and set/clr_mapped
	 * functions also cannot be being executed.
	 */

	debugfs_remove(xics->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xics_[gs]et_icp) can be done concurrently.
		 * Holding the vcpu->mutex also means that execution is
		 * excluded for the vcpu until the ICP was freed. When the vcpu
		 * can execute again, vcpu->arch.icp and vcpu->arch.irq_type
		 * have been cleared and the vcpu will not be going into the
		 * XICS code anymore.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xics_free_icp(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	/* Detach from the VM before tearing down the source state. */
	if (kvm)
		kvm->arch.xics = NULL;

	/* Free every allocated ICS block and clear its slot. */
	for (i = 0; i <= xics->max_icsid; i++) {
		kfree(xics->ics[i]);
		xics->ics[i] = NULL;
	}
	/*
	 * A reference of the kvmppc_xics pointer is now kept under
	 * the xics_device pointer of the machine for reuse. It is
	 * freed when the VM is destroyed for now until we fix all the
	 * execution paths.
	 */
	kfree(dev);
}
1384bc5ad3f3SBenjamin Herrenschmidt 
kvmppc_xics_get_device(struct kvm * kvm)13855706d14dSGreg Kurz static struct kvmppc_xics *kvmppc_xics_get_device(struct kvm *kvm)
13865706d14dSGreg Kurz {
13875706d14dSGreg Kurz 	struct kvmppc_xics **kvm_xics_device = &kvm->arch.xics_device;
13885706d14dSGreg Kurz 	struct kvmppc_xics *xics = *kvm_xics_device;
13895706d14dSGreg Kurz 
13905706d14dSGreg Kurz 	if (!xics) {
13915706d14dSGreg Kurz 		xics = kzalloc(sizeof(*xics), GFP_KERNEL);
13925706d14dSGreg Kurz 		*kvm_xics_device = xics;
13935706d14dSGreg Kurz 	} else {
13945706d14dSGreg Kurz 		memset(xics, 0, sizeof(*xics));
13955706d14dSGreg Kurz 	}
13965706d14dSGreg Kurz 
13975706d14dSGreg Kurz 	return xics;
13985706d14dSGreg Kurz }
13995706d14dSGreg Kurz 
/*
 * KVM device create handler: instantiate the XICS emulation for this
 * VM.  Fails with -EEXIST if a XICS is already attached, -ENOMEM if
 * the state cannot be allocated.
 */
static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvm *kvm = dev->kvm;
	struct kvmppc_xics *xics;

	pr_devel("Creating xics for partition\n");

	/* Only one XICS per VM. */
	if (kvm->arch.xics)
		return -EEXIST;

	xics = kvmppc_xics_get_device(kvm);
	if (!xics)
		return -ENOMEM;

	xics->dev = dev;
	xics->kvm = kvm;
	dev->private = xics;
	kvm->arch.xics = xics;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
	    cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}
1431bc5ad3f3SBenjamin Herrenschmidt 
kvmppc_xics_init(struct kvm_device * dev)1432023e9fddSChristoffer Dall static void kvmppc_xics_init(struct kvm_device *dev)
1433023e9fddSChristoffer Dall {
1434e40b38a4SNour-eddine Taleb 	struct kvmppc_xics *xics = dev->private;
1435023e9fddSChristoffer Dall 
1436023e9fddSChristoffer Dall 	xics_debugfs_init(xics);
1437023e9fddSChristoffer Dall }
1438023e9fddSChristoffer Dall 
/* Ops vector registering "kvm-xics" as an in-kernel KVM device type. */
struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.release = kvmppc_xics_release,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};
14485975a2e0SPaul Mackerras 
/*
 * Connect a vcpu to the XICS device by creating its ICP (presenter).
 * Fails with -EPERM if @dev is not a XICS device of the vcpu's VM,
 * -EBUSY if the vcpu already has an interrupt controller, or the
 * error from kvmppc_xics_create_icp() (-ENODEV/-EEXIST/-ENOMEM).
 *
 * Fix: drop the dead "r = -EBUSY" initializer — every earlier failure
 * path returns directly, so that value was never read.
 */
int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r;

	/* The device must really be ours, and belong to the vcpu's VM. */
	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	/* At most one interrupt controller per vcpu. */
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}
14685975a2e0SPaul Mackerras 
kvmppc_xics_free_icp(struct kvm_vcpu * vcpu)1469bc5ad3f3SBenjamin Herrenschmidt void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
1470bc5ad3f3SBenjamin Herrenschmidt {
1471bc5ad3f3SBenjamin Herrenschmidt 	if (!vcpu->arch.icp)
1472bc5ad3f3SBenjamin Herrenschmidt 		return;
1473bc5ad3f3SBenjamin Herrenschmidt 	kfree(vcpu->arch.icp);
1474bc5ad3f3SBenjamin Herrenschmidt 	vcpu->arch.icp = NULL;
1475bc5ad3f3SBenjamin Herrenschmidt 	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1476bc5ad3f3SBenjamin Herrenschmidt }
147725a2150bSPaul Mackerras 
kvmppc_xics_set_mapped(struct kvm * kvm,unsigned long irq,unsigned long host_irq)14785d375199SPaul Mackerras void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
14795d375199SPaul Mackerras 			    unsigned long host_irq)
14805d375199SPaul Mackerras {
14815d375199SPaul Mackerras 	struct kvmppc_xics *xics = kvm->arch.xics;
14825d375199SPaul Mackerras 	struct kvmppc_ics *ics;
14835d375199SPaul Mackerras 	u16 idx;
14845d375199SPaul Mackerras 
14855d375199SPaul Mackerras 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
14865d375199SPaul Mackerras 	if (!ics)
14875d375199SPaul Mackerras 		return;
14885d375199SPaul Mackerras 
14895d375199SPaul Mackerras 	ics->irq_state[idx].host_irq = host_irq;
14905d375199SPaul Mackerras 	ics->irq_state[idx].intr_cpu = -1;
14915d375199SPaul Mackerras }
14925d375199SPaul Mackerras EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);
14935d375199SPaul Mackerras 
kvmppc_xics_clr_mapped(struct kvm * kvm,unsigned long irq,unsigned long host_irq)14945d375199SPaul Mackerras void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
14955d375199SPaul Mackerras 			    unsigned long host_irq)
14965d375199SPaul Mackerras {
14975d375199SPaul Mackerras 	struct kvmppc_xics *xics = kvm->arch.xics;
14985d375199SPaul Mackerras 	struct kvmppc_ics *ics;
14995d375199SPaul Mackerras 	u16 idx;
15005d375199SPaul Mackerras 
15015d375199SPaul Mackerras 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
15025d375199SPaul Mackerras 	if (!ics)
15035d375199SPaul Mackerras 		return;
15045d375199SPaul Mackerras 
15055d375199SPaul Mackerras 	ics->irq_state[idx].host_irq = 0;
15065d375199SPaul Mackerras }
15075d375199SPaul Mackerras EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);
1508