// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
 *	      Halil Pasic <pasic@linux.ibm.com>
 *	      Pierre Morel <pmorel@linux.ibm.com>
 */
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);

static int match_apqn(struct device *dev, const void *data)
{
	struct vfio_ap_queue *q = dev_get_drvdata(dev);

	return (q->apqn == *(int *)(data)) ? 1 : 0;
}

/**
 * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
 * @matrix_mdev: the associated mediated matrix device
 * @apqn: The queue APQN
 *
 * Retrieves the queue with the specified APQN from the list of devices
 * bound to the vfio_ap driver, after verifying that the queue's APID and
 * APQI are set in the matrix.
 *
 * Returns the pointer to the associated vfio_ap_queue, or NULL if the APQN
 * is not assigned to the matrix or no matching queue device is found.
 */
static struct vfio_ap_queue *vfio_ap_get_queue(
					struct ap_matrix_mdev *matrix_mdev,
					int apqn)
{
	struct vfio_ap_queue *q;

	if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
		return NULL;
	if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
		return NULL;

	q = vfio_ap_find_queue(apqn);
	if (q)
		q->matrix_mdev = matrix_mdev;

	return q;
}

/**
 * vfio_ap_wait_for_irqclear
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns once ap_tapq succeeds and the IRQ bit is clear.
 * Also returns if ap_tapq fails because the AP is invalid, deconfigured,
 * or checkstopped.
 * Otherwise, retries up to 5 times after waiting 20ms.
 */
static void vfio_ap_wait_for_irqclear(int apqn)
{
	struct ap_queue_status status;
	int retry = 5;

	do {
		status = ap_tapq(apqn, NULL);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			if (!status.irq_enabled)
				return;
			fallthrough;
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		default:
			WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
				  status.response_code, apqn);
			return;
		}
	} while (--retry);

	WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
		  __func__, status.response_code, apqn);
}

/**
 * vfio_ap_free_aqic_resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC from the GIB when the saved ISC is valid.
 * Unpins the guest's page holding the NIB when one is pinned.
 * Resets the saved_pfn and saved_isc to invalid values.
 */
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
	if (!q)
		return;
	if (q->saved_isc != VFIO_AP_ISC_INVALID &&
	    !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
		q->saved_isc = VFIO_AP_ISC_INVALID;
	}
	if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
		vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
				 &q->saved_pfn, 1);
		q->saved_pfn = 0;
	}
}

/**
 * vfio_ap_irq_disable
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption. On success, or if a reset is
 * in progress or the IRQ-disable command was already processed, calls
 * vfio_ap_wait_for_irqclear() to wait for the IRQ bit to clear and
 * vfio_ap_free_aqic_resources() to free the resources associated with
 * AP interrupt handling.
 *
 * In case the AP is busy, or a reset is in progress,
 * retries after 20ms, up to 5 times.
 *
 * Returns immediately if ap_aqic fails because the AP is invalid,
 * deconfigured, or checkstopped.
 */
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
	struct ap_qirq_ctrl aqic_gisa = {};
	struct ap_queue_status status;
	int retries = 5;

	do {
		status = ap_aqic(q->apqn, aqic_gisa, NULL);
		switch (status.response_code) {
		case AP_RESPONSE_OTHERWISE_CHANGED:
		case AP_RESPONSE_NORMAL:
			vfio_ap_wait_for_irqclear(q->apqn);
			goto end_free;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
		default:
			/* All cases in default mean AP not operational */
			WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
				  status.response_code);
			goto end_free;
		}
	} while (retries--);

	WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
		  status.response_code);
end_free:
	vfio_ap_free_aqic_resources(q);
	q->matrix_mdev = NULL;
	return status;
}

/**
 * vfio_ap_irq_enable: Enable interruption for an APQN
 *
 * @q: the vfio_ap_queue holding AQIC parameters
 * @isc: the guest interruption subclass
 * @nib: the guest address of the notification indicator byte
 *
 * Pins the NIB page and registers the guest ISC with the GIB interface to
 * retrieve the host ISC needed to issue the host-side PQAP/AQIC.
 *
 * status.response_code is set to AP_RESPONSE_INVALID_ADDRESS if
 * vfio_pin_pages() failed.
 *
 * Otherwise returns the ap_queue_status returned by ap_aqic();
 * all retry handling will be done by the guest.
 */
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
						 int isc,
						 unsigned long nib)
{
	struct ap_qirq_ctrl aqic_gisa = {};
	struct ap_queue_status status = {};
	struct kvm_s390_gisa *gisa;
	struct kvm *kvm;
	unsigned long h_nib, g_pfn, h_pfn;
	int ret;

	g_pfn = nib >> PAGE_SHIFT;
	ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
			     IOMMU_READ | IOMMU_WRITE, &h_pfn);
	switch (ret) {
	case 1:
		break;
	default:
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	kvm = q->matrix_mdev->kvm;
	gisa = kvm->arch.gisa_int.origin;

	h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
	aqic_gisa.gisc = isc;
	aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
	aqic_gisa.ir = 1;
	aqic_gisa.gisa = (uint64_t)gisa >> 4;

	status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* Free any older IRQ configuration, then save the new one */
		vfio_ap_free_aqic_resources(q);
		q->saved_pfn = g_pfn;
		q->saved_isc = isc;
		break;
	case AP_RESPONSE_OTHERWISE_CHANGED:
		/* We could not modify IRQ settings: clear new configuration */
		vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
		kvm_s390_gisc_unregister(kvm, isc);
		break;
	default:
		pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
			status.response_code);
		vfio_ap_irq_disable(q);
		break;
	}

	return status;
}

/**
 * handle_pqap: PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * status.response_code may be set to one of the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0): in case of success
 * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible
 * response codes.
 * We take the matrix_dev lock to ensure serialization on queues and
 * mediated device access.
 *
 * Returns 0 if we could handle the request inside KVM.
 * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	uint64_t status;
	uint16_t apqn;
	struct vfio_ap_queue *q;
	struct ap_queue_status qstatus = {
			       .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
	struct ap_matrix_mdev *matrix_mdev;

	/* If we do not use the AIV facility just go to userland */
	if (!(vcpu->arch.sie_block->eca & ECA_AIV))
		return -EOPNOTSUPP;

	apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
	mutex_lock(&matrix_dev->lock);

	if (!vcpu->kvm->arch.crypto.pqap_hook)
		goto out_unlock;
	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
				   struct ap_matrix_mdev, pqap_hook);

	/*
	 * If the KVM pointer is in the process of being set, wait until the
	 * process has completed.
	 */
	wait_event_cmd(matrix_mdev->wait_for_kvm,
		       !matrix_mdev->kvm_busy,
		       mutex_unlock(&matrix_dev->lock),
		       mutex_lock(&matrix_dev->lock));

	/* If there is no guest using the mdev, there is nothing to do */
	if (!matrix_mdev->kvm)
		goto out_unlock;

	q = vfio_ap_get_queue(matrix_mdev, apqn);
	if (!q)
		goto out_unlock;

	status = vcpu->run->s.regs.gprs[1];

	/* If the IR bit (bit 16) is set, enable the interrupt */
	if ((status >> (63 - 16)) & 0x01)
		qstatus = vfio_ap_irq_enable(q, status & 0x07,
					     vcpu->run->s.regs.gprs[2]);
	else
		qstatus = vfio_ap_irq_disable(q);

out_unlock:
	memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
	vcpu->run->s.regs.gprs[1] >>= 32;
	mutex_unlock(&matrix_dev->lock);
	return 0;
}
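
/*
 * Worked example of the GR1 decoding above (illustrative values): with
 * GR1 = 0x0000800000000003, bit 16 (i.e. value bit 63 - 16 = 47) is set,
 * so the IR test (GR1 >> 47) & 1 yields 1 and the interrupt is enabled;
 * the ISC is taken from the three rightmost bits, GR1 & 0x07 = 3.
 */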

static void vfio_ap_matrix_init(struct ap_config_info *info,
				struct ap_matrix *matrix)
{
	matrix->apm_max = info->apxa ? info->Na : 63;
	matrix->aqm_max = info->apxa ? info->Nd : 15;
	matrix->adm_max = info->apxa ? info->Nd : 15;
}
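
/*
 * Note on the values above: without the APXA facility, the architecture
 * provides 64 adapters and 16 domains, so the maximum APID is 63 and the
 * maximum APQI/domain ID is 15; with APXA, the QCI info block supplies the
 * actual maxima (Na for adapters, Nd for domains).
 */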

static int vfio_ap_mdev_create(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev;

	if (atomic_dec_if_positive(&matrix_dev->available_instances) < 0)
		return -EPERM;

	matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
	if (!matrix_mdev) {
		atomic_inc(&matrix_dev->available_instances);
		return -ENOMEM;
	}

	matrix_mdev->mdev = mdev;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
	init_waitqueue_head(&matrix_mdev->wait_for_kvm);
	mdev_set_drvdata(mdev, matrix_mdev);
	matrix_mdev->pqap_hook.hook = handle_pqap;
	matrix_mdev->pqap_hook.owner = THIS_MODULE;
	mutex_lock(&matrix_dev->lock);
	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
	mutex_unlock(&matrix_dev->lock);

	return 0;
}

static int vfio_ap_mdev_remove(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * removal of the mediated device.
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
		mutex_unlock(&matrix_dev->lock);
		return -EBUSY;
	}

	vfio_ap_mdev_reset_queues(mdev);
	list_del(&matrix_mdev->node);
	kfree(matrix_mdev);
	mdev_set_drvdata(mdev, NULL);
	atomic_inc(&matrix_dev->available_instances);
	mutex_unlock(&matrix_dev->lock);

	return 0;
}

static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
}

static MDEV_TYPE_ATTR_RO(name);

static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n",
		       atomic_read(&matrix_dev->available_instances));
}

static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
}

static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *vfio_ap_mdev_type_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
	.name = VFIO_AP_MDEV_TYPE_HWVIRT,
	.attrs = vfio_ap_mdev_type_attrs,
};

static struct attribute_group *vfio_ap_mdev_type_groups[] = {
	&vfio_ap_mdev_hwvirt_type_group,
	NULL,
};

struct vfio_ap_queue_reserved {
	unsigned long *apid;
	unsigned long *apqi;
	bool reserved;
};

/**
 * vfio_ap_has_queue
 *
 * @dev: an AP queue device
 * @data: a struct vfio_ap_queue_reserved reference
 *
 * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
 * apid or apqi specified in @data:
 *
 * - If @data contains both an apid and apqi value, then @data will be flagged
 *   as reserved if the APID and APQI fields of the AP queue device match
 *
 * - If @data contains only an apid value, @data will be flagged as
 *   reserved if the APID field in the AP queue device matches
 *
 * - If @data contains only an apqi value, @data will be flagged as
 *   reserved if the APQI field in the AP queue device matches
 *
 * Returns 0 to indicate the input to the function succeeded. Returns -EINVAL
 * if @data does not contain either an apid or apqi.
 */
static int vfio_ap_has_queue(struct device *dev, void *data)
{
	struct vfio_ap_queue_reserved *qres = data;
	struct ap_queue *ap_queue = to_ap_queue(dev);
	ap_qid_t qid;
	unsigned long id;

	if (qres->apid && qres->apqi) {
		qid = AP_MKQID(*qres->apid, *qres->apqi);
		if (qid == ap_queue->qid)
			qres->reserved = true;
	} else if (qres->apid && !qres->apqi) {
		id = AP_QID_CARD(ap_queue->qid);
		if (id == *qres->apid)
			qres->reserved = true;
	} else if (!qres->apid && qres->apqi) {
		id = AP_QID_QUEUE(ap_queue->qid);
		if (id == *qres->apqi)
			qres->reserved = true;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * vfio_ap_verify_queue_reserved
 *
 * @apid: an AP adapter ID
 * @apqi: an AP queue index
 *
 * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
 * driver according to the following rules:
 *
 * - If both @apid and @apqi are not NULL, then there must be an AP queue
 *   device bound to the vfio_ap driver with the APQN identified by @apid and
 *   @apqi
 *
 * - If only @apid is not NULL, then there must be an AP queue device bound
 *   to the vfio_ap driver with an APQN containing @apid
 *
 * - If only @apqi is not NULL, then there must be an AP queue device bound
 *   to the vfio_ap driver with an APQN containing @apqi
 *
 * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
 */
static int vfio_ap_verify_queue_reserved(unsigned long *apid,
					 unsigned long *apqi)
{
	int ret;
	struct vfio_ap_queue_reserved qres;

	qres.apid = apid;
	qres.apqi = apqi;
	qres.reserved = false;

	ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
				     &qres, vfio_ap_has_queue);
	if (ret)
		return ret;

	if (qres.reserved)
		return 0;

	return -EADDRNOTAVAIL;
}

static int
vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
					     unsigned long apid)
{
	int ret;
	unsigned long apqi;
	unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;

	if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
		return vfio_ap_verify_queue_reserved(&apid, NULL);

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
		ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * vfio_ap_mdev_verify_no_sharing
 *
 * Verifies that the APQNs derived from the cross product of the AP adapter IDs
 * and AP queue indexes comprising the AP matrix are not configured for another
 * mediated device. AP queue sharing is not allowed.
 *
 * @matrix_mdev: the mediated matrix device
 *
 * Returns 0 if the APQNs are not shared; otherwise, returns -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
{
	struct ap_matrix_mdev *lstdev;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
		if (matrix_mdev == lstdev)
			continue;

		memset(apm, 0, sizeof(apm));
		memset(aqm, 0, sizeof(aqm));

		/*
		 * We work on full longs, as we can only exclude the leftover
		 * bits in non-inverse order. The leftover is all zeros.
		 */
		if (!bitmap_and(apm, matrix_mdev->matrix.apm,
				lstdev->matrix.apm, AP_DEVICES))
			continue;

		if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
				lstdev->matrix.aqm, AP_DOMAINS))
			continue;

		return -EADDRINUSE;
	}

	return 0;
}
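
/*
 * Worked example of the sharing check (illustrative assignments): if this
 * mdev has APM = {2, 3} and AQM = {7}, and another mdev on the list has
 * APM = {3} and AQM = {6, 7}, both bitmap_and() calls above are non-zero
 * because APQN 03.0007 would be usable by both guests, so -EADDRINUSE is
 * returned. If either intersection were empty, no APQN could be shared.
 */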

/**
 * assign_adapter_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's assign_adapter attribute
 * @buf: a buffer containing the AP adapter number (APID) to
 *	 be assigned
 * @count: the number of bytes in @buf
 *
 * Parses the APID from @buf and sets the corresponding bit in the mediated
 * matrix device's APM.
 *
 * Returns the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APID is not a valid number
 *
 *	2. -ENODEV
 *	   The APID exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APQIs have yet been assigned, the APID is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is being used by another mediated
 *	   matrix device
 */
static ssize_t assign_adapter_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * assignment of adapter
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	/*
	 * Set the bit in the AP mask (APM) corresponding to the AP adapter
	 * number (APID). The bits in the mask, from most significant to least
	 * significant bit, correspond to APIDs 0-255.
	 */
	ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
	if (ret)
		goto done;

	set_bit_inv(apid, matrix_mdev->matrix.apm);

	ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
	if (ret)
		goto share_err;

	ret = count;
	goto done;

share_err:
	clear_bit_inv(apid, matrix_mdev->matrix.apm);
done:
	mutex_unlock(&matrix_dev->lock);

	return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
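
/*
 * Typical usage of the assignment attributes from userspace, e.g. to give
 * a guest access to AP queue 05.0004 (the mdev UUID below is hypothetical):
 *
 *	echo 0x05 > /sys/devices/vfio_ap/matrix/$UUID/assign_adapter
 *	echo 0x04 > /sys/devices/vfio_ap/matrix/$UUID/assign_domain
 */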

/**
 * unassign_adapter_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's unassign_adapter attribute
 * @buf: a buffer containing the adapter number (APID) to be unassigned
 * @count: the number of bytes in @buf
 *
 * Parses the APID from @buf and clears the corresponding bit in the mediated
 * matrix device's APM.
 *
 * Returns the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APID is not a number
 *	-ENODEV if the APID exceeds the maximum value configured for the
 *		system
 */
static ssize_t unassign_adapter_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * un-assignment of adapter
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
	ret = count;
done:
	mutex_unlock(&matrix_dev->lock);
	return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);

static int
vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
					     unsigned long apqi)
{
	int ret;
	unsigned long apid;
	unsigned long nbits = matrix_mdev->matrix.apm_max + 1;

	if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
		return vfio_ap_verify_queue_reserved(NULL, &apqi);

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
		ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * assign_domain_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's assign_domain attribute
 * @buf: a buffer containing the AP queue index (APQI) of the domain to
 *	 be assigned
 * @count: the number of bytes in @buf
 *
 * Parses the APQI from @buf and sets the corresponding bit in the mediated
 * matrix device's AQM.
 *
 * Returns the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APQI is not a valid number
 *
 *	2. -ENODEV
 *	   The APQI exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APIDs have yet been assigned, the APQI is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is being used by another mediated
 *	   matrix device
 */
static ssize_t assign_domain_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	unsigned long max_apqi = matrix_mdev->matrix.aqm_max;

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * assignment of domain
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;
	if (apqi > max_apqi) {
		ret = -ENODEV;
		goto done;
	}

	ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
	if (ret)
		goto done;

	set_bit_inv(apqi, matrix_mdev->matrix.aqm);

	ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
	if (ret)
		goto share_err;

	ret = count;
	goto done;

share_err:
	clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
done:
	mutex_unlock(&matrix_dev->lock);

	return ret;
}
static DEVICE_ATTR_WO(assign_domain);

/**
 * unassign_domain_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's unassign_domain attribute
 * @buf: a buffer containing the AP queue index (APQI) of the domain to
 *	 be unassigned
 * @count: the number of bytes in @buf
 *
 * Parses the APQI from @buf and clears the corresponding bit in the
 * mediated matrix device's AQM.
 *
 * Returns the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APQI is not a number
 *	-ENODEV if the APQI exceeds the maximum value configured for the system
 */
static ssize_t unassign_domain_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * un-assignment of domain
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
	ret = count;

done:
	mutex_unlock(&matrix_dev->lock);
	return ret;
}
static DEVICE_ATTR_WO(unassign_domain);

/**
 * assign_control_domain_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's assign_control_domain attribute
 * @buf: a buffer containing the domain ID to be assigned
 * @count: the number of bytes in @buf
 *
 * Parses the domain ID from @buf and sets the corresponding bit in the mediated
 * matrix device's ADM.
 *
 * Returns the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t assign_control_domain_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int ret;
	unsigned long id;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * assignment of control domain.
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		goto done;

	if (id > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	/* Set the bit in the ADM (bitmask) corresponding to the AP control
	 * domain number (id). The bits in the mask, from most significant to
	 * least significant, correspond to IDs 0 up to one less than the
	 * number of control domains that can be assigned.
	 */
	set_bit_inv(id, matrix_mdev->matrix.adm);
	ret = count;
done:
	mutex_unlock(&matrix_dev->lock);
	return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);

/**
 * unassign_control_domain_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's unassign_control_domain attribute
 * @buf: a buffer containing the domain ID to be unassigned
 * @count: the number of bytes in @buf
 *
 * Parses the domain ID from @buf and clears the corresponding bit in the
 * mediated matrix device's ADM.
 *
 * Returns the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t unassign_control_domain_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int ret;
	unsigned long domid;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * un-assignment of control domain.
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}

	ret = kstrtoul(buf, 0, &domid);
	if (ret)
		goto done;
	if (domid > max_domid) {
		ret = -ENODEV;
		goto done;
	}

	clear_bit_inv(domid, matrix_mdev->matrix.adm);
	ret = count;
done:
	mutex_unlock(&matrix_dev->lock);
	return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);

static ssize_t control_domains_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	unsigned long id;
	int nchars = 0;
	int n;
	char *bufpos = buf;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->lock);
	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
		n = sprintf(bufpos, "%04lx\n", id);
		bufpos += n;
		nchars += n;
	}
	mutex_unlock(&matrix_dev->lock);

	return nchars;
}
static DEVICE_ATTR_RO(control_domains);

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	char *bufpos = buf;
	unsigned long apid;
	unsigned long apqi;
	unsigned long apid1;
	unsigned long apqi1;
	unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
	unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
	int nchars = 0;
	int n;

	apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
	apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);

	mutex_lock(&matrix_dev->lock);

	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
		for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
			for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
					     naqm_bits) {
				n = sprintf(bufpos, "%02lx.%04lx\n", apid,
					    apqi);
				bufpos += n;
				nchars += n;
			}
		}
	} else if (apid1 < napm_bits) {
		for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
			n = sprintf(bufpos, "%02lx.\n", apid);
			bufpos += n;
			nchars += n;
		}
	} else if (apqi1 < naqm_bits) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
			n = sprintf(bufpos, ".%04lx\n", apqi);
			bufpos += n;
			nchars += n;
		}
	}

	mutex_unlock(&matrix_dev->lock);

	return nchars;
}
static DEVICE_ATTR_RO(matrix);
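
/*
 * Example of the matrix attribute's output format: with APIDs 5 and 6 and
 * APQI 4 assigned (illustrative values), reading the attribute yields one
 * APQN per line in <apid>.<apqi> form:
 *
 *	05.0004
 *	06.0004
 *
 * If only adapters are assigned, lines such as "05." are shown; if only
 * domains are assigned, lines such as ".0004" are shown.
 */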

static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};

static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};

/**
 * vfio_ap_mdev_set_kvm
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Sets all data for @matrix_mdev that are needed to manage AP resources
 * for the guest whose state is represented by @kvm.
 *
 * Note: The matrix_dev->lock must be taken prior to calling
 * this function; however, the lock will be temporarily released while the
 * guest's AP configuration is set to avoid a potential lockdep splat.
 * The kvm->lock is taken to set the guest's AP configuration which, under
 * certain circumstances, will result in a circular lock dependency if this is
 * done under the matrix_dev->lock.
 *
 * Return 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
 */
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
				struct kvm *kvm)
{
	struct ap_matrix_mdev *m;

	if (kvm->arch.crypto.crycbd) {
		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
			if (m != matrix_mdev && m->kvm == kvm)
				return -EPERM;
		}

		kvm_get_kvm(kvm);
		matrix_mdev->kvm_busy = true;
		mutex_unlock(&matrix_dev->lock);
		kvm_arch_crypto_set_masks(kvm,
					  matrix_mdev->matrix.apm,
					  matrix_mdev->matrix.aqm,
					  matrix_mdev->matrix.adm);
		mutex_lock(&matrix_dev->lock);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		matrix_mdev->kvm = kvm;
		matrix_mdev->kvm_busy = false;
		wake_up_all(&matrix_mdev->wait_for_kvm);
	}

	return 0;
}

/*
 * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
 *
 * @nb: The notifier block
 * @action: Action to be taken
 * @data: data associated with the request
 *
 * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
 * pinned before). Other requests are ignored.
 */
static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct ap_matrix_mdev *matrix_mdev;

	matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;

		vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

/**
 * vfio_ap_mdev_unset_kvm
 *
 * @matrix_mdev: a matrix mediated device
 *
 * Performs clean-up of resources no longer needed by @matrix_mdev.
 *
 * Note: The matrix_dev->lock must be taken prior to calling
 * this function; however, the lock will be temporarily released while the
 * guest's AP configuration is cleared to avoid a potential lockdep splat.
 * The kvm->lock is taken to clear the guest's AP configuration which, under
 * certain circumstances, will result in a circular lock dependency if this is
 * done under the matrix_dev->lock.
 */
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
	/*
	 * If the KVM pointer is in the process of being set, wait until the
	 * process has completed.
	 */
	wait_event_cmd(matrix_mdev->wait_for_kvm,
		       !matrix_mdev->kvm_busy,
		       mutex_unlock(&matrix_dev->lock),
		       mutex_lock(&matrix_dev->lock));

	if (matrix_mdev->kvm) {
		matrix_mdev->kvm_busy = true;
		mutex_unlock(&matrix_dev->lock);
		kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
		mutex_lock(&matrix_dev->lock);
		vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
		matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
		kvm_put_kvm(matrix_mdev->kvm);
		matrix_mdev->kvm = NULL;
		matrix_mdev->kvm_busy = false;
		wake_up_all(&matrix_mdev->wait_for_kvm);
	}
}

static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	int notify_rc = NOTIFY_OK;
	struct ap_matrix_mdev *matrix_mdev;

	if (action != VFIO_GROUP_NOTIFY_SET_KVM)
		return NOTIFY_OK;

	mutex_lock(&matrix_dev->lock);
	matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);

	if (!data)
		vfio_ap_mdev_unset_kvm(matrix_mdev);
	else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
		notify_rc = NOTIFY_DONE;

	mutex_unlock(&matrix_dev->lock);

	return notify_rc;
}

static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
	struct device *dev;
	struct vfio_ap_queue *q = NULL;

	dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
				 &apqn, match_apqn);
	if (dev) {
		q = dev_get_drvdata(dev);
		put_device(dev);
	}

	return q;
}

int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
			     unsigned int retry)
{
	struct ap_queue_status status;
	int ret;
	int retry2 = 2;

	if (!q)
		return 0;

retry_zapq:
	status = ap_zapq(q->apqn);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		ret = 0;
		break;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		if (retry--) {
			msleep(20);
			goto retry_zapq;
		}
		ret = -EBUSY;
		break;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		WARN_ON_ONCE(status.irq_enabled);
		ret = -EBUSY;
		goto free_resources;
	default:
		/* things are really broken, give up */
		WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
		     status.response_code);
		return -EIO;
	}

	/* wait for the reset to take effect */
	while (retry2--) {
		if (status.queue_empty && !status.irq_enabled)
			break;
		msleep(20);
		status = ap_tapq(q->apqn, NULL);
	}
	WARN_ON_ONCE(retry2 <= 0);

free_resources:
	vfio_ap_free_aqic_resources(q);

	return ret;
}

static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
{
	int ret;
	int rc = 0;
	unsigned long apid, apqi;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
			     matrix_mdev->matrix.apm_max + 1) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
				     matrix_mdev->matrix.aqm_max + 1) {
			q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
			ret = vfio_ap_mdev_reset_queue(q, 1);
			/*
			 * Regardless of whether a queue turns out to be busy,
			 * or is not operational, we need to continue resetting
			 * the remaining queues.
			 */
			if (ret)
				rc = ret;
		}
	}

	return rc;
}

static int vfio_ap_mdev_open(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
	events = VFIO_GROUP_NOTIFY_SET_KVM;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				     &events, &matrix_mdev->group_notifier);
	if (ret) {
		module_put(THIS_MODULE);
		return ret;
	}

	matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				     &events, &matrix_mdev->iommu_notifier);
	if (!ret)
		return ret;

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				 &matrix_mdev->group_notifier);
	module_put(THIS_MODULE);
	return ret;
}

static void vfio_ap_mdev_release(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	mutex_lock(&matrix_dev->lock);
	vfio_ap_mdev_unset_kvm(matrix_mdev);
	mutex_unlock(&matrix_dev->lock);

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &matrix_mdev->iommu_notifier);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				 &matrix_mdev->group_notifier);
	module_put(THIS_MODULE);
}

static int vfio_ap_mdev_get_device_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_device_info info;

	minsz = offsetofend(struct vfio_device_info, num_irqs);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
	info.num_regions = 0;
	info.num_irqs = 0;

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
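
/*
 * A minimal userspace sketch of the VFIO_DEVICE_GET_INFO handshake served
 * by vfio_ap_mdev_get_device_info() above ("device_fd" is a hypothetical
 * open VFIO device file descriptor; illustrative only):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	if (!ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info) &&
 *	    (info.flags & VFIO_DEVICE_FLAGS_AP))
 *		;	// an AP mdev: no regions, no IRQs, reset supported
 */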

static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
				  unsigned int cmd, unsigned long arg)
{
	int ret;
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->lock);
	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		ret = vfio_ap_mdev_get_device_info(arg);
		break;
	case VFIO_DEVICE_RESET:
		matrix_mdev = mdev_get_drvdata(mdev);
		if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
			ret = -EINVAL;
			break;
		}

		/*
		 * If the KVM pointer is in the process of being set, wait
		 * until the process has completed.
		 */
		wait_event_cmd(matrix_mdev->wait_for_kvm,
			       !matrix_mdev->kvm_busy,
			       mutex_unlock(&matrix_dev->lock),
			       mutex_lock(&matrix_dev->lock));

		ret = vfio_ap_mdev_reset_queues(mdev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&matrix_dev->lock);

	return ret;
}

static const struct mdev_parent_ops vfio_ap_matrix_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= vfio_ap_mdev_type_groups,
	.mdev_attr_groups	= vfio_ap_mdev_attr_groups,
	.create			= vfio_ap_mdev_create,
	.remove			= vfio_ap_mdev_remove,
	.open			= vfio_ap_mdev_open,
	.release		= vfio_ap_mdev_release,
	.ioctl			= vfio_ap_mdev_ioctl,
};

int vfio_ap_mdev_register(void)
{
	atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);

	return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
}

void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_device(&matrix_dev->device);
}