1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 */
6
7 #define pr_fmt(fmt) "iommu: " fmt
8
9 #include <linux/device.h>
10 #include <linux/kernel.h>
11 #include <linux/bug.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/export.h>
15 #include <linux/slab.h>
16 #include <linux/errno.h>
17 #include <linux/iommu.h>
18 #include <linux/idr.h>
19 #include <linux/notifier.h>
20 #include <linux/err.h>
21 #include <linux/pci.h>
22 #include <linux/bitops.h>
23 #include <linux/property.h>
24 #include <linux/fsl/mc.h>
25 #include <linux/module.h>
26 #include <trace/events/iommu.h>
27
28 static struct kset *iommu_group_kset;
29 static DEFINE_IDA(iommu_group_ida);
30
31 static unsigned int iommu_def_domain_type __read_mostly;
32 static bool iommu_dma_strict __read_mostly = true;
33 static u32 iommu_cmd_line __read_mostly;
34
35 struct iommu_group {
36 struct kobject kobj;
37 struct kobject *devices_kobj;
38 struct list_head devices;
39 struct mutex mutex;
40 struct blocking_notifier_head notifier;
41 void *iommu_data;
42 void (*iommu_data_release)(void *iommu_data);
43 char *name;
44 int id;
45 struct iommu_domain *default_domain;
46 struct iommu_domain *domain;
47 struct list_head entry;
48 };
49
50 struct group_device {
51 struct list_head list;
52 struct device *dev;
53 char *name;
54 };
55
56 struct iommu_group_attribute {
57 struct attribute attr;
58 ssize_t (*show)(struct iommu_group *group, char *buf);
59 ssize_t (*store)(struct iommu_group *group,
60 const char *buf, size_t count);
61 };
62
63 static const char * const iommu_group_resv_type_string[] = {
64 [IOMMU_RESV_DIRECT] = "direct",
65 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
66 [IOMMU_RESV_RESERVED] = "reserved",
67 [IOMMU_RESV_MSI] = "msi",
68 [IOMMU_RESV_SW_MSI] = "msi",
69 };
70
71 #define IOMMU_CMD_LINE_DMA_API BIT(0)
72 #define IOMMU_CMD_LINE_STRICT BIT(1)
73
74 static int iommu_alloc_default_domain(struct iommu_group *group,
75 struct device *dev);
76 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
77 unsigned type);
78 static int __iommu_attach_device(struct iommu_domain *domain,
79 struct device *dev);
80 static int __iommu_attach_group(struct iommu_domain *domain,
81 struct iommu_group *group);
82 static void __iommu_detach_group(struct iommu_domain *domain,
83 struct iommu_group *group);
84 static int iommu_create_device_direct_mappings(struct iommu_group *group,
85 struct device *dev);
86 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
87 static ssize_t iommu_group_store_type(struct iommu_group *group,
88 const char *buf, size_t count);
89
90 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
91 struct iommu_group_attribute iommu_group_attr_##_name = \
92 __ATTR(_name, _mode, _show, _store)
93
94 #define to_iommu_group_attr(_attr) \
95 container_of(_attr, struct iommu_group_attribute, attr)
96 #define to_iommu_group(_kobj) \
97 container_of(_kobj, struct iommu_group, kobj)
98
99 static LIST_HEAD(iommu_device_list);
100 static DEFINE_SPINLOCK(iommu_device_lock);
101
102 /*
103 * Use a function instead of an array here because the domain-type is a
104 * bit-field, so an array would waste memory.
105 */
106 static const char *iommu_domain_type_str(unsigned int t)
107 {
108 switch (t) {
109 case IOMMU_DOMAIN_BLOCKED:
110 return "Blocked";
111 case IOMMU_DOMAIN_IDENTITY:
112 return "Passthrough";
113 case IOMMU_DOMAIN_UNMANAGED:
114 return "Unmanaged";
115 case IOMMU_DOMAIN_DMA:
116 return "Translated";
117 default:
118 return "Unknown";
119 }
120 }
121
122 static int __init iommu_subsys_init(void)
123 {
124 if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
125 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
126 iommu_set_default_passthrough(false);
127 else
128 iommu_set_default_translated(false);
129
130 if (iommu_default_passthrough() && mem_encrypt_active()) {
131 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
132 iommu_set_default_translated(false);
133 }
134 }
135
136 pr_info("Default domain type: %s %s\n",
137 iommu_domain_type_str(iommu_def_domain_type),
138 (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
139 "(set via kernel command line)" : "");
140
141 return 0;
142 }
143 subsys_initcall(iommu_subsys_init);
144
145 /**
146 * iommu_device_register() - Register an IOMMU hardware instance
147 * @iommu: IOMMU handle for the instance
148 * @ops: IOMMU ops to associate with the instance
149 * @hwdev: (optional) actual instance device, used for fwnode lookup
150 *
151 * Return: 0 on success, or an error.
152 */
153 int iommu_device_register(struct iommu_device *iommu,
154 const struct iommu_ops *ops, struct device *hwdev)
155 {
156 /* We need to be able to take module references appropriately */
157 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
158 return -EINVAL;
159
160 iommu->ops = ops;
161 if (hwdev)
162 iommu->fwnode = hwdev->fwnode;
163
164 spin_lock(&iommu_device_lock);
165 list_add_tail(&iommu->list, &iommu_device_list);
166 spin_unlock(&iommu_device_lock);
167 return 0;
168 }
169 EXPORT_SYMBOL_GPL(iommu_device_register);
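
/*
 * Illustrative sketch (not part of the original file): a driver typically
 * calls this from its probe path and pairs it with iommu_device_unregister()
 * on removal. "my_iommu" and "my_ops" are hypothetical driver-local names.
 *
 *	ret = iommu_device_register(&my_iommu->iommu, &my_ops, &pdev->dev);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_device_unregister(&my_iommu->iommu);
 */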
170
171 void iommu_device_unregister(struct iommu_device *iommu)
172 {
173 spin_lock(&iommu_device_lock);
174 list_del(&iommu->list);
175 spin_unlock(&iommu_device_lock);
176 }
177 EXPORT_SYMBOL_GPL(iommu_device_unregister);
178
179 static struct dev_iommu *dev_iommu_get(struct device *dev)
180 {
181 struct dev_iommu *param = dev->iommu;
182
183 if (param)
184 return param;
185
186 param = kzalloc(sizeof(*param), GFP_KERNEL);
187 if (!param)
188 return NULL;
189
190 mutex_init(&param->lock);
191 dev->iommu = param;
192 return param;
193 }
194
195 static void dev_iommu_free(struct device *dev)
196 {
197 iommu_fwspec_free(dev);
198 kfree(dev->iommu);
199 dev->iommu = NULL;
200 }
201
202 static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
203 {
204 const struct iommu_ops *ops = dev->bus->iommu_ops;
205 struct iommu_device *iommu_dev;
206 struct iommu_group *group;
207 int ret;
208
209 if (!ops)
210 return -ENODEV;
211
212 if (!dev_iommu_get(dev))
213 return -ENOMEM;
214
215 if (!try_module_get(ops->owner)) {
216 ret = -EINVAL;
217 goto err_free;
218 }
219
220 iommu_dev = ops->probe_device(dev);
221 if (IS_ERR(iommu_dev)) {
222 ret = PTR_ERR(iommu_dev);
223 goto out_module_put;
224 }
225
226 dev->iommu->iommu_dev = iommu_dev;
227
228 group = iommu_group_get_for_dev(dev);
229 if (IS_ERR(group)) {
230 ret = PTR_ERR(group);
231 goto out_release;
232 }
233 iommu_group_put(group);
234
235 if (group_list && !group->default_domain && list_empty(&group->entry))
236 list_add_tail(&group->entry, group_list);
237
238 iommu_device_link(iommu_dev, dev);
239
240 return 0;
241
242 out_release:
243 ops->release_device(dev);
244
245 out_module_put:
246 module_put(ops->owner);
247
248 err_free:
249 dev_iommu_free(dev);
250
251 return ret;
252 }
253
254 int iommu_probe_device(struct device *dev)
255 {
256 const struct iommu_ops *ops = dev->bus->iommu_ops;
257 struct iommu_group *group;
258 int ret;
259
260 ret = __iommu_probe_device(dev, NULL);
261 if (ret)
262 goto err_out;
263
264 group = iommu_group_get(dev);
265 if (!group) {
266 ret = -ENODEV;
267 goto err_release;
268 }
269
270 /*
271 * Try to allocate a default domain - needs support from the
272 * IOMMU driver. There are still some drivers which don't
273 * support default domains, so the return value is not yet
274 * checked.
275 */
276 iommu_alloc_default_domain(group, dev);
277
278 if (group->default_domain) {
279 ret = __iommu_attach_device(group->default_domain, dev);
280 if (ret) {
281 iommu_group_put(group);
282 goto err_release;
283 }
284 }
285
286 iommu_create_device_direct_mappings(group, dev);
287
288 iommu_group_put(group);
289
290 if (ops->probe_finalize)
291 ops->probe_finalize(dev);
292
293 return 0;
294
295 err_release:
296 iommu_release_device(dev);
297
298 err_out:
299 return ret;
300
301 }
302
303 void iommu_release_device(struct device *dev)
304 {
305 const struct iommu_ops *ops = dev->bus->iommu_ops;
306
307 if (!dev->iommu)
308 return;
309
310 iommu_device_unlink(dev->iommu->iommu_dev, dev);
311
312 ops->release_device(dev);
313
314 iommu_group_remove_device(dev);
315 module_put(ops->owner);
316 dev_iommu_free(dev);
317 }
318
319 static int __init iommu_set_def_domain_type(char *str)
320 {
321 bool pt;
322 int ret;
323
324 ret = kstrtobool(str, &pt);
325 if (ret)
326 return ret;
327
328 if (pt)
329 iommu_set_default_passthrough(true);
330 else
331 iommu_set_default_translated(true);
332
333 return 0;
334 }
335 early_param("iommu.passthrough", iommu_set_def_domain_type);
336
337 static int __init iommu_dma_setup(char *str)
338 {
339 int ret = kstrtobool(str, &iommu_dma_strict);
340
341 if (!ret)
342 iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
343 return ret;
344 }
345 early_param("iommu.strict", iommu_dma_setup);
346
347 void iommu_set_dma_strict(bool strict)
348 {
349 if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT))
350 iommu_dma_strict = strict;
351 }
352
353 bool iommu_get_dma_strict(struct iommu_domain *domain)
354 {
355 /* only allow lazy flushing for DMA domains */
356 if (domain->type == IOMMU_DOMAIN_DMA)
357 return iommu_dma_strict;
358 return true;
359 }
360 EXPORT_SYMBOL_GPL(iommu_get_dma_strict);
361
362 static ssize_t iommu_group_attr_show(struct kobject *kobj,
363 struct attribute *__attr, char *buf)
364 {
365 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
366 struct iommu_group *group = to_iommu_group(kobj);
367 ssize_t ret = -EIO;
368
369 if (attr->show)
370 ret = attr->show(group, buf);
371 return ret;
372 }
373
374 static ssize_t iommu_group_attr_store(struct kobject *kobj,
375 struct attribute *__attr,
376 const char *buf, size_t count)
377 {
378 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
379 struct iommu_group *group = to_iommu_group(kobj);
380 ssize_t ret = -EIO;
381
382 if (attr->store)
383 ret = attr->store(group, buf, count);
384 return ret;
385 }
386
387 static const struct sysfs_ops iommu_group_sysfs_ops = {
388 .show = iommu_group_attr_show,
389 .store = iommu_group_attr_store,
390 };
391
392 static int iommu_group_create_file(struct iommu_group *group,
393 struct iommu_group_attribute *attr)
394 {
395 return sysfs_create_file(&group->kobj, &attr->attr);
396 }
397
398 static void iommu_group_remove_file(struct iommu_group *group,
399 struct iommu_group_attribute *attr)
400 {
401 sysfs_remove_file(&group->kobj, &attr->attr);
402 }
403
404 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
405 {
406 return sprintf(buf, "%s\n", group->name);
407 }
408
409 /**
410 * iommu_insert_resv_region - Insert a new region in the
411 * list of reserved regions.
412 * @new: new region to insert
413 * @regions: list of regions
414 *
415 * Elements are sorted by start address and overlapping segments
416 * of the same type are merged.
417 */
418 static int iommu_insert_resv_region(struct iommu_resv_region *new,
419 struct list_head *regions)
420 {
421 struct iommu_resv_region *iter, *tmp, *nr, *top;
422 LIST_HEAD(stack);
423
424 nr = iommu_alloc_resv_region(new->start, new->length,
425 new->prot, new->type);
426 if (!nr)
427 return -ENOMEM;
428
429 /* First add the new element based on start address sorting */
430 list_for_each_entry(iter, regions, list) {
431 if (nr->start < iter->start ||
432 (nr->start == iter->start && nr->type <= iter->type))
433 break;
434 }
435 list_add_tail(&nr->list, &iter->list);
436
437 /* Merge overlapping segments of type nr->type in @regions, if any */
438 list_for_each_entry_safe(iter, tmp, regions, list) {
439 phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
440
441 /* no merge needed on elements of different types than @new */
442 if (iter->type != new->type) {
443 list_move_tail(&iter->list, &stack);
444 continue;
445 }
446
447 /* look for the last stack element of same type as @iter */
448 list_for_each_entry_reverse(top, &stack, list)
449 if (top->type == iter->type)
450 goto check_overlap;
451
452 list_move_tail(&iter->list, &stack);
453 continue;
454
455 check_overlap:
456 top_end = top->start + top->length - 1;
457
458 if (iter->start > top_end + 1) {
459 list_move_tail(&iter->list, &stack);
460 } else {
461 top->length = max(top_end, iter_end) - top->start + 1;
462 list_del(&iter->list);
463 kfree(iter);
464 }
465 }
466 list_splice(&stack, regions);
467 return 0;
468 }
469
470 static int
471 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
472 struct list_head *group_resv_regions)
473 {
474 struct iommu_resv_region *entry;
475 int ret = 0;
476
477 list_for_each_entry(entry, dev_resv_regions, list) {
478 ret = iommu_insert_resv_region(entry, group_resv_regions);
479 if (ret)
480 break;
481 }
482 return ret;
483 }
484
485 int iommu_get_group_resv_regions(struct iommu_group *group,
486 struct list_head *head)
487 {
488 struct group_device *device;
489 int ret = 0;
490
491 mutex_lock(&group->mutex);
492 list_for_each_entry(device, &group->devices, list) {
493 struct list_head dev_resv_regions;
494
495 INIT_LIST_HEAD(&dev_resv_regions);
496 iommu_get_resv_regions(device->dev, &dev_resv_regions);
497 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
498 iommu_put_resv_regions(device->dev, &dev_resv_regions);
499 if (ret)
500 break;
501 }
502 mutex_unlock(&group->mutex);
503 return ret;
504 }
505 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
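
/*
 * Illustrative sketch (not part of the original file): a caller provides a
 * local list head, lets this helper fill it with the merged regions of all
 * devices in the group, and owns (and must free) the resulting entries, as
 * the sysfs handler below does.
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_group_resv_regions(group, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		pr_info("resv: %pa + %zx\n", &region->start, region->length);
 */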
506
507 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
508 char *buf)
509 {
510 struct iommu_resv_region *region, *next;
511 struct list_head group_resv_regions;
512 char *str = buf;
513
514 INIT_LIST_HEAD(&group_resv_regions);
515 iommu_get_group_resv_regions(group, &group_resv_regions);
516
517 list_for_each_entry_safe(region, next, &group_resv_regions, list) {
518 str += sprintf(str, "0x%016llx 0x%016llx %s\n",
519 (long long int)region->start,
520 (long long int)(region->start +
521 region->length - 1),
522 iommu_group_resv_type_string[region->type]);
523 kfree(region);
524 }
525
526 return (str - buf);
527 }
528
529 static ssize_t iommu_group_show_type(struct iommu_group *group,
530 char *buf)
531 {
532 char *type = "unknown\n";
533
534 mutex_lock(&group->mutex);
535 if (group->default_domain) {
536 switch (group->default_domain->type) {
537 case IOMMU_DOMAIN_BLOCKED:
538 type = "blocked\n";
539 break;
540 case IOMMU_DOMAIN_IDENTITY:
541 type = "identity\n";
542 break;
543 case IOMMU_DOMAIN_UNMANAGED:
544 type = "unmanaged\n";
545 break;
546 case IOMMU_DOMAIN_DMA:
547 type = "DMA\n";
548 break;
549 }
550 }
551 mutex_unlock(&group->mutex);
552 strcpy(buf, type);
553
554 return strlen(type);
555 }
556
557 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
558
559 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
560 iommu_group_show_resv_regions, NULL);
561
562 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
563 iommu_group_store_type);
564
565 static void iommu_group_release(struct kobject *kobj)
566 {
567 struct iommu_group *group = to_iommu_group(kobj);
568
569 pr_debug("Releasing group %d\n", group->id);
570
571 if (group->iommu_data_release)
572 group->iommu_data_release(group->iommu_data);
573
574 ida_simple_remove(&iommu_group_ida, group->id);
575
576 if (group->default_domain)
577 iommu_domain_free(group->default_domain);
578
579 kfree(group->name);
580 kfree(group);
581 }
582
583 static struct kobj_type iommu_group_ktype = {
584 .sysfs_ops = &iommu_group_sysfs_ops,
585 .release = iommu_group_release,
586 };
587
588 /**
589 * iommu_group_alloc - Allocate a new group
590 *
591 * This function is called by an iommu driver to allocate a new iommu
592 * group. The iommu group represents the minimum granularity of the iommu.
593 * Upon successful return, the caller holds a reference to the supplied
594 * group in order to hold the group until devices are added. Use
595 * iommu_group_put() to release this extra reference count, allowing the
596 * group to be automatically reclaimed once it has no devices or external
597 * references.
598 */
599 struct iommu_group *iommu_group_alloc(void)
600 {
601 struct iommu_group *group;
602 int ret;
603
604 group = kzalloc(sizeof(*group), GFP_KERNEL);
605 if (!group)
606 return ERR_PTR(-ENOMEM);
607
608 group->kobj.kset = iommu_group_kset;
609 mutex_init(&group->mutex);
610 INIT_LIST_HEAD(&group->devices);
611 INIT_LIST_HEAD(&group->entry);
612 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
613
614 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
615 if (ret < 0) {
616 kfree(group);
617 return ERR_PTR(ret);
618 }
619 group->id = ret;
620
621 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
622 NULL, "%d", group->id);
623 if (ret) {
624 ida_simple_remove(&iommu_group_ida, group->id);
625 kobject_put(&group->kobj);
626 return ERR_PTR(ret);
627 }
628
629 group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
630 if (!group->devices_kobj) {
631 kobject_put(&group->kobj); /* triggers .release & free */
632 return ERR_PTR(-ENOMEM);
633 }
634
635 /*
636 * The devices_kobj holds a reference on the group kobject, so
637 * as long as that exists so will the group. We can therefore
638 * use the devices_kobj for reference counting.
639 */
640 kobject_put(&group->kobj);
641
642 ret = iommu_group_create_file(group,
643 &iommu_group_attr_reserved_regions);
644 if (ret)
645 return ERR_PTR(ret);
646
647 ret = iommu_group_create_file(group, &iommu_group_attr_type);
648 if (ret)
649 return ERR_PTR(ret);
650
651 pr_debug("Allocated group %d\n", group->id);
652
653 return group;
654 }
655 EXPORT_SYMBOL_GPL(iommu_group_alloc);
656
657 struct iommu_group *iommu_group_get_by_id(int id)
658 {
659 struct kobject *group_kobj;
660 struct iommu_group *group;
661 const char *name;
662
663 if (!iommu_group_kset)
664 return NULL;
665
666 name = kasprintf(GFP_KERNEL, "%d", id);
667 if (!name)
668 return NULL;
669
670 group_kobj = kset_find_obj(iommu_group_kset, name);
671 kfree(name);
672
673 if (!group_kobj)
674 return NULL;
675
676 group = container_of(group_kobj, struct iommu_group, kobj);
677 BUG_ON(group->id != id);
678
679 kobject_get(group->devices_kobj);
680 kobject_put(&group->kobj);
681
682 return group;
683 }
684 EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
685
686 /**
687 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
688 * @group: the group
689 *
690 * iommu drivers can store data in the group for use when doing iommu
691 * operations. This function provides a way to retrieve it. Caller
692 * should hold a group reference.
693 */
694 void *iommu_group_get_iommudata(struct iommu_group *group)
695 {
696 return group->iommu_data;
697 }
698 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
699
700 /**
701 * iommu_group_set_iommudata - set iommu_data for a group
702 * @group: the group
703 * @iommu_data: new data
704 * @release: release function for iommu_data
705 *
706 * iommu drivers can store data in the group for use when doing iommu
707 * operations. This function provides a way to set the data after
708 * the group has been allocated. Caller should hold a group reference.
709 */
710 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
711 void (*release)(void *iommu_data))
712 {
713 group->iommu_data = iommu_data;
714 group->iommu_data_release = release;
715 }
716 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
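
/*
 * Illustrative sketch (not part of the original file): a driver's
 * device_group() callback may attach per-group private data together with a
 * release callback so the data is freed along with the group.
 * "my_group_data_release" is a hypothetical kfree()-style helper.
 *
 *	data = kzalloc(sizeof(*data), GFP_KERNEL);
 *	if (!data)
 *		return ERR_PTR(-ENOMEM);
 *	iommu_group_set_iommudata(group, data, my_group_data_release);
 */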
717
718 /**
719 * iommu_group_set_name - set name for a group
720 * @group: the group
721 * @name: name
722 *
723 * Allow iommu driver to set a name for a group. When set it will
724 * appear in a name attribute file under the group in sysfs.
725 */
726 int iommu_group_set_name(struct iommu_group *group, const char *name)
727 {
728 int ret;
729
730 if (group->name) {
731 iommu_group_remove_file(group, &iommu_group_attr_name);
732 kfree(group->name);
733 group->name = NULL;
734 if (!name)
735 return 0;
736 }
737
738 group->name = kstrdup(name, GFP_KERNEL);
739 if (!group->name)
740 return -ENOMEM;
741
742 ret = iommu_group_create_file(group, &iommu_group_attr_name);
743 if (ret) {
744 kfree(group->name);
745 group->name = NULL;
746 return ret;
747 }
748
749 return 0;
750 }
751 EXPORT_SYMBOL_GPL(iommu_group_set_name);
752
753 static int iommu_create_device_direct_mappings(struct iommu_group *group,
754 struct device *dev)
755 {
756 struct iommu_domain *domain = group->default_domain;
757 struct iommu_resv_region *entry;
758 struct list_head mappings;
759 unsigned long pg_size;
760 int ret = 0;
761
762 if (!domain || domain->type != IOMMU_DOMAIN_DMA)
763 return 0;
764
765 BUG_ON(!domain->pgsize_bitmap);
766
767 pg_size = 1UL << __ffs(domain->pgsize_bitmap);
768 INIT_LIST_HEAD(&mappings);
769
770 iommu_get_resv_regions(dev, &mappings);
771
772 /* We need to consider overlapping regions for different devices */
773 list_for_each_entry(entry, &mappings, list) {
774 dma_addr_t start, end, addr;
775 size_t map_size = 0;
776
777 if (domain->ops->apply_resv_region)
778 domain->ops->apply_resv_region(dev, domain, entry);
779
780 start = ALIGN(entry->start, pg_size);
781 end = ALIGN(entry->start + entry->length, pg_size);
782
783 if (entry->type != IOMMU_RESV_DIRECT &&
784 entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
785 continue;
786
787 for (addr = start; addr <= end; addr += pg_size) {
788 phys_addr_t phys_addr;
789
790 if (addr == end)
791 goto map_end;
792
793 phys_addr = iommu_iova_to_phys(domain, addr);
794 if (!phys_addr) {
795 map_size += pg_size;
796 continue;
797 }
798
799 map_end:
800 if (map_size) {
801 ret = iommu_map(domain, addr - map_size,
802 addr - map_size, map_size,
803 entry->prot);
804 if (ret)
805 goto out;
806 map_size = 0;
807 }
808 }
809
810 }
811
812 iommu_flush_iotlb_all(domain);
813
814 out:
815 iommu_put_resv_regions(dev, &mappings);
816
817 return ret;
818 }
819
820 static bool iommu_is_attach_deferred(struct iommu_domain *domain,
821 struct device *dev)
822 {
823 if (domain->ops->is_attach_deferred)
824 return domain->ops->is_attach_deferred(domain, dev);
825
826 return false;
827 }
828
829 /**
830 * iommu_group_add_device - add a device to an iommu group
831 * @group: the group into which to add the device (reference should be held)
832 * @dev: the device
833 *
834 * This function is called by an iommu driver to add a device into a
835 * group. Adding a device increments the group reference count.
836 */
837 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
838 {
839 int ret, i = 0;
840 struct group_device *device;
841
842 device = kzalloc(sizeof(*device), GFP_KERNEL);
843 if (!device)
844 return -ENOMEM;
845
846 device->dev = dev;
847
848 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
849 if (ret)
850 goto err_free_device;
851
852 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
853 rename:
854 if (!device->name) {
855 ret = -ENOMEM;
856 goto err_remove_link;
857 }
858
859 ret = sysfs_create_link_nowarn(group->devices_kobj,
860 &dev->kobj, device->name);
861 if (ret) {
862 if (ret == -EEXIST && i >= 0) {
863 /*
864 * Account for the slim chance of collision
865 * and append an instance to the name.
866 */
867 kfree(device->name);
868 device->name = kasprintf(GFP_KERNEL, "%s.%d",
869 kobject_name(&dev->kobj), i++);
870 goto rename;
871 }
872 goto err_free_name;
873 }
874
875 kobject_get(group->devices_kobj);
876
877 dev->iommu_group = group;
878
879 mutex_lock(&group->mutex);
880 list_add_tail(&device->list, &group->devices);
881 if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
882 ret = __iommu_attach_device(group->domain, dev);
883 mutex_unlock(&group->mutex);
884 if (ret)
885 goto err_put_group;
886
887 /* Notify any listeners about change to group. */
888 blocking_notifier_call_chain(&group->notifier,
889 IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
890
891 trace_add_device_to_group(group->id, dev);
892
893 dev_info(dev, "Adding to iommu group %d\n", group->id);
894
895 return 0;
896
897 err_put_group:
898 mutex_lock(&group->mutex);
899 list_del(&device->list);
900 mutex_unlock(&group->mutex);
901 dev->iommu_group = NULL;
902 kobject_put(group->devices_kobj);
903 sysfs_remove_link(group->devices_kobj, device->name);
904 err_free_name:
905 kfree(device->name);
906 err_remove_link:
907 sysfs_remove_link(&dev->kobj, "iommu_group");
908 err_free_device:
909 kfree(device);
910 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
911 return ret;
912 }
913 EXPORT_SYMBOL_GPL(iommu_group_add_device);
914
915 /**
916 * iommu_group_remove_device - remove a device from its current group
917 * @dev: device to be removed
918 *
919 * This function is called by an iommu driver to remove the device from
920 * its current group. This decrements the iommu group reference count.
921 */
922 void iommu_group_remove_device(struct device *dev)
923 {
924 struct iommu_group *group = dev->iommu_group;
925 struct group_device *tmp_device, *device = NULL;
926
927 dev_info(dev, "Removing from iommu group %d\n", group->id);
928
929 /* Pre-notify listeners that a device is being removed. */
930 blocking_notifier_call_chain(&group->notifier,
931 IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
932
933 mutex_lock(&group->mutex);
934 list_for_each_entry(tmp_device, &group->devices, list) {
935 if (tmp_device->dev == dev) {
936 device = tmp_device;
937 list_del(&device->list);
938 break;
939 }
940 }
941 mutex_unlock(&group->mutex);
942
943 if (!device)
944 return;
945
946 sysfs_remove_link(group->devices_kobj, device->name);
947 sysfs_remove_link(&dev->kobj, "iommu_group");
948
949 trace_remove_device_from_group(group->id, dev);
950
951 kfree(device->name);
952 kfree(device);
953 dev->iommu_group = NULL;
954 kobject_put(group->devices_kobj);
955 }
956 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
957
958 static int iommu_group_device_count(struct iommu_group *group)
959 {
960 struct group_device *entry;
961 int ret = 0;
962
963 list_for_each_entry(entry, &group->devices, list)
964 ret++;
965
966 return ret;
967 }
968
969 /**
970 * iommu_group_for_each_dev - iterate over each device in the group
971 * @group: the group
972 * @data: caller opaque data to be passed to callback function
973 * @fn: caller supplied callback function
974 *
975 * This function is called by group users to iterate over group devices.
976 * Callers should hold a reference count to the group during callback.
977 * The group->mutex is held across callbacks, which will block calls to
978 * iommu_group_add/remove_device.
979 */
980 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
981 int (*fn)(struct device *, void *))
982 {
983 struct group_device *device;
984 int ret = 0;
985
986 list_for_each_entry(device, &group->devices, list) {
987 ret = fn(device->dev, data);
988 if (ret)
989 break;
990 }
991 return ret;
992 }
993
994
995 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
996 int (*fn)(struct device *, void *))
997 {
998 int ret;
999
1000 mutex_lock(&group->mutex);
1001 ret = __iommu_group_for_each_dev(group, data, fn);
1002 mutex_unlock(&group->mutex);
1003
1004 return ret;
1005 }
1006 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
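
/*
 * Illustrative sketch (not part of the original file): the callback gets each
 * member device plus the opaque data pointer and stops the walk by returning
 * a non-zero value. "count_dev" is a hypothetical callback.
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_dev);
 */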
1007
1008 /**
1009 * iommu_group_get - Return the group for a device and increment reference
1010 * @dev: get the group that this device belongs to
1011 *
1012 * This function is called by iommu drivers and users to get the group
1013 * for the specified device. If found, the group is returned and the group
1014 * reference is incremented, else NULL.
1015 */
1016 struct iommu_group *iommu_group_get(struct device *dev)
1017 {
1018 struct iommu_group *group = dev->iommu_group;
1019
1020 if (group)
1021 kobject_get(group->devices_kobj);
1022
1023 return group;
1024 }
1025 EXPORT_SYMBOL_GPL(iommu_group_get);
1026
1027 /**
1028 * iommu_group_ref_get - Increment reference on a group
1029 * @group: the group to use, must not be NULL
1030 *
1031 * This function is called by iommu drivers to take additional references on an
1032 * existing group. Returns the given group for convenience.
1033 */
1034 struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
1035 {
1036 kobject_get(group->devices_kobj);
1037 return group;
1038 }
1039 EXPORT_SYMBOL_GPL(iommu_group_ref_get);
1040
1041 /**
1042 * iommu_group_put - Decrement group reference
1043 * @group: the group to use
1044 *
1045 * This function is called by iommu drivers and users to release the
1046 * iommu group. Once the reference count is zero, the group is released.
1047 */
1048 void iommu_group_put(struct iommu_group *group)
1049 {
1050 if (group)
1051 kobject_put(group->devices_kobj);
1052 }
1053 EXPORT_SYMBOL_GPL(iommu_group_put);
1054
1055 /**
1056 * iommu_group_register_notifier - Register a notifier for group changes
1057 * @group: the group to watch
1058 * @nb: notifier block to signal
1059 *
1060 * This function allows iommu group users to track changes in a group.
1061 * See include/linux/iommu.h for actions sent via this notifier. Caller
1062 * should hold a reference to the group throughout notifier registration.
1063 */
1064 int iommu_group_register_notifier(struct iommu_group *group,
1065 struct notifier_block *nb)
1066 {
1067 return blocking_notifier_chain_register(&group->notifier, nb);
1068 }
1069 EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
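
/*
 * Illustrative sketch (not part of the original file): a group user embeds a
 * struct notifier_block, fills in ->notifier_call and registers it; the
 * callback then receives the IOMMU_GROUP_NOTIFY_* action and the affected
 * device. "my_group_notifier" is hypothetical.
 *
 *	my_nb.notifier_call = my_group_notifier;
 *	ret = iommu_group_register_notifier(group, &my_nb);
 */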
1070
1071 /**
1072 * iommu_group_unregister_notifier - Unregister a notifier
1073 * @group: the group to watch
1074 * @nb: notifier block to signal
1075 *
1076 * Unregister a previously registered group notifier block.
1077 */
1078 int iommu_group_unregister_notifier(struct iommu_group *group,
1079 struct notifier_block *nb)
1080 {
1081 return blocking_notifier_chain_unregister(&group->notifier, nb);
1082 }
1083 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
1084
1085 /**
1086 * iommu_register_device_fault_handler() - Register a device fault handler
1087 * @dev: the device
1088 * @handler: the fault handler
1089 * @data: private data passed as argument to the handler
1090 *
1091 * When an IOMMU fault event is received, this handler gets called with the
1092 * fault event and data as argument. The handler should return 0 on success. If
1093 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
1094 * complete the fault by calling iommu_page_response() with one of the following
1095 * response codes:
1096 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
1097 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
1098 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
1099 * page faults if possible.
1100 *
1101 * Return 0 if the fault handler was installed successfully, or an error.
1102 */
1103 int iommu_register_device_fault_handler(struct device *dev,
1104 iommu_dev_fault_handler_t handler,
1105 void *data)
1106 {
1107 struct dev_iommu *param = dev->iommu;
1108 int ret = 0;
1109
1110 if (!param)
1111 return -EINVAL;
1112
1113 mutex_lock(&param->lock);
1114 /* Only allow one fault handler registered for each device */
1115 if (param->fault_param) {
1116 ret = -EBUSY;
1117 goto done_unlock;
1118 }
1119
1120 get_device(dev);
1121 param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
1122 if (!param->fault_param) {
1123 put_device(dev);
1124 ret = -ENOMEM;
1125 goto done_unlock;
1126 }
1127 param->fault_param->handler = handler;
1128 param->fault_param->data = data;
1129 mutex_init(&param->fault_param->lock);
1130 INIT_LIST_HEAD(&param->fault_param->faults);
1131
1132 done_unlock:
1133 mutex_unlock(&param->lock);
1134
1135 return ret;
1136 }
1137 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
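
/*
 * Illustrative sketch (not part of the original file): the handler has the
 * iommu_dev_fault_handler_t signature, i.e. it receives the struct
 * iommu_fault and the private data registered here. "my_fault_handler" and
 * "drvdata" are hypothetical.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		... handle or queue the fault ...
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_fault_handler, drvdata);
 */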
1138
1139 /**
1140 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
1141 * @dev: the device
1142 *
1143 * Remove the device fault handler installed with
1144 * iommu_register_device_fault_handler().
1145 *
1146 * Return 0 on success, or an error.
1147 */
1148 int iommu_unregister_device_fault_handler(struct device *dev)
1149 {
1150 struct dev_iommu *param = dev->iommu;
1151 int ret = 0;
1152
1153 if (!param)
1154 return -EINVAL;
1155
1156 mutex_lock(&param->lock);
1157
1158 if (!param->fault_param)
1159 goto unlock;
1160
1161 /* we cannot unregister handler if there are pending faults */
1162 if (!list_empty(&param->fault_param->faults)) {
1163 ret = -EBUSY;
1164 goto unlock;
1165 }
1166
1167 kfree(param->fault_param);
1168 param->fault_param = NULL;
1169 put_device(dev);
1170 unlock:
1171 mutex_unlock(&param->lock);
1172
1173 return ret;
1174 }
1175 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
1176
1177 /**
1178 * iommu_report_device_fault() - Report fault event to device driver
1179 * @dev: the device
1180 * @evt: fault event data
1181 *
1182 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1183 * handler. When this function fails and the fault is recoverable, it is the
1184 * caller's responsibility to complete the fault.
1185 *
1186 * Return 0 on success, or an error.
1187 */
1188 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
1189 {
1190 struct dev_iommu *param = dev->iommu;
1191 struct iommu_fault_event *evt_pending = NULL;
1192 struct iommu_fault_param *fparam;
1193 int ret = 0;
1194
1195 if (!param || !evt)
1196 return -EINVAL;
1197
1198 /* we only report device fault if there is a handler registered */
1199 mutex_lock(&param->lock);
1200 fparam = param->fault_param;
1201 if (!fparam || !fparam->handler) {
1202 ret = -EINVAL;
1203 goto done_unlock;
1204 }
1205
1206 if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1207 (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1208 evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1209 GFP_KERNEL);
1210 if (!evt_pending) {
1211 ret = -ENOMEM;
1212 goto done_unlock;
1213 }
1214 mutex_lock(&fparam->lock);
1215 list_add_tail(&evt_pending->list, &fparam->faults);
1216 mutex_unlock(&fparam->lock);
1217 }
1218
1219 ret = fparam->handler(&evt->fault, fparam->data);
1220 if (ret && evt_pending) {
1221 mutex_lock(&fparam->lock);
1222 list_del(&evt_pending->list);
1223 mutex_unlock(&fparam->lock);
1224 kfree(evt_pending);
1225 }
1226 done_unlock:
1227 mutex_unlock(&param->lock);
1228 return ret;
1229 }
1230 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1231
1232 int iommu_page_response(struct device *dev,
1233 struct iommu_page_response *msg)
1234 {
1235 bool needs_pasid;
1236 int ret = -EINVAL;
1237 struct iommu_fault_event *evt;
1238 struct iommu_fault_page_request *prm;
1239 struct dev_iommu *param = dev->iommu;
1240 bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
1241 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1242
1243 if (!domain || !domain->ops->page_response)
1244 return -ENODEV;
1245
1246 if (!param || !param->fault_param)
1247 return -EINVAL;
1248
1249 if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1250 msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1251 return -EINVAL;
1252
1253 /* Only send response if there is a fault report pending */
1254 mutex_lock(&param->fault_param->lock);
1255 if (list_empty(&param->fault_param->faults)) {
1256 dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1257 goto done_unlock;
1258 }
1259 /*
1260 * Check if we have a matching page request pending to respond,
1261 * otherwise return -EINVAL
1262 */
1263 list_for_each_entry(evt, &param->fault_param->faults, list) {
1264 prm = &evt->fault.prm;
1265 if (prm->grpid != msg->grpid)
1266 continue;
1267
1268 /*
1269 * If the PASID is required, the corresponding request is
1270 * matched using the group ID, the PASID valid bit and the PASID
1271 * value. Otherwise only the group ID matches request and
1272 * response.
1273 */
1274 needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
1275 if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
1276 continue;
1277
1278 if (!needs_pasid && has_pasid) {
1279 /* No big deal, just clear it. */
1280 msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
1281 msg->pasid = 0;
1282 }
1283
1284 ret = domain->ops->page_response(dev, evt, msg);
1285 list_del(&evt->list);
1286 kfree(evt);
1287 break;
1288 }
1289
1290 done_unlock:
1291 mutex_unlock(&param->fault_param->lock);
1292 return ret;
1293 }
1294 EXPORT_SYMBOL_GPL(iommu_page_response);
1295
1296 /**
1297 * iommu_group_id - Return ID for a group
1298 * @group: the group to ID
1299 *
1300 * Return the unique ID for the group matching the sysfs group number.
1301 */
1302 int iommu_group_id(struct iommu_group *group)
1303 {
1304 return group->id;
1305 }
1306 EXPORT_SYMBOL_GPL(iommu_group_id);
1307
1308 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1309 unsigned long *devfns);
1310
1311 /*
1312 * To consider a PCI device isolated, we require ACS to support Source
1313 * Validation, Request Redirection, Completer Redirection, and Upstream
1314 * Forwarding. This effectively means that devices cannot spoof their
1315 * requester ID, requests and completions cannot be redirected, and all
1316 * transactions are forwarded upstream, even as it passes through a
1317 * bridge where the target device is downstream.
1318 */
1319 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1320
1321 /*
1322 * For multifunction devices which are not isolated from each other, find
1323 * all the other non-isolated functions and look for existing groups. For
1324 * each function, we also need to look for aliases to or from other devices
1325 * that may already have a group.
1326 */
1327 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1328 unsigned long *devfns)
1329 {
1330 struct pci_dev *tmp = NULL;
1331 struct iommu_group *group;
1332
1333 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1334 return NULL;
1335
1336 for_each_pci_dev(tmp) {
1337 if (tmp == pdev || tmp->bus != pdev->bus ||
1338 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1339 pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1340 continue;
1341
1342 group = get_pci_alias_group(tmp, devfns);
1343 if (group) {
1344 pci_dev_put(tmp);
1345 return group;
1346 }
1347 }
1348
1349 return NULL;
1350 }
1351
1352 /*
1353 * Look for aliases to or from the given device for existing groups. DMA
1354 * aliases are only supported on the same bus, therefore the search
1355 * space is quite small (especially since we're really only looking at PCIe
1356 * devices, and therefore only expect multiple slots on the root complex or
1357 * downstream switch ports). It's conceivable though that a pair of
1358 * multifunction devices could have aliases between them that would cause a
1359 * loop. To prevent this, we use a bitmap to track where we've been.
1360 */
1361 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1362 unsigned long *devfns)
1363 {
1364 struct pci_dev *tmp = NULL;
1365 struct iommu_group *group;
1366
1367 if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1368 return NULL;
1369
1370 group = iommu_group_get(&pdev->dev);
1371 if (group)
1372 return group;
1373
1374 for_each_pci_dev(tmp) {
1375 if (tmp == pdev || tmp->bus != pdev->bus)
1376 continue;
1377
1378 /* We alias them or they alias us */
1379 if (pci_devs_are_dma_aliases(pdev, tmp)) {
1380 group = get_pci_alias_group(tmp, devfns);
1381 if (group) {
1382 pci_dev_put(tmp);
1383 return group;
1384 }
1385
1386 group = get_pci_function_alias_group(tmp, devfns);
1387 if (group) {
1388 pci_dev_put(tmp);
1389 return group;
1390 }
1391 }
1392 }
1393
1394 return NULL;
1395 }
1396
1397 struct group_for_pci_data {
1398 struct pci_dev *pdev;
1399 struct iommu_group *group;
1400 };
1401
1402 /*
1403 * DMA alias iterator callback, return the last seen device. Stop and return
1404 * the IOMMU group if we find one along the way.
1405 */
1406 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1407 {
1408 struct group_for_pci_data *data = opaque;
1409
1410 data->pdev = pdev;
1411 data->group = iommu_group_get(&pdev->dev);
1412
1413 return data->group != NULL;
1414 }
1415
1416 /*
1417 * Generic device_group call-back function. It just allocates one
1418 * iommu-group per device.
1419 */
1420 struct iommu_group *generic_device_group(struct device *dev)
1421 {
1422 return iommu_group_alloc();
1423 }
1424 EXPORT_SYMBOL_GPL(generic_device_group);
1425
1426 /*
1427 * Use standard PCI bus topology, isolation features, and DMA alias quirks
1428 * to find or create an IOMMU group for a device.
1429 */
1430 struct iommu_group *pci_device_group(struct device *dev)
1431 {
1432 struct pci_dev *pdev = to_pci_dev(dev);
1433 struct group_for_pci_data data;
1434 struct pci_bus *bus;
1435 struct iommu_group *group = NULL;
1436 u64 devfns[4] = { 0 };
1437
1438 if (WARN_ON(!dev_is_pci(dev)))
1439 return ERR_PTR(-EINVAL);
1440
1441 /*
1442 * Find the upstream DMA alias for the device. A device must not
1443 * be aliased due to topology in order to have its own IOMMU group.
1444 * If we find an alias along the way that already belongs to a
1445 * group, use it.
1446 */
1447 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1448 return data.group;
1449
1450 pdev = data.pdev;
1451
1452 /*
1453 * Continue upstream from the point of minimum IOMMU granularity
1454 * due to aliases to the point where devices are protected from
1455 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
1456 * group, use it.
1457 */
1458 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1459 if (!bus->self)
1460 continue;
1461
1462 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1463 break;
1464
1465 pdev = bus->self;
1466
1467 group = iommu_group_get(&pdev->dev);
1468 if (group)
1469 return group;
1470 }
1471
1472 /*
1473 * Look for existing groups on device aliases. If we alias another
1474 * device or another device aliases us, use the same group.
1475 */
1476 group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1477 if (group)
1478 return group;
1479
1480 /*
1481 * Look for existing groups on non-isolated functions on the same
1482 * slot and aliases of those functions, if any. No need to clear
1483 * the search bitmap, the tested devfns are still valid.
1484 */
1485 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1486 if (group)
1487 return group;
1488
1489 /* No shared group found, allocate new */
1490 return iommu_group_alloc();
1491 }
1492 EXPORT_SYMBOL_GPL(pci_device_group);
1493
1494 /* Get the IOMMU group for device on fsl-mc bus */
1495 struct iommu_group *fsl_mc_device_group(struct device *dev)
1496 {
1497 struct device *cont_dev = fsl_mc_cont_dev(dev);
1498 struct iommu_group *group;
1499
1500 group = iommu_group_get(cont_dev);
1501 if (!group)
1502 group = iommu_group_alloc();
1503 return group;
1504 }
1505 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1506
1507 static int iommu_get_def_domain_type(struct device *dev)
1508 {
1509 const struct iommu_ops *ops = dev->bus->iommu_ops;
1510
1511 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
1512 return IOMMU_DOMAIN_DMA;
1513
1514 if (ops->def_domain_type)
1515 return ops->def_domain_type(dev);
1516
1517 return 0;
1518 }
1519
1520 static int iommu_group_alloc_default_domain(struct bus_type *bus,
1521 struct iommu_group *group,
1522 unsigned int type)
1523 {
1524 struct iommu_domain *dom;
1525
1526 dom = __iommu_domain_alloc(bus, type);
1527 if (!dom && type != IOMMU_DOMAIN_DMA) {
1528 dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
1529 if (dom)
1530 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
1531 type, group->name);
1532 }
1533
1534 if (!dom)
1535 return -ENOMEM;
1536
1537 group->default_domain = dom;
1538 if (!group->domain)
1539 group->domain = dom;
1540 return 0;
1541 }
1542
1543 static int iommu_alloc_default_domain(struct iommu_group *group,
1544 struct device *dev)
1545 {
1546 unsigned int type;
1547
1548 if (group->default_domain)
1549 return 0;
1550
1551 type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;
1552
1553 return iommu_group_alloc_default_domain(dev->bus, group, type);
1554 }
1555
1556 /**
1557 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1558 * @dev: target device
1559 *
1560 * This function is intended to be called by IOMMU drivers and extended to
1561 * support common, bus-defined algorithms when determining or creating the
1562 * IOMMU group for a device. On success, the caller will hold a reference
1563 * to the returned IOMMU group, which will already include the provided
1564 * device. The reference should be released with iommu_group_put().
1565 */
1566 static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1567 {
1568 const struct iommu_ops *ops = dev->bus->iommu_ops;
1569 struct iommu_group *group;
1570 int ret;
1571
1572 group = iommu_group_get(dev);
1573 if (group)
1574 return group;
1575
1576 if (!ops)
1577 return ERR_PTR(-EINVAL);
1578
1579 group = ops->device_group(dev);
1580 if (WARN_ON_ONCE(group == NULL))
1581 return ERR_PTR(-EINVAL);
1582
1583 if (IS_ERR(group))
1584 return group;
1585
1586 ret = iommu_group_add_device(group, dev);
1587 if (ret)
1588 goto out_put_group;
1589
1590 return group;
1591
1592 out_put_group:
1593 iommu_group_put(group);
1594
1595 return ERR_PTR(ret);
1596 }
1597
1598 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1599 {
1600 return group->default_domain;
1601 }
1602
1603 static int probe_iommu_group(struct device *dev, void *data)
1604 {
1605 struct list_head *group_list = data;
1606 struct iommu_group *group;
1607 int ret;
1608
1609 /* Device is probed already if in a group */
1610 group = iommu_group_get(dev);
1611 if (group) {
1612 iommu_group_put(group);
1613 return 0;
1614 }
1615
1616 ret = __iommu_probe_device(dev, group_list);
1617 if (ret == -ENODEV)
1618 ret = 0;
1619
1620 return ret;
1621 }
1622
1623 static int remove_iommu_group(struct device *dev, void *data)
1624 {
1625 iommu_release_device(dev);
1626
1627 return 0;
1628 }
1629
1630 static int iommu_bus_notifier(struct notifier_block *nb,
1631 unsigned long action, void *data)
1632 {
1633 unsigned long group_action = 0;
1634 struct device *dev = data;
1635 struct iommu_group *group;
1636
1637 /*
1638 * ADD/DEL call into iommu driver ops if provided, which may
1639 * result in ADD/DEL notifiers to group->notifier
1640 */
1641 if (action == BUS_NOTIFY_ADD_DEVICE) {
1642 int ret;
1643
1644 ret = iommu_probe_device(dev);
1645 return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1646 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1647 iommu_release_device(dev);
1648 return NOTIFY_OK;
1649 }
1650
1651 /*
1652 * Remaining BUS_NOTIFYs get filtered and republished to the
1653 * group, if anyone is listening
1654 */
1655 group = iommu_group_get(dev);
1656 if (!group)
1657 return 0;
1658
1659 switch (action) {
1660 case BUS_NOTIFY_BIND_DRIVER:
1661 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1662 break;
1663 case BUS_NOTIFY_BOUND_DRIVER:
1664 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1665 break;
1666 case BUS_NOTIFY_UNBIND_DRIVER:
1667 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1668 break;
1669 case BUS_NOTIFY_UNBOUND_DRIVER:
1670 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1671 break;
1672 }
1673
1674 if (group_action)
1675 blocking_notifier_call_chain(&group->notifier,
1676 group_action, dev);
1677
1678 iommu_group_put(group);
1679 return 0;
1680 }
1681
1682 struct __group_domain_type {
1683 struct device *dev;
1684 unsigned int type;
1685 };
1686
1687 static int probe_get_default_domain_type(struct device *dev, void *data)
1688 {
1689 struct __group_domain_type *gtype = data;
1690 unsigned int type = iommu_get_def_domain_type(dev);
1691
1692 if (type) {
1693 if (gtype->type && gtype->type != type) {
1694 dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1695 iommu_domain_type_str(type),
1696 dev_name(gtype->dev),
1697 iommu_domain_type_str(gtype->type));
1698 gtype->type = 0;
1699 }
1700
1701 if (!gtype->dev) {
1702 gtype->dev = dev;
1703 gtype->type = type;
1704 }
1705 }
1706
1707 return 0;
1708 }
1709
1710 static void probe_alloc_default_domain(struct bus_type *bus,
1711 struct iommu_group *group)
1712 {
1713 struct __group_domain_type gtype;
1714
1715 memset(&gtype, 0, sizeof(gtype));
1716
1717 /* Ask for default domain requirements of all devices in the group */
1718 __iommu_group_for_each_dev(group, &gtype,
1719 probe_get_default_domain_type);
1720
1721 if (!gtype.type)
1722 gtype.type = iommu_def_domain_type;
1723
1724 iommu_group_alloc_default_domain(bus, group, gtype.type);
1725
1726 }
1727
1728 static int iommu_group_do_dma_attach(struct device *dev, void *data)
1729 {
1730 struct iommu_domain *domain = data;
1731 int ret = 0;
1732
1733 if (!iommu_is_attach_deferred(domain, dev))
1734 ret = __iommu_attach_device(domain, dev);
1735
1736 return ret;
1737 }
1738
1739 static int __iommu_group_dma_attach(struct iommu_group *group)
1740 {
1741 return __iommu_group_for_each_dev(group, group->default_domain,
1742 iommu_group_do_dma_attach);
1743 }
1744
1745 static int iommu_group_do_probe_finalize(struct device *dev, void *data)
1746 {
1747 struct iommu_domain *domain = data;
1748
1749 if (domain->ops->probe_finalize)
1750 domain->ops->probe_finalize(dev);
1751
1752 return 0;
1753 }
1754
1755 static void __iommu_group_dma_finalize(struct iommu_group *group)
1756 {
1757 __iommu_group_for_each_dev(group, group->default_domain,
1758 iommu_group_do_probe_finalize);
1759 }
1760
1761 static int iommu_do_create_direct_mappings(struct device *dev, void *data)
1762 {
1763 struct iommu_group *group = data;
1764
1765 iommu_create_device_direct_mappings(group, dev);
1766
1767 return 0;
1768 }
1769
1770 static int iommu_group_create_direct_mappings(struct iommu_group *group)
1771 {
1772 return __iommu_group_for_each_dev(group, group,
1773 iommu_do_create_direct_mappings);
1774 }
1775
1776 int bus_iommu_probe(struct bus_type *bus)
1777 {
1778 struct iommu_group *group, *next;
1779 LIST_HEAD(group_list);
1780 int ret;
1781
1782 /*
1783 * This code-path does not allocate the default domain when
1784 * creating the iommu group, so do it after the groups are
1785 * created.
1786 */
1787 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1788 if (ret)
1789 return ret;
1790
1791 list_for_each_entry_safe(group, next, &group_list, entry) {
1792 /* Remove item from the list */
1793 list_del_init(&group->entry);
1794
1795 mutex_lock(&group->mutex);
1796
1797 /* Try to allocate default domain */
1798 probe_alloc_default_domain(bus, group);
1799
1800 if (!group->default_domain) {
1801 mutex_unlock(&group->mutex);
1802 continue;
1803 }
1804
1805 iommu_group_create_direct_mappings(group);
1806
1807 ret = __iommu_group_dma_attach(group);
1808
1809 mutex_unlock(&group->mutex);
1810
1811 if (ret)
1812 break;
1813
1814 __iommu_group_dma_finalize(group);
1815 }
1816
1817 return ret;
1818 }
1819
1820 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1821 {
1822 struct notifier_block *nb;
1823 int err;
1824
1825 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1826 if (!nb)
1827 return -ENOMEM;
1828
1829 nb->notifier_call = iommu_bus_notifier;
1830
1831 err = bus_register_notifier(bus, nb);
1832 if (err)
1833 goto out_free;
1834
1835 err = bus_iommu_probe(bus);
1836 if (err)
1837 goto out_err;
1838
1839
1840 return 0;
1841
1842 out_err:
1843 /* Clean up */
1844 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1845 bus_unregister_notifier(bus, nb);
1846
1847 out_free:
1848 kfree(nb);
1849
1850 return err;
1851 }
1852
1853 /**
1854 * bus_set_iommu - set iommu-callbacks for the bus
1855 * @bus: bus.
1856 * @ops: the callbacks provided by the iommu-driver
1857 *
1858 * This function is called by an iommu driver to set the iommu methods
1859 * used for a particular bus. Drivers for devices on that bus can use
1860 * the iommu-api after these ops are registered.
1861 * This special function is needed because IOMMUs are usually devices on
1862 * the bus itself, so the iommu drivers are not initialized when the bus
1863 * is set up. With this function the iommu-driver can set the iommu-ops
1864 * afterwards.
1865 */
1866 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1867 {
1868 int err;
1869
1870 if (ops == NULL) {
1871 bus->iommu_ops = NULL;
1872 return 0;
1873 }
1874
1875 if (bus->iommu_ops != NULL)
1876 return -EBUSY;
1877
1878 bus->iommu_ops = ops;
1879
1880 /* Do IOMMU specific setup for this bus-type */
1881 err = iommu_bus_init(bus, ops);
1882 if (err)
1883 bus->iommu_ops = NULL;
1884
1885 return err;
1886 }
1887 EXPORT_SYMBOL_GPL(bus_set_iommu);
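
/*
 * Illustrative sketch (not part of the original file): how an IOMMU driver
 * would register its callbacks for a bus. "example_iommu_ops" and
 * "example_register_iommu_ops" are hypothetical names; a real driver fills
 * in the ops structure with its domain_alloc/attach_dev/map/unmap/...
 * callbacks and passes the bus it actually translates (e.g. &pci_bus_type).
 */
static const struct iommu_ops example_iommu_ops = {
	/* real callbacks go here */
};

static int __maybe_unused example_register_iommu_ops(struct bus_type *bus)
{
	/* Registering a second set of ops for the same bus fails with -EBUSY. */
	return bus_set_iommu(bus, &example_iommu_ops);
}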
1888
1889 bool iommu_present(struct bus_type *bus)
1890 {
1891 return bus->iommu_ops != NULL;
1892 }
1893 EXPORT_SYMBOL_GPL(iommu_present);
1894
1895 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1896 {
1897 if (!bus->iommu_ops || !bus->iommu_ops->capable)
1898 return false;
1899
1900 return bus->iommu_ops->capable(cap);
1901 }
1902 EXPORT_SYMBOL_GPL(iommu_capable);
1903
1904 /**
1905 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1906 * @domain: iommu domain
1907 * @handler: fault handler
1908 * @token: user data, will be passed back to the fault handler
1909 *
1910 * This function should be used by IOMMU users which want to be notified
1911 * whenever an IOMMU fault happens.
1912 *
1913 * The fault handler itself should return 0 on success, and an appropriate
1914 * error code otherwise.
1915 */
1916 void iommu_set_fault_handler(struct iommu_domain *domain,
1917 iommu_fault_handler_t handler,
1918 void *token)
1919 {
1920 BUG_ON(!domain);
1921
1922 domain->handler = handler;
1923 domain->handler_token = token;
1924 }
1925 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
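
/*
 * Illustrative sketch (not part of the original file): installing a fault
 * handler on a domain. "example_fault_handler" and "example_install_handler"
 * are hypothetical names; a real user would do something more useful than
 * logging the fault.
 */
static int example_fault_handler(struct iommu_domain *domain,
				 struct device *dev, unsigned long iova,
				 int flags, void *token)
{
	dev_err(dev, "example: IOMMU fault at iova %#lx (flags %#x)\n",
		iova, flags);
	/* Returning -ENOSYS keeps the IOMMU driver's default fault behaviour. */
	return -ENOSYS;
}

static void __maybe_unused example_install_handler(struct iommu_domain *domain,
						   void *token)
{
	iommu_set_fault_handler(domain, example_fault_handler, token);
}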
1926
1927 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1928 unsigned type)
1929 {
1930 struct iommu_domain *domain;
1931
1932 if (bus == NULL || bus->iommu_ops == NULL)
1933 return NULL;
1934
1935 domain = bus->iommu_ops->domain_alloc(type);
1936 if (!domain)
1937 return NULL;
1938
1939 domain->ops = bus->iommu_ops;
1940 domain->type = type;
1941 /* Assume all sizes by default; the driver may override this later */
1942 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
1943
1944 return domain;
1945 }
1946
1947 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1948 {
1949 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1950 }
1951 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1952
1953 void iommu_domain_free(struct iommu_domain *domain)
1954 {
1955 domain->ops->domain_free(domain);
1956 }
1957 EXPORT_SYMBOL_GPL(iommu_domain_free);
1958
1959 static int __iommu_attach_device(struct iommu_domain *domain,
1960 struct device *dev)
1961 {
1962 int ret;
1963
1964 if (unlikely(domain->ops->attach_dev == NULL))
1965 return -ENODEV;
1966
1967 ret = domain->ops->attach_dev(domain, dev);
1968 if (!ret)
1969 trace_attach_device_to_domain(dev);
1970 return ret;
1971 }
1972
1973 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1974 {
1975 struct iommu_group *group;
1976 int ret;
1977
1978 group = iommu_group_get(dev);
1979 if (!group)
1980 return -ENODEV;
1981
1982 /*
1983 * Lock the group to make sure the device-count doesn't
1984 * change while we are attaching
1985 */
1986 mutex_lock(&group->mutex);
1987 ret = -EINVAL;
1988 if (iommu_group_device_count(group) != 1)
1989 goto out_unlock;
1990
1991 ret = __iommu_attach_group(domain, group);
1992
1993 out_unlock:
1994 mutex_unlock(&group->mutex);
1995 iommu_group_put(group);
1996
1997 return ret;
1998 }
1999 EXPORT_SYMBOL_GPL(iommu_attach_device);
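
/*
 * Illustrative sketch (not part of the original file): the typical life
 * cycle of an unmanaged domain for a device whose group contains only that
 * device (otherwise iommu_attach_device() returns -EINVAL and
 * iommu_attach_group() must be used instead). "example_take_ownership" is
 * a hypothetical name.
 */
static struct iommu_domain *__maybe_unused
example_take_ownership(struct device *dev)
{
	struct iommu_domain *domain;

	if (!iommu_present(dev->bus))
		return NULL;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return NULL;

	if (iommu_attach_device(domain, dev)) {
		iommu_domain_free(domain);
		return NULL;
	}

	/* ... use the domain; later iommu_detach_device() and free it ... */
	return domain;
}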
2000
2001 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
2002 {
2003 const struct iommu_ops *ops = domain->ops;
2004
2005 if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
2006 return __iommu_attach_device(domain, dev);
2007
2008 return 0;
2009 }
2010
2011 /*
2012 * Check flags and other user provided data for valid combinations. We also
2013 * make sure no reserved fields or unused flags are set. This ensures we do
2014 * not break userspace in the future when these fields or flags are used.
2015 */
2016 static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
2017 {
2018 u32 mask;
2019 int i;
2020
2021 if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
2022 return -EINVAL;
2023
2024 mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
2025 if (info->cache & ~mask)
2026 return -EINVAL;
2027
2028 if (info->granularity >= IOMMU_INV_GRANU_NR)
2029 return -EINVAL;
2030
2031 switch (info->granularity) {
2032 case IOMMU_INV_GRANU_ADDR:
2033 if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
2034 return -EINVAL;
2035
2036 mask = IOMMU_INV_ADDR_FLAGS_PASID |
2037 IOMMU_INV_ADDR_FLAGS_ARCHID |
2038 IOMMU_INV_ADDR_FLAGS_LEAF;
2039
2040 if (info->granu.addr_info.flags & ~mask)
2041 return -EINVAL;
2042 break;
2043 case IOMMU_INV_GRANU_PASID:
2044 mask = IOMMU_INV_PASID_FLAGS_PASID |
2045 IOMMU_INV_PASID_FLAGS_ARCHID;
2046 if (info->granu.pasid_info.flags & ~mask)
2047 return -EINVAL;
2048
2049 break;
2050 case IOMMU_INV_GRANU_DOMAIN:
2051 if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
2052 return -EINVAL;
2053 break;
2054 default:
2055 return -EINVAL;
2056 }
2057
2058 /* Check reserved padding fields */
2059 for (i = 0; i < sizeof(info->padding); i++) {
2060 if (info->padding[i])
2061 return -EINVAL;
2062 }
2063
2064 return 0;
2065 }
2066
2067 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
2068 void __user *uinfo)
2069 {
2070 struct iommu_cache_invalidate_info inv_info = { 0 };
2071 u32 minsz;
2072 int ret;
2073
2074 if (unlikely(!domain->ops->cache_invalidate))
2075 return -ENODEV;
2076
2077 /*
2078 * No new fields can be added before the variable sized union; the
2079 * minimum size is the offset to the union.
2080 */
2081 minsz = offsetof(struct iommu_cache_invalidate_info, granu);
2082
2083 /* Copy minsz from user to get flags and argsz */
2084 if (copy_from_user(&inv_info, uinfo, minsz))
2085 return -EFAULT;
2086
2087 /* Fields before the variable size union are mandatory */
2088 if (inv_info.argsz < minsz)
2089 return -EINVAL;
2090
2091 /* PASID and address granu require additional info beyond minsz */
2092 if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
2093 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
2094 return -EINVAL;
2095
2096 if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
2097 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
2098 return -EINVAL;
2099
2100 /*
2101 * The user might be using a newer UAPI header which has a larger data
2102 * size; we shall support the existing flags within the current
2103 * size. Copy the remaining user data _after_ minsz but not more
2104 * than the current kernel supported size.
2105 */
2106 if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
2107 min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
2108 return -EFAULT;
2109
2110 /* Now the argsz is validated, check the content */
2111 ret = iommu_check_cache_invl_data(&inv_info);
2112 if (ret)
2113 return ret;
2114
2115 return domain->ops->cache_invalidate(domain, dev, &inv_info);
2116 }
2117 EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
2118
2119 static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
2120 {
2121 u64 mask;
2122 int i;
2123
2124 if (data->version != IOMMU_GPASID_BIND_VERSION_1)
2125 return -EINVAL;
2126
2127 /* Check the range of supported formats */
2128 if (data->format >= IOMMU_PASID_FORMAT_LAST)
2129 return -EINVAL;
2130
2131 /* Check all flags */
2132 mask = IOMMU_SVA_GPASID_VAL;
2133 if (data->flags & ~mask)
2134 return -EINVAL;
2135
2136 /* Check reserved padding fields */
2137 for (i = 0; i < sizeof(data->padding); i++) {
2138 if (data->padding[i])
2139 return -EINVAL;
2140 }
2141
2142 return 0;
2143 }
2144
2145 static int iommu_sva_prepare_bind_data(void __user *udata,
2146 struct iommu_gpasid_bind_data *data)
2147 {
2148 u32 minsz;
2149
2150 /*
2151 * No new fields can be added before the variable sized union; the
2152 * minimum size is the offset to the union.
2153 */
2154 minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
2155
2156 /* Copy minsz from user to get flags and argsz */
2157 if (copy_from_user(data, udata, minsz))
2158 return -EFAULT;
2159
2160 /* Fields before the variable size union are mandatory */
2161 if (data->argsz < minsz)
2162 return -EINVAL;
2163 /*
2164 * The user might be using a newer UAPI header; let the IOMMU vendor
2165 * driver decide what size it needs. Since the guest PASID bind data
2166 * can be vendor specific, a larger argsz could be the result of an
2167 * extension for one vendor without affecting another vendor.
2168 * Copy the remaining user data _after_ minsz.
2169 */
2170 if (copy_from_user((void *)data + minsz, udata + minsz,
2171 min_t(u32, data->argsz, sizeof(*data)) - minsz))
2172 return -EFAULT;
2173
2174 return iommu_check_bind_data(data);
2175 }
2176
2177 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
2178 void __user *udata)
2179 {
2180 struct iommu_gpasid_bind_data data = { 0 };
2181 int ret;
2182
2183 if (unlikely(!domain->ops->sva_bind_gpasid))
2184 return -ENODEV;
2185
2186 ret = iommu_sva_prepare_bind_data(udata, &data);
2187 if (ret)
2188 return ret;
2189
2190 return domain->ops->sva_bind_gpasid(domain, dev, &data);
2191 }
2192 EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
2193
2194 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2195 ioasid_t pasid)
2196 {
2197 if (unlikely(!domain->ops->sva_unbind_gpasid))
2198 return -ENODEV;
2199
2200 return domain->ops->sva_unbind_gpasid(dev, pasid);
2201 }
2202 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
2203
2204 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2205 void __user *udata)
2206 {
2207 struct iommu_gpasid_bind_data data = { 0 };
2208 int ret;
2209
2210 if (unlikely(!domain->ops->sva_bind_gpasid))
2211 return -ENODEV;
2212
2213 ret = iommu_sva_prepare_bind_data(udata, &data);
2214 if (ret)
2215 return ret;
2216
2217 return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
2218 }
2219 EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
2220
2221 static void __iommu_detach_device(struct iommu_domain *domain,
2222 struct device *dev)
2223 {
2224 if (iommu_is_attach_deferred(domain, dev))
2225 return;
2226
2227 if (unlikely(domain->ops->detach_dev == NULL))
2228 return;
2229
2230 domain->ops->detach_dev(domain, dev);
2231 trace_detach_device_from_domain(dev);
2232 }
2233
2234 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2235 {
2236 struct iommu_group *group;
2237
2238 group = iommu_group_get(dev);
2239 if (!group)
2240 return;
2241
2242 mutex_lock(&group->mutex);
2243 if (iommu_group_device_count(group) != 1) {
2244 WARN_ON(1);
2245 goto out_unlock;
2246 }
2247
2248 __iommu_detach_group(domain, group);
2249
2250 out_unlock:
2251 mutex_unlock(&group->mutex);
2252 iommu_group_put(group);
2253 }
2254 EXPORT_SYMBOL_GPL(iommu_detach_device);
2255
2256 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2257 {
2258 struct iommu_domain *domain;
2259 struct iommu_group *group;
2260
2261 group = iommu_group_get(dev);
2262 if (!group)
2263 return NULL;
2264
2265 domain = group->domain;
2266
2267 iommu_group_put(group);
2268
2269 return domain;
2270 }
2271 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2272
2273 /*
2274 * Used by IOMMU_DOMAIN_DMA implementations which already provide their own
2275 * guarantee that the group and its default domain are valid and correct.
2276 */
2277 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2278 {
2279 return dev->iommu_group->default_domain;
2280 }
2281
2282 /*
2283 * IOMMU groups are really the natural working unit of the IOMMU, but
2284 * the IOMMU API works on domains and devices. Bridge that gap by
2285 * iterating over the devices in a group. Ideally we'd have a single
2286 * device which represents the requestor ID of the group, but we also
2287 * allow IOMMU drivers to create policy defined minimum sets, where
2288 * the physical hardware may be able to distinguish members, but we
2289 * wish to group them at a higher level (ex. untrusted multi-function
2290 * PCI devices). Thus we attach each device.
2291 */
2292 static int iommu_group_do_attach_device(struct device *dev, void *data)
2293 {
2294 struct iommu_domain *domain = data;
2295
2296 return __iommu_attach_device(domain, dev);
2297 }
2298
2299 static int __iommu_attach_group(struct iommu_domain *domain,
2300 struct iommu_group *group)
2301 {
2302 int ret;
2303
2304 if (group->default_domain && group->domain != group->default_domain)
2305 return -EBUSY;
2306
2307 ret = __iommu_group_for_each_dev(group, domain,
2308 iommu_group_do_attach_device);
2309 if (ret == 0)
2310 group->domain = domain;
2311
2312 return ret;
2313 }
2314
2315 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2316 {
2317 int ret;
2318
2319 mutex_lock(&group->mutex);
2320 ret = __iommu_attach_group(domain, group);
2321 mutex_unlock(&group->mutex);
2322
2323 return ret;
2324 }
2325 EXPORT_SYMBOL_GPL(iommu_attach_group);
2326
2327 static int iommu_group_do_detach_device(struct device *dev, void *data)
2328 {
2329 struct iommu_domain *domain = data;
2330
2331 __iommu_detach_device(domain, dev);
2332
2333 return 0;
2334 }
2335
2336 static void __iommu_detach_group(struct iommu_domain *domain,
2337 struct iommu_group *group)
2338 {
2339 int ret;
2340
2341 if (!group->default_domain) {
2342 __iommu_group_for_each_dev(group, domain,
2343 iommu_group_do_detach_device);
2344 group->domain = NULL;
2345 return;
2346 }
2347
2348 if (group->domain == group->default_domain)
2349 return;
2350
2351 /* Detach by re-attaching to the default domain */
2352 ret = __iommu_group_for_each_dev(group, group->default_domain,
2353 iommu_group_do_attach_device);
2354 if (ret != 0)
2355 WARN_ON(1);
2356 else
2357 group->domain = group->default_domain;
2358 }
2359
2360 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2361 {
2362 mutex_lock(&group->mutex);
2363 __iommu_detach_group(domain, group);
2364 mutex_unlock(&group->mutex);
2365 }
2366 EXPORT_SYMBOL_GPL(iommu_detach_group);
2367
2368 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2369 {
2370 if (unlikely(domain->ops->iova_to_phys == NULL))
2371 return 0;
2372
2373 return domain->ops->iova_to_phys(domain, iova);
2374 }
2375 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2376
2377 static size_t iommu_pgsize(struct iommu_domain *domain,
2378 unsigned long addr_merge, size_t size)
2379 {
2380 unsigned int pgsize_idx;
2381 size_t pgsize;
2382
2383 /* Max page size that still fits into 'size' */
2384 pgsize_idx = __fls(size);
2385
2386 /* need to consider alignment requirements ? */
2387 if (likely(addr_merge)) {
2388 /* Max page size allowed by address */
2389 unsigned int align_pgsize_idx = __ffs(addr_merge);
2390 pgsize_idx = min(pgsize_idx, align_pgsize_idx);
2391 }
2392
2393 /* build a mask of acceptable page sizes */
2394 pgsize = (1UL << (pgsize_idx + 1)) - 1;
2395
2396 /* throw away page sizes not supported by the hardware */
2397 pgsize &= domain->pgsize_bitmap;
2398
2399 /* make sure we're still sane */
2400 BUG_ON(!pgsize);
2401
2402 /* pick the biggest page */
2403 pgsize_idx = __fls(pgsize);
2404 pgsize = 1UL << pgsize_idx;
2405
2406 return pgsize;
2407 }
2408
2409 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2410 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2411 {
2412 const struct iommu_ops *ops = domain->ops;
2413 unsigned long orig_iova = iova;
2414 unsigned int min_pagesz;
2415 size_t orig_size = size;
2416 phys_addr_t orig_paddr = paddr;
2417 int ret = 0;
2418
2419 if (unlikely(ops->map == NULL ||
2420 domain->pgsize_bitmap == 0UL))
2421 return -ENODEV;
2422
2423 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2424 return -EINVAL;
2425
2426 /* find out the minimum page size supported */
2427 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2428
2429 /*
2430 * both the virtual address and the physical one, as well as
2431 * the size of the mapping, must be aligned (at least) to the
2432 * size of the smallest page supported by the hardware
2433 */
2434 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2435 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2436 iova, &paddr, size, min_pagesz);
2437 return -EINVAL;
2438 }
2439
2440 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2441
2442 while (size) {
2443 size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
2444
2445 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
2446 iova, &paddr, pgsize);
2447 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2448
2449 if (ret)
2450 break;
2451
2452 iova += pgsize;
2453 paddr += pgsize;
2454 size -= pgsize;
2455 }
2456
2457 /* unroll mapping in case something went wrong */
2458 if (ret)
2459 iommu_unmap(domain, orig_iova, orig_size - size);
2460 else
2461 trace_map(orig_iova, orig_paddr, orig_size);
2462
2463 return ret;
2464 }
2465
2466 static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
2467 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2468 {
2469 const struct iommu_ops *ops = domain->ops;
2470 int ret;
2471
2472 ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2473 if (ret == 0 && ops->iotlb_sync_map)
2474 ops->iotlb_sync_map(domain, iova, size);
2475
2476 return ret;
2477 }
2478
2479 int iommu_map(struct iommu_domain *domain, unsigned long iova,
2480 phys_addr_t paddr, size_t size, int prot)
2481 {
2482 might_sleep();
2483 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2484 }
2485 EXPORT_SYMBOL_GPL(iommu_map);
2486
2487 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2488 phys_addr_t paddr, size_t size, int prot)
2489 {
2490 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2491 }
2492 EXPORT_SYMBOL_GPL(iommu_map_atomic);
2493
2494 static size_t __iommu_unmap(struct iommu_domain *domain,
2495 unsigned long iova, size_t size,
2496 struct iommu_iotlb_gather *iotlb_gather)
2497 {
2498 const struct iommu_ops *ops = domain->ops;
2499 size_t unmapped_page, unmapped = 0;
2500 unsigned long orig_iova = iova;
2501 unsigned int min_pagesz;
2502
2503 if (unlikely(ops->unmap == NULL ||
2504 domain->pgsize_bitmap == 0UL))
2505 return 0;
2506
2507 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2508 return 0;
2509
2510 /* find out the minimum page size supported */
2511 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2512
2513 /*
2514 * The virtual address, as well as the size of the mapping, must be
2515 * aligned (at least) to the size of the smallest page supported
2516 * by the hardware
2517 */
2518 if (!IS_ALIGNED(iova | size, min_pagesz)) {
2519 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2520 iova, size, min_pagesz);
2521 return 0;
2522 }
2523
2524 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2525
2526 /*
2527 * Keep iterating until we either unmap 'size' bytes (or more)
2528 * or we hit an area that isn't mapped.
2529 */
2530 while (unmapped < size) {
2531 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
2532
2533 unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2534 if (!unmapped_page)
2535 break;
2536
2537 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2538 iova, unmapped_page);
2539
2540 iova += unmapped_page;
2541 unmapped += unmapped_page;
2542 }
2543
2544 trace_unmap(orig_iova, size, unmapped);
2545 return unmapped;
2546 }
2547
2548 size_t iommu_unmap(struct iommu_domain *domain,
2549 unsigned long iova, size_t size)
2550 {
2551 struct iommu_iotlb_gather iotlb_gather;
2552 size_t ret;
2553
2554 iommu_iotlb_gather_init(&iotlb_gather);
2555 ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2556 iommu_iotlb_sync(domain, &iotlb_gather);
2557
2558 return ret;
2559 }
2560 EXPORT_SYMBOL_GPL(iommu_unmap);
2561
2562 size_t iommu_unmap_fast(struct iommu_domain *domain,
2563 unsigned long iova, size_t size,
2564 struct iommu_iotlb_gather *iotlb_gather)
2565 {
2566 return __iommu_unmap(domain, iova, size, iotlb_gather);
2567 }
2568 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
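
/*
 * Illustrative sketch (not part of the original file): mapping and unmapping
 * one region in a domain the caller already owns. iova, paddr and size must
 * all be aligned to the smallest page size in domain->pgsize_bitmap, or
 * iommu_map() returns -EINVAL. "example_map_region" is a hypothetical name.
 */
static int __maybe_unused example_map_region(struct iommu_domain *domain,
					     unsigned long iova,
					     phys_addr_t paddr, size_t size)
{
	int ret;

	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* The translation can be queried back while the mapping exists. */
	WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

	/* iommu_unmap() returns the number of bytes actually unmapped. */
	if (iommu_unmap(domain, iova, size) != size)
		return -EIO;

	return 0;
}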
2569
2570 static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2571 struct scatterlist *sg, unsigned int nents, int prot,
2572 gfp_t gfp)
2573 {
2574 const struct iommu_ops *ops = domain->ops;
2575 size_t len = 0, mapped = 0;
2576 phys_addr_t start;
2577 unsigned int i = 0;
2578 int ret;
2579
2580 while (i <= nents) {
2581 phys_addr_t s_phys = sg_phys(sg);
2582
2583 if (len && s_phys != start + len) {
2584 ret = __iommu_map(domain, iova + mapped, start,
2585 len, prot, gfp);
2586
2587 if (ret)
2588 goto out_err;
2589
2590 mapped += len;
2591 len = 0;
2592 }
2593
2594 if (len) {
2595 len += sg->length;
2596 } else {
2597 len = sg->length;
2598 start = s_phys;
2599 }
2600
2601 if (++i < nents)
2602 sg = sg_next(sg);
2603 }
2604
2605 if (ops->iotlb_sync_map)
2606 ops->iotlb_sync_map(domain, iova, mapped);
2607 return mapped;
2608
2609 out_err:
2610 /* undo mappings already done */
2611 iommu_unmap(domain, iova, mapped);
2612
2613 return 0;
2614
2615 }
2616
2617 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2618 struct scatterlist *sg, unsigned int nents, int prot)
2619 {
2620 might_sleep();
2621 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2622 }
2623 EXPORT_SYMBOL_GPL(iommu_map_sg);
2624
2625 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2626 struct scatterlist *sg, unsigned int nents, int prot)
2627 {
2628 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2629 }
2630
2631 /**
2632 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2633 * @domain: the iommu domain where the fault has happened
2634 * @dev: the device where the fault has happened
2635 * @iova: the faulting address
2636 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2637 *
2638 * This function should be called by the low-level IOMMU implementations
2639 * whenever IOMMU faults happen, to allow high-level users, that are
2640 * interested in such events, to know about them.
2641 *
2642 * This event may be useful for several possible use cases:
2643 * - mere logging of the event
2644 * - dynamic TLB/PTE loading
2645 * - if restarting the faulting device is required
2646 *
2647 * Returns 0 on success and an appropriate error code otherwise (if dynamic
2648 * PTE/TLB loading will one day be supported, implementations will be able
2649 * to tell whether it succeeded or not according to this return value).
2650 *
2651 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2652 * (though fault handlers can also return -ENOSYS, in case they want to
2653 * elicit the default behavior of the IOMMU drivers).
2654 */
2655 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2656 unsigned long iova, int flags)
2657 {
2658 int ret = -ENOSYS;
2659
2660 /*
2661 * if upper layers showed interest and installed a fault handler,
2662 * invoke it.
2663 */
2664 if (domain->handler)
2665 ret = domain->handler(domain, dev, iova, flags,
2666 domain->handler_token);
2667
2668 trace_io_page_fault(dev, iova, flags);
2669 return ret;
2670 }
2671 EXPORT_SYMBOL_GPL(report_iommu_fault);
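
/*
 * Illustrative sketch (not part of the original file): how a low-level IOMMU
 * driver's interrupt path might forward a hardware fault to the framework.
 * "example_handle_hw_fault" is a hypothetical name.
 */
static void __maybe_unused example_handle_hw_fault(struct iommu_domain *domain,
						   struct device *dev,
						   unsigned long iova,
						   bool is_write)
{
	int flags = is_write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	/* -ENOSYS means no handler was installed (or it punted back to us). */
	if (report_iommu_fault(domain, dev, iova, flags) == -ENOSYS)
		dev_err_ratelimited(dev, "example: unhandled fault at %#lx\n",
				    iova);
}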
2672
2673 static int __init iommu_init(void)
2674 {
2675 iommu_group_kset = kset_create_and_add("iommu_groups",
2676 NULL, kernel_kobj);
2677 BUG_ON(!iommu_group_kset);
2678
2679 iommu_debugfs_setup();
2680
2681 return 0;
2682 }
2683 core_initcall(iommu_init);
2684
2685 int iommu_enable_nesting(struct iommu_domain *domain)
2686 {
2687 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2688 return -EINVAL;
2689 if (!domain->ops->enable_nesting)
2690 return -EINVAL;
2691 return domain->ops->enable_nesting(domain);
2692 }
2693 EXPORT_SYMBOL_GPL(iommu_enable_nesting);
2694
2695 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
2696 unsigned long quirk)
2697 {
2698 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2699 return -EINVAL;
2700 if (!domain->ops->set_pgtable_quirks)
2701 return -EINVAL;
2702 return domain->ops->set_pgtable_quirks(domain, quirk);
2703 }
2704 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
2705
2706 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2707 {
2708 const struct iommu_ops *ops = dev->bus->iommu_ops;
2709
2710 if (ops && ops->get_resv_regions)
2711 ops->get_resv_regions(dev, list);
2712 }
2713
2714 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2715 {
2716 const struct iommu_ops *ops = dev->bus->iommu_ops;
2717
2718 if (ops && ops->put_resv_regions)
2719 ops->put_resv_regions(dev, list);
2720 }
2721
2722 /**
2723 * generic_iommu_put_resv_regions - Reserved region driver helper
2724 * @dev: device for which to free reserved regions
2725 * @list: reserved region list for device
2726 *
2727 * IOMMU drivers can use this to implement their .put_resv_regions() callback
2728 * for simple reservations. Memory allocated for each reserved region will be
2729 * freed. If an IOMMU driver allocates additional resources per region, it is
2730 * going to have to implement a custom callback.
2731 */
2732 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
2733 {
2734 struct iommu_resv_region *entry, *next;
2735
2736 list_for_each_entry_safe(entry, next, list, list)
2737 kfree(entry);
2738 }
2739 EXPORT_SYMBOL(generic_iommu_put_resv_regions);
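
/*
 * Illustrative sketch (not part of the original file): a driver's
 * .get_resv_regions() callback reporting one software-managed MSI window,
 * paired with generic_iommu_put_resv_regions() above as its
 * .put_resv_regions(). The base address, size and "example_" name are
 * hypothetical.
 */
static void __maybe_unused example_get_resv_regions(struct device *dev,
						    struct list_head *head)
{
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(0x08000000, 0x00100000,
					 IOMMU_WRITE | IOMMU_MMIO,
					 IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
	/* Freed later through generic_iommu_put_resv_regions(). */
}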
2740
2741 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2742 size_t length, int prot,
2743 enum iommu_resv_type type)
2744 {
2745 struct iommu_resv_region *region;
2746
2747 region = kzalloc(sizeof(*region), GFP_KERNEL);
2748 if (!region)
2749 return NULL;
2750
2751 INIT_LIST_HEAD(&region->list);
2752 region->start = start;
2753 region->length = length;
2754 region->prot = prot;
2755 region->type = type;
2756 return region;
2757 }
2758 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
2759
2760 void iommu_set_default_passthrough(bool cmd_line)
2761 {
2762 if (cmd_line)
2763 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2764 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2765 }
2766
2767 void iommu_set_default_translated(bool cmd_line)
2768 {
2769 if (cmd_line)
2770 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2771 iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2772 }
2773
2774 bool iommu_default_passthrough(void)
2775 {
2776 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2777 }
2778 EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2779
2780 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2781 {
2782 const struct iommu_ops *ops = NULL;
2783 struct iommu_device *iommu;
2784
2785 spin_lock(&iommu_device_lock);
2786 list_for_each_entry(iommu, &iommu_device_list, list)
2787 if (iommu->fwnode == fwnode) {
2788 ops = iommu->ops;
2789 break;
2790 }
2791 spin_unlock(&iommu_device_lock);
2792 return ops;
2793 }
2794
2795 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2796 const struct iommu_ops *ops)
2797 {
2798 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2799
2800 if (fwspec)
2801 return ops == fwspec->ops ? 0 : -EINVAL;
2802
2803 if (!dev_iommu_get(dev))
2804 return -ENOMEM;
2805
2806 /* Preallocate for the overwhelmingly common case of 1 ID */
2807 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2808 if (!fwspec)
2809 return -ENOMEM;
2810
2811 of_node_get(to_of_node(iommu_fwnode));
2812 fwspec->iommu_fwnode = iommu_fwnode;
2813 fwspec->ops = ops;
2814 dev_iommu_fwspec_set(dev, fwspec);
2815 return 0;
2816 }
2817 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2818
2819 void iommu_fwspec_free(struct device *dev)
2820 {
2821 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2822
2823 if (fwspec) {
2824 fwnode_handle_put(fwspec->iommu_fwnode);
2825 kfree(fwspec);
2826 dev_iommu_fwspec_set(dev, NULL);
2827 }
2828 }
2829 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2830
2831 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2832 {
2833 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2834 int i, new_num;
2835
2836 if (!fwspec)
2837 return -EINVAL;
2838
2839 new_num = fwspec->num_ids + num_ids;
2840 if (new_num > 1) {
2841 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2842 GFP_KERNEL);
2843 if (!fwspec)
2844 return -ENOMEM;
2845
2846 dev_iommu_fwspec_set(dev, fwspec);
2847 }
2848
2849 for (i = 0; i < num_ids; i++)
2850 fwspec->ids[fwspec->num_ids + i] = ids[i];
2851
2852 fwspec->num_ids = new_num;
2853 return 0;
2854 }
2855 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
2856
2857 /*
2858 * Per device IOMMU features.
2859 */
2860 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2861 {
2862 if (dev->iommu && dev->iommu->iommu_dev) {
2863 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2864
2865 if (ops->dev_enable_feat)
2866 return ops->dev_enable_feat(dev, feat);
2867 }
2868
2869 return -ENODEV;
2870 }
2871 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2872
2873 /*
2874 * The device drivers should do the necessary cleanups before calling this.
2875 * For example, before disabling the aux-domain feature, the device driver
2876 * should detach all aux-domains. Otherwise, this will return -EBUSY.
2877 */
2878 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2879 {
2880 if (dev->iommu && dev->iommu->iommu_dev) {
2881 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2882
2883 if (ops->dev_disable_feat)
2884 return ops->dev_disable_feat(dev, feat);
2885 }
2886
2887 return -EBUSY;
2888 }
2889 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2890
2891 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2892 {
2893 if (dev->iommu && dev->iommu->iommu_dev) {
2894 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2895
2896 if (ops->dev_feat_enabled)
2897 return ops->dev_feat_enabled(dev, feat);
2898 }
2899
2900 return false;
2901 }
2902 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2903
2904 /*
2905 * Aux-domain specific attach/detach.
2906 *
2907 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2908 * true. Also, as long as domains are attached to a device through this
2909 * interface, any attempt to call iommu_attach_device() should fail
2910 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
2911 * This should make us safe against a device being attached to a guest as a
2912 * whole while there are still pasid users on it (aux and sva).
2913 */
2914 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2915 {
2916 int ret = -ENODEV;
2917
2918 if (domain->ops->aux_attach_dev)
2919 ret = domain->ops->aux_attach_dev(domain, dev);
2920
2921 if (!ret)
2922 trace_attach_device_to_domain(dev);
2923
2924 return ret;
2925 }
2926 EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2927
2928 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2929 {
2930 if (domain->ops->aux_detach_dev) {
2931 domain->ops->aux_detach_dev(domain, dev);
2932 trace_detach_device_from_domain(dev);
2933 }
2934 }
2935 EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2936
2937 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2938 {
2939 int ret = -ENODEV;
2940
2941 if (domain->ops->aux_get_pasid)
2942 ret = domain->ops->aux_get_pasid(domain, dev);
2943
2944 return ret;
2945 }
2946 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
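
/*
 * Illustrative sketch (not part of the original file): attaching an aux
 * domain and retrieving its PASID, following the aux-domain comment above.
 * "example_aux_attach" is a hypothetical name.
 */
static int __maybe_unused example_aux_attach(struct iommu_domain *domain,
					     struct device *dev)
{
	int ret, pasid;

	if (!iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		return ret;

	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0) {
		iommu_aux_detach_device(domain, dev);
		return pasid;
	}

	/* ... program @pasid into the device, do work, then detach ... */
	return 0;
}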
2947
2948 /**
2949 * iommu_sva_bind_device() - Bind a process address space to a device
2950 * @dev: the device
2951 * @mm: the mm to bind, caller must hold a reference to it
2952 *
2953 * Create a bond between device and address space, allowing the device to access
2954 * the mm using the returned PASID. If a bond already exists between @dev and
2955 * @mm, it is returned and an additional reference is taken. Caller must call
2956 * iommu_sva_unbind_device() to release each reference.
2957 *
2958 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2959 * initialize the required SVA features.
2960 *
2961 * On error, returns an ERR_PTR value.
2962 */
2963 struct iommu_sva *
2964 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2965 {
2966 struct iommu_group *group;
2967 struct iommu_sva *handle = ERR_PTR(-EINVAL);
2968 const struct iommu_ops *ops = dev->bus->iommu_ops;
2969
2970 if (!ops || !ops->sva_bind)
2971 return ERR_PTR(-ENODEV);
2972
2973 group = iommu_group_get(dev);
2974 if (!group)
2975 return ERR_PTR(-ENODEV);
2976
2977 /* Ensure device count and domain don't change while we're binding */
2978 mutex_lock(&group->mutex);
2979
2980 /*
2981 * To keep things simple, SVA currently doesn't support IOMMU groups
2982 * with more than one device. Existing SVA-capable systems are not
2983 * affected by the problems that required IOMMU groups (lack of ACS
2984 * isolation, device ID aliasing and other hardware issues).
2985 */
2986 if (iommu_group_device_count(group) != 1)
2987 goto out_unlock;
2988
2989 handle = ops->sva_bind(dev, mm, drvdata);
2990
2991 out_unlock:
2992 mutex_unlock(&group->mutex);
2993 iommu_group_put(group);
2994
2995 return handle;
2996 }
2997 EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
2998
2999 /**
3000 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
3001 * @handle: the handle returned by iommu_sva_bind_device()
3002 *
3003 * Put reference to a bond between device and address space. The device should
3004 * not be issuing any more transactions for this PASID. All outstanding page
3005 * requests for this PASID must have been flushed to the IOMMU.
3006 */
3007 void iommu_sva_unbind_device(struct iommu_sva *handle)
3008 {
3009 struct iommu_group *group;
3010 struct device *dev = handle->dev;
3011 const struct iommu_ops *ops = dev->bus->iommu_ops;
3012
3013 if (!ops || !ops->sva_unbind)
3014 return;
3015
3016 group = iommu_group_get(dev);
3017 if (!group)
3018 return;
3019
3020 mutex_lock(&group->mutex);
3021 ops->sva_unbind(handle);
3022 mutex_unlock(&group->mutex);
3023
3024 iommu_group_put(group);
3025 }
3026 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
3027
3028 u32 iommu_sva_get_pasid(struct iommu_sva *handle)
3029 {
3030 const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
3031
3032 if (!ops || !ops->sva_get_pasid)
3033 return IOMMU_PASID_INVALID;
3034
3035 return ops->sva_get_pasid(handle);
3036 }
3037 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
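
/*
 * Illustrative sketch (not part of the original file): binding an mm to a
 * device for Shared Virtual Addressing and looking up the PASID, following
 * the kernel-doc above. "example_sva_bind" is a hypothetical name; the
 * caller must hold a reference on @mm.
 */
static int __maybe_unused example_sva_bind(struct device *dev,
					   struct mm_struct *mm)
{
	struct iommu_sva *handle;
	u32 pasid;
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	handle = iommu_sva_bind_device(dev, mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	/* ... program @pasid into the device and start DMA on user VAs ... */
	iommu_sva_unbind_device(handle);
	return 0;
}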
3038
3039 /*
3040 * Changes the default domain of an iommu group that has *only* one device
3041 *
3042 * @group: The group for which the default domain should be changed
3043 * @prev_dev: The device in the group (used to make sure that the device
3044 * hasn't changed since the caller looked it up)
3045 * @type: The type of the new default domain that gets associated with the group
3046 *
3047 * Returns 0 on success and error code on failure
3048 *
3049 * Note:
3050 * 1. Presently, this function is called only when user requests to change the
3051 * group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type
3052 * Please take a closer look if intended to use for other purposes.
3053 */
3054 static int iommu_change_dev_def_domain(struct iommu_group *group,
3055 struct device *prev_dev, int type)
3056 {
3057 struct iommu_domain *prev_dom;
3058 struct group_device *grp_dev;
3059 int ret, dev_def_dom;
3060 struct device *dev;
3061
3062 if (!group)
3063 return -EINVAL;
3064
3065 mutex_lock(&group->mutex);
3066
3067 if (group->default_domain != group->domain) {
3068 dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
3069 ret = -EBUSY;
3070 goto out;
3071 }
3072
3073 /*
3074 * iommu group wasn't locked while acquiring device lock in
3075 * iommu_group_store_type(). So, make sure that the device count hasn't
3076 * changed while acquiring device lock.
3077 *
3078 * Changing default domain of an iommu group with two or more devices
3079 * isn't supported because there could be a potential deadlock. Consider
3080 * the following scenario. T1 is trying to acquire device locks of all
3081 * the devices in the group and before it could acquire all of them,
3082 * there could be another thread T2 (from different sub-system and use
3083 * case) that has already acquired some of the device locks and might be
3084 * waiting for T1 to release other device locks.
3085 */
3086 if (iommu_group_device_count(group) != 1) {
3087 dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
3088 ret = -EINVAL;
3089 goto out;
3090 }
3091
3092 /* Since group has only one device */
3093 grp_dev = list_first_entry(&group->devices, struct group_device, list);
3094 dev = grp_dev->dev;
3095
3096 if (prev_dev != dev) {
3097 dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
3098 ret = -EBUSY;
3099 goto out;
3100 }
3101
3102 prev_dom = group->default_domain;
3103 if (!prev_dom) {
3104 ret = -EINVAL;
3105 goto out;
3106 }
3107
3108 dev_def_dom = iommu_get_def_domain_type(dev);
3109 if (!type) {
3110 /*
3111 * If the user hasn't requested any specific type of domain and
3112 * if the device supports both the domains, then default to the
3113 * domain the device was booted with
3114 */
3115 type = dev_def_dom ? : iommu_def_domain_type;
3116 } else if (dev_def_dom && type != dev_def_dom) {
3117 dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
3118 iommu_domain_type_str(type));
3119 ret = -EINVAL;
3120 goto out;
3121 }
3122
3123 /*
3124 * Switch to a new domain only if the requested domain type is different
3125 * from the existing default domain type
3126 */
3127 if (prev_dom->type == type) {
3128 ret = 0;
3129 goto out;
3130 }
3131
3132 /* Sets group->default_domain to the newly allocated domain */
3133 ret = iommu_group_alloc_default_domain(dev->bus, group, type);
3134 if (ret)
3135 goto out;
3136
3137 ret = iommu_create_device_direct_mappings(group, dev);
3138 if (ret)
3139 goto free_new_domain;
3140
3141 ret = __iommu_attach_device(group->default_domain, dev);
3142 if (ret)
3143 goto free_new_domain;
3144
3145 group->domain = group->default_domain;
3146
3147 /*
3148 * Release the mutex here because ops->probe_finalize() call-back of
3149 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
3150 * in-turn might call back into IOMMU core code, where it tries to take
3151 * group->mutex, resulting in a deadlock.
3152 */
3153 mutex_unlock(&group->mutex);
3154
3155 /* Make sure dma_ops is appropriately set */
3156 iommu_group_do_probe_finalize(dev, group->default_domain);
3157 iommu_domain_free(prev_dom);
3158 return 0;
3159
3160 free_new_domain:
3161 iommu_domain_free(group->default_domain);
3162 group->default_domain = prev_dom;
3163 group->domain = prev_dom;
3164
3165 out:
3166 mutex_unlock(&group->mutex);
3167
3168 return ret;
3169 }
3170
3171 /*
3172 * Changing the default domain through sysfs requires the user to unbind the
3173 * drivers from the devices in the iommu group; failure is returned if this
3174 * precondition is not met.
3175 *
3176 * We need to consider the race between this and the device release path.
3177 * device_lock(dev) is used here to guarantee that the device release path
3178 * will not be entered at the same time.
3179 */
3180 static ssize_t iommu_group_store_type(struct iommu_group *group,
3181 const char *buf, size_t count)
3182 {
3183 struct group_device *grp_dev;
3184 struct device *dev;
3185 int ret, req_type;
3186
3187 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3188 return -EACCES;
3189
3190 if (WARN_ON(!group))
3191 return -EINVAL;
3192
3193 if (sysfs_streq(buf, "identity"))
3194 req_type = IOMMU_DOMAIN_IDENTITY;
3195 else if (sysfs_streq(buf, "DMA"))
3196 req_type = IOMMU_DOMAIN_DMA;
3197 else if (sysfs_streq(buf, "auto"))
3198 req_type = 0;
3199 else
3200 return -EINVAL;
3201
3202 /*
3203 * Lock/Unlock the group mutex here before device lock to
3204 * 1. Make sure that the iommu group has only one device (this is a
3205 * prerequisite for step 2)
3206 * 2. Get struct *dev which is needed to lock device
3207 */
3208 mutex_lock(&group->mutex);
3209 if (iommu_group_device_count(group) != 1) {
3210 mutex_unlock(&group->mutex);
3211 pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
3212 return -EINVAL;
3213 }
3214
3215 /* Since group has only one device */
3216 grp_dev = list_first_entry(&group->devices, struct group_device, list);
3217 dev = grp_dev->dev;
3218 get_device(dev);
3219
3220 /*
3221 * Don't hold the group mutex because taking group mutex first and then
3222 * the device lock could potentially cause a deadlock as below. Assume
3223 * two threads T1 and T2. T1 is trying to change default domain of an
3224 * iommu group and T2 is trying to hot unplug a device or release [1] VF
3225 * of a PCIe device which is in the same iommu group. T1 takes group
3226 * mutex and before it could take device lock assume T2 has taken device
3227 * lock and is yet to take group mutex. Now, both the threads will be
3228 * waiting for the other thread to release its lock. The lock order below
3229 * avoids this deadlock:
3230 * device_lock(dev);
3231 * mutex_lock(&group->mutex);
3232 * iommu_change_dev_def_domain();
3233 * mutex_unlock(&group->mutex);
3234 * device_unlock(dev);
3235 *
3236 * [1] Typical device release path
3237 * device_lock() from device/driver core code
3238 * -> bus_notifier()
3239 * -> iommu_bus_notifier()
3240 * -> iommu_release_device()
3241 * -> ops->release_device() vendor driver calls back iommu core code
3242 * -> mutex_lock() from iommu core code
3243 */
3244 mutex_unlock(&group->mutex);
3245
3246 /* Check if the device in the group still has a driver bound to it */
3247 device_lock(dev);
3248 if (device_is_bound(dev)) {
3249 pr_err_ratelimited("Device is still bound to driver\n");
3250 ret = -EBUSY;
3251 goto out;
3252 }
3253
3254 ret = iommu_change_dev_def_domain(group, dev, req_type);
3255 ret = ret ?: count;
3256
3257 out:
3258 device_unlock(dev);
3259 put_device(dev);
3260
3261 return ret;
3262 }
3263