// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <trace/events/swiotlb.h>

#include "dma-iommu.h"
#include "iommu-pages.h"

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

enum iommu_dma_queue_type {
	IOMMU_DMA_OPTS_PER_CPU_QUEUE,
	IOMMU_DMA_OPTS_SINGLE_QUEUE,
};

struct iommu_dma_options {
	enum iommu_dma_queue_type qt;
	size_t		fq_size;
	unsigned int	fq_timeout;
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct {
			struct iova_domain	iovad;
			/* Flush queue */
			union {
				struct iova_fq	*single_fq;
				struct iova_fq	__percpu *percpu_fq;
			};
			/* Number of TLB flushes that have been started */
			atomic64_t		fq_flush_start_cnt;
			/* Number of TLB flushes that have been finished */
			atomic64_t		fq_flush_finish_cnt;
			/* Timer to regularly empty the flush queues */
			struct timer_list	fq_timer;
			/* 1 when timer is active, 0 when not */
			atomic_t		fq_timer_on;
		};
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
	/* Options for dma-iommu use */
	struct iommu_dma_options	options;
	struct mutex			mutex;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);

/* Number of entries per flush queue */
#define IOVA_DEFAULT_FQ_SIZE	256
#define IOVA_SINGLE_FQ_SIZE	32768

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_DEFAULT_FQ_TIMEOUT	10
#define IOVA_SINGLE_FQ_TIMEOUT	1000
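
/*
 * Note that both queue sizes must be powers of two, since
 * iommu_dma_init_one_fq() derives the ring index mask as fq_size - 1.
 */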

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	struct list_head freelist;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU flush queue structure */
struct iova_fq {
	spinlock_t lock;
	unsigned int head, tail;
	unsigned int mod_mask;
	struct iova_fq_entry entries[];
};
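
/*
 * The ring follows the usual power-of-two convention: one slot is kept
 * unused, so that head == tail unambiguously means "empty" while
 * ((tail + 1) & mod_mask) == head means "full". E.g. with an fq_size of
 * 256, mod_mask is 0xff and the indices simply wrap modulo 256.
 */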

#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) & fq->mod_mask) == fq->head);
}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
	unsigned int idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) & fq->mod_mask;

	return idx;
}

static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
	unsigned int idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {
		if (fq->entries[idx].counter >= counter)
			break;

		iommu_put_pages_list(&fq->entries[idx].freelist);
		free_iova_fast(&cookie->iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) & fq->mod_mask;
	}
}

static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
	unsigned long flags;

	spin_lock_irqsave(&fq->lock, flags);
	fq_ring_free_locked(cookie, fq);
	spin_unlock_irqrestore(&fq->lock, flags);
}
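
/*
 * Queued entries are stamped with fq_flush_start_cnt in queue_iova();
 * once fq_flush_finish_cnt has advanced past that stamp, the IOTLB
 * flush covering them is known to have completed and the IOVAs are
 * safe to reuse (see fq_ring_free_locked() above).
 */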
static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
	atomic64_inc(&cookie->fq_flush_start_cnt);
	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
	atomic64_inc(&cookie->fq_flush_finish_cnt);
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
	int cpu;

	atomic_set(&cookie->fq_timer_on, 0);
	fq_flush_iotlb(cookie);

	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
		fq_ring_free(cookie, cookie->single_fq);
	} else {
		for_each_possible_cpu(cpu)
			fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
	}
}

static void queue_iova(struct iommu_dma_cookie *cookie,
		unsigned long pfn, unsigned long pages,
		struct list_head *freelist)
{
	struct iova_fq *fq;
	unsigned long flags;
	unsigned int idx;

	/*
	 * Order against the IOMMU driver's pagetable update from unmapping
	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
	 * from a different CPU before we release the lock below. Full barrier
	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
	 * written fq state here.
	 */
	smp_mb();

	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
		fq = cookie->single_fq;
	else
		fq = raw_cpu_ptr(cookie->percpu_fq);

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free_locked(cookie, fq);

	if (fq_full(fq)) {
		fq_flush_iotlb(cookie);
		fq_ring_free_locked(cookie, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages = pages;
	fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
	list_splice(freelist, &fq->entries[idx].freelist);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&cookie->fq_timer_on) &&
	    !atomic_xchg(&cookie->fq_timer_on, 1))
		mod_timer(&cookie->fq_timer,
			  jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
}

static void iommu_dma_free_fq_single(struct iova_fq *fq)
{
	int idx;

	fq_ring_for_each(idx, fq)
		iommu_put_pages_list(&fq->entries[idx].freelist);
	vfree(fq);
}

static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
{
	int cpu, idx;

	/* The IOVAs will be torn down separately, so just free our queued pages */
	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);

		fq_ring_for_each(idx, fq)
			iommu_put_pages_list(&fq->entries[idx].freelist);
	}

	free_percpu(percpu_fq);
}

static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
	if (!cookie->fq_domain)
		return;

	del_timer_sync(&cookie->fq_timer);
	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
		iommu_dma_free_fq_single(cookie->single_fq);
	else
		iommu_dma_free_fq_percpu(cookie->percpu_fq);
}

static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
{
	int i;

	fq->head = 0;
	fq->tail = 0;
	fq->mod_mask = fq_size - 1;

	spin_lock_init(&fq->lock);

	for (i = 0; i < fq_size; i++)
		INIT_LIST_HEAD(&fq->entries[i].freelist);
}

static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
{
	size_t fq_size = cookie->options.fq_size;
	struct iova_fq *queue;

	queue = vmalloc(struct_size(queue, entries, fq_size));
	if (!queue)
		return -ENOMEM;
	iommu_dma_init_one_fq(queue, fq_size);
	cookie->single_fq = queue;

	return 0;
}

static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
{
	size_t fq_size = cookie->options.fq_size;
	struct iova_fq __percpu *queue;
	int cpu;

	queue = __alloc_percpu(struct_size(queue, entries, fq_size),
			       __alignof__(*queue));
	if (!queue)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
	cookie->percpu_fq = queue;
	return 0;
}

/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	int rc;

	if (cookie->fq_domain)
		return 0;

	atomic64_set(&cookie->fq_flush_start_cnt, 0);
	atomic64_set(&cookie->fq_flush_finish_cnt, 0);

	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
		rc = iommu_dma_init_fq_single(cookie);
	else
		rc = iommu_dma_init_fq_percpu(cookie);

	if (rc) {
		pr_warn("iova flush queue initialization failed\n");
		return -ENOMEM;
	}

	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
	atomic_set(&cookie->fq_timer_on, 0);
	/*
	 * Prevent incomplete fq state being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	mutex_init(&domain->iova_cookie->mutex);
	return 0;
}

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
		iommu_dma_free_fq(cookie);
		put_iova_domain(&cookie->iovad);
	}

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_get_resv_regions(dev, list);

	if (dev->of_node)
		of_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}
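
/*
 * list_sort() only cares about the sign of the comparator's return
 * value, so a plain boolean comparison is enough to order the ranges
 * by ascending start address.
 */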
static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);

	return res_a->res->start > res_b->res->start;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* DMA ranges should be non-overlapping */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}
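
/*
 * Bounce buffering is needed either because the device is untrusted, or
 * because a kmalloc() buffer may not be DMA-safe for this device and
 * direction (see dma_kmalloc_needs_bounce()).
 */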
static bool dev_use_swiotlb(struct device *dev, size_t size,
			    enum dma_data_direction dir)
{
	return IS_ENABLED(CONFIG_SWIOTLB) &&
		(dev_is_untrusted(dev) ||
		 dma_kmalloc_needs_bounce(dev, size, dir));
}

static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	if (!IS_ENABLED(CONFIG_SWIOTLB))
		return false;

	if (dev_is_untrusted(dev))
		return true;

	/*
	 * If kmalloc() buffers are not DMA-safe for this device and
	 * direction, check the individual lengths in the sg list. If any
	 * element is deemed unsafe, use the swiotlb for bouncing.
	 */
	if (!dma_kmalloc_safe(dev, dir)) {
		for_each_sg(sg, s, nents, i)
			if (!dma_kmalloc_size_aligned(s->length))
				return true;
	}

	return false;
}

/**
 * iommu_dma_init_options - Initialize dma-iommu options
 * @options: The options to be initialized
 * @dev: Device the options are set for
 *
 * This allows tuning dma-iommu specific to device properties
 */
static void iommu_dma_init_options(struct iommu_dma_options *options,
				   struct device *dev)
{
	/* Shadowing IOTLB flushes do better with a single large queue */
	if (dev->iommu->shadow_on_flush) {
		options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
		options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
		options->fq_size = IOVA_SINGLE_FQ_SIZE;
	} else {
		options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
		options->fq_size = IOVA_DEFAULT_FQ_SIZE;
		options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
	}
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @dev: Device the domain is being initialised for
 *
 * If the geometry and dma_range_map include address 0, we reserve that page
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	const struct bus_dma_region *map = dev->dma_range_map;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int ret;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = 1;

	/* Check the domain allows at least some access to the device... */
	if (map) {
		if (dma_range_map_min(map) > domain->geometry.aperture_end ||
		    dma_range_map_max(map) < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
	}
	/* ...then finally give it a kicking to make sure it fits */
	base_pfn = max_t(unsigned long, base_pfn,
			 domain->geometry.aperture_start >> order);

	/* start_pfn is always nonzero for an already-initialised domain */
	mutex_lock(&cookie->mutex);
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			ret = -EFAULT;
			goto done_unlock;
		}

		ret = 0;
		goto done_unlock;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		goto done_unlock;

	iommu_dma_init_options(&cookie->options, dev);

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
	    (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
		domain->type = IOMMU_DOMAIN_DMA;

	ret = iova_reserve_iommu_regions(dev, domain);

done_unlock:
	mutex_unlock(&cookie->mutex);
	return ret;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/*
	 * Try to use all the 32-bit PCI addresses first. The original SAC vs.
	 * DAC reasoning loses relevance with PCIe, but enough hardware and
	 * firmware bugs are still lurking out there that it's safest not to
	 * venture into the 64-bit space until necessary.
	 *
	 * If your device goes wrong after seeing the notice then likely either
	 * its driver is not setting DMA masks accurately, the hardware has
	 * some inherent bug in handling >32-bit addresses, or not all the
	 * expected address bits are wired up between the device and the IOMMU.
	 */
	if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);
		if (iova)
			goto done;

		dev->iommu->pci_32bit_workaround = false;
		dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
	}

	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
done:
	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (gather && gather->queued)
		queue_iova(cookie, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				&gather->freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
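
/*
 * Unmap helper: the DMA address and size are re-aligned to granule
 * boundaries so that the whole of the original allocation is released,
 * and the IOTLB is either synced immediately or, when a flush queue is
 * active, deferred via iommu_dma_free_iova().
 */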
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);
	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!iotlb_gather.queued)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}
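
/*
 * Core mapping helper: align @phys and @size to IOVA granules, allocate
 * a suitably-sized IOVA within @dma_mask, and install the mapping,
 * releasing the IOVA again if iommu_map() fails.
 */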
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	/* If anyone ever wants this we'd need support in the IOVA allocator */
	if (dev_WARN_ONCE(dev, dma_get_min_align_mask(dev) > iova_mask(iovad),
			  "Unsupported alignment constraint\n"))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
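
/*
 * Allocate @count pages for mapping, preferring the highest orders that
 * @order_mask permits and splitting any higher-order allocation, so the
 * result can be treated as a plain array of order-0 pages.
 */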
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= GENMASK(MAX_PAGE_ORDER, 0);
	if (!order_mask)
		return NULL;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= GENMASK(__fls(count), 0);
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	dma_addr_t iova;
	ssize_t ret;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	/*
	 * Remove the zone/policy flags from the GFP - these are applied to the
	 * __iommu_dma_alloc_pages() but are not used for the supporting
	 * internal allocations that follow.
	 */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);

	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
			   gfp);
	if (ret < 0 || ret < size)
		goto out_free_sg;

	sgt->sgl->dma_address = iova;
	sgt->sgl->dma_length = size;
	return pages;

out_free_sg:
	sg_free_table(sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct page **pages;
	struct sg_table sgt;
	void *vaddr;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
						attrs);
	if (!pages)
		return NULL;
	*dma_handle = sgt.sgl->dma_address;
	sg_free_table(&sgt);
	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, *dma_handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return NULL;
}

static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	struct dma_sgt_handle *sh;

	sh = kmalloc(sizeof(*sh), gfp);
	if (!sh)
		return NULL;

	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
						    PAGE_KERNEL, attrs);
	if (!sh->pages) {
		kfree(sh);
		return NULL;
	}
	return &sh->sgt;
}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	sg_free_table(&sh->sgt);
	kfree(sh);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_device(dev, phys, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (sg_dma_is_swiotlb(sgl))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
						      sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (sg_dma_is_swiotlb(sgl))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_device(dev,
							 sg_dma_address(sg),
							 sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova, dma_mask = dma_get_mask(dev);

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (dev_use_swiotlb(dev, size, dir) &&
	    iova_offset(iovad, phys | size)) {
		if (!is_swiotlb_active(dev)) {
			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
			return DMA_MAPPING_ERROR;
		}

		trace_swiotlb_bounced(dev, phys, size);

		phys = swiotlb_tbl_map_single(dev, phys, size,
					      iova_mask(iovad), dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/*
		 * Untrusted devices should not see padding areas with random
		 * leftover kernel data, so zero the pre- and post-padding.
		 * swiotlb_tbl_map_single() has initialized the bounce buffer
		 * proper to the contents of the original memory buffer.
		 */
		if (dev_is_untrusted(dev)) {
			size_t start, virt = (size_t)phys_to_virt(phys);

			/* Pre-padding */
			start = iova_align_down(iovad, virt);
			memset((void *)start, 0, virt - start);

			/* Post-padding */
			start = virt + size;
			memset((void *)start, 0,
			       iova_align(iovad, start) - start);
		}
	}

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
	return iova;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_handle);
	if (WARN_ON(!phys))
		return;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_handle, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		dma_addr_t s_dma_addr = sg_dma_address(s);
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		if (sg_dma_is_bus_address(s)) {
			if (i > 0)
				cur = sg_next(cur);

			sg_dma_unmark_bus_address(s);
			sg_dma_address(cur) = s_dma_addr;
			sg_dma_len(cur) = s_length;
			sg_dma_mark_bus_address(cur);
			count++;
			cur_len = 0;
			continue;
		}

		s->offset += s_iova_off;
		s->length = s_length;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_is_bus_address(s)) {
			sg_dma_unmark_bus_address(s);
		} else {
			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
				s->offset += sg_dma_address(s);
			if (sg_dma_len(s))
				s->length = sg_dma_len(s);
		}
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		iommu_dma_unmap_page(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	sg_dma_mark_swiotlb(sg);

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
				s->offset, s->length, dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	ssize_t ret;
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
		ret = iommu_deferred_attach(dev, domain);
		if (ret)
			goto out;
	}

	if (dev_use_sg_swiotlb(dev, sg, nents, dir))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		if (is_pci_p2pdma_page(sg_page(s))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				/*
				 * iommu_map_sg() will skip this segment as
				 * it is marked as a bus address,
				 * __finalise_sg() will copy the dma address
				 * into the output segment.
				 */
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Mapping through host bridge should be
				 * mapped with regular IOVAs, thus we
				 * do nothing here and continue below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_restore_sg;
			}
		}

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	if (!iova_len)
		return __finalise_sg(dev, sg, nents, 0);

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova) {
		ret = -ENOMEM;
		goto out_restore_sg;
	}

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
	if (ret < 0 || ret < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
out:
	if (ret != -ENOMEM && ret != -EREMOTEIO)
		return -EINVAL;
	return ret;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t end = 0, start;
	struct scatterlist *tmp;
	int i;

	if (sg_dma_is_swiotlb(sg)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, the start and end points
	 * just have to be determined.
	 */
	for_each_sg(sg, tmp, nents, i) {
		if (sg_dma_is_bus_address(tmp)) {
			sg_dma_unmark_bus_address(tmp);
			continue;
		}

		if (sg_dma_len(tmp) == 0)
			break;

		start = sg_dma_address(tmp);
		break;
	}

	nents -= i;
	for_each_sg(tmp, tmp, nents, i) {
		if (sg_dma_is_bus_address(tmp)) {
			sg_dma_unmark_bus_address(tmp);
			continue;
		}

		if (sg_dma_len(tmp) == 0)
			break;

		end = sg_dma_address(tmp) + sg_dma_len(tmp);
	}

	if (end)
		__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (!coherent || PageHighMem(page)) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
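
/*
 * The merge boundary mask is the smallest IOMMU page size minus one:
 * since the IOMMU can make physically discontiguous memory appear
 * virtually contiguous, segments may be merged as long as each remains
 * aligned to the IOVA granule.
 */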
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static size_t iommu_dma_opt_mapping_size(void)
{
	return iova_rcache_range();
}

static size_t iommu_dma_max_mapping_size(struct device *dev)
{
	if (dev_is_untrusted(dev))
		return swiotlb_max_mapping_size(dev);

	return SIZE_MAX;
}

static const struct dma_map_ops iommu_dma_ops = {
	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED |
				  DMA_F_CAN_SKIP_SYNC,
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages_op		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
	.free_noncontiguous	= iommu_dma_free_noncontiguous,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
	.opt_mapping_size	= iommu_dma_opt_mapping_size,
	.max_mapping_size	= iommu_dma_max_mapping_size,
};

void iommu_setup_dma_ops(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (dev_is_pci(dev))
		dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;

	if (iommu_is_dma_domain(domain)) {
		if (iommu_dma_init_domain(domain, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	} else if (dev->dma_ops == &iommu_dma_ops) {
		/* Clean up if we've switched *from* a DMA domain */
		dev->dma_ops = NULL;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
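
/*
 * Look up, or create and map, the MSI page covering the doorbell at
 * @msi_addr. Pages are cached on the cookie's msi_page_list, since
 * multiple MSI vectors may share a single doorbell address.
 */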
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}

/**
 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
 * @desc: MSI descriptor, will store the MSI page
 * @msi_addr: MSI target address to be mapped
 *
 * Return: 0 on success or negative error code if the mapping failed.
 */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

/**
 * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
 * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
 * @msg: MSI message containing target physical address
 */
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	if (is_kdump_kernel())
		static_branch_enable(&iommu_deferred_attach_enabled);

	return iova_cache_get();
}
arch_initcall(iommu_dma_init);