xref: /linux/drivers/iommu/arm/arm-smmu/qcom_iommu.c (revision 84b9b44b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

#define SMMU_INTR_SEL_NS     0x2000

enum qcom_iommu_clk {
	CLK_IFACE,
	CLK_BUS,
	CLK_TBU,
	CLK_NUM,
};

struct qcom_iommu_ctx;

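/*
 * One qcom_iommu_dev is created per SMMU instance; each context bank in
 * that SMMU is a child platform device with its own qcom_iommu_ctx.  The
 * ctxs[] table is indexed by (asid - 1), since the DT binding guarantees
 * a 1:1 mapping between ASIDs and context bank numbers, starting at 1.
 */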
struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	 iommu;
	struct device		*dev;
	struct clk_bulk_data clks[CLK_NUM];
	void __iomem		*local_base;
	u32			 sec_id;
	u8			 num_ctxs;
	struct qcom_iommu_ctx	*ctxs[];   /* indexed by asid-1 */
};

struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;
	bool			 secure_init;
	u8			 asid;      /* asid and ctx bank # are 1:1 */
	struct iommu_domain	*domain;
};

struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		 pgtbl_lock;
	struct mutex		 init_mutex; /* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;
	struct iommu_fwspec	*fwspec;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev *to_iommu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;

	return dev_iommu_priv_get(dev);
}

static struct qcom_iommu_ctx *to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = d->iommu;

	if (!qcom_iommu)
		return NULL;

	return qcom_iommu->ctxs[asid - 1];
}

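/*
 * Context-bank MMIO accessors.  These use the relaxed variants; where
 * completion of TLB maintenance matters, qcom_iommu_tlb_sync() provides
 * the guarantee by explicitly polling TLBSTATUS.
 */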
static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}

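/*
 * Issue a TLBSYNC on every context bank used by the domain and wait for
 * completion by polling the sACTIVE bit of TLBSTATUS (up to 5 seconds).
 */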
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}

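/*
 * Invalidate a range of IOVAs on every context bank, without waiting for
 * completion.  TLBIVA/TLBIVAL take the 4K-aligned address in the upper
 * bits and the ASID in the low bits, hence the mask-and-OR below; the
 * leaf variant (TLBIVAL) only invalidates last-level entries.
 */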
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		size_t s = size;

		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
	.tlb_add_page	= qcom_iommu_tlb_add_page,
};

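/*
 * Context fault handler.  The fault address (FAR) and syndrome (FSYNR0)
 * are reported to any handler attached to the domain; unhandled faults
 * are logged, the fault status register is cleared by writing it back,
 * and the stalled transaction is terminated (SCTLR.CFCFG stalls faulting
 * transactions until RESUME is written).
 */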
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

	return IRQ_HANDLED;
}

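/*
 * Finalize the domain against a specific IOMMU instance: allocate the
 * stage-1 LPAE page table, then program every context bank named in the
 * device's fwspec.  Secure firmware is asked (once per context) to
 * restore the non-secure configuration before the TTBR/TCR/MAIR/SCTLR
 * registers are written and translation is enabled.
 */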
static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_flush_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	qcom_domain->fwspec = fwspec;

	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
				pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
				FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

		/* TCR */
		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
				arm_smmu_lpae_tcr2(&pgtbl_cfg));
		iommu_writel(ctx, ARM_SMMU_CB_TCR,
			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
				pgtbl_cfg.arm_lpae_s1_cfg.mair);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
				pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

		/* SCTLR */
		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
		      ARM_SMMU_SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			reg |= ARM_SMMU_SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}

static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
	struct qcom_iommu_domain *qcom_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link.  Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}

static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu)
		return -EINVAL;

	return 0;
}

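/*
 * Map/unmap simply proxy into io-pgtable.  The page table code is not
 * serialized internally here, so a spinlock protects all page table
 * updates; map consequently allocates with GFP_ATOMIC regardless of the
 * caller-supplied gfp flags.
 */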
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t pgsize, size_t pgcount,
			  int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_ATOMIC, mapped);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t pgsize, size_t pgcount,
			       struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf.  So we
	 * cannot rely on the device_link.  Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}

static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}

static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct device_link *link;

	if (!qcom_iommu)
		return ERR_PTR(-ENODEV);

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return ERR_PTR(-ENODEV);
	}

	return &qcom_iommu->iommu;
}

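/*
 * Translate an "iommus" phandle+specifier into an fwspec ID.  The single
 * specifier cell is the ASID (== context bank number).  An illustrative
 * DT fragment, loosely following the qcom,iommu binding example (the
 * node names and numbers here are made up):
 *
 *	apps_iommu: iommu@1e20000 {
 *		compatible = "qcom,msm-iommu-v1";
 *		#iommu-cells = <1>;
 *		qcom,iommu-secure-id = <17>;
 *		...
 *		iommu-ctx@3000 {
 *			compatible = "qcom,msm-iommu-v1-ns";
 *			reg = <0x3000 0x1000>;
 *			...
 *		};
 *	};
 *
 *	&some_master {
 *		iommus = <&apps_iommu 3>;	// attach via ASID 3
 *	};
 */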
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/* make sure the asid specified in dt is valid, so we don't have
	 * to sanity check this elsewhere, since 'asid - 1' is used to
	 * index into qcom_iommu->ctxs:
	 */
	if (WARN_ON(asid < 1) ||
	    WARN_ON(asid > qcom_iommu->num_ctxs)) {
		put_device(&iommu_pdev->dev);
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		dev_iommu_priv_set(dev, qcom_iommu);
	} else {
		/* make sure the device's iommus dt node isn't referring to
		 * multiple different iommu devices.  Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
			put_device(&iommu_pdev->dev);
			return -EINVAL;
		}
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}

static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.probe_device	= qcom_iommu_probe_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= qcom_iommu_attach_dev,
		.map_pages	= qcom_iommu_map,
		.unmap_pages	= qcom_iommu_unmap,
		.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
		.iotlb_sync	= qcom_iommu_iotlb_sync,
		.iova_to_phys	= qcom_iommu_iova_to_phys,
		.free		= qcom_iommu_domain_free,
	}
};

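/*
 * Secure pagetable memory must be handed to the secure world before any
 * secure context can be used.  Ask firmware how much is needed, allocate
 * it as DMA memory with no kernel mapping, and pass it down via SCM;
 * this only ever needs to happen once per boot, hence the static flag.
 */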
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}

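/*
 * Derive the ASID from the context bank's position in the register
 * space: banks are 0x1000 apart, so a child node with, for example,
 * reg = <0x3000 0x1000> maps to ASID 3.
 */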
static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;      /* context banks are 0x1000 apart */
}

static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	struct resource *res;
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	/* clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid - 1] = ctx;

	return 0;
}

static void qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;
}

static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= ctx_of_match,
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove_new = qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child) {
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
			of_node_put(child);
			return true;
		}
	}

	return false;
}

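/*
 * Probe the top-level SMMU device: size the ctxs[] table from the
 * largest child ASID, grab the iface/bus (and optional tbu) clocks and
 * the secure ID, set up secure pagetable memory if any secure context
 * bank is present, then populate the child context-bank devices and
 * register with the IOMMU core.  Finally, when a local base is mapped,
 * steer all context interrupts to the non-secure state via
 * SMMU_INTR_SEL_NS.
 */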
static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct clk *clk;
	int ret, max_asid = 0;

	/* find the max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
				  GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;
	qcom_iommu->num_ctxs = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(qcom_iommu->local_base))
			return PTR_ERR(qcom_iommu->local_base);
	}

	clk = devm_clk_get(dev, "iface");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(clk);
	}
	qcom_iommu->clks[CLK_IFACE].clk = clk;

	clk = devm_clk_get(dev, "bus");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(clk);
	}
	qcom_iommu->clks[CLK_BUS].clk = clk;

	clk = devm_clk_get_optional(dev, "tbu");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get tbu clock\n");
		return PTR_ERR(clk);
	}
	qcom_iommu->clks[CLK_TBU].clk = clk;

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table (%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		goto err_pm_disable;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		goto err_pm_disable;
	}

	ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		goto err_pm_disable;
	}

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;

err_pm_disable:
	pm_runtime_disable(dev);
	return ret;
}

static void qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);
}

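/*
 * Runtime PM callbacks simply gate the bulk clocks.  Masters hold the
 * IOMMU active through the DL_FLAG_PM_RUNTIME device link created at
 * probe_device time; paths that can run with the master powered off
 * (unmap, TLB sync, domain free) take their own pm_runtime references.
 */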
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);

	return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= qcom_iommu_of_match,
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove_new = qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);