// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs. Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

#define SMMU_INTR_SEL_NS	0x2000

enum qcom_iommu_clk {
	CLK_IFACE,
	CLK_BUS,
	CLK_TBU,
	CLK_NUM,
};

struct qcom_iommu_ctx;

struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	iommu;
	struct device		*dev;
	struct clk_bulk_data	clks[CLK_NUM];
	void __iomem		*local_base;
	u32			sec_id;
	u8			max_asid;
	struct qcom_iommu_ctx	*ctxs[];	/* indexed by asid */
};

struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;
	bool			secure_init;
	bool			secured_ctx;
	u8			asid;		/* asid and ctx bank # are 1:1 */
	struct iommu_domain	*domain;
};

struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		pgtbl_lock;
	struct mutex		init_mutex;	/* Protects iommu pointer */
	struct iommu_domain	domain;
	struct qcom_iommu_dev	*iommu;
	struct iommu_fwspec	*fwspec;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev * to_iommu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;

	return dev_iommu_priv_get(dev);
}

static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = d->iommu;
	if (!qcom_iommu)
		return NULL;
	return qcom_iommu->ctxs[asid];
}

static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}
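/*
 * TLB maintenance is performed per context bank listed in the device's
 * fwspec: a sync issues a write to TLBSYNC and then polls TLBSTATUS until
 * the hardware clears the sync-active bit, with a 5 second timeout.
 */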
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		size_t s = size;

		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_flush_walk	= qcom_iommu_tlb_flush_walk,
	.tlb_add_page	= qcom_iommu_tlb_add_page,
};

static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

	return IRQ_HANDLED;
}
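/*
 * Finalize a domain against a particular IOMMU instance: allocate an
 * ARM 32-bit LPAE stage-1 pagetable shared by all of the device's context
 * banks, then program each bank's TTBR/TCR/MAIR/SCTLR registers.  Secured
 * contexts are owned by the secure world and are left untouched here.
 */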
static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_flush_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	qcom_domain->fwspec = fwspec;

	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* Secured QSMMU-500/QSMMU-v2 contexts cannot be programmed */
		if (ctx->secured_ctx) {
			ctx->domain = domain;
			continue;
		}

		/* Disable context bank before programming */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		/* Clear context bank fault address fault status registers */
		iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
		iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
			     FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

		/* TCR */
		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
			     arm_smmu_lpae_tcr2(&pgtbl_cfg));
		iommu_writel(ctx, ARM_SMMU_CB_TCR,
			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

		/* SCTLR */
		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
		      ARM_SMMU_SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			reg |= ARM_SMMU_SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}
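/*
 * Domain allocation is deliberately minimal: the backing IOMMU instance is
 * not known until a master is attached, so the pagetable is set up lazily
 * by qcom_iommu_init_domain() from the attach path.
 */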
static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link.  Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}

static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu)
		return -EINVAL;

	return 0;
}

static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
				      struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct qcom_iommu_domain *qcom_domain;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	unsigned int i;

	if (domain == identity_domain || !domain)
		return 0;

	qcom_domain = to_qcom_iommu_domain(domain);
	if (WARN_ON(!qcom_domain->iommu))
		return -EINVAL;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);
	return 0;
}

static struct iommu_domain_ops qcom_iommu_identity_ops = {
	.attach_dev = qcom_iommu_identity_attach,
};

static struct iommu_domain qcom_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &qcom_iommu_identity_ops,
};
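/*
 * The map/unmap paths may be called from atomic context, so the pagetable
 * is protected by a spinlock and mappings are requested with GFP_ATOMIC
 * regardless of the gfp value passed in by the core.
 */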
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t pgsize, size_t pgcount,
			  int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_ATOMIC, mapped);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t pgsize, size_t pgcount,
			       struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/*
	 * NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf.  So we
	 * cannot rely on the device_link.  Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}
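/*
 * Flushing is always a full sync: the io_pgtable cookie (the
 * qcom_iommu_domain) is recovered from the pgtable ops via container_of()
 * and handed to the common sync routine, with runtime PM held around the
 * register accesses.
 */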
static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}

static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct device_link *link;

	if (!qcom_iommu)
		return ERR_PTR(-ENODEV);

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return ERR_PTR(-ENODEV);
	}

	return &qcom_iommu->iommu;
}

static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/*
	 * Make sure the asid specified in DT is valid, so we don't have
	 * to sanity check this elsewhere:
	 */
	if (WARN_ON(asid > qcom_iommu->max_asid) ||
	    WARN_ON(qcom_iommu->ctxs[asid] == NULL)) {
		put_device(&iommu_pdev->dev);
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		dev_iommu_priv_set(dev, qcom_iommu);
	} else {
		/*
		 * Make sure the device's iommus DT node isn't referring to
		 * multiple different iommu devices.  Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
			put_device(&iommu_pdev->dev);
			return -EINVAL;
		}
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}

static const struct iommu_ops qcom_iommu_ops = {
	.identity_domain = &qcom_iommu_identity_domain,
	.capable	= qcom_iommu_capable,
	.domain_alloc_paging = qcom_iommu_domain_alloc_paging,
	.probe_device	= qcom_iommu_probe_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= qcom_iommu_attach_dev,
		.map_pages	= qcom_iommu_map,
		.unmap_pages	= qcom_iommu_unmap,
		.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
		.iotlb_sync	= qcom_iommu_iotlb_sync,
		.iova_to_phys	= qcom_iommu_iova_to_phys,
		.free		= qcom_iommu_domain_free,
	}
};
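/*
 * If any context bank is secured, the secure world needs a pagetable
 * buffer of firmware-determined size.  It is allocated once, handed over
 * via SCM, and never freed afterwards (hence the function-local
 * 'allocated' flag).
 */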
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}

static int get_asid(const struct device_node *np)
{
	u32 reg, val;
	int asid;

	/*
	 * Read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	/*
	 * Context banks are 0x1000 apart but, in some cases, the ASID
	 * number doesn't match to this logic and needs to be passed
	 * from the DT configuration explicitly.
	 */
	if (!of_property_read_u32(np, "qcom,ctx-asid", &val))
		asid = val;
	else
		asid = reg / 0x1000;

	return asid;
}

static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	ctx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (of_device_is_compatible(dev->of_node, "qcom,msm-iommu-v2-sec"))
		ctx->secured_ctx = true;

	/*
	 * Clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	if (!ctx->secured_ctx)
		iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid] = ctx;

	return 0;
}

static void qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid] = NULL;
}

static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ .compatible = "qcom,msm-iommu-v2-ns" },
	{ .compatible = "qcom,msm-iommu-v2-sec" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= ctx_of_match,
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove_new = qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child) {
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec") ||
		    of_device_is_compatible(child, "qcom,msm-iommu-v2-sec")) {
			of_node_put(child);
			return true;
		}
	}

	return false;
}
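/*
 * Top-level probe: size the ctxs[] array from the highest ASID found among
 * the child context-bank nodes, grab the core clocks, optionally set up
 * the secure pagetable, then populate the child context devices and
 * register with the IOMMU core.
 */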
static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct clk *clk;
	int ret, max_asid = 0;

	/*
	 * Find the max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid + 1),
				  GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;
	qcom_iommu->max_asid = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(qcom_iommu->local_base))
			return PTR_ERR(qcom_iommu->local_base);
	}

	clk = devm_clk_get(dev, "iface");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(clk);
	}
	qcom_iommu->clks[CLK_IFACE].clk = clk;

	clk = devm_clk_get(dev, "bus");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(clk);
	}
	qcom_iommu->clks[CLK_BUS].clk = clk;

	clk = devm_clk_get_optional(dev, "tbu");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get tbu clock\n");
		return PTR_ERR(clk);
	}
	qcom_iommu->clks[CLK_TBU].clk = clk;

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		goto err_pm_disable;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		goto err_pm_disable;
	}

	ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		goto err_pm_disable;
	}

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;

err_pm_disable:
	pm_runtime_disable(dev);
	return ret;
}

static void qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);
}

static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);

	return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ .compatible = "qcom,msm-iommu-v2" },
	{ /* sentinel */ }
};
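/*
 * Two platform drivers cooperate here: qcom_iommu_driver binds the
 * top-level node, and qcom_iommu_ctx_driver binds each context-bank child.
 * qcom_iommu_init() registers the ctx driver first, so the children
 * created by devm_of_platform_populate() have a driver available as soon
 * as they appear.
 */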
static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= qcom_iommu_of_match,
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove_new = qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);