// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"

struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	/* Set once arm_smmu_mm_release() has invalidated the TLB and ATC */
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

struct arm_smmu_bond {
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
};

static DEFINE_MUTEX(sva_lock);

/*
 * Write the CD to the CD tables for all masters that this domain is attached
 * to. Note that this is only used to update existing CD entries in the target
 * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
 */
static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
					   int ssid,
					   struct arm_smmu_ctx_desc *cd)
{
	struct arm_smmu_master *master;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
		arm_smmu_write_ctx_desc(master, ssid, cd);
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new ASID,
	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
	 * later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}

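/*
 * Allocate a context descriptor that shares the CPU page tables of @mm: pin
 * the mm and its CPU ASID, take over the ASID if a private context descriptor
 * currently owns it, and mirror the CPU's stage-1 configuration (TCR, TTBR,
 * MAIR) into the new CD.
 */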
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	/* Don't free the mm until we release the ASID */
	mmgrab(mm);

	asid = arm64_mm_context_get(mm);
	if (!asid) {
		err = -ESRCH;
		goto out_drop_mm;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * MAIR value is pretty much constant and global, so we can just get it
	 * from the current CPU register
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
out_drop_mm:
	mmdrop(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}

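/*
 * Release a reference to a CD obtained from arm_smmu_alloc_shared_cd(). The
 * last reference frees the ASID, unpins the mm context and drops the mm
 * reference.
 */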
static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		mmdrop(cd->mm);
		kfree(cd);
	}
}

/*
 * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h: if an SMMU
 * without the range invalidation feature would otherwise have to issue at
 * least this many per-page TLBI commands for a single invalidation, replace
 * them with one per-ASID TLBI command. Queueing that many per-page commands
 * can otherwise result in a soft lockup.
 */
#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))

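/*
 * mmu_notifier callback: the CPU is invalidating [start, end) in @mm, so
 * mirror the invalidation to the SMMU TLB, unless broadcast TLB maintenance
 * (BTM) already covers it, and to the device ATCs.
 */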
static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size;

	/*
	 * mm_types defines vm_end as the first byte after the end address,
	 * whereas the IOMMU subsystem uses the last address of a range, so
	 * convert the [start, end) interval into a size here. A size of zero
	 * means "invalidate the whole address space".
	 */
	size = end - start;
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
			size = 0;
	} else {
		if (size == ULONG_MAX)
			size = 0;
	}

	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
		if (!size)
			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
					      smmu_mn->cd->asid);
		else
			arm_smmu_tlb_inv_range_asid(start, size,
						    smmu_mn->cd->asid,
						    PAGE_SIZE, false,
						    smmu_domain);
	}

	arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), start,
				size);
}

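/*
 * mmu_notifier release callback, called when the address space exits. The mm
 * page tables are about to be freed, so stop DMA translation and invalidate
 * the TLB and ATC entries for this ASID/PASID.
 */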
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
	 * but disable translation.
	 */
	arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
					 &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.arch_invalidate_secondary_tlbs	= arm_smmu_mm_arch_invalidate_secondary_tlbs,
	.release			= arm_smmu_mm_release,
	.free_notifier			= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;
	struct arm_smmu_master *master;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
		ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
					      cd);
		if (ret) {
			/* Unwind the CD entries programmed so far */
			list_for_each_entry_from_reverse(
				master, &smmu_domain->devices, domain_head)
				arm_smmu_write_ctx_desc(
					master, mm_get_enqcmd_pasid(mm), NULL);
			break;
		}
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
	if (ret)
		goto err_put_notifier;

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_put_notifier:
	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}

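/*
 * Drop a reference to a {domain, mm} notifier. The last reference clears the
 * CD from all attached masters, invalidates any remaining TLB and ATC entries
 * (unless release() already did), and frees the notifier and the shared CD.
 */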
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);

	arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
					 NULL);

	/*
	 * If we went through clear(), we've already invalidated, and no
	 * new TLB entry can have been formed.
	 */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0,
					0);
	}

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}

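/*
 * Bind @mm to the SVA-enabled master for @dev and track the bond on the
 * master. Caller must hold sva_lock.
 */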
static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!master || !master->sva_enabled)
		return -ENODEV;

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return -ENOMEM;

	bond->mm = mm;

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_bond;
	}

	list_add(&bond->list, &master->bonds);
	return 0;

err_free_bond:
	kfree(bond);
	return ret;
}

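/*
 * Check that the SMMU can mirror the CPU's MMU configuration: it must be
 * cache-coherent (plus VAX for 52-bit VAs), support the CPU page size, cover
 * at least the CPU's output address size and provide at least as many ASID
 * bits as the CPU.
 */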
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

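/*
 * Connect the master to the event queue's IOPF queue and register the generic
 * I/O page fault handler, for devices that rely on stall-based fault handling.
 */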
static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	int ret;
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
	if (ret)
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret) {
		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
		return ret;
	}
	return 0;
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

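/*
 * Enable SVA on the master: set up IOPF if the device needs it, then mark the
 * master so that subsequent bind calls are accepted.
 */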
int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}

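/*
 * Tear down the bond between @dev and the mm of the SVA @domain: remove it
 * from the master's bond list and drop the notifier reference, which detaches
 * the CD and invalidates any remaining TLB and ATC entries.
 */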
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t id)
{
	struct mm_struct *mm = domain->mm;
	struct arm_smmu_bond *bond = NULL, *t;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	mutex_lock(&sva_lock);
	list_for_each_entry(t, &master->bonds, list) {
		if (t->mm == mm) {
			bond = t;
			break;
		}
	}

	if (!WARN_ON(!bond)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}

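/*
 * iommu_domain_ops callback: bind the SVA domain's mm to @dev. The shared CD
 * is installed at the mm's enqcmd PASID.
 */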
static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t id)
{
	int ret;
	struct mm_struct *mm = domain->mm;

	mutex_lock(&sva_lock);
	ret = __arm_smmu_sva_bind(dev, mm);
	mutex_unlock(&sva_lock);

	return ret;
}

static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
{
	kfree(domain);
}

static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
	.set_dev_pasid		= arm_smmu_sva_set_dev_pasid,
	.free			= arm_smmu_sva_domain_free
};

struct iommu_domain *arm_smmu_sva_domain_alloc(void)
{
	struct iommu_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;
	domain->ops = &arm_smmu_sva_domain_ops;

	return domain;
}