// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva-lib.h"
#include "../../io-pgtable-arm.h"

/*
 * An arm_smmu_mmu_notifier tracks the shared context descriptor for one
 * {domain, mm} pair. It is refcounted by the bonds that use it and lives on
 * the domain's mmu_notifiers list.
 */
struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

/*
 * An arm_smmu_bond represents one {device, mm} binding; the embedded
 * iommu_sva is the handle returned to the device driver.
 */
struct arm_smmu_bond {
	struct iommu_sva		sva;
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
	refcount_t			refs;
};

#define sva_to_bond(handle) \
	container_of(handle, struct arm_smmu_bond, sva)

/* Serializes bind/unbind and SVA enable/disable across all masters */
static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it by moving the private context to
 * a freshly allocated ASID. Returns the shared context descriptor if the mm
 * is already bound, NULL once the ASID is available to the caller, or an
 * ERR_PTR on failure. Caller must hold arm_smmu_asid_lock.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new ASID,
	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
	 * later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, 0, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}

static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

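	/* Pin the ASID: the CPU must not reallocate it while the SMMU uses it */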
	asid = arm64_mm_context_get(mm);
	if (!asid)
		return ERR_PTR(-ESRCH);

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

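	/*
	 * Build a TCR mirroring the CPU configuration: T0SZ, TG0 and IPS match
	 * the CPU's VA bits, page size and PA range so the SMMU walks the mm's
	 * page tables exactly as the CPU's MMU would. EPD1 disables walks
	 * through TTBR1 (only user addresses are translated) and AA64 selects
	 * the AArch64 translation scheme.
	 */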
	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * MAIR value is pretty much constant and global, so we can just get it
	 * from the current CPU register
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}

static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		kfree(cd);
	}
}

static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size;

	/*
	 * MMU notifiers use an exclusive end address (the first byte after
	 * the range), while the IOMMU subsystem deals with the last address
	 * of a range. Convert by passing a size instead.
	 */
	size = end - start;

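	/*
	 * With BTM, the SMMU snoops the CPU's broadcast TLB invalidations, so
	 * an explicit TLBI command is only needed without it. ATCs never snoop
	 * broadcasts, so always invalidate them explicitly.
	 */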
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
					    PAGE_SIZE, false, smmu_domain);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}

static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
	 * but disable translation.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

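/*
 * Runs asynchronously after the final mmu_notifier_put(); module unload must
 * wait for it via arm_smmu_sva_notifier_synchronize().
 */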
static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.invalidate_range	= arm_smmu_mm_invalidate_range,
	.release		= arm_smmu_mm_release,
	.free_notifier		= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	if (ret)
		goto err_put_notifier;

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_put_notifier:
	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}

static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	/*
	 * If we went through arm_smmu_mm_release(), we've already invalidated,
	 * and no new TLB entry can have been formed.
	 */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
	}

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}

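/* Caller must hold sva_lock */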
static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	/* If bind() was already called for this {dev, mm} pair, reuse it. */
	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;
		}
	}

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	/* Allocate a PASID for this mm if necessary */
	ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
	if (ret)
		goto err_free_bond;

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_bond;
	}

	list_add(&bond->list, &master->bonds);
	return &bond->sva;

err_free_bond:
	kfree(bond);
	return ERR_PTR(ret);
}

struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_sva *handle;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
		return ERR_PTR(-EINVAL);

	mutex_lock(&sva_lock);
	handle = __arm_smmu_sva_bind(dev, mm);
	mutex_unlock(&sva_lock);
	return handle;
}
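
/*
 * Illustrative sketch (not part of this driver): a device driver reaches the
 * bind/unbind entry points above through the core IOMMU API, roughly as
 * follows (error handling elided):
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	pasid = iommu_sva_get_pasid(handle);
 *	// program the PASID into the device, issue DMA on current->mm...
 *	iommu_sva_unbind_device(handle);
 */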

void arm_smmu_sva_unbind(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	mutex_lock(&sva_lock);
	if (refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}

u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	return bond->mm->pasid;
}

bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	/* Stall is the only IOPF mechanism wired up here; PRI is not */
	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	int ret;
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
	if (ret)
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret) {
		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
		return ret;
	}
	return 0;
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}
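
/*
 * Illustrative sketch (not part of this driver): device drivers are expected
 * to enable the features in order before binding, e.g.:
 *
 *	iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *	iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	...
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 */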

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}