// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "pasid.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;

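/*
 * Virtual command interface for PASID allocation. When the IOMMU
 * advertises the virtual command capability (typically a vIOMMU
 * presented to a guest), PASIDs are allocated and freed by writing
 * VCMD_CMD_ALLOC/VCMD_CMD_FREE to the virtual command register and
 * polling the virtual response register until the In Progress (IP)
 * bit clears. The response carries a status code and, on success,
 * the allocated PASID value.
 */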
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	int ret = 0;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}

void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}

/*
 * Per device pasid table management:
 */
static inline void
device_attach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = pasid_table;
	list_add(&info->table, &pasid_table->dev);
}

static inline void
device_detach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = NULL;
	list_del(&info->table);
}
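
/*
 * PCI devices that alias to the same requester ID (e.g. devices behind
 * a PCIe-to-PCI bridge) must share one PASID table, since the IOMMU
 * indexes its tables by the requester ID it observes. The helpers
 * below walk the DMA aliases of a device and reuse an already attached
 * PASID table when one exists.
 */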

struct pasid_table_opaque {
	struct pasid_table	**pasid_table;
	int			segment;
	int			bus;
	int			devfn;
};

static int search_pasid_table(struct device_domain_info *info, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	if (info->iommu->segment == data->segment &&
	    info->bus == data->bus &&
	    info->devfn == data->devfn &&
	    info->pasid_table) {
		*data->pasid_table = info->pasid_table;
		return 1;
	}

	return 0;
}

static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	data->segment = pci_domain_nr(pdev->bus);
	data->bus = PCI_BUS_NUM(alias);
	data->devfn = alias & 0xff;

	return for_each_device_domain(&search_pasid_table, data);
}

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-threaded context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_table_opaque data;
	struct page *pages;
	u32 max_pasid = 0;
	int ret, order;
	int size;

	might_sleep();
	info = get_domain_info(dev);
	if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
		return -EINVAL;

	/* DMA alias device already has a pasid table, use it: */
	data.pasid_table = &pasid_table;
	ret = pci_for_each_dma_alias(to_pci_dev(dev),
				     &get_alias_pasid_table, &data);
	if (ret)
		goto attach_out;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;
	INIT_LIST_HEAD(&pasid_table->dev);

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

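	/*
	 * Size of the PASID directory in bytes: each 8-byte (1 << 3)
	 * directory entry covers 1 << PASID_PDE_SHIFT PASIDs, hence
	 * the shift by (PASID_PDE_SHIFT - 3).
	 */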
	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

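	/*
	 * Record the effective maximum PASID implied by the allocation:
	 * the pages hold 1 << (order + PAGE_SHIFT - 3) eight-byte
	 * directory entries, each covering 1 << PASID_PDE_SHIFT PASIDs;
	 * with PASID_PDE_SHIFT == 6 this folds to the single shift
	 * below.
	 */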
	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);

attach_out:
	device_attach_pasid_table(info, pasid_table);

	return 0;
}

void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = get_domain_info(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	device_detach_pasid_table(info, pasid_table);

	if (!list_empty(&pasid_table->dev))
		return;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = get_domain_info(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}

static int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = get_domain_info(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

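/*
 * Look up (and lazily allocate) the leaf PASID table entry for @pasid.
 * The PASID value is split in two: the upper bits
 * (pasid >> PASID_PDE_SHIFT) index the PASID directory, while the low
 * bits (pasid & PASID_PTE_MASK) index the leaf table that the
 * directory entry points to.
 */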
static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = get_domain_info(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node);
		if (!entries)
			return NULL;

		/*
		 * The pasid directory table entry won't be freed after
		 * allocation. No worry about the race with free and
		 * clear. However, this entry might be populated by others
		 * while we are preparing it. Use theirs with a retry.
		 */
		if (cmpxchg64(&dir[dir_index].val, 0ULL,
			      (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			free_pgtable_page(entries);
			goto retry;
		}
	}

	return &entries[index];
}

/*
 * Interfaces for PASID table entry manipulation:
 */
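
/*
 * A scalable mode PASID table entry is 512 bits wide, mapped here as
 * eight 64-bit words (pe->val[0..7]). WRITE_ONCE() keeps each 64-bit
 * store tear-free, so a concurrent hardware walk never observes a
 * partially written qword.
 */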
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}

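/*
 * Replace the bits selected by @mask in the 64-bit word at @ptr with
 * @bits. This is a plain read-modify-write, not an atomic update;
 * callers are expected to serialize updates to a PASID entry among
 * themselves, while READ_ONCE()/WRITE_ONCE() keep the store tear-free
 * with respect to the hardware page walker.
 */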
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

/*
 * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_eafe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}

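/*
 * Submit a PASID-selective PASID cache invalidation descriptor to the
 * invalidation queue and wait synchronously for it to complete.
 */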
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = get_domain_info(dev);
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * PASID 0 is RID2PASID, which represents DMA requests without
	 * PASID, so a devTLB flush without PASID must be used. For a
	 * non-zero PASID under SVA usage, a device can do DMA with
	 * multiple PASIDs; it is more efficient to flush only the
	 * devTLB entries specific to that PASID.
	 */
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}

void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did;

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return;

	if (!(pte->val[0] & PASID_PTE_PRESENT))
		return;

	did = pasid_get_domain_id(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}

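/*
 * Flush caches after a PASID table entry has been updated: flush the
 * CPU cache lines backing the entry if the IOMMU is not coherent, and,
 * in caching mode (a virtualized IOMMU may cache not-present entries),
 * explicitly invalidate the PASID cache and the PASID-based IOTLB so
 * the change is observed. On real hardware a write buffer flush is
 * sufficient for an entry that just became present.
 */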
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}

static inline int pasid_enable_wpe(struct pasid_entry *pte)
{
#ifdef CONFIG_X86
	unsigned long cr0 = read_cr0();

	/* CR0.WP is normally set but just to be sure */
	if (unlikely(!(cr0 & X86_CR0_WP))) {
		pr_err_ratelimited("No CPU write protect!\n");
		return -EINVAL;
	}
#endif
	pasid_set_wpe(pte);

	return 0;
}

/*
 * Set up the scalable mode pasid table entry for first level only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
		if (pasid_enable_wpe(pte))
			return -EINVAL;
	}

	if (flags & PASID_FLAG_FL5LP) {
		if (cap_5lp_support(iommu->cap)) {
			pasid_set_flpm(pte, 1);
		} else {
			pr_err("No 5-level paging support for first-level\n");
			pasid_clear_entry(pte);
			return -EINVAL;
		}
	}

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Setup Present and PASID Granular Translation Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Skip the top levels of the page tables when the IOMMU has a smaller
 * agaw than the domain, descending one level per iteration through the
 * uppermost PDE. Unnecessary for PT mode.
 */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}

/*
 * Set up the scalable mode pasid entry for second level only
 * translation type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain->iommu_did[iommu->seq_id];

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
		pasid_set_pgsnp(pte);

	/*
	 * Since it is a second level only translation setup, we should
	 * set SRE bit as well (addresses are expected to be GPAs).
	 */
	pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * We should set SRE bit as well since the addresses are expected
	 * to be GPAs.
	 */
	pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

static int
intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
			    struct iommu_gpasid_bind_data_vtd *pasid_data)
{
	/*
	 * Not all guest PASID table entry fields are passed down during bind,
	 * here we only set up the ones that are dependent on guest settings.
	 * Execution related bits such as NXE, SMEP are not supported.
	 * Other fields, such as snoop related, are set based on host needs
	 * regardless of guest settings.
	 */
	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err_ratelimited("No supervisor request support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
		/* Enable write protect (WPE) if the guest requested it */
		if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE)
			pasid_set_wpe(pte);
	}

	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
		if (!ecap_eafs(iommu->ecap)) {
			pr_err_ratelimited("No extended access flag support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_eafe(pte);
	}

	/*
	 * Memory type is only applicable to devices inside processor coherent
	 * domain. Will add MTS support once coherent devices are available.
	 */
	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
		pr_warn_ratelimited("No memory type support on %s\n",
				    iommu->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
 * This could be used for guest shared virtual address. In this case, the
 * first level page tables are used for GVA-GPA translation in the guest,
 * second level page tables are used for GPA-HPA translation.
 *
 * @iommu:      IOMMU which the device belongs to
 * @dev:        Device to be set up for translation
 * @gpgd:       FLPTPTR: First Level Page translation pointer in GPA
 * @pasid:      PASID to be programmed in the device PASID table
 * @pasid_data: Additional PASID info from the guest bind request
 * @domain:     Domain info for setting up second level page tables
 * @addr_width: Address width of the first level (guest)
 */
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     pgd_t *gpgd, u32 pasid,
			     struct iommu_gpasid_bind_data_vtd *pasid_data,
			     struct dmar_domain *domain, int addr_width)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	int ret = 0;
	u64 pgd_val;
	int agaw;
	u16 did;

	if (!ecap_nest(iommu->ecap)) {
		pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
				   iommu->name);
		return -EINVAL;
	}

	if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
		pr_err_ratelimited("Domain is not in nesting mode, %x\n",
				   domain->flags);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	/*
	 * Caller must ensure PASID entry is not in use, i.e. not bind the
	 * same PASID to the same device twice.
	 */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);

	/*
	 * Sanity checking performed by the caller makes sure the address
	 * width matches in two dimensions:
	 * 1. CPU vs. IOMMU
	 * 2. Guest vs. Host.
	 */
	switch (addr_width) {
#ifdef CONFIG_X86
	case ADDR_WIDTH_5LEVEL:
		if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
		    !cap_5lp_support(iommu->cap)) {
			dev_err_ratelimited(dev,
					    "5-level paging not supported\n");
			return -EINVAL;
		}

		pasid_set_flpm(pte, 1);
		break;
#endif
	case ADDR_WIDTH_4LEVEL:
		pasid_set_flpm(pte, 0);
		break;
	default:
		dev_err_ratelimited(dev, "Invalid guest address width %d\n",
				    addr_width);
		return -EINVAL;
	}

	/* First level PGD is in GPA, must be supported by the second level */
	if ((uintptr_t)gpgd > domain->max_addr) {
		dev_err_ratelimited(dev,
				    "Guest PGD %lx not supported, max %llx\n",
				    (uintptr_t)gpgd, domain->max_addr);
		return -EINVAL;
	}
	pasid_set_flptr(pte, (uintptr_t)gpgd);

	ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
	if (ret)
		return ret;

	/* Setup the second level based on the given domain */
	pgd = domain->pgd;

	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err_ratelimited(dev, "Invalid domain page table\n");
		return -EINVAL;
	}
	pgd_val = virt_to_phys(pgd);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_fault_enable(pte);

	did = domain->iommu_did[iommu->seq_id];
	pasid_set_domain_id(pte, did);

	pasid_set_address_width(pte, agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return ret;
}