/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

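/*
 * Illustrative example (not part of the API): callers compose the flags
 * above into the 'prot' argument of the mapping functions declared later
 * in this header. A hypothetical driver mapping a coherent, read/write
 * buffer that should only be reachable by privileged transactions might
 * do something like:
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE | IOMMU_PRIV;
 *
 *	ret = iommu_map(domain, iova, paddr, size, prot);
 */
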
struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

/*
 * These are the possible domain types
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use cases (USB, graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_PASID_INVALID	(-1U)

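/*
 * Illustrative sketch only: given the ordering requirement documented
 * above, a hypothetical device driver wanting SVA would enable the
 * features like this (and disable them in the reverse order):
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *	if (ret)
 *		return ret;
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret) {
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *		return ret;
 *	}
 */
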
#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_domain_ops before eventually being
 * passed into ->iotlb_sync(). Drivers can add pages to @freelist to be
 * freed after ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached
 * references to them. @queued is set to indicate when ->iotlb_flush_all()
 * will be called later instead of ->iotlb_sync(), so drivers may optimise
 * accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

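/*
 * Illustrative sketch only: a caller on the "fast" unmap path batches
 * invalidations through a gather structure and issues a single sync at
 * the end, e.g.:
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */
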
/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                               iommu specific features.
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated with an SVA handle
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	u32 (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
};

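/*
 * Illustrative sketch only: an IOMMU driver typically provides a static
 * instance of these ops. The names prefixed my_iommu_ below are
 * hypothetical placeholders, not functions defined anywhere:
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable		= my_iommu_capable,
 *		.domain_alloc		= my_iommu_domain_alloc,
 *		.probe_device		= my_iommu_probe_device,
 *		.release_device		= my_iommu_release_device,
 *		.device_group		= my_iommu_device_group,
 *		.pgsize_bitmap		= SZ_4K | SZ_2M | SZ_1G,
 *		.owner			= THIS_MODULE,
 *		.default_domain_ops	= &my_iommu_domain_ops,
 *	};
 */
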
/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 * @detach_dev: detach an iommu domain from a device
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);

	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			       size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

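/*
 * Illustrative sketch only, showing the shape of the @map_pages contract:
 * the callback maps @pgcount pages of size @pgsize and reports partial
 * progress through @mapped so the core can unwind on failure. Both
 * my_iommu_map_pages() and my_iommu_map_one() are hypothetical
 * placeholders:
 *
 *	static int my_iommu_map_pages(struct iommu_domain *domain,
 *				      unsigned long iova, phys_addr_t paddr,
 *				      size_t pgsize, size_t pgcount,
 *				      int prot, gfp_t gfp, size_t *mapped)
 *	{
 *		int ret = 0;
 *		size_t i;
 *
 *		for (i = 0; i < pgcount; i++) {
 *			ret = my_iommu_map_one(domain, iova + i * pgsize,
 *					       paddr + i * pgsize, pgsize,
 *					       prot, gfp);
 *			if (ret)
 *				break;
 *			*mapped += pgsize;
 *		}
 *		return ret;
 *	}
 */
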
/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this IOMMU instance
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as a page request or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @lock: Lock protecting this structure
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param:	 I/O Page Fault queue and data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iopf_device_param	*iopf_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Assume that valid ops must be installed if iommu_probe_device()
	 * has succeeded. The device ops are essentially for internal use
	 * within the IOMMU subsystem itself, so we should be able to trust
	 * ourselves not to misuse the helper.
	 */
	return dev->iommu->iommu_dev->ops;
}

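/*
 * Illustrative sketch only: an IOMMU driver registers its hardware
 * instance with the core from its probe path, sysfs first. The names
 * prefixed my_ are hypothetical placeholders:
 *
 *	ret = iommu_device_sysfs_add(&my_iommu->iommu, dev, NULL, "%s",
 *				     dev_name(dev));
 *	if (ret)
 *		return ret;
 *
 *	ret = iommu_device_register(&my_iommu->iommu, &my_iommu_ops, dev);
 *	if (ret) {
 *		iommu_device_sysfs_remove(&my_iommu->iommu);
 *		return ret;
 *	}
 */
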
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot);
extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

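/*
 * Illustrative sketch only: the classic IOMMU-API flow for an
 * UNMANAGED-domain user (e.g. a VFIO-like driver) is to allocate a
 * domain, attach the device's group, and manage mappings explicitly.
 * 'dev', 'iova' and 'paddr' are assumed to exist; error handling is
 * elided:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	iommu_attach_group(domain, group);
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 *	... DMA happens ...
 *
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_group(domain, group);
 *	iommu_group_put(group);
 *	iommu_domain_free(domain);
 */
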
/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}


/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: device to which the mm is bound
 */
struct iommu_sva {
	struct device			*dev;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

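/*
 * Illustrative sketch only: firmware glue (e.g. OF or IORT code) builds a
 * fwspec for a master device by recording which IOMMU instance it sits
 * behind and the ID it presents; 'iommu_fwnode', 'ops' and 'sid' are
 * assumed to exist:
 *
 *	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 */
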
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot)
{
	return -ENODEV;
}

static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}
#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}

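/*
 * Illustrative sketch only: a driver that already holds a buffer described
 * by an sg_table can map the whole buffer with a single call; the return
 * value is the number of bytes actually mapped. 'domain', 'iova' and 'sgt'
 * are assumed to exist:
 *
 *	size_t mapped;
 *
 *	mapped = iommu_map_sgtable(domain, iova, sgt,
 *				   IOMMU_READ | IOMMU_WRITE);
 */
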
#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
}

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif /* CONFIG_IOMMU_DMA */

#endif /* __LINUX_IOMMU_H */