/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __X86_IOMMU_INTEL_DMAR_H
#define	__X86_IOMMU_INTEL_DMAR_H

#include <sys/iommu.h>

struct dmar_unit;

RB_HEAD(dmar_gas_entries_tree, iommu_map_entry);
RB_PROTOTYPE(dmar_gas_entries_tree, iommu_map_entry, rb_entry,
    dmar_gas_cmp_entries);

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

/*
 * The domain abstraction.  Most non-constant members of the domain
 * are protected by the owning dmar unit lock, not by the domain lock.
 * Most importantly, the dmar lock protects the contexts list.
 *
 * The domain lock protects the address map for the domain, and the
 * list of delayed unload entries.
 *
 * Page table pages and their content are protected by the vm object
 * lock pgtbl_obj, which contains the page table pages.
 */
struct dmar_domain {
	struct iommu_domain iodom;
	int domain;			/* (c) DID, written in context entry */
	int mgaw;			/* (c) Real max address width */
	int agaw;			/* (c) Adjusted guest address width */
	int pglvl;			/* (c) The pagelevel */
	int awlvl;			/* (c) The pagelevel as the bitmask,
					   to set in context entry */
	iommu_gaddr_t end;		/* (c) Highest address + 1 in
					   the guest AS */
	u_int ctx_cnt;			/* (u) Number of contexts owned */
	u_int refs;			/* (u) Refs, including ctx */
	struct dmar_unit *dmar;		/* (c) */
	LIST_ENTRY(dmar_domain) link;	/* (u) Member in the dmar list */
	LIST_HEAD(, dmar_ctx) contexts;	/* (u) */
	vm_object_t pgtbl_obj;		/* (c) Page table pages */
	u_int flags;			/* (u) */
	struct dmar_gas_entries_tree rb_root; /* (d) */
	struct iommu_map_entry *first_place, *last_place; /* (d) */
	u_int batch_no;
};

struct dmar_ctx {
	struct iommu_ctx context;
	uint16_t rid;			/* (c) pci RID */
	uint64_t last_fault_rec[2];	/* Last fault reported */
	LIST_ENTRY(dmar_ctx) link;	/* (u) Member in the domain list */
	u_int refs;			/* (u) References from tags */
};
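/*
 * Illustrative sketch (not part of the driver): the two-level locking
 * discipline described above.  The contexts list is annotated (u), so
 * walking it takes the owning unit lock, not the domain lock.  The
 * helper example_count_ctxs() is hypothetical and kept under #if 0.
 */
#if 0
static int
example_count_ctxs(struct dmar_domain *domain)
{
	struct dmar_ctx *ctx;
	int n;

	n = 0;
	DMAR_LOCK(domain->dmar);	/* contexts list is (u) */
	LIST_FOREACH(ctx, &domain->contexts, link)
		n++;
	DMAR_UNLOCK(domain->dmar);
	return (n);
}
#endif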
#define	DMAR_DOMAIN_GAS_INITED		0x0001
#define	DMAR_DOMAIN_PGTBL_INITED	0x0002
#define	DMAR_DOMAIN_IDMAP		0x0010	/* Domain uses identity
						   page table */
#define	DMAR_DOMAIN_RMRR		0x0020	/* Domain contains RMRR entry,
						   cannot be turned off */

#define	DMAR_DOMAIN_PGLOCK(dom)		VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGTRYLOCK(dom)	VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGUNLOCK(dom)	VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
	VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)

#define	DMAR_DOMAIN_LOCK(dom)	mtx_lock(&(dom)->iodom.lock)
#define	DMAR_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->iodom.lock)
#define	DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)
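/*
 * Illustrative sketch (not part of the driver): (d)-annotated members
 * such as rb_root must be accessed under the domain lock taken with the
 * macros above.  example_domain_has_entries() is hypothetical.
 */
#if 0
static bool
example_domain_has_entries(struct dmar_domain *domain)
{
	bool res;

	DMAR_DOMAIN_LOCK(domain);
	res = !RB_EMPTY(&domain->rb_root);
	DMAR_DOMAIN_UNLOCK(domain);
	return (res);
}
#endif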
struct dmar_msi_data {
	int irq;
	int irq_rid;
	struct resource *irq_res;
	void *intr_handle;
	int (*handler)(void *);
	int msi_data_reg;
	int msi_addr_reg;
	int msi_uaddr_reg;
	void (*enable_intr)(struct dmar_unit *);
	void (*disable_intr)(struct dmar_unit *);
	const char *name;
};

#define	DMAR_INTR_FAULT		0
#define	DMAR_INTR_QI		1
#define	DMAR_INTR_TOTAL		2

struct dmar_unit {
	struct iommu_unit iommu;
	device_t dev;
	uint16_t segment;
	uint64_t base;

	/* Resources */
	int reg_rid;
	struct resource *regs;

	struct dmar_msi_data intrs[DMAR_INTR_TOTAL];

	/* Hardware registers cache */
	uint32_t hw_ver;
	uint64_t hw_cap;
	uint64_t hw_ecap;
	uint32_t hw_gcmd;

	/* Data for being a dmar */
	LIST_HEAD(, dmar_domain) domains;
	struct unrhdr *domids;
	vm_object_t ctx_obj;
	u_int barrier_flags;

	/* Fault handler data */
	struct mtx fault_lock;
	uint64_t *fault_log;
	int fault_log_head;
	int fault_log_tail;
	int fault_log_size;
	struct task fault_task;
	struct taskqueue *fault_taskqueue;

	/* QI */
	int qi_enabled;
	vm_offset_t inv_queue;
	vm_size_t inv_queue_size;
	uint32_t inv_queue_avail;
	uint32_t inv_queue_tail;
	volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait
					       descr completion */
	uint64_t inv_waitd_seq_hw_phys;
	uint32_t inv_waitd_seq;	/* next sequence number to use for wait descr */
	u_int inv_waitd_gen;	/* seq number generation AKA seq overflows */
	u_int inv_seq_waiters;	/* count of waiters for seq */
	u_int inv_queue_full;	/* informational counter */

	/* IR */
	int ir_enabled;
	vm_paddr_t irt_phys;
	dmar_irte_t *irt;
	u_int irte_cnt;
	vmem_t *irtids;

	/* Delayed freeing of map entries queue processing */
	struct iommu_map_entries_tailq tlb_flush_entries;
	struct task qi_task;
	struct taskqueue *qi_taskqueue;

	/*
	 * Bitmap of buses for which context must ignore slot:func,
	 * duplicating the page table pointer into all context table
	 * entries.  This is a client-controlled quirk to support some
	 * NTBs.
	 */
	uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
};

#define	DMAR_LOCK(dmar)		mtx_lock(&(dmar)->iommu.lock)
#define	DMAR_UNLOCK(dmar)	mtx_unlock(&(dmar)->iommu.lock)
#define	DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)

#define	DMAR_FAULT_LOCK(dmar)	mtx_lock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_UNLOCK(dmar)	mtx_unlock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)

#define	DMAR_IS_COHERENT(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
#define	DMAR_HAS_QI(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
#define	DMAR_X2APIC(dmar) \
	(x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)

/* Barrier ids */
#define	DMAR_BARRIER_RMRR	0
#define	DMAR_BARRIER_USEQ	1
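/*
 * Illustrative sketch (not part of the driver): the buswide_ctxs bitmap
 * above packs one bit per PCI bus, so bus busno lives in word
 * busno / 32 at bit busno % 32.  example_is_buswide() restates that
 * indexing; the real query is dmar_is_buswide_ctx(), declared below.
 */
#if 0
static bool
example_is_buswide(struct dmar_unit *unit, u_int busno)
{
	return ((unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] &
	    (1U << (busno % (NBBY * sizeof(uint32_t))))) != 0);
}
#endif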
struct dmar_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);

u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
    bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
    iommu_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
    struct sf_buf **sf);
void dmar_unmap_pgtbl(struct sf_buf *sf);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
int dmar_load_irt_ptr(struct dmar_unit *unit);
int dmar_enable_ir(struct dmar_unit *unit);
int dmar_disable_ir(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);

int dmar_fault_intr(void *arg);
void dmar_enable_fault_intr(struct dmar_unit *unit);
void dmar_disable_fault_intr(struct dmar_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);

int dmar_qi_intr(void *arg);
void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);

vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
    iommu_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
int domain_map_buf(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
int domain_unmap_buf(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);

int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);

struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free);
void dmar_domain_unload(struct dmar_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep);
void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);
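/*
 * Illustrative sketch (not part of the driver): a context obtained with
 * dmar_get_ctx_for_dev() is reference-counted and is released with
 * dmar_free_ctx() when no longer needed.  example_probe_ctx() is
 * hypothetical.
 */
#if 0
static bool
example_probe_ctx(struct dmar_unit *dmar, device_t dev, uint16_t rid)
{
	struct dmar_ctx *ctx;

	ctx = dmar_get_ctx_for_dev(dmar, dev, rid, false, false);
	if (ctx == NULL)
		return (false);
	dmar_free_ctx(ctx);	/* drop the reference taken above */
	return (true);
}
#endif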
void dmar_gas_init_domain(struct dmar_domain *domain);
void dmar_gas_fini_domain(struct dmar_domain *domain);
struct iommu_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain,
    u_int flags);
void dmar_gas_free_entry(struct dmar_domain *domain,
    struct iommu_map_entry *entry);
void dmar_gas_free_space(struct dmar_domain *domain,
    struct iommu_map_entry *entry);
int dmar_gas_map(struct dmar_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
void dmar_gas_free_region(struct dmar_domain *domain,
    struct iommu_map_entry *entry);
int dmar_gas_map_region(struct dmar_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int dmar_gas_reserve_region(struct dmar_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end);

void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
    int dev_busno, const void *dev_path, int dev_path_len,
    struct iommu_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);

void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct iommu_unit *dmar);

int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);

void dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno);

/* Map flags */
#define	IOMMU_MF_CANWAIT	0x0001
#define	IOMMU_MF_CANSPLIT	0x0002
#define	IOMMU_MF_RMRR		0x0004

#define	DMAR_PGF_WAITOK		0x0001
#define	DMAR_PGF_ZERO		0x0002
#define	DMAR_PGF_ALLOC		0x0004
#define	DMAR_PGF_NOALLOC	0x0008
#define	DMAR_PGF_OBJL		0x0010

extern iommu_haddr_t dmar_high;
extern int haw;
extern int dmar_tbl_pagecnt;
extern int dmar_batch_coalesce;
extern int dmar_check_free;
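/*
 * Illustrative sketch (not part of the driver): the DMAR_PGF_* flags
 * above combine to describe a page-table page allocation.  The flag
 * semantics assumed here (WAITOK = may sleep, ZERO = return a zeroed
 * page, ALLOC = allocate if not present, OBJL = object lock already
 * held) are an interpretation; example_alloc_ptpage() is hypothetical.
 */
#if 0
static vm_page_t
example_alloc_ptpage(vm_object_t obj, vm_pindex_t idx)
{
	/* Sleepable allocation of a zeroed page at index idx. */
	return (dmar_pgalloc(obj, idx, DMAR_PGF_ZERO | DMAR_PGF_WAITOK |
	    DMAR_PGF_ALLOC));
}
#endif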
static inline uint32_t
dmar_read4(const struct dmar_unit *unit, int reg)
{

	return (bus_read_4(unit->regs, reg));
}

static inline uint64_t
dmar_read8(const struct dmar_unit *unit, int reg)
{
#ifdef __i386__
	uint32_t high, low;

	low = bus_read_4(unit->regs, reg);
	high = bus_read_4(unit->regs, reg + 4);
	return (low | ((uint64_t)high << 32));
#else
	return (bus_read_8(unit->regs, reg));
#endif
}

static inline void
dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
{

	KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
	    (unit->hw_gcmd & DMAR_GCMD_TE),
	    ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
	    unit->hw_gcmd, val));
	bus_write_4(unit->regs, reg, val);
}

static inline void
dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
{

	KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
#ifdef __i386__
	uint32_t high, low;

	low = val;
	high = val >> 32;
	bus_write_4(unit->regs, reg, low);
	bus_write_4(unit->regs, reg + 4, high);
#else
	bus_write_8(unit->regs, reg, val);
#endif
}

/*
 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes
 * are issued in the correct order.  For store, the lower word,
 * containing the P or R and W bits, is set only after the high word
 * is written.  For clear, the P bit is cleared first, then the high
 * word is cleared.
 *
 * dmar_pte_update updates the pte.  For amd64, the update is atomic.
 * For i386, it first disables the entry by clearing the word
 * containing the P bit, and then defers to dmar_pte_store1.  The
 * locked cmpxchg8b is probably available on any machine having DMAR
 * support, but the interrupt translation table may be mapped uncached.
 */
static inline void
dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
	volatile uint32_t *p;
	uint32_t hi, lo;

	hi = val >> 32;
	lo = val;
	p = (volatile uint32_t *)dst;
	*(p + 1) = hi;
	*p = lo;
#else
	*dst = val;
#endif
}

static inline void
dmar_pte_store(volatile uint64_t *dst, uint64_t val)
{

	KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
	    dst, (uintmax_t)*dst, (uintmax_t)val));
	dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_update(volatile uint64_t *dst, uint64_t val)
{

#ifdef __i386__
	volatile uint32_t *p;

	p = (volatile uint32_t *)dst;
	*p = 0;
#endif
	dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_clear(volatile uint64_t *dst)
{
#ifdef __i386__
	volatile uint32_t *p;

	p = (volatile uint32_t *)dst;
	*p = 0;
	*(p + 1) = 0;
#else
	*dst = 0;
#endif
}

extern struct timespec dmar_hw_timeout;

/*
 * Spin until cond becomes true or dmar_hw_timeout elapses; a zero
 * timeout means wait forever.  The macro expects an int variable
 * named error in the enclosing scope and sets it to 0 or ETIMEDOUT.
 */
#define	DMAR_WAIT_UNTIL(cond)					\
{								\
	struct timespec last, curr;				\
	bool forever;						\
								\
	if (dmar_hw_timeout.tv_sec == 0 &&			\
	    dmar_hw_timeout.tv_nsec == 0) {			\
		forever = true;					\
	} else {						\
		forever = false;				\
		nanouptime(&curr);				\
		timespecadd(&curr, &dmar_hw_timeout, &last);	\
	}							\
	for (;;) {						\
		if (cond) {					\
			error = 0;				\
			break;					\
		}						\
		nanouptime(&curr);				\
		if (!forever && timespeccmp(&last, &curr, <)) {	\
			error = ETIMEDOUT;			\
			break;					\
		}						\
		cpu_spinwait();					\
	}							\
}

#ifdef INVARIANTS
#define	TD_PREP_PINNED_ASSERT						\
	int old_td_pinned;						\
	old_td_pinned = curthread->td_pinned
#define	TD_PINNED_ASSERT						\
	KASSERT(curthread->td_pinned == old_td_pinned,			\
	    ("pin count leak: %d %d %s:%d", curthread->td_pinned,	\
	    old_td_pinned, __FILE__, __LINE__))
#else
#define	TD_PREP_PINNED_ASSERT
#define	TD_PINNED_ASSERT
#endif

#endif