/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2015 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __X86_IOMMU_INTEL_DMAR_H
#define	__X86_IOMMU_INTEL_DMAR_H

#include <dev/iommu/iommu.h>

struct dmar_unit;

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

/*
 * The domain abstraction.  Most non-constant members of the domain
 * are protected by the owning dmar unit lock, not by the domain lock.
 * Most importantly, the dmar lock protects the contexts list.
 *
 * The domain lock protects the address map for the domain, and the
 * list of delayed unload entries.
 *
 * Page table pages and their content are protected by the lock of
 * the vm object pgtbl_obj, which contains the page table pages.
 */
struct dmar_domain {
	struct iommu_domain iodom;
	int domain;			/* (c) DID, written in context entry */
	int mgaw;			/* (c) Real max address width */
	int agaw;			/* (c) Adjusted guest address width */
	int pglvl;			/* (c) The pagelevel */
	int awlvl;			/* (c) The pagelevel as a bitmask,
					   to set in the context entry */
	u_int ctx_cnt;			/* (u) Number of contexts owned */
	u_int refs;			/* (u) Refs, including ctx */
	struct dmar_unit *dmar;		/* (c) */
	LIST_ENTRY(dmar_domain) link;	/* (u) Member in the dmar list */
	LIST_HEAD(, dmar_ctx) contexts;	/* (u) */
	vm_object_t pgtbl_obj;		/* (c) Page table pages */
	u_int batch_no;
};

struct dmar_ctx {
	struct iommu_ctx context;
	uint64_t last_fault_rec[2];	/* Last fault reported */
	LIST_ENTRY(dmar_ctx) link;	/* (u) Member in the domain list */
	u_int refs;			/* (u) References from tags */
};

#define	DMAR_DOMAIN_PGLOCK(dom)		VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGTRYLOCK(dom)	VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGUNLOCK(dom)	VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
	VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)

#define	DMAR_DOMAIN_LOCK(dom)	mtx_lock(&(dom)->iodom.lock)
#define	DMAR_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->iodom.lock)
#define	DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)
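
/*
 * Illustrative usage sketch for the locking protocol described above;
 * not part of the driver API.  Walking the (u) contexts list requires
 * the owning unit's lock (DMAR_LOCK(), defined below), while (d) state
 * such as the address map is covered by the domain lock.  visit_ctx()
 * is a hypothetical callback used only for this example:
 *
 *	struct dmar_ctx *ctx;
 *
 *	DMAR_LOCK(domain->dmar);
 *	LIST_FOREACH(ctx, &domain->contexts, link)
 *		visit_ctx(ctx);
 *	DMAR_UNLOCK(domain->dmar);
 *
 *	DMAR_DOMAIN_LOCK(domain);
 *	(manipulate the domain address map or delayed-unload list)
 *	DMAR_DOMAIN_UNLOCK(domain);
 */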

#define	DMAR2IOMMU(dmar)	&((dmar)->iommu)
#define	IOMMU2DMAR(dmar)	\
	__containerof((dmar), struct dmar_unit, iommu)

#define	DOM2IODOM(domain)	&((domain)->iodom)
#define	IODOM2DOM(domain)	\
	__containerof((domain), struct dmar_domain, iodom)

#define	CTX2IOCTX(ctx)		&((ctx)->context)
#define	IOCTX2CTX(ctx)		\
	__containerof((ctx), struct dmar_ctx, context)

#define	CTX2DOM(ctx)		IODOM2DOM((ctx)->context.domain)
#define	CTX2DMAR(ctx)		(CTX2DOM(ctx)->dmar)
#define	DOM2DMAR(domain)	((domain)->dmar)
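
/*
 * Conversion example (illustrative only): the generic iommu structures
 * are embedded in the DMAR-specific ones, so the macros above are
 * inverses built on __containerof().  Given a struct iommu_domain
 * *iodom handed out by the generic layer:
 *
 *	struct dmar_domain *domain;
 *
 *	domain = IODOM2DOM(iodom);
 *	MPASS(DOM2IODOM(domain) == iodom);
 */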

struct dmar_msi_data {
	int irq;
	int irq_rid;
	struct resource *irq_res;
	void *intr_handle;
	int (*handler)(void *);
	int msi_data_reg;
	int msi_addr_reg;
	int msi_uaddr_reg;
	void (*enable_intr)(struct dmar_unit *);
	void (*disable_intr)(struct dmar_unit *);
	const char *name;
};

#define	DMAR_INTR_FAULT		0
#define	DMAR_INTR_QI		1
#define	DMAR_INTR_TOTAL		2

struct dmar_unit {
	struct iommu_unit iommu;
	uint16_t segment;
	uint64_t base;

	/* Resources */
	int reg_rid;
	struct resource *regs;

	struct dmar_msi_data intrs[DMAR_INTR_TOTAL];

	/* Hardware registers cache */
	uint32_t hw_ver;
	uint64_t hw_cap;
	uint64_t hw_ecap;
	uint32_t hw_gcmd;

	/* Data for being a dmar */
	LIST_HEAD(, dmar_domain) domains;
	struct unrhdr *domids;
	vm_object_t ctx_obj;
	u_int barrier_flags;

	/* Fault handler data */
	struct mtx fault_lock;
	uint64_t *fault_log;
	int fault_log_head;
	int fault_log_tail;
	int fault_log_size;
	struct task fault_task;
	struct taskqueue *fault_taskqueue;

	/* QI */
	int qi_enabled;
	char *inv_queue;
	vm_size_t inv_queue_size;
	uint32_t inv_queue_avail;
	uint32_t inv_queue_tail;
	volatile uint32_t inv_waitd_seq_hw; /* hw writes here on wait
					       descriptor completion */
	uint64_t inv_waitd_seq_hw_phys;
	uint32_t inv_waitd_seq;	/* next sequence number to use for wait descr */
	u_int inv_waitd_gen;	/* seq number generation AKA seq overflows */
	u_int inv_seq_waiters;	/* count of waiters for seq */
	u_int inv_queue_full;	/* informational counter */

	/* IR */
	int ir_enabled;
	vm_paddr_t irt_phys;
	dmar_irte_t *irt;
	u_int irte_cnt;
	vmem_t *irtids;

	/*
	 * Queue for delayed freeing of map entries:
	 *
	 * tlb_flush_head and tlb_flush_tail are used to implement a FIFO
	 * queue that supports concurrent dequeues and enqueues.  However,
	 * there can only be a single dequeuer (accessing tlb_flush_head)
	 * and a single enqueuer (accessing tlb_flush_tail) at a time.
	 * Since the unit's qi_task is the only dequeuer, it can access
	 * tlb_flush_head without any locking.  In contrast, there may be
	 * multiple enqueuers, so the enqueuers acquire the iommu unit
	 * lock to serialize their accesses to tlb_flush_tail.
	 *
	 * In this FIFO queue implementation, the key to enabling
	 * concurrent dequeues and enqueues is that the dequeuer never
	 * needs to access tlb_flush_tail and the enqueuer never needs to
	 * access tlb_flush_head.  In particular, tlb_flush_head and
	 * tlb_flush_tail are never NULL, so neither a dequeuer nor an
	 * enqueuer ever needs to update both.  Instead, tlb_flush_head
	 * always points to a "zombie" struct, which previously held the
	 * last dequeued item.  Thus, the zombie's next field actually
	 * points to the struct holding the first item in the queue.
	 * When an item is dequeued, the current zombie is finally freed,
	 * and the struct that held the just-dequeued item becomes the
	 * new zombie.  When the queue is empty, tlb_flush_tail also
	 * points to the zombie.  A sketch of the enqueue and dequeue
	 * operations follows the structure definition.
	 */
	struct iommu_map_entry *tlb_flush_head;
	struct iommu_map_entry *tlb_flush_tail;
	struct task qi_task;
	struct taskqueue *qi_taskqueue;
};
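
/*
 * Sketch of the zombie-based FIFO described above.  This is
 * illustrative only: the names tlb_flush_next, dmar_tlb_flush_*, and
 * free_zombie() are hypothetical, and the real code must additionally
 * order the next-pointer accesses between the enqueuer and the
 * dequeuer (e.g. with release/acquire atomics), which is omitted
 * here.  The dequeued struct is retained as the new zombie, so its
 * payload may be consumed but the struct itself must not be freed by
 * the caller:
 *
 *	// Enqueue: serialized by the unit lock.
 *	static void
 *	dmar_tlb_flush_enqueue(struct dmar_unit *unit,
 *	    struct iommu_map_entry *entry)
 *	{
 *		DMAR_ASSERT_LOCKED(unit);
 *		entry->tlb_flush_next = NULL;
 *		unit->tlb_flush_tail->tlb_flush_next = entry;
 *		unit->tlb_flush_tail = entry;
 *	}
 *
 *	// Dequeue: called only from qi_task, so lockless.
 *	static struct iommu_map_entry *
 *	dmar_tlb_flush_dequeue(struct dmar_unit *unit)
 *	{
 *		struct iommu_map_entry *zombie, *head;
 *
 *		zombie = unit->tlb_flush_head;
 *		head = zombie->tlb_flush_next;	// first queued item
 *		if (head == NULL)		// queue is empty
 *			return (NULL);
 *		unit->tlb_flush_head = head;	// head is the new zombie
 *		free_zombie(zombie);
 *		return (head);
 *	}
 */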

#define	DMAR_LOCK(dmar)		mtx_lock(&(dmar)->iommu.lock)
#define	DMAR_UNLOCK(dmar)	mtx_unlock(&(dmar)->iommu.lock)
#define	DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)

#define	DMAR_FAULT_LOCK(dmar)	mtx_lock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_UNLOCK(dmar)	mtx_unlock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)

#define	DMAR_IS_COHERENT(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
#define	DMAR_HAS_QI(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
#define	DMAR_X2APIC(dmar) \
	(x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)

/* Barrier ids */
#define	DMAR_BARRIER_RMRR	0
#define	DMAR_BARRIER_USEQ	1

struct dmar_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);

u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
    bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
    iommu_gaddr_t *isizep);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
void dmar_flush_pte_to_ram(struct dmar_unit *unit, iommu_pte_t *dst);
void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_disable_protected_regions(struct dmar_unit *unit);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
int dmar_load_irt_ptr(struct dmar_unit *unit);
int dmar_enable_ir(struct dmar_unit *unit);
int dmar_disable_ir(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);

int dmar_fault_intr(void *arg);
void dmar_enable_fault_intr(struct dmar_unit *unit);
void dmar_disable_fault_intr(struct dmar_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);

int dmar_qi_intr(void *arg);
void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain,
    struct iommu_map_entry *entry, bool emit_wait);
void dmar_qi_invalidate_sync(struct dmar_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size, bool cansleep);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);

vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
    iommu_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);
extern const struct iommu_domain_map_ops dmar_domain_map_ops;

int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);

struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);

void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
    int dev_busno, const void *dev_path, int dev_path_len,
    struct iommu_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);

void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct iommu_unit *dmar);

int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);

extern int haw;
extern int dmar_batch_coalesce;
extern int dmar_rmrr_enable;

static inline uint32_t
dmar_read4(const struct dmar_unit *unit, int reg)
{

	return (bus_read_4(unit->regs, reg));
}

static inline uint64_t
dmar_read8(const struct dmar_unit *unit, int reg)
{
#ifdef __i386__
	/*
	 * On i386 the 64-bit register is read as two 32-bit halves;
	 * the combined value is not a single atomic read.
	 */
	uint32_t high, low;

	low = bus_read_4(unit->regs, reg);
	high = bus_read_4(unit->regs, reg + 4);
	return (low | ((uint64_t)high << 32));
#else
	return (bus_read_8(unit->regs, reg));
#endif
}

static inline void
dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
{

	/*
	 * Catch GCMD writes that would change the translation-enable
	 * bit behind the cached hw_gcmd value.
	 */
	KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
	    (unit->hw_gcmd & DMAR_GCMD_TE),
	    ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
	    unit->hw_gcmd, val));
	bus_write_4(unit->regs, reg, val);
}

static inline void
dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
{

	KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
#ifdef __i386__
	uint32_t high, low;

	low = val;
	high = val >> 32;
	bus_write_4(unit->regs, reg, low);
	bus_write_4(unit->regs, reg + 4, high);
#else
	bus_write_8(unit->regs, reg, val);
#endif
}

/*
 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes
 * are issued in the correct order.  For store, the lower word,
 * containing the P or R and W bits, is set only after the high word
 * is written.  For clear, the P bit is cleared first, then the high
 * word is cleared.
 *
 * dmar_pte_update updates the pte.  For amd64, the update is atomic.
 * For i386, it first disables the entry by clearing the word
 * containing the P bit, and then defers to dmar_pte_store.  A locked
 * cmpxchg8b is probably available on any machine having DMAR support,
 * but the interrupt translation table may be mapped uncached.
 */
static inline void
dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
	volatile uint32_t *p;
	uint32_t hi, lo;

	hi = val >> 32;
	lo = val;
	p = (volatile uint32_t *)dst;
	*(p + 1) = hi;
	*p = lo;
#else
	*dst = val;
#endif
}

static inline void
dmar_pte_store(volatile uint64_t *dst, uint64_t val)
{

	KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
	    dst, (uintmax_t)*dst, (uintmax_t)val));
	dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_update(volatile uint64_t *dst, uint64_t val)
{

#ifdef __i386__
	volatile uint32_t *p;

	p = (volatile uint32_t *)dst;
	*p = 0;
#endif
	dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_clear(volatile uint64_t *dst)
{
#ifdef __i386__
	volatile uint32_t *p;

	p = (volatile uint32_t *)dst;
	*p = 0;
	*(p + 1) = 0;
#else
	*dst = 0;
#endif
}

extern struct timespec dmar_hw_timeout;

#define	DMAR_WAIT_UNTIL(cond)					\
{								\
	struct timespec last, curr;				\
	bool forever;						\
								\
	if (dmar_hw_timeout.tv_sec == 0 &&			\
	    dmar_hw_timeout.tv_nsec == 0) {			\
		forever = true;					\
	} else {						\
		forever = false;				\
		nanouptime(&curr);				\
		timespecadd(&curr, &dmar_hw_timeout, &last);	\
	}							\
	for (;;) {						\
		if (cond) {					\
			error = 0;				\
			break;					\
		}						\
		nanouptime(&curr);				\
		if (!forever && timespeccmp(&last, &curr, <)) {	\
			error = ETIMEDOUT;			\
			break;					\
		}						\
		cpu_spinwait();					\
	}							\
}
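
/*
 * DMAR_WAIT_UNTIL() assigns to a variable named "error" that must
 * exist in the caller's scope.  An illustrative use, spinning until
 * the hardware reports translation enabled (a sketch of the
 * status-register polling pattern, not a verbatim caller):
 *
 *	int error;
 *
 *	DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) &
 *	    DMAR_GSTS_TES) != 0));
 *	if (error != 0)
 *		printf("dmar: timed out waiting for TES\n");
 */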

#ifdef INVARIANTS
#define	TD_PREP_PINNED_ASSERT						\
	int old_td_pinned;						\
	old_td_pinned = curthread->td_pinned
#define	TD_PINNED_ASSERT						\
	KASSERT(curthread->td_pinned == old_td_pinned,			\
	    ("pin count leak: %d %d %s:%d", curthread->td_pinned,	\
	    old_td_pinned, __FILE__, __LINE__))
#else
#define	TD_PREP_PINNED_ASSERT
#define	TD_PINNED_ASSERT
#endif
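
/*
 * Usage sketch (illustrative only): the pair brackets a region that
 * pins the current thread, asserting under INVARIANTS that the pin
 * count is balanced on exit:
 *
 *	TD_PREP_PINNED_ASSERT;
 *	sched_pin();
 *	(access per-CPU or pinned state)
 *	sched_unpin();
 *	TD_PINNED_ASSERT;
 */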

#endif /* __X86_IOMMU_INTEL_DMAR_H */