/*
 * Copyright (c) 2015 Jordan Hargrave <jordan_hargrave@hotmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DEV_ACPI_DMARREG_H_
#define _DEV_ACPI_DMARREG_H_

/*#define IOMMU_DEBUG*/

#define VTD_STRIDE_MASK 0x1FF
#define VTD_STRIDE_SIZE 9
#define VTD_PAGE_SIZE 4096
#define VTD_PAGE_MASK 0xFFF
#define VTD_PTE_MASK 0x0000FFFFFFFFF000LL

#define VTD_LEVEL0 12
#define VTD_LEVEL1 21
#define VTD_LEVEL2 30 /* Minimum level supported */
#define VTD_LEVEL3 39 /* Also supported */
#define VTD_LEVEL4 48
#define VTD_LEVEL5 57

#define _xbit(x,y) (((x) >> (y)) & 1)
#define _xfld(x,y) (uint32_t)(((x) >> y##_SHIFT) & y##_MASK)

#define VTD_AWTOLEVEL(x) (((x) - 30) / VTD_STRIDE_SIZE)
#define VTD_LEVELTOAW(x) (((x) * VTD_STRIDE_SIZE) + 30)
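
/*
 * Example (informative): VTD_AWTOLEVEL() maps an address width in
 * bits to the context-entry AW field value, e.g. 39-bit -> 1
 * (3-level tables) and 48-bit -> 2 (4-level); VTD_LEVELTOAW() is
 * the inverse.
 */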

#define DMAR_VER_REG 0x00 /* 32:Arch version supported by this IOMMU */
#define DMAR_RTADDR_REG 0x20 /* 64:Root entry table */
#define DMAR_FEDATA_REG 0x3c /* 32:Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* 32:Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* 32:Fault event upper addr register */
#define DMAR_AFLOG_REG 0x58 /* 64:Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* 32:Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* 32:PMRR low base addr */
#define DMAR_PLMLIMIT_REG 0x6c /* 32:PMRR low limit */
#define DMAR_PHMBASE_REG 0x70 /* 64:PMRR high base addr */
#define DMAR_PHMLIMIT_REG 0x78 /* 64:PMRR high limit */
#define DMAR_ICS_REG 0x9C /* 32:Invalidation complete status register */
#define DMAR_IECTL_REG 0xa0 /* 32:Invalidation event control register */
#define DMAR_IEDATA_REG 0xa4 /* 32:Invalidation event data register */
#define DMAR_IEADDR_REG 0xa8 /* 32:Invalidation event address register */
#define DMAR_IEUADDR_REG 0xac /* 32:Invalidation event upper address register */
#define DMAR_IRTA_REG 0xb8 /* 64:Interrupt remapping table addr register */
#define DMAR_CAP_REG 0x08 /* 64:Hardware supported capabilities */
#define CAP_PI (1LL << 59)
#define CAP_FL1GP (1LL << 56)
#define CAP_DRD (1LL << 55)
#define CAP_DWD (1LL << 54)
#define CAP_MAMV_MASK 0x3F
#define CAP_MAMV_SHIFT 48LL
#define cap_mamv(x) _xfld(x,CAP_MAMV)
#define CAP_NFR_MASK 0xFF
#define CAP_NFR_SHIFT 40LL
#define cap_nfr(x) (_xfld(x,CAP_NFR) + 1)
#define CAP_PSI (1LL << 39)
#define CAP_SLLPS_MASK 0xF
#define CAP_SLLPS_SHIFT 34LL
#define cap_sllps(x) _xfld(x,CAP_SLLPS)
#define CAP_FRO_MASK 0x3FF
#define CAP_FRO_SHIFT 24LL
#define cap_fro(x) (_xfld(x,CAP_FRO) * 16)
#define CAP_ZLR (1LL << 22)
#define CAP_MGAW_MASK 0x3F
#define CAP_MGAW_SHIFT 16LL
#define cap_mgaw(x) (_xfld(x,CAP_MGAW) + 1)
#define CAP_SAGAW_MASK 0x1F
#define CAP_SAGAW_SHIFT 8LL
#define cap_sagaw(x) _xfld(x,CAP_SAGAW)
#define CAP_CM (1LL << 7)
#define CAP_PHMR (1LL << 6)
#define CAP_PLMR (1LL << 5)
#define CAP_RWBF (1LL << 4)
#define CAP_AFL (1LL << 3)
#define CAP_ND_MASK 0x7
#define CAP_ND_SHIFT 0x00
#define cap_nd(x) (16 << (((x) & CAP_ND_MASK) << 1))
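
/*
 * Example: deriving a usable address width from the SAGAW field
 * (an illustrative sketch; vtd_cap_max_aw() is not part of the
 * original header).  Bit N set in SAGAW means a (30 + 9*N)-bit
 * guest address width, i.e. AW field value N, is supported.
 */
static inline int
vtd_cap_max_aw(uint64_t cap)
{
	int lvl;

	/* Prefer the widest supported width (highest set bit). */
	for (lvl = 4; lvl >= 0; lvl--) {
		if (cap_sagaw(cap) & (1 << lvl))
			return VTD_LEVELTOAW(lvl);
	}
	return 0;	/* no supported width advertised */
}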

#define DMAR_ECAP_REG 0x10 /* 64:Extended capabilities supported */
#define ECAP_PSS_MASK 0x1F
#define ECAP_PSS_SHIFT 35
#define ECAP_EAFS (1LL << 34)
#define ECAP_NWFS (1LL << 33)
#define ECAP_SRS (1LL << 31)
#define ECAP_ERS (1LL << 30)
#define ECAP_PRS (1LL << 29)
#define ECAP_PASID (1LL << 28)
#define ECAP_DIS (1LL << 27)
#define ECAP_NEST (1LL << 26)
#define ECAP_MTS (1LL << 25)
#define ECAP_ECS (1LL << 24)
#define ECAP_MHMV_MASK 0xF
#define ECAP_MHMV_SHIFT 20
#define ecap_mhmv(x) _xfld(x,ECAP_MHMV)
#define ECAP_IRO_MASK 0x3FF /* IOTLB Register */
#define ECAP_IRO_SHIFT 0x8
#define ecap_iro(x) (_xfld(x,ECAP_IRO) * 16)
#define ECAP_SC (1LL << 7) /* Snoop Control */
#define ECAP_PT (1LL << 6) /* HW Passthru */
#define ECAP_EIM (1LL << 4)
#define ECAP_IR (1LL << 3) /* Interrupt remap */
#define ECAP_DT (1LL << 2) /* Device IOTLB */
#define ECAP_QI (1LL << 1) /* Queued Invalidation */
#define ECAP_C (1LL << 0) /* Coherent cache */
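
/*
 * Example (informative): these feature bits gate which mechanisms
 * may be used, e.g.
 *
 *	if (ecap & ECAP_QI)
 *		... invalidate via the queue (DMAR_IQA_REG et al.) ...
 *	else
 *		... fall back to DMAR_CCMD_REG/DMAR_IOTLB_REG ...
 */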

#define DMAR_GCMD_REG 0x18 /* 32:Global command register */
#define GCMD_TE (1LL << 31)
#define GCMD_SRTP (1LL << 30)
#define GCMD_SFL (1LL << 29)
#define GCMD_EAFL (1LL << 28)
#define GCMD_WBF (1LL << 27)
#define GCMD_QIE (1LL << 26)
#define GCMD_IRE (1LL << 25)
#define GCMD_SIRTP (1LL << 24)
#define GCMD_CFI (1LL << 23)

#define DMAR_GSTS_REG 0x1c /* 32:Global status register */
#define GSTS_TES (1LL << 31)
#define GSTS_RTPS (1LL << 30)
#define GSTS_FLS (1LL << 29)
#define GSTS_AFLS (1LL << 28)
#define GSTS_WBFS (1LL << 27)
#define GSTS_QIES (1LL << 26)
#define GSTS_IRES (1LL << 25)
#define GSTS_IRTPS (1LL << 24)
#define GSTS_CFIS (1LL << 23)

#define DMAR_CCMD_REG 0x28 /* 64:Context command reg */
#define CCMD_ICC (1LL << 63)
#define CCMD_CIRG_MASK 0x3
#define CCMD_CIRG_SHIFT 61
#define CCMD_CIRG(x) ((uint64_t)(x) << CCMD_CIRG_SHIFT)
#define CCMD_CAIG_MASK 0x3
#define CCMD_CAIG_SHIFT 59
#define CCMD_FM_MASK 0x3
#define CCMD_FM_SHIFT 32
#define CCMD_FM(x) (((uint64_t)(x) << CCMD_FM_SHIFT))
#define CCMD_SID_MASK 0xFFFF
#define CCMD_SID_SHIFT 8
#define CCMD_SID(x) (((x) << CCMD_SID_SHIFT))
#define CCMD_DID_MASK 0xFFFF
#define CCMD_DID_SHIFT 0
#define CCMD_DID(x) (((x) << CCMD_DID_SHIFT))

#define CIG_GLOBAL CCMD_CIRG(CTX_GLOBAL)
#define CIG_DOMAIN CCMD_CIRG(CTX_DOMAIN)
#define CIG_DEVICE CCMD_CIRG(CTX_DEVICE)
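
/*
 * Example (informative): a domain-selective context-cache flush
 * writes (CCMD_ICC | CIG_DOMAIN | CCMD_DID(did)) to DMAR_CCMD_REG,
 * then polls the register until CCMD_ICC reads back clear.  The
 * CTX_* granularity codes are defined near the end of this file.
 */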

#define DMAR_FSTS_REG 0x34 /* 32:Fault Status register */
#define FSTS_FRI_MASK 0xFF
#define FSTS_FRI_SHIFT 8
#define FSTS_PRO (1LL << 7)
#define FSTS_ITE (1LL << 6)
#define FSTS_ICE (1LL << 5)
#define FSTS_IQE (1LL << 4)
#define FSTS_APF (1LL << 3)
#define FSTS_APO (1LL << 2)
#define FSTS_PPF (1LL << 1)
#define FSTS_PFO (1LL << 0)

#define DMAR_FECTL_REG 0x38 /* 32:Fault control register */
#define FECTL_IM (1LL << 31)
#define FECTL_IP (1LL << 30)

#define FRCD_HI_F (1LL << (127-64))
#define FRCD_HI_T (1LL << (126-64))
#define FRCD_HI_AT_MASK 0x3
#define FRCD_HI_AT_SHIFT (124-64)
#define FRCD_HI_PV_MASK 0xFFFFF
#define FRCD_HI_PV_SHIFT (104-64)
#define FRCD_HI_FR_MASK 0xFF
#define FRCD_HI_FR_SHIFT (96-64)
#define FRCD_HI_PP (1LL << (95-64))

#define FRCD_HI_SID_MASK 0xFFFF
#define FRCD_HI_SID_SHIFT 0
#define FRCD_HI_BUS_SHIFT 8
#define FRCD_HI_BUS_MASK 0xFF
#define FRCD_HI_DEV_SHIFT 3
#define FRCD_HI_DEV_MASK 0x1F
#define FRCD_HI_FUN_SHIFT 0
#define FRCD_HI_FUN_MASK 0x7

#define DMAR_IOTLB_REG(x) (ecap_iro((x)->ecap) + 8)
#define DMAR_IVA_REG(x) (ecap_iro((x)->ecap) + 0)

#define DMAR_FRIH_REG(x,i) (cap_fro((x)->cap) + 16*(i) + 8)
#define DMAR_FRIL_REG(x,i) (cap_fro((x)->cap) + 16*(i) + 0)

#define IOTLB_IVT (1LL << 63)
#define IOTLB_IIRG_MASK 0x3
#define IOTLB_IIRG_SHIFT 60
#define IOTLB_IIRG(x) ((uint64_t)(x) << IOTLB_IIRG_SHIFT)
#define IOTLB_IAIG_MASK 0x3
#define IOTLB_IAIG_SHIFT 57
#define IOTLB_DR (1LL << 49)
#define IOTLB_DW (1LL << 48)
#define IOTLB_DID_MASK 0xFFFF
#define IOTLB_DID_SHIFT 32
#define IOTLB_DID(x) ((uint64_t)(x) << IOTLB_DID_SHIFT)

#define IIG_GLOBAL IOTLB_IIRG(IOTLB_GLOBAL)
#define IIG_DOMAIN IOTLB_IIRG(IOTLB_DOMAIN)
#define IIG_PAGE IOTLB_IIRG(IOTLB_PAGE)
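
/*
 * Example (informative): after a context-cache flush, the matching
 * IOTLB flush writes (IOTLB_IVT | IIG_DOMAIN | IOTLB_DID(did)) to
 * DMAR_IOTLB_REG() and polls until IOTLB_IVT reads back clear.
 */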

#define DMAR_IQH_REG 0x80 /* 64:Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* 64:Invalidation queue tail register */
#define DMAR_IQA_REG 0x90 /* 64:Invalidation queue addr register */
#define IQA_QS_256 0 /* 256 entries */
#define IQA_QS_512 1 /* 512 */
#define IQA_QS_1K 2 /* 1024 */
#define IQA_QS_2K 3 /* 2048 */
#define IQA_QS_4K 4 /* 4096 */
#define IQA_QS_8K 5 /* 8192 */
#define IQA_QS_16K 6 /* 16384 */
#define IQA_QS_32K 7 /* 32768 */
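
/*
 * Example: composing the DMAR_IQA_REG value (a sketch; vtd_iqa_reg()
 * is illustrative, not part of the original header).  The queue base
 * must be 4K-aligned and holds (256 << qs) 16-byte descriptors.
 */
static inline uint64_t
vtd_iqa_reg(uint64_t base, int qs)
{
	return (base & ~(uint64_t)VTD_PAGE_MASK) | qs;
}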

/* Read-Modify-Write helpers */
static inline void
iommu_rmw32(void *ov, uint32_t mask, uint32_t shift, uint32_t nv)
{
	*(uint32_t *)ov &= ~(mask << shift);
	*(uint32_t *)ov |= (nv & mask) << shift;
}

static inline void
iommu_rmw64(void *ov, uint32_t mask, uint32_t shift, uint64_t nv)
{
	/*
	 * Promote the mask before shifting: a 32-bit shift would
	 * truncate for shifts >= 32 and, once inverted and
	 * zero-extended, clobber the upper half of the register image.
	 */
	*(uint64_t *)ov &= ~((uint64_t)mask << shift);
	*(uint64_t *)ov |= (nv & mask) << shift;
}
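
/*
 * Usage note (informative): these helpers edit a software copy of a
 * register image, e.g. iommu_rmw32(&fectl, 1, 31, 0) clears the
 * FECTL_IM bit before the copy is written back to DMAR_FECTL_REG.
 */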

/*
 * Root Entry: one per bus (256 x 128 bit = 4k)
 * 0 = Present
 * 1:11 = Reserved
 * 12:HAW-1 = Context Table Pointer
 * HAW:63 = Reserved
 * 64:127 = Reserved
 */
#define ROOT_P (1L << 0)
struct root_entry {
	uint64_t lo;
	uint64_t hi;
};

/* Check if root entry is valid */
static inline bool
root_entry_is_valid(struct root_entry *re)
{
	return (re->lo & ROOT_P);
}
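
/*
 * Example: initializing a root entry (a sketch; root_entry_set() is
 * illustrative, not part of the original header).  ctp is the
 * physical address of the 4K context table for this bus.
 */
static inline void
root_entry_set(struct root_entry *re, uint64_t ctp)
{
	re->lo = (ctp & ~(uint64_t)VTD_PAGE_MASK) | ROOT_P;
	re->hi = 0;	/* upper 64 bits are reserved */
}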

/*
 * Context Entry: one per devfn (256 x 128 bit = 4k)
 * 0 = Present
 * 1 = Fault Processing Disable
 * 2:3 = Translation Type
 * 4:11 = Reserved
 * 12:63 = Second Level Page Translation
 * 64:66 = Address Width (# PTE levels)
 * 67:70 = Ignore
 * 71 = Reserved
 * 72:87 = Domain ID
 * 88:127 = Reserved
 */
#define CTX_P (1L << 0)
#define CTX_FPD (1L << 1)
#define CTX_T_MASK 0x3
#define CTX_T_SHIFT 2
enum {
	CTX_T_MULTI,
	CTX_T_IOTLB,
	CTX_T_PASSTHRU
};

#define CTX_H_AW_MASK 0x7
#define CTX_H_AW_SHIFT 0
#define CTX_H_USER_MASK 0xF
#define CTX_H_USER_SHIFT 3
#define CTX_H_DID_MASK 0xFFFF
#define CTX_H_DID_SHIFT 8

struct context_entry {
	uint64_t lo;
	uint64_t hi;
};

/* Set fault processing enable/disable */
static inline void
context_set_fpd(struct context_entry *ce, int enable)
{
	ce->lo &= ~CTX_FPD;
	if (enable)
		ce->lo |= CTX_FPD;
}

/* Set context entry present */
static inline void
context_set_present(struct context_entry *ce)
{
	ce->lo |= CTX_P;
}

/* Set Second Level Page Table Entry PA */
static inline void
context_set_slpte(struct context_entry *ce, paddr_t slpte)
{
	ce->lo &= VTD_PAGE_MASK;
	ce->lo |= (slpte & ~VTD_PAGE_MASK);
}

/* Set translation type */
static inline void
context_set_translation_type(struct context_entry *ce, int tt)
{
	ce->lo &= ~(CTX_T_MASK << CTX_T_SHIFT);
	ce->lo |= ((tt & CTX_T_MASK) << CTX_T_SHIFT);
}

/* Set Address Width (# of Page Table levels) */
static inline void
context_set_address_width(struct context_entry *ce, int lvl)
{
	ce->hi &= ~(CTX_H_AW_MASK << CTX_H_AW_SHIFT);
	ce->hi |= ((lvl & CTX_H_AW_MASK) << CTX_H_AW_SHIFT);
}

/* Set domain ID */
static inline void
context_set_domain_id(struct context_entry *ce, int did)
{
	ce->hi &= ~(CTX_H_DID_MASK << CTX_H_DID_SHIFT);
	ce->hi |= ((did & CTX_H_DID_MASK) << CTX_H_DID_SHIFT);
}

/* Get Second Level Page Table PA */
static inline uint64_t
context_pte(struct context_entry *ce)
{
	return (ce->lo & ~VTD_PAGE_MASK);
}

/* Get translation type */
static inline int
context_translation_type(struct context_entry *ce)
{
	return (ce->lo >> CTX_T_SHIFT) & CTX_T_MASK;
}

/* Get domain ID */
static inline int
context_domain_id(struct context_entry *ce)
{
	return (ce->hi >> CTX_H_DID_SHIFT) & CTX_H_DID_MASK;
}

/* Get Address Width */
static inline int
context_address_width(struct context_entry *ce)
{
	return VTD_LEVELTOAW((ce->hi >> CTX_H_AW_SHIFT) & CTX_H_AW_MASK);
}

/* Check if context entry is valid */
static inline bool
context_entry_is_valid(struct context_entry *ce)
{
	return (ce->lo & CTX_P);
}

/* User-available bits in context entry */
static inline int
context_user(struct context_entry *ce)
{
	return (ce->hi >> CTX_H_USER_SHIFT) & CTX_H_USER_MASK;
}

static inline void
context_set_user(struct context_entry *ce, int v)
{
	ce->hi &= ~(CTX_H_USER_MASK << CTX_H_USER_SHIFT);
	ce->hi |= ((v & CTX_H_USER_MASK) << CTX_H_USER_SHIFT);
}
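
/*
 * Example: populating a context entry with the setters above (a
 * sketch; context_entry_init() is illustrative, not part of the
 * original header).  slptptr is the physical address of the
 * top-level second-level page table, lvl an AW field value as
 * produced by VTD_AWTOLEVEL().
 */
static inline void
context_entry_init(struct context_entry *ce, paddr_t slptptr, int lvl,
    int did)
{
	ce->lo = 0;
	ce->hi = 0;
	context_set_slpte(ce, slptptr);
	context_set_translation_type(ce, CTX_T_MULTI);
	context_set_address_width(ce, lvl);
	context_set_domain_id(ce, did);
	context_set_present(ce);	/* mark valid last */
}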

/*
 * Fault entry
 * 0..HAW-1 = Fault address
 * HAW:63 = Reserved
 * 64:79 = Source ID
 * 96:103 = Fault Reason
 * 104:123 = PV
 * 124:125 = Address Translation type
 * 126 = Type (0 = Read, 1 = Write)
 * 127 = Fault bit
 */
struct fault_entry {
	uint64_t lo;
	uint64_t hi;
};
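
/*
 * Example: pulling the fault reason out of a record read from
 * DMAR_FRIH_REG()/DMAR_FRIL_REG() (a sketch; fault_entry_reason()
 * is illustrative, not part of the original header).
 */
static inline int
fault_entry_reason(struct fault_entry *fe)
{
	return _xfld(fe->hi, FRCD_HI_FR);
}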

/* PTE Entry: 512 x 64-bit = 4k */
#define PTE_P (1L << 0)
#define PTE_R 0x00
#define PTE_W (1L << 1)
#define PTE_US (1L << 2)
#define PTE_PWT (1L << 3)
#define PTE_PCD (1L << 4)
#define PTE_A (1L << 5)
#define PTE_D (1L << 6)
#define PTE_PAT (1L << 7)
#define PTE_G (1L << 8)
#define PTE_EA (1L << 10)
#define PTE_XD (1LL << 63)

/* PDE Level entry */
#define PTE_PS (1L << 7)

/* PDPE Level entry */

/* ----------------------------------------------------------------
 * 5555555444444444333333333222222222111111111000000000------------
 * [PML4 ->] PDPE.1GB
 * [PML4 ->] PDPE.PDE -> PDE.2MB
 * [PML4 ->] PDPE.PDE -> PDE -> PTE
 * GAW0 = (12..20) (PTE)
 * GAW1 = (21..29) (PDE)
 * GAW2 = (30..38) (PDPE)
 * GAW3 = (39..47) (PML4)
 * GAW4 = (48..56) (n/a)
 * GAW5 = (57..63) (n/a)
 */
struct pte_entry {
	uint64_t val;
};
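
/*
 * Example: composing a read/write leaf PTE (a sketch; pte_set_rw()
 * is illustrative, not part of the original header).  pa must be
 * page-aligned.
 */
static inline void
pte_set_rw(struct pte_entry *pte, uint64_t pa)
{
	pte->val = (pa & VTD_PTE_MASK) | PTE_P | PTE_W;
}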

/*
 * Queued Invalidation entry
 * 0:3 = 01h
 * 4:5 = Granularity
 * 6:15 = Reserved
 * 16:31 = Domain ID
 * 32:47 = Source ID
 * 48:49 = FM
 */

/* Invalidate Context Entry */
#define QI_CTX_DID_MASK 0xFFFF
#define QI_CTX_DID_SHIFT 16
#define QI_CTX_SID_MASK 0xFFFF
#define QI_CTX_SID_SHIFT 32
#define QI_CTX_FM_MASK 0x3
#define QI_CTX_FM_SHIFT 48
#define QI_CTX_IG_MASK 0x3
#define QI_CTX_IG_SHIFT 4
#define QI_CTX_DID(x) (((uint64_t)(x) << QI_CTX_DID_SHIFT))
#define QI_CTX_SID(x) (((uint64_t)(x) << QI_CTX_SID_SHIFT))
#define QI_CTX_FM(x) (((uint64_t)(x) << QI_CTX_FM_SHIFT))

#define QI_CTX_IG_GLOBAL (CTX_GLOBAL << QI_CTX_IG_SHIFT)
#define QI_CTX_IG_DOMAIN (CTX_DOMAIN << QI_CTX_IG_SHIFT)
#define QI_CTX_IG_DEVICE (CTX_DEVICE << QI_CTX_IG_SHIFT)

/* Invalidate IOTLB Entry */
#define QI_IOTLB_DID_MASK 0xFFFF
#define QI_IOTLB_DID_SHIFT 16
#define QI_IOTLB_IG_MASK 0x3
#define QI_IOTLB_IG_SHIFT 4
#define QI_IOTLB_DR (1LL << 6)
#define QI_IOTLB_DW (1LL << 5)
#define QI_IOTLB_DID(x) (((uint64_t)(x) << QI_IOTLB_DID_SHIFT))

#define QI_IOTLB_IG_GLOBAL (1 << QI_IOTLB_IG_SHIFT)
#define QI_IOTLB_IG_DOMAIN (2 << QI_IOTLB_IG_SHIFT)
#define QI_IOTLB_IG_PAGE (3 << QI_IOTLB_IG_SHIFT)

/* QI Commands */
#define QI_CTX 0x1
#define QI_IOTLB 0x2
#define QI_DEVTLB 0x3
#define QI_INTR 0x4
#define QI_WAIT 0x5
#define QI_EXTTLB 0x6
#define QI_PAS 0x7
#define QI_EXTDEV 0x8

struct qi_entry {
	uint64_t lo;
	uint64_t hi;
};

enum {
	CTX_GLOBAL = 1,
	CTX_DOMAIN,
	CTX_DEVICE,

	IOTLB_GLOBAL = 1,
	IOTLB_DOMAIN,
	IOTLB_PAGE,
};
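
/*
 * Example: building a domain-selective context-cache invalidation
 * descriptor for the invalidation queue (a sketch; qi_make_ctx() is
 * illustrative, not part of the original header).
 */
static inline void
qi_make_ctx(struct qi_entry *qe, int did)
{
	qe->lo = QI_CTX | QI_CTX_IG_DOMAIN | QI_CTX_DID(did);
	qe->hi = 0;
}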

enum {
	VTD_FAULT_ROOT_P = 0x1,		/* P field in root entry is 0 */
	VTD_FAULT_CTX_P = 0x2,		/* P field in context entry is 0 */
	VTD_FAULT_CTX_INVAL = 0x3,	/* context AW/TT/SLPPTR invalid */
	VTD_FAULT_LIMIT = 0x4,		/* Address is outside of MGAW */
	VTD_FAULT_WRITE = 0x5,		/* Address-translation fault, non-writable */
	VTD_FAULT_READ = 0x6,		/* Address-translation fault, non-readable */
	VTD_FAULT_PTE_INVAL = 0x7,	/* page table hw access error */
	VTD_FAULT_ROOT_INVAL = 0x8,	/* root table hw access error */
	VTD_FAULT_CTX_TBL_INVAL = 0x9,	/* context entry hw access error */
	VTD_FAULT_ROOT_RESERVED = 0xa,	/* non-zero reserved field in root entry */
	VTD_FAULT_CTX_RESERVED = 0xb,	/* non-zero reserved field in context entry */
	VTD_FAULT_PTE_RESERVED = 0xc,	/* non-zero reserved field in paging entry */
	VTD_FAULT_CTX_TT = 0xd,		/* invalid translation type */
};

void acpidmar_pci_hook(pci_chipset_tag_t, struct pci_attach_args *);
void dmar_ptmap(bus_dma_tag_t, bus_addr_t);

#define __EXTRACT(v,m) (((v) >> m##_SHIFT) & m##_MASK)

#endif /* _DEV_ACPI_DMARREG_H_ */