/*
 * Copyright (c) 2015 Jordan Hargrave <jordan_hargrave@hotmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DEV_ACPI_DMARREG_H_
#define _DEV_ACPI_DMARREG_H_

/*#define IOMMU_DEBUG*/

#define VTD_STRIDE_MASK 0x1FF
#define VTD_STRIDE_SIZE 9
#define VTD_PAGE_SIZE   4096
#define VTD_PAGE_MASK   0xFFF
#define VTD_PTE_MASK    0x0000FFFFFFFFF000LL

#define VTD_LEVEL0	12
#define VTD_LEVEL1	21
#define VTD_LEVEL2	30 /* Minimum level supported */
#define VTD_LEVEL3	39 /* Also supported */
#define VTD_LEVEL4	48
#define VTD_LEVEL5	57

#define _xbit(x,y) (((x) >> (y)) & 1)
#define _xfld(x,y) (uint32_t)(((x) >> y##_SHIFT) & y##_MASK)

#define VTD_AWTOLEVEL(x)    (((x) - 30) / VTD_STRIDE_SIZE)
#define VTD_LEVELTOAW(x)    (((x) * VTD_STRIDE_SIZE) + 30)
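
/*
 * Example sketch (not part of the original header): VTD_AWTOLEVEL() and
 * VTD_LEVELTOAW() are inverses for the address widths the page-table
 * format can express, e.g. VTD_AWTOLEVEL(39) == 1 and VTD_LEVELTOAW(1) == 39.
 */
static inline int
vtd_aw_roundtrips(int aw)
{
	/* true for aw = 30, 39, 48, 57 */
	return (VTD_LEVELTOAW(VTD_AWTOLEVEL(aw)) == aw);
}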

#define DMAR_VER_REG		0x00	/* 32:Arch version supported by this IOMMU */
#define DMAR_RTADDR_REG		0x20	/* 64:Root entry table */
#define DMAR_FEDATA_REG		0x3c	/* 32:Fault event interrupt data register */
#define DMAR_FEADDR_REG		0x40	/* 32:Fault event interrupt addr register */
#define DMAR_FEUADDR_REG	0x44	/* 32:Upper address register */
#define DMAR_AFLOG_REG		0x58	/* 64:Advanced Fault control */
#define DMAR_PMEN_REG		0x64	/* 32:Enable Protected Memory Region */
#define DMAR_PLMBASE_REG	0x68	/* 32:PMRR low addr */
#define DMAR_PLMLIMIT_REG	0x6c	/* 32:PMRR low limit */
#define DMAR_PHMBASE_REG	0x70	/* 64:PMRR high base addr */
#define DMAR_PHMLIMIT_REG	0x78	/* 64:PMRR high limit */
#define DMAR_ICS_REG		0x9c	/* 32:Invalidation complete status register */
#define DMAR_IECTL_REG		0xa0	/* 32:Invalidation event control register */
#define DMAR_IEDATA_REG		0xa4	/* 32:Invalidation event data register */
#define DMAR_IEADDR_REG		0xa8	/* 32:Invalidation event address register */
#define DMAR_IEUADDR_REG	0xac	/* 32:Invalidation event upper address register */
#define DMAR_IRTA_REG		0xb8	/* 64:Interrupt remapping table addr register */
#define DMAR_CAP_REG		0x08	/* 64:Hardware supported capabilities */
#define   CAP_PI		(1LL << 59)
#define   CAP_FL1GP		(1LL << 56)
#define   CAP_DRD		(1LL << 55)
#define   CAP_DWD		(1LL << 54)
#define   CAP_MAMV_MASK		0x3F
#define   CAP_MAMV_SHIFT	48LL
#define   cap_mamv(x)		_xfld(x,CAP_MAMV)
#define   CAP_NFR_MASK		0xFF
#define   CAP_NFR_SHIFT		40LL
#define   cap_nfr(x)		(_xfld(x,CAP_NFR) + 1)
#define   CAP_PSI		(1LL << 39)
#define   CAP_SLLPS_MASK	0xF
#define   CAP_SLLPS_SHIFT	34LL
#define   cap_sllps(x)		_xfld(x,CAP_SLLPS)
#define   CAP_FRO_MASK		0x3FF
#define   CAP_FRO_SHIFT		24LL
#define   cap_fro(x)		(_xfld(x,CAP_FRO) * 16)
#define   CAP_ZLR		(1LL << 22)
#define   CAP_MGAW_MASK		0x3F
#define   CAP_MGAW_SHIFT	16LL
#define   cap_mgaw(x)		(_xfld(x,CAP_MGAW) + 1)
#define   CAP_SAGAW_MASK	0x1F
#define   CAP_SAGAW_SHIFT	8LL
#define   cap_sagaw(x)		_xfld(x,CAP_SAGAW)
#define   CAP_CM		(1LL << 7)
#define   CAP_PHMR		(1LL << 6)
#define   CAP_PLMR		(1LL << 5)
#define   CAP_RWBF		(1LL << 4)
#define   CAP_AFL		(1LL << 3)
#define   CAP_ND_MASK		0x7
#define   CAP_ND_SHIFT		0x00
#define   cap_nd(x)		(16 << (((x) & CAP_ND_MASK) << 1))
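
/*
 * Illustrative decode of a raw capability register value via the
 * accessors above; the helper and out-parameter names are hypothetical.
 * cap_nd() computes 2^(4 + 2*ND) domain IDs, i.e. ND=0 -> 16 domains,
 * up to ND=6 -> 65536 domains.
 */
static inline void
vtd_cap_decode(uint64_t cap, uint32_t *mgaw, uint32_t *sagaw, int *ndoms)
{
	*mgaw  = cap_mgaw(cap);		/* max guest address width, in bits */
	*sagaw = cap_sagaw(cap);	/* bitmask of supported AGAWs */
	*ndoms = cap_nd(cap);		/* number of domain IDs */
}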

#define DMAR_ECAP_REG		0x10	/* 64:Extended capabilities supported */
#define   ECAP_PSS_MASK		0x1F
#define   ECAP_PSS_SHIFT	35
#define   ECAP_EAFS		(1LL << 34)
#define   ECAP_NWFS		(1LL << 33)
#define   ECAP_SRS		(1LL << 31)
#define   ECAP_ERS		(1LL << 30)
#define   ECAP_PRS		(1LL << 29)
#define   ECAP_PASID		(1LL << 28)
#define   ECAP_DIS		(1LL << 27)
#define   ECAP_NEST		(1LL << 26)
#define   ECAP_MTS		(1LL << 25)
#define   ECAP_ECS		(1LL << 24)
#define   ECAP_MHMV_MASK	0xF
#define   ECAP_MHMV_SHIFT	20
#define   ecap_mhmv(x)		_xfld(x,ECAP_MHMV)
#define   ECAP_IRO_MASK		0x3FF	/* IOTLB Register Offset */
#define   ECAP_IRO_SHIFT	0x8
#define   ecap_iro(x)		(_xfld(x,ECAP_IRO) * 16)
#define   ECAP_SC		(1LL << 7)	/* Snoop Control */
#define   ECAP_PT		(1LL << 6)	/* HW Passthru */
#define   ECAP_EIM		(1LL << 4)
#define   ECAP_IR		(1LL << 3)	/* Interrupt remap */
#define   ECAP_DT		(1LL << 2)	/* Device IOTLB */
#define   ECAP_QI		(1LL << 1)	/* Queued Invalidation */
#define   ECAP_C		(1LL << 0)	/* Coherent cache */

#define DMAR_GCMD_REG		0x18	/* 32:Global command register */
#define   GCMD_TE		(1LL << 31)
#define   GCMD_SRTP		(1LL << 30)
#define   GCMD_SFL		(1LL << 29)
#define   GCMD_EAFL		(1LL << 28)
#define   GCMD_WBF		(1LL << 27)
#define   GCMD_QIE		(1LL << 26)
#define   GCMD_IRE		(1LL << 25)
#define   GCMD_SIRTP		(1LL << 24)
#define   GCMD_CFI		(1LL << 23)

#define DMAR_GSTS_REG		0x1c	/* 32:Global status register */
#define   GSTS_TES		(1LL << 31)
#define   GSTS_RTPS		(1LL << 30)
#define   GSTS_FLS		(1LL << 29)
#define   GSTS_AFLS		(1LL << 28)
#define   GSTS_WBFS		(1LL << 27)
#define   GSTS_QIES		(1LL << 26)
#define   GSTS_IRES		(1LL << 25)
#define   GSTS_IRTPS		(1LL << 24)
#define   GSTS_CFIS		(1LL << 23)
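
/*
 * Sketch of the usual global-command handshake (illustrative, not part of
 * the original header): GCMD is reprogrammed from a GSTS snapshot with one
 * command bit set, then the matching status bit is polled.  The raw MMIO
 * access style below is a simplifying assumption.
 */
static inline void
vtd_gcmd_example(volatile uint32_t *regs, uint32_t cmd, uint32_t sts)
{
	uint32_t gsts = regs[DMAR_GSTS_REG / 4];

	regs[DMAR_GCMD_REG / 4] = gsts | cmd;		/* e.g. GCMD_TE */
	while ((regs[DMAR_GSTS_REG / 4] & sts) == 0)	/* e.g. GSTS_TES */
		;
}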

#define DMAR_CCMD_REG		0x28	/* 64:Context command reg */
#define   CCMD_ICC		(1LL << 63)
#define   CCMD_CIRG_MASK	0x3
#define   CCMD_CIRG_SHIFT	61
#define   CCMD_CIRG(x)		((uint64_t)(x) << CCMD_CIRG_SHIFT)
#define   CCMD_CAIG_MASK	0x3
#define   CCMD_CAIG_SHIFT	59
#define   CCMD_FM_MASK		0x3
#define   CCMD_FM_SHIFT		32
#define   CCMD_FM(x)		(((uint64_t)(x) << CCMD_FM_SHIFT))
#define   CCMD_SID_MASK		0xFFFF
#define   CCMD_SID_SHIFT	16
#define   CCMD_SID(x)		(((x) << CCMD_SID_SHIFT))
#define   CCMD_DID_MASK		0xFFFF
#define   CCMD_DID_SHIFT	0
#define   CCMD_DID(x)		(((x) << CCMD_DID_SHIFT))

#define CIG_GLOBAL		CCMD_CIRG(CTX_GLOBAL)
#define CIG_DOMAIN		CCMD_CIRG(CTX_DOMAIN)
#define CIG_DEVICE		CCMD_CIRG(CTX_DEVICE)

#define DMAR_FSTS_REG		0x34	/* 32:Fault Status register */
#define   FSTS_FRI_MASK		0xFF
#define   FSTS_FRI_SHIFT	8
#define   FSTS_PRO		(1LL << 7)
#define   FSTS_ITE		(1LL << 6)
#define   FSTS_ICE		(1LL << 5)
#define   FSTS_IQE		(1LL << 4)
#define   FSTS_APF		(1LL << 3)
#define   FSTS_APO		(1LL << 2)
#define   FSTS_PPF		(1LL << 1)
#define   FSTS_PFO		(1LL << 0)

#define DMAR_FECTL_REG		0x38	/* 32:Fault control register */
#define   FECTL_IM		(1LL << 31)
#define   FECTL_IP		(1LL << 30)

#define FRCD_HI_F		(1LL << (127-64))
#define FRCD_HI_T		(1LL << (126-64))
#define FRCD_HI_AT_MASK		0x3
#define FRCD_HI_AT_SHIFT	(124-64)
#define FRCD_HI_PV_MASK		0xFFFFF
#define FRCD_HI_PV_SHIFT	(104-64)
#define FRCD_HI_FR_MASK		0xFF
#define FRCD_HI_FR_SHIFT	(96-64)
#define FRCD_HI_PP		(1LL << (95-64))

#define FRCD_HI_SID_MASK	0xFFFF
#define FRCD_HI_SID_SHIFT	0
#define FRCD_HI_BUS_SHIFT	8
#define FRCD_HI_BUS_MASK	0xFF
#define FRCD_HI_DEV_SHIFT	3
#define FRCD_HI_DEV_MASK	0x1F
#define FRCD_HI_FUN_SHIFT	0
#define FRCD_HI_FUN_MASK	0x7
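
/*
 * Illustrative split of a fault-record source-id into PCI bus/device/
 * function using the masks above; the helper name is hypothetical.
 */
static inline void
vtd_frcd_sid_split(uint32_t sid, int *bus, int *dev, int *fun)
{
	*bus = (sid >> FRCD_HI_BUS_SHIFT) & FRCD_HI_BUS_MASK;
	*dev = (sid >> FRCD_HI_DEV_SHIFT) & FRCD_HI_DEV_MASK;
	*fun = (sid >> FRCD_HI_FUN_SHIFT) & FRCD_HI_FUN_MASK;
}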

#define DMAR_IOTLB_REG(x)	(ecap_iro((x)->ecap) + 8)
#define DMAR_IVA_REG(x)		(ecap_iro((x)->ecap) + 0)

#define DMAR_FRIH_REG(x,i)	(cap_fro((x)->cap) + 16*(i) + 8)
#define DMAR_FRIL_REG(x,i)	(cap_fro((x)->cap) + 16*(i) + 0)

#define IOTLB_IVT		(1LL << 63)
#define IOTLB_IIRG_MASK		0x3
#define IOTLB_IIRG_SHIFT	60
#define IOTLB_IIRG(x)		((uint64_t)(x) << IOTLB_IIRG_SHIFT)
#define IOTLB_IAIG_MASK		0x3
#define IOTLB_IAIG_SHIFT	57
#define IOTLB_DR		(1LL << 49)
#define IOTLB_DW		(1LL << 48)
#define IOTLB_DID_MASK		0xFFFF
#define IOTLB_DID_SHIFT		32
#define IOTLB_DID(x)		((uint64_t)(x) << IOTLB_DID_SHIFT)

#define IIG_GLOBAL	IOTLB_IIRG(IOTLB_GLOBAL)
#define IIG_DOMAIN	IOTLB_IIRG(IOTLB_DOMAIN)
#define IIG_PAGE	IOTLB_IIRG(IOTLB_PAGE)

#define DMAR_IQH_REG	0x80	/* 64:Invalidation queue head register */
#define DMAR_IQT_REG	0x88	/* 64:Invalidation queue tail register */
#define DMAR_IQA_REG	0x90	/* 64:Invalidation queue addr register */
#define IQA_QS_256	0	/* 256 entries */
#define IQA_QS_512	1	/* 512 */
#define IQA_QS_1K	2	/* 1024 */
#define IQA_QS_2K	3	/* 2048 */
#define IQA_QS_4K	4	/* 4096 */
#define IQA_QS_8K	5	/* 8192 */
#define IQA_QS_16K	6	/* 16384 */
#define IQA_QS_32K	7	/* 32768 */
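
/*
 * Sketch: the QS field in the low bits of DMAR_IQA_REG selects a queue of
 * 256 << QS descriptors, each descriptor 16 bytes; the helper below is
 * hypothetical.
 */
static inline uint64_t
vtd_inv_queue_bytes(int qs)
{
	return ((uint64_t)(256 << qs) * 16);	/* IQA_QS_256 -> 4096 bytes */
}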

/* Read-Modify-Write helpers */
static inline void
iommu_rmw32(void *ov, uint32_t mask, uint32_t shift, uint32_t nv)
{
	*(uint32_t *)ov &= ~(mask << shift);
	*(uint32_t *)ov |= (nv & mask) << shift;
}

static inline void
iommu_rmw64(void *ov, uint64_t mask, uint64_t shift, uint64_t nv)
{
	*(uint64_t *)ov &= ~(mask << shift);
	*(uint64_t *)ov |= (nv & mask) << shift;
}
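
/*
 * Usage sketch for the helpers above (illustrative): rewrite the
 * fault-record-index field in a software copy of the fault status
 * register.
 */
static inline void
iommu_rmw32_example(uint32_t *fsts, uint32_t fri)
{
	iommu_rmw32(fsts, FSTS_FRI_MASK, FSTS_FRI_SHIFT, fri);
}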

/*
 * Root Entry: one per bus (256 x 128 bit = 4k)
 *   0        = Present
 *   1:11     = Reserved
 *   12:HAW-1 = Context Table Pointer
 *   HAW:63   = Reserved
 *   64:127   = Reserved
 */
#define ROOT_P	(1L << 0)
struct root_entry {
	uint64_t		lo;
	uint64_t		hi;
};

/* Check if root entry is valid */
static inline bool
root_entry_is_valid(struct root_entry *re)
{
	return (re->lo & ROOT_P);
}
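
/*
 * Sketch (hypothetical helper, not in the original): mark a root entry
 * present and point it at a page-aligned context table.
 */
static inline void
root_entry_set(struct root_entry *re, uint64_t ctp)
{
	re->lo = (ctp & ~(uint64_t)VTD_PAGE_MASK) | ROOT_P;
	re->hi = 0;
}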

/*
 * Context Entry: one per devfn (256 x 128 bit = 4k)
 *   0      = Present
 *   1      = Fault Processing Disable
 *   2:3    = Translation Type
 *   4:11   = Reserved
 *   12:63  = Second Level Page Translation
 *   64:66  = Address Width (# PTE levels)
 *   67:70  = Ignore
 *   71     = Reserved
 *   72:87  = Domain ID
 *   88:127 = Reserved
 */
#define CTX_P		(1L << 0)
#define CTX_FPD		(1L << 1)
#define CTX_T_MASK	0x3
#define CTX_T_SHIFT	2
enum {
	CTX_T_MULTI,
	CTX_T_IOTLB,
	CTX_T_PASSTHRU
};

#define CTX_H_AW_MASK	0x7
#define CTX_H_AW_SHIFT	0
#define CTX_H_USER_MASK	0xF
#define CTX_H_USER_SHIFT 3
#define CTX_H_DID_MASK	0xFFFF
#define CTX_H_DID_SHIFT	8

struct context_entry {
	uint64_t		lo;
	uint64_t		hi;
};

/* Set fault processing enable/disable */
static inline void
context_set_fpd(struct context_entry *ce, int enable)
{
	ce->lo &= ~CTX_FPD;
	if (enable)
		ce->lo |= CTX_FPD;
}

/* Set context entry present */
static inline void
context_set_present(struct context_entry *ce)
{
	ce->lo |= CTX_P;
}

/* Set Second Level Page Table Entry PA */
static inline void
context_set_slpte(struct context_entry *ce, paddr_t slpte)
{
	ce->lo &= VTD_PAGE_MASK;
	ce->lo |= (slpte & ~VTD_PAGE_MASK);
}

/* Set translation type */
static inline void
context_set_translation_type(struct context_entry *ce, int tt)
{
	ce->lo &= ~(CTX_T_MASK << CTX_T_SHIFT);
	ce->lo |= ((tt & CTX_T_MASK) << CTX_T_SHIFT);
}

/* Set Address Width (# of Page Table levels) */
static inline void
context_set_address_width(struct context_entry *ce, int lvl)
{
	ce->hi &= ~(CTX_H_AW_MASK << CTX_H_AW_SHIFT);
	ce->hi |= ((lvl & CTX_H_AW_MASK) << CTX_H_AW_SHIFT);
}

/* Set domain ID */
static inline void
context_set_domain_id(struct context_entry *ce, int did)
{
	ce->hi &= ~(CTX_H_DID_MASK << CTX_H_DID_SHIFT);
	ce->hi |= ((did & CTX_H_DID_MASK) << CTX_H_DID_SHIFT);
}

/* Get Second Level Page Table PA */
static inline uint64_t
context_pte(struct context_entry *ce)
{
	return (ce->lo & ~VTD_PAGE_MASK);
}

/* Get translation type */
static inline int
context_translation_type(struct context_entry *ce)
{
	return (ce->lo >> CTX_T_SHIFT) & CTX_T_MASK;
}

/* Get domain ID */
static inline int
context_domain_id(struct context_entry *ce)
{
	return (ce->hi >> CTX_H_DID_SHIFT) & CTX_H_DID_MASK;
}

/* Get Address Width */
static inline int
context_address_width(struct context_entry *ce)
{
	return VTD_LEVELTOAW((ce->hi >> CTX_H_AW_SHIFT) & CTX_H_AW_MASK);
}

/* Check if context entry is valid */
static inline bool
context_entry_is_valid(struct context_entry *ce)
{
	return (ce->lo & CTX_P);
}

/* User-available bits in context entry */
static inline int
context_user(struct context_entry *ce)
{
	return (ce->hi >> CTX_H_USER_SHIFT) & CTX_H_USER_MASK;
}

static inline void
context_set_user(struct context_entry *ce, int v)
{
	ce->hi &= ~(CTX_H_USER_MASK << CTX_H_USER_SHIFT);
	ce->hi |= ((v & CTX_H_USER_MASK) << CTX_H_USER_SHIFT);
}
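
/*
 * Sketch chaining the setters above into a complete context entry; the
 * helper and its argument values are illustrative only.
 */
static inline void
context_entry_build(struct context_entry *ce, paddr_t slptptr, int did,
    int lvl)
{
	ce->lo = 0;
	ce->hi = 0;
	context_set_slpte(ce, slptptr);
	context_set_translation_type(ce, CTX_T_MULTI);
	context_set_address_width(ce, lvl);
	context_set_domain_id(ce, did);
	context_set_present(ce);
}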

/*
 * Fault entry
 *   0..HAW-1 = Fault address
 *   HAW:63   = Reserved
 *   64:79    = Source ID
 *   96:103   = Fault Reason
 *   104:123  = PV
 *   124:125  = Address Translation type
 *   126      = Type (0 = Read, 1 = Write)
 *   127      = Fault bit
 */
struct fault_entry {
	uint64_t	lo;
	uint64_t	hi;
};

/* PTE Entry: 512 x 64-bit = 4k */
#define PTE_P	(1L << 0)
#define PTE_R	0x00
#define PTE_W	(1L << 1)
#define PTE_US	(1L << 2)
#define PTE_PWT	(1L << 3)
#define PTE_PCD	(1L << 4)
#define PTE_A	(1L << 5)
#define PTE_D	(1L << 6)
#define PTE_PAT	(1L << 7)
#define PTE_G	(1L << 8)
#define PTE_EA	(1L << 10)
#define PTE_XD	(1LL << 63)

/* PDE Level entry */
#define PTE_PS	(1L << 7)

/* PDPE Level entry */

/* ----------------------------------------------------------------
 * 5555555444444444333333333222222222111111111000000000------------
 * [PML4 ->] PDPE.1GB
 * [PML4 ->] PDPE.PDE -> PDE.2MB
 * [PML4 ->] PDPE.PDE -> PDE -> PTE
 * GAW0 = (12.20) (PTE)
 * GAW1 = (21.29) (PDE)
 * GAW2 = (30.38) (PDPE)
 * GAW3 = (39.47) (PML4)
 * GAW4 = (48.56) (n/a)
 * GAW5 = (57.63) (n/a)
 */
struct pte_entry {
	uint64_t	val;
};
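
/*
 * Sketch (hypothetical helpers): extract the 9-bit table index a paging
 * level uses from an I/O virtual address, and build a present, writable
 * leaf PTE from a page-aligned physical address.
 */
static inline int
vtd_pte_index(uint64_t iova, int lvl_shift)
{
	/* lvl_shift is one of VTD_LEVEL0..VTD_LEVEL5 */
	return ((iova >> lvl_shift) & VTD_STRIDE_MASK);
}

static inline void
vtd_pte_set(struct pte_entry *pte, uint64_t pa)
{
	pte->val = (pa & VTD_PTE_MASK) | PTE_P | PTE_W;
}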

/*
 * Queued Invalidation entry
 *  0:3   = 01h
 *  4:5   = Granularity
 *  6:15  = Reserved
 *  16:31 = Domain ID
 *  32:47 = Source ID
 *  48:49 = FM
 */

/* Invalidate Context Entry */
#define QI_CTX_DID_MASK		0xFFFF
#define QI_CTX_DID_SHIFT	16
#define QI_CTX_SID_MASK		0xFFFF
#define QI_CTX_SID_SHIFT	32
#define QI_CTX_FM_MASK		0x3
#define QI_CTX_FM_SHIFT		48
#define QI_CTX_IG_MASK		0x3
#define QI_CTX_IG_SHIFT		4
#define QI_CTX_DID(x)		(((uint64_t)(x) << QI_CTX_DID_SHIFT))
#define QI_CTX_SID(x)		(((uint64_t)(x) << QI_CTX_SID_SHIFT))
#define QI_CTX_FM(x)		(((uint64_t)(x) << QI_CTX_FM_SHIFT))

#define QI_CTX_IG_GLOBAL	(CTX_GLOBAL << QI_CTX_IG_SHIFT)
#define QI_CTX_IG_DOMAIN	(CTX_DOMAIN << QI_CTX_IG_SHIFT)
#define QI_CTX_IG_DEVICE	(CTX_DEVICE << QI_CTX_IG_SHIFT)

/* Invalidate IOTLB Entry */
#define QI_IOTLB_DID_MASK	0xFFFF
#define QI_IOTLB_DID_SHIFT	16
#define QI_IOTLB_IG_MASK	0x3
#define QI_IOTLB_IG_SHIFT	4
#define QI_IOTLB_DR		(1LL << 7)
#define QI_IOTLB_DW		(1LL << 6)
#define QI_IOTLB_DID(x)		(((uint64_t)(x) << QI_IOTLB_DID_SHIFT))

#define QI_IOTLB_IG_GLOBAL	(1 << QI_IOTLB_IG_SHIFT)
#define QI_IOTLB_IG_DOMAIN	(2 << QI_IOTLB_IG_SHIFT)
#define QI_IOTLB_IG_PAGE	(3 << QI_IOTLB_IG_SHIFT)

/* QI Commands */
#define QI_CTX		0x1
#define QI_IOTLB	0x2
#define QI_DEVTLB	0x3
#define QI_INTR		0x4
#define QI_WAIT		0x5
#define QI_EXTTLB	0x6
#define QI_PAS		0x7
#define QI_EXTDEV	0x8

struct qi_entry {
	uint64_t	lo;
	uint64_t	hi;
};

enum {
	CTX_GLOBAL = 1,
	CTX_DOMAIN,
	CTX_DEVICE,

	IOTLB_GLOBAL = 1,
	IOTLB_DOMAIN,
	IOTLB_PAGE,
};
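
/*
 * Sketches (hypothetical helpers): compose a register-based IOTLB
 * domain-selective invalidation for DMAR_IOTLB_REG, and fill a queued
 * domain-selective context-cache invalidation descriptor.
 */
static inline uint64_t
vtd_iotlb_inv_domain(int did)
{
	return (IOTLB_IVT | IIG_DOMAIN | IOTLB_DID(did));
}

static inline void
qi_make_ctx_inv(struct qi_entry *qi, int did)
{
	qi->lo = QI_CTX | QI_CTX_DID(did) | QI_CTX_IG_DOMAIN;
	qi->hi = 0;
}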

enum {
	VTD_FAULT_ROOT_P = 0x1,		/* P field in root entry is 0 */
	VTD_FAULT_CTX_P = 0x2,		/* P field in context entry is 0 */
	VTD_FAULT_CTX_INVAL = 0x3,	/* context AW/TT/SLPPTR invalid */
	VTD_FAULT_LIMIT = 0x4,		/* Address is outside of MGAW */
	VTD_FAULT_WRITE = 0x5,		/* Address-translation fault, non-writable */
	VTD_FAULT_READ = 0x6,		/* Address-translation fault, non-readable */
	VTD_FAULT_PTE_INVAL = 0x7,	/* page table hw access error */
	VTD_FAULT_ROOT_INVAL = 0x8,	/* root table hw access error */
	VTD_FAULT_CTX_TBL_INVAL = 0x9,	/* context entry hw access error */
	VTD_FAULT_ROOT_RESERVED = 0xa,	/* non-zero reserved field in root entry */
	VTD_FAULT_CTX_RESERVED = 0xb,	/* non-zero reserved field in context entry */
	VTD_FAULT_PTE_RESERVED = 0xc,	/* non-zero reserved field in paging entry */
	VTD_FAULT_CTX_TT = 0xd,		/* invalid translation type */
};
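
/*
 * Illustrative classifier over the reason codes above; the helper name is
 * hypothetical.
 */
static inline int
vtd_fault_is_translation(int fr)
{
	return (fr == VTD_FAULT_WRITE || fr == VTD_FAULT_READ);
}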

void	acpidmar_pci_hook(pci_chipset_tag_t, struct pci_attach_args *);
void	dmar_ptmap(bus_dma_tag_t, bus_addr_t);
void	acpidmar_sw(int);

#define __EXTRACT(v,m) (((v) >> m##_SHIFT) & m##_MASK)

#endif /* _DEV_ACPI_DMARREG_H_ */
537