/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/device.h>

#include <asm/extable.h>
#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#ifdef CONFIG_X86_64
#define XEN_PHYSICAL_MASK	__sme_clr((1UL << 52) - 1)
#else
#define XEN_PHYSICAL_MASK	__PHYSICAL_MASK
#endif

#define XEN_PTE_MFN_MASK	((pteval_t)(((signed long)PAGE_MASK) & \
					    XEN_PHYSICAL_MASK))

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)

#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
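/* E.g. with 4 KiB pages and 8-byte longs, that is 512 p2m entries per page. */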

extern unsigned long *machine_to_phys_mapping;
extern unsigned long  machine_to_phys_nr;
extern unsigned long *xen_p2m_addr;
extern unsigned long  xen_p2m_size;
extern unsigned long  xen_max_p2m_pfn;

extern int xen_alloc_p2m_entry(unsigned long pfn);

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
						    unsigned long pfn_e);

#ifdef CONFIG_XEN_PV
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_unmap_grant_ref *kunmap_ops,
				     struct page **pages, unsigned int count);
#else
static inline int
set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			struct gnttab_map_grant_ref *kmap_ops,
			struct page **pages, unsigned int count)
{
	return 0;
}

static inline int
clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			  struct gnttab_unmap_grant_ref *kunmap_ops,
			  struct page **pages, unsigned int count)
{
	return 0;
}
#endif

/*
 * Helper functions to read or write unsigned long values to/from
 * memory when the access may fault.
 */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
	int ret = 0;

	asm volatile("1: mov %[val], %[ptr]\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[ret])
		     : [ret] "+r" (ret), [ptr] "=m" (*addr)
		     : [val] "r" (val));

	return ret;
}

static inline int xen_safe_read_ulong(const unsigned long *addr,
				      unsigned long *val)
{
	unsigned long rval = ~0ul;
	int ret = 0;

	asm volatile("1: mov %[ptr], %[rval]\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[ret])
		     : [ret] "+r" (ret), [rval] "+r" (rval)
		     : [ptr] "m" (*addr));
	*val = rval;

	return ret;
}
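
/*
 * Illustrative sketch (editor-added, not in the original header): how a
 * caller might use xen_safe_read_ulong() to probe a machine-to-physical
 * entry that may live in unmapped space. The helper name
 * probe_m2p_entry() is hypothetical; mfn_to_pfn_no_overrides() below is
 * the in-tree user of this pattern.
 */
#if 0
static unsigned long probe_m2p_entry(unsigned long mfn)
{
	unsigned long pfn;

	/* On a fault, the extable fixup sets the return value to -EFAULT. */
	if (xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn))
		return ~0UL;	/* treat unreadable entries as invalid */

	return pfn;
}
#endif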

#ifdef CONFIG_XEN_PV
/*
 * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
 * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
 *   bits (identity or foreign) are set.
 * - __pfn_to_mfn() returns the raw p2m table entry: an identity or foreign
 *   indicator, if set, is still present. __pfn_to_mfn() encapsulates
 *   get_phys_to_machine(), which it calls only in special cases.
 * - get_phys_to_machine() is to be called only by __pfn_to_mfn(), for the
 *   special cases that need extended handling.
 */
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn < xen_p2m_size)
		mfn = xen_p2m_addr[pfn];
	else if (unlikely(pfn < xen_max_p2m_pfn))
		return get_phys_to_machine(pfn);
	else
		return IDENTITY_FRAME(pfn);

	if (unlikely(mfn == INVALID_P2M_ENTRY))
		return get_phys_to_machine(pfn);

	return mfn;
}
#else
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	return pfn;
}
#endif
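
/*
 * Illustrative sketch (editor-added): the difference between
 * __pfn_to_mfn() and pfn_to_mfn() is the indicator bits. A raw p2m
 * entry may carry FOREIGN_FRAME_BIT or IDENTITY_FRAME_BIT in its top
 * bits; pfn_to_mfn() strips them. The helper pfn_is_foreign() is
 * hypothetical.
 */
#if 0
static bool pfn_is_foreign(unsigned long pfn)
{
	unsigned long entry = __pfn_to_mfn(pfn);

	/* INVALID_P2M_ENTRY is all ones, so rule it out first. */
	return entry != INVALID_P2M_ENTRY && (entry & FOREIGN_FRAME_BIT);
}
#endif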

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	/*
	 * Some x86 code is still using pfn_to_mfn instead of
	 * pfn_to_gfn. This will have to be removed once we figure
	 * out which calls should be converted.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	mfn = __pfn_to_mfn(pfn);

	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

	return mfn;
}

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
	unsigned long pfn;
	int ret;

	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
	if (ret < 0)
		return ~0;

	return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	/*
	 * Some x86 code is still using mfn_to_pfn instead of
	 * gfn_to_pfn. This will have to be removed once we figure
	 * out which calls should be converted.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		pfn = ~0;

	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or the
	 * entry doesn't map back to the mfn.
	 */
	if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
		pfn = mfn;

	return pfn;
}
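
/*
 * Illustrative sketch (editor-added): why the p2m/m2p round trip above
 * matters. For a grant-mapped foreign page the m2p entry may name a
 * pfn, but our p2m entry for that pfn is FOREIGN_FRAME(mfn), which
 * compares unequal to mfn, so the lookup is rejected. The helper
 * mfn_is_local() is hypothetical.
 */
#if 0
static bool mfn_is_local(unsigned long mfn)
{
	unsigned long pfn = mfn_to_pfn_no_overrides(mfn);

	/* Local memory maps back: p2m(m2p(mfn)) == mfn. */
	return pfn != ~0UL && __pfn_to_mfn(pfn) == mfn;
}
#endif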

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
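
/*
 * Worked example (editor-added): the conversions above translate only
 * the frame number and preserve the in-page offset. With 4 KiB pages,
 * if pfn_to_mfn(0x1234) == 0x5678, then
 * phys_to_machine(XPADDR(0x1234abc)).maddr == 0x5678abc.
 */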

/* Pseudo-physical <-> Guest conversion */
static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;
	else
		return pfn_to_mfn(pfn);
}

static inline unsigned long gfn_to_pfn(unsigned long gfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return gfn;
	else
		return mfn_to_pfn(gfn);
}

/* Pseudo-physical <-> Bus conversion */
#define pfn_to_bfn(pfn)		pfn_to_gfn(pfn)
#define bfn_to_pfn(bfn)		gfn_to_pfn(bfn)

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		return -1; /* force !pfn_valid() */
	return pfn;
}
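
/*
 * Illustrative sketch (editor-added): bfn_to_local_pfn() is what lets
 * callers refuse a 'struct page' for foreign or I/O frames, since the
 * returned -1 fails pfn_valid(). The helper bfn_to_local_page() is
 * hypothetical.
 */
#if 0
static struct page *bfn_to_local_page(unsigned long bfn)
{
	unsigned long pfn = bfn_to_local_pfn(bfn);

	/* Only frames that are really ours have a struct page. */
	return pfn_valid(pfn) ? pfn_to_page(pfn) : NULL;
}
#endif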

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
static inline unsigned long virt_to_pfn(const void *v)
{
	return PFN_DOWN(__pa(v));
}
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

/* VIRT <-> GUEST conversion */
#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_pfn(v)))
#define gfn_to_virt(g)		(__va(gfn_to_pfn(g) << PAGE_SHIFT))
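
/*
 * Illustrative sketch (editor-added): virt_to_gfn() is the conversion
 * to use when handing a kernel virtual address to the hypervisor: it
 * yields an mfn on PV guests and a pfn on auto-translated ones. The
 * helper shared_buffer_gfn() is hypothetical.
 */
#if 0
static unsigned long shared_buffer_gfn(void *buf)
{
	/* buf must lie in the kernel's direct mapping for __pa() to work. */
	return virt_to_gfn(buf);
}
#endif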

static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}
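
/*
 * Illustrative sketch (editor-added): on PV, page tables hold machine
 * frame numbers, so a PTE pointing at a foreign or machine frame must
 * be built with mfn_pte() rather than pfn_pte(). The helper
 * make_foreign_pte() is hypothetical.
 */
#if 0
static pte_t make_foreign_pte(unsigned long mfn)
{
	/* PAGE_KERNEL gives the usual kernel read-write protections. */
	return mfn_pte(mfn, PAGE_KERNEL);
}
#endif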

static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}

#define pmd_val_ma(v) ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v) ((v).p4d.pgd.pgd)
#else
#define pud_val_ma(v) ((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#ifdef __PAGETABLE_P4D_FOLDED
#define p4d_val_ma(x)	((x).pgd.pgd)
#else
#define p4d_val_ma(x)	((x).p4d)
#endif

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

static inline bool xen_arch_need_swiotlb(struct device *dev,
					 phys_addr_t phys,
					 dma_addr_t dev_addr)
{
	return false;
}

#endif /* _ASM_X86_XEN_PAGE_H */