/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/page_types.h>

#ifdef CONFIG_X86_64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif	/* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

struct page;

#include <linux/range.h>
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;

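/*
 * x86 has no virtually-indexed cache aliasing, so the user virtual
 * address (vaddr) and the target struct page (pg) can safely be
 * ignored; these helpers reduce to plain clear_page()/copy_page().
 */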
static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}

#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
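
/*
 * Defining __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE should tell
 * the generic highmem code to use the arch version above rather than
 * its own fallback, folding the zeroing into the page allocation.
 */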

#ifndef __pa
#define __pa(x)		__phys_addr((unsigned long)(x))
#endif

#define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
/*
 * __pa_symbol() should be used for C-visible symbols; this appears to
 * be the officially GCC-blessed way to do such arithmetic.
 *
 * __phys_reloc_hide() is needed because GCC may assume there is no
 * overflow during the __pa() calculation and optimize it unexpectedly.
 * Newer versions of GCC provide -fno-strict-overflow to handle this
 * case properly; once all supported versions understand it (i.e. once
 * gcc 3.x is deprecated), this voodoo can be removed.
 */
#define __pa_symbol(x) \
	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
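
/*
 * Illustrative use (not part of this header): kernel-image symbols
 * such as _text live in the kernel text mapping rather than the
 * direct map, so their physical address must be taken like this:
 *
 *	phys_addr_t kernel_start = __pa_symbol(_text);
 */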

#ifndef __va
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif
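
/*
 * Sketch of the direct-map round trip these macros implement; valid
 * only for addresses inside the kernel direct mapping:
 *
 *	void *vaddr = __va(phys);        // phys + PAGE_OFFSET
 *	phys_addr_t back = __pa(vaddr);  // back == phys
 */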

#define __boot_va(x)		__va(x)
#define __boot_pa(x)		__pa(x)

/*
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
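
/*
 * Typical guarded use (illustrative only): virt_to_page() must not be
 * handed vmalloc or other non-direct-map addresses, so an unsure
 * caller checks first:
 *
 *	if (virt_addr_valid(kaddr)) {
 *		struct page *pg = virt_to_page(kaddr);
 *		unsigned long pfn = page_to_pfn(pg);
 *		...
 *	}
 */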

static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return __canonical_address(vaddr, vaddr_bits) == vaddr;
}
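
/*
 * Worked example, assuming 48 valid virtual-address bits: shifting
 * left by 16 and arithmetic-shifting right by 16 sign-extends bit 47
 * into bits 63:48, the canonical form the hardware requires:
 *
 *	__canonical_address(0x0000800000000000, 48) == 0xffff800000000000
 *	__is_canonical_address(0x00007fffffffffff, 48) == 1
 */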

#endif	/* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#endif	/* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */