/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_NO_HUGE_VMAP		0x00000400	/* force PAGE_SIZE pte mapping */

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
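
/*
 * A hedged sketch of an override (illustrative only; the value below is not
 * taken from any real architecture): an arch can provide its own limit in
 * its asm/vmalloc.h before the fallback above is seen, e.g.
 *
 *	#define IOREMAP_MAX_ORDER	(10 + PAGE_SHIFT)	// 4MiB with 4K pages
 */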

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};
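
/*
 * A minimal sketch (not part of this header) of inspecting the vm_struct
 * backing a vmalloc() allocation via find_vm_area(), declared further below.
 * The helper name and printk format are illustrative:
 *
 *	static void dump_vmalloc_area(const void *p)
 *	{
 *		struct vm_struct *vm = find_vm_area(p);
 *
 *		if (!vm)
 *			return;
 *		pr_info("addr=%px size=%lu nr_pages=%u alloc=%d\n",
 *			vm->addr, vm->size, vm->nr_pages,
 *			!!(vm->flags & VM_ALLOC));
 *	}
 */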

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;         /* address sorted rbtree */
	struct list_head list;          /* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size; /* in "free" tree */
		struct vm_struct *vm;           /* in "busy" tree */
	};
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif
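
/*
 * A hedged sketch of what an arch override could look like in that arch's
 * asm/vmalloc.h (illustrative only; a real architecture would gate this on
 * its own feature detection, not the bare Kconfig test used here):
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP);
 *	}
 */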

/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
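
/*
 * A minimal usage sketch (not part of this header): transiently map an
 * array of pages with vm_map_ram() and tear the mapping down again with
 * vm_unmap_ram(), passing the same count. Error handling is elided:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (va) {
 *		memcpy(va, src, nr * PAGE_SIZE);
 *		vm_unmap_ram(va, nr);
 *	}
 */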

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);
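
/*
 * A minimal usage sketch (illustrative; struct foo and its flexible
 * entries[] array are hypothetical): allocate a large, zeroed, virtually
 * contiguous buffer and release it with vfree(). struct_size() from
 * linux/overflow.h, included above, guards the size computation:
 *
 *	struct foo *tbl = vzalloc(struct_size(tbl, entries, nr_entries));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */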

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
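
/*
 * A hedged sketch of vmap() usage (not part of this header): map pages the
 * caller already holds into a contiguous kernel virtual range, using the
 * VM_MAP flag defined above, then unmap with vunmap():
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		...
 *		vunmap(va);
 *	}
 */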

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
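
/*
 * A minimal sketch (illustrative only) of exposing a buffer to userspace
 * from a driver's ->mmap() handler; "buf" is a hypothetical buffer that was
 * allocated with vmalloc_user(), which marks the area VM_USERMAP:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
 *	}
 */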

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation of arch_sync_kernel_mappings(); the
 * compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
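
/*
 * A hedged sketch of the arch side (the mask value and the body are
 * assumptions, not taken from a real architecture): the arch sets the mask
 * in its headers and provides the sync hook in its mm code:
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
 *
 *	void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 *	{
 *		// propagate kernel PMD-level changes to all page tables
 *	}
 */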

/*
 *	Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This does not tell with certainty whether the area is mapped with
	 * page table entries larger than PAGE_SIZE: even if the architecture
	 * indicates that larger sizes are available, nothing prevents it from
	 * deciding not to use them. This only indicates the size of the
	 * physical pages allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
int vmap_range(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
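
/*
 * A hedged usage sketch (illustrative; set_memory_ro()/set_memory_x() are
 * separate arch interfaces from asm/set_memory.h, and error handling is
 * elided): an allocation whose permissions are changed should be marked so
 * that vfree() resets the direct map and flushes the TLB:
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *
 *	set_vm_flush_reset_perms(p);
 *	set_memory_ro((unsigned long)p, size >> PAGE_SHIFT);
 *	set_memory_x((unsigned long)p, size >> PAGE_SHIFT);
 */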

/* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
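
/*
 * A minimal sketch of hooking the vmap purge notifier (the callback name
 * and body are assumptions). As a hedged summary, the chain is invoked when
 * vmap address space runs short, giving users a chance to drop cached areas:
 *
 *	static int foo_purge(struct notifier_block *nb, unsigned long action,
 *			     void *data)
 *	{
 *		// release any vmap areas this driver caches
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_purge_nb = { .notifier_call = foo_purge };
 *
 *	register_vmap_purge_notifier(&foo_purge_nb);
 */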

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */