/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
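
/*
 * Illustrative accounting sketch (not normative; see hugepage_new_subpool()
 * in mm/hugetlb.c): a subpool created with min_hpages = 2 starts with
 * rsv_hpages = 2, i.e. two pages are charged against the global pool up
 * front, while used_hpages counts both allocations and reservations made
 * against max_hpages as the subpool is consumed.
 */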

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
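
/*
 * Example (illustrative only): walk every registered huge page size:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s\n", h->name);
 */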

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
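
/*
 * Sketch of the serialization pattern (illustrative; see the fault and
 * truncation paths in mm/hugetlb.c and fs/hugetlbfs/inode.c): callers hash
 * the (mapping, index) pair and take the corresponding table entry so that
 * concurrent faults on the same huge page are serialized:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */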

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to the page, i.e. after allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)		\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)		\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
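
/*
 * For example, HPAGEFLAG(Temporary, temporary) above generates
 * HPageTemporary(), SetHPageTemporary() and ClearHPageTemporary(),
 * which test, set and clear the HPG_temporary bit in the head page's
 * page->private.
 */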

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (struct hugepage_subpool *)(hpage+1)->private;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage+1, (unsigned long)subpool);
}
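
/*
 * Illustrative note: the two helpers above form a simple accessor pair.
 * A subpool stored with hugetlb_set_page_subpool(hpage, spool) is read
 * back later with hugetlb_page_subpool(hpage) on the same head page.
 */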

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
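
/*
 * Illustrative only: page_size_log typically comes from the MAP_HUGE_*
 * bits of an mmap() or shmget() flags argument, so hstate_sizelog(21)
 * would return the 2MB hstate (1UL << 21) if that size is registered,
 * and NULL otherwise.
 */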

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
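
/*
 * Worked example (illustrative, assuming 4KB base pages): a 2MB hstate
 * has order 9, so huge_page_size() = 4KB << 9 = 2MB, huge_page_shift()
 * = 9 + 12 = 21, pages_per_huge_page() = 512 and blocks_per_huge_page()
 * = 2MB / 512 = 4096 sectors of 512 bytes.
 */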

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it
 * determines whether or not a huge page should be placed in a movable
 * zone. Movability of any huge page should be checked only if the huge
 * page size is supported for migration in the first place. There won't
 * be any reason for the huge page to be movable if it is not migratable
 * to start with. Also, the size of the huge page should be large enough
 * to be placed under a movable zone and still feasible enough to be
 * migratable. Just the presence in a movable zone does not make the
 * migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable because it's not feasible to
 * migrate them from a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif


#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
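
/*
 * Typical usage (sketch only): callers pair huge_pte_lock() with
 * spin_unlock() around huge PTE updates, e.g.:
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	entry = huge_ptep_get(ptep);
 *	... examine or update the entry ...
 *	spin_unlock(ptl);
 */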

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * Architectures with special requirements for evicting hugetlb backing TLB
 * entries can implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */