xref: /linux/include/linux/swap.h (revision 908fc4c2)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 2^27
 * pages on 32-bit-pgoff_t architectures.  That also assumes the architecture
 * packs the type/offset into the pte as 5/27.
 */
#define MAX_SWAPFILES_SHIFT	5
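
/*
 * Worked example, assuming a 32-bit pgoff_t and 4 KiB pages: a 5/27
 * type/offset split gives 1 << 27 = 134217728 swapcache pages per swap
 * type, i.e. up to 512 GiB of swap addressable through a single type.
 * The actual packing is arch-specific; see the swp_entry()/swp_type()/
 * swp_offset() helpers in <linux/swapops.h>.
 */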

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

#define SWP_SWAPIN_ERROR_NUM 1
#define SWP_SWAPIN_ERROR     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			     SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
			     SWP_PTE_MARKER_NUM)
/*
 * PTE markers are used to persist information onto PTEs that are mapped with
 * file-backed memory.  As the name "PTE" hints, they should only be applied
 * to the leaves of pgtables.
 */
#ifdef CONFIG_PTE_MARKER
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			    SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
#else
#define SWP_PTE_MARKER_NUM 0
#endif

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. In short: we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif
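
/*
 * A CPU fault on one of these entries can be recognized with the
 * helpers in <linux/swapops.h>; a minimal sketch (locking and error
 * handling omitted):
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *	if (is_device_private_entry(entry))
 *		... have the driver's migrate_to_ram() callback bring
 *		    the data back to system memory ...
 */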

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (parts of) anonymous pages that are mapped writable are exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif
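
/*
 * Page-table walkers can detect an in-flight migration with the
 * <linux/swapops.h> helpers; a minimal sketch:
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(mm, pmd, address);
 *
 * is_writable_migration_entry() tells SWP_MIGRATION_WRITE apart from
 * the two read flavours.
 */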

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format; the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the page makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
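
/*
 * Swap-area detection therefore inspects the last 10 bytes of the
 * area's first page; roughly what mm/swapfile.c does:
 *
 *	if (!memcmp("SWAP-SPACE", swap_header->magic.magic, 10) ||
 *	    !memcmp("SWAPSPACE2", swap_header->magic.magic, 10))
 *		... looks like a swap signature, read the info fields ...
 */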

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a block device or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
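
/*
 * For example, the extent { .start_page = 0, .nr_pages = 1024,
 * .start_block = 8192 } says that swap pages 0..1023 live at disk
 * blocks 8192..9215, one PAGE_SIZE block per page.
 */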

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
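
/*
 * Worked example, assuming 4 KiB pages: magic.magic starts at offset
 * 4096 - 10 = 4086 and info.badpages at 1024 + 3*4 + 16 + 16 + 117*4
 * = 1536, so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637.
 */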

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* must not overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flags in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special values in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list; we fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster index if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines whether a
 * cluster is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and the swap_info_struct->swap_map
				 * elements corresponding to the swap
				 * cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	struct completion comp;		/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags requires
					 * holding this lock and swap_lock; if
					 * both locks are needed, take swap_lock
					 * first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					   * entries in swap_avail_heads, one
					   * entry per node.
					   * Must be last, as the length of the
					   * array is nr_node_ids, which is not
					   * a fixed value, so it has to be
					   * allocated dynamically.
					   * And it has to be an array so that
					   * plist_for_each_* can work.
					   */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of the page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
	swp_entry_t entry = { .val = page_private(&folio->page) };
	return entry;
}

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
		xas_set_update(xas, workingset_update_node);		\
		xas_set_lru(xas, &shadow_nodes);			\
	}								\
} while (0)
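
/*
 * Callers typically pair the macro with an XA_STATE on the mapping
 * being updated, along these lines (sketch):
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *	mapping_set_update(&xas, mapping);
 */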

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
			  unsigned int nr_pages);
extern void lru_note_cost_folio(struct folio *);
extern void folio_add_lru(struct folio *);
extern void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

extern void check_move_unevictable_pages(struct pagevec *pvec);

extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively... */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
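
/*
 * Typical pattern for using a swap entry safely against a concurrent
 * swapoff (sketch):
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *	if (si) {
 *		... access the swap device fields ...
 *		put_swap_device(si);
 *	}
 */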

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* Only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_ZSWAP
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return;
	__cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	cgroup_throttle_swaprate(&folio->page, gfp);
}

#ifdef CONFIG_MEMCG_SWAP
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */