1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * fs/f2fs/f2fs.h
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #ifndef _LINUX_F2FS_H
9 #define _LINUX_F2FS_H
10 
11 #include <linux/uio.h>
12 #include <linux/types.h>
13 #include <linux/page-flags.h>
14 #include <linux/slab.h>
15 #include <linux/crc32.h>
16 #include <linux/magic.h>
17 #include <linux/kobject.h>
18 #include <linux/sched.h>
19 #include <linux/cred.h>
20 #include <linux/sched/mm.h>
21 #include <linux/vmalloc.h>
22 #include <linux/bio.h>
23 #include <linux/blkdev.h>
24 #include <linux/quotaops.h>
25 #include <linux/part_stat.h>
26 #include <linux/rw_hint.h>
27 #include <crypto/hash.h>
28 
29 #include <linux/fscrypt.h>
30 #include <linux/fsverity.h>
31 
32 struct pagevec;
33 
34 #ifdef CONFIG_F2FS_CHECK_FS
35 #define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
36 #else
37 #define f2fs_bug_on(sbi, condition)					\
38 	do {								\
39 		if (WARN_ON(condition))					\
40 			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
41 	} while (0)
42 #endif
43 
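/*
 * Illustrative example (hypothetical call site, not part of this header):
 * with CONFIG_F2FS_CHECK_FS enabled a failed check crashes via BUG_ON();
 * otherwise it only warns and tags the filesystem for fsck.f2fs.
 *
 *	static void example_verify(struct f2fs_sb_info *sbi, block_t blkaddr)
 *	{
 *		// debug build: BUG_ON; production: WARN_ON + SBI_NEED_FSCK
 *		f2fs_bug_on(sbi, blkaddr == 0);
 *	}
 */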
44 enum {
45 	FAULT_KMALLOC,
46 	FAULT_KVMALLOC,
47 	FAULT_PAGE_ALLOC,
48 	FAULT_PAGE_GET,
49 	FAULT_ALLOC_BIO,	/* obsolete since bio_alloc() never fails */
50 	FAULT_ALLOC_NID,
51 	FAULT_ORPHAN,
52 	FAULT_BLOCK,
53 	FAULT_DIR_DEPTH,
54 	FAULT_EVICT_INODE,
55 	FAULT_TRUNCATE,
56 	FAULT_READ_IO,
57 	FAULT_CHECKPOINT,
58 	FAULT_DISCARD,
59 	FAULT_WRITE_IO,
60 	FAULT_SLAB_ALLOC,
61 	FAULT_DQUOT_INIT,
62 	FAULT_LOCK_OP,
63 	FAULT_BLKADDR_VALIDITY,
64 	FAULT_BLKADDR_CONSISTENCE,
65 	FAULT_NO_SEGMENT,
66 	FAULT_MAX,
67 };
68 
69 #ifdef CONFIG_F2FS_FAULT_INJECTION
70 #define F2FS_ALL_FAULT_TYPE		(GENMASK(FAULT_MAX - 1, 0))
71 
72 struct f2fs_fault_info {
73 	atomic_t inject_ops;
74 	int inject_rate;
75 	unsigned int inject_type;
76 };
77 
78 extern const char *f2fs_fault_name[FAULT_MAX];
79 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
80 
81 /* maximum retry count for injected failure */
82 #define DEFAULT_FAILURE_RETRY_COUNT		8
83 #else
84 #define DEFAULT_FAILURE_RETRY_COUNT		1
85 #endif
86 
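/*
 * Illustrative sketch of how these knobs combine (a simplified analogue of
 * f2fs's fault-injection check; the helper name and exact accounting here
 * are assumptions, not the mainline implementation):
 *
 *	static bool example_time_to_inject(struct f2fs_fault_info *ffi, int type)
 *	{
 *		if (!ffi->inject_rate || !IS_FAULT_SET(ffi, type))
 *			return false;
 *		// inject one failure every inject_rate invocations
 *		if (atomic_inc_return(&ffi->inject_ops) >= ffi->inject_rate) {
 *			atomic_set(&ffi->inject_ops, 0);
 *			return true;
 *		}
 *		return false;
 *	}
 */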
87 /*
88  * For mount options
89  */
90 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000001
91 #define F2FS_MOUNT_DISCARD		0x00000002
92 #define F2FS_MOUNT_NOHEAP		0x00000004
93 #define F2FS_MOUNT_XATTR_USER		0x00000008
94 #define F2FS_MOUNT_POSIX_ACL		0x00000010
95 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000020
96 #define F2FS_MOUNT_INLINE_XATTR		0x00000040
97 #define F2FS_MOUNT_INLINE_DATA		0x00000080
98 #define F2FS_MOUNT_INLINE_DENTRY	0x00000100
99 #define F2FS_MOUNT_FLUSH_MERGE		0x00000200
100 #define F2FS_MOUNT_NOBARRIER		0x00000400
101 #define F2FS_MOUNT_FASTBOOT		0x00000800
102 #define F2FS_MOUNT_READ_EXTENT_CACHE	0x00001000
103 #define F2FS_MOUNT_DATA_FLUSH		0x00002000
104 #define F2FS_MOUNT_FAULT_INJECTION	0x00004000
105 #define F2FS_MOUNT_USRQUOTA		0x00008000
106 #define F2FS_MOUNT_GRPQUOTA		0x00010000
107 #define F2FS_MOUNT_PRJQUOTA		0x00020000
108 #define F2FS_MOUNT_QUOTA		0x00040000
109 #define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00080000
110 #define F2FS_MOUNT_RESERVE_ROOT		0x00100000
111 #define F2FS_MOUNT_DISABLE_CHECKPOINT	0x00200000
112 #define F2FS_MOUNT_NORECOVERY		0x00400000
113 #define F2FS_MOUNT_ATGC			0x00800000
114 #define F2FS_MOUNT_MERGE_CHECKPOINT	0x01000000
115 #define F2FS_MOUNT_GC_MERGE		0x02000000
116 #define F2FS_MOUNT_COMPRESS_CACHE	0x04000000
117 #define F2FS_MOUNT_AGE_EXTENT_CACHE	0x08000000
118 
119 #define F2FS_OPTION(sbi)	((sbi)->mount_opt)
120 #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
121 #define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
122 #define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
123 
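/*
 * Usage sketch: these helpers are plain bit operations on
 * F2FS_OPTION(sbi).opt, e.g. during mount option parsing:
 *
 *	set_opt(sbi, DISCARD);			// opt |= F2FS_MOUNT_DISCARD
 *	if (test_opt(sbi, INLINE_DATA))		// opt &  F2FS_MOUNT_INLINE_DATA
 *		clear_opt(sbi, INLINE_DATA);	// opt &= ~F2FS_MOUNT_INLINE_DATA
 */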
124 #define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
125 		typecheck(unsigned long long, b) &&			\
126 		((long long)((a) - (b)) > 0))
127 
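/*
 * Worked example: ver_after() uses signed subtraction so the comparison
 * stays correct across u64 wraparound, like the kernel's time_after().
 * With a == 0 and b == ULLONG_MAX (b just wrapped), (long long)(a - b)
 * evaluates to 1 > 0, so ver_after(a, b) is true as intended.
 */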
128 typedef u32 block_t;	/*
129 			 * should not change u32, since it is the on-disk block
130 			 * address format, __le32.
131 			 */
132 typedef u32 nid_t;
133 
134 #define COMPRESS_EXT_NUM		16
135 
136 enum blkzone_allocation_policy {
137 	BLKZONE_ALLOC_PRIOR_SEQ,	/* Prioritize writing to sequential zones */
138 	BLKZONE_ALLOC_ONLY_SEQ,		/* Only allow writing to sequential zones */
139 	BLKZONE_ALLOC_PRIOR_CONV,	/* Prioritize writing to conventional zones */
140 };
141 
142 /*
143  * An implementation of an rwsem that is explicitly unfair to readers. This
144  * prevents priority inversion when a low-priority reader would otherwise
145  * acquire the read lock ahead of a writer whose write lock is needed by
146  * higher-priority clients.
147  */
148 
149 struct f2fs_rwsem {
150         struct rw_semaphore internal_rwsem;
151 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
152         wait_queue_head_t read_waiters;
153 #endif
154 };
155 
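/*
 * Sketch of the idea (the real f2fs_down_read()/f2fs_up_read() wrappers are
 * defined later in this header): readers only ever trylock and park on
 * read_waiters, so a queued writer is never starved behind them.
 *
 *	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
 */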
156 struct f2fs_mount_info {
157 	unsigned int opt;
158 	block_t root_reserved_blocks;	/* root reserved blocks */
159 	kuid_t s_resuid;		/* reserved blocks for uid */
160 	kgid_t s_resgid;		/* reserved blocks for gid */
161 	int active_logs;		/* # of active logs */
162 	int inline_xattr_size;		/* inline xattr size */
163 #ifdef CONFIG_F2FS_FAULT_INJECTION
164 	struct f2fs_fault_info fault_info;	/* For fault injection */
165 #endif
166 #ifdef CONFIG_QUOTA
167 	/* Names of quota files with journalled quota */
168 	char *s_qf_names[MAXQUOTAS];
169 	int s_jquota_fmt;			/* Format of quota to use */
170 #endif
171 	/* For which write hints are passed down to block layer */
172 	int alloc_mode;			/* segment allocation policy */
173 	int fsync_mode;			/* fsync policy */
174 	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
175 	int bggc_mode;			/* bggc mode: off, on or sync */
176 	int memory_mode;		/* memory mode */
177 	int errors;			/* errors parameter */
178 	int discard_unit;		/*
179 					 * discard command's offset/size should
180 					 * be aligned to this unit: block,
181 					 * segment or section
182 					 */
183 	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
184 	block_t unusable_cap_perc;	/* percentage for cap */
185 	block_t unusable_cap;		/* Amount of space allowed to be
186 					 * unusable when disabling checkpoint
187 					 */
188 
189 	/* For compression */
190 	unsigned char compress_algorithm;	/* algorithm type */
191 	unsigned char compress_log_size;	/* cluster log size */
192 	unsigned char compress_level;		/* compress level */
193 	bool compress_chksum;			/* compressed data chksum */
194 	unsigned char compress_ext_cnt;		/* extension count */
195 	unsigned char nocompress_ext_cnt;		/* nocompress extension count */
196 	int compress_mode;			/* compression mode */
197 	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
198 	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* nocompress extensions */
199 };
200 
201 #define F2FS_FEATURE_ENCRYPT			0x00000001
202 #define F2FS_FEATURE_BLKZONED			0x00000002
203 #define F2FS_FEATURE_ATOMIC_WRITE		0x00000004
204 #define F2FS_FEATURE_EXTRA_ATTR			0x00000008
205 #define F2FS_FEATURE_PRJQUOTA			0x00000010
206 #define F2FS_FEATURE_INODE_CHKSUM		0x00000020
207 #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x00000040
208 #define F2FS_FEATURE_QUOTA_INO			0x00000080
209 #define F2FS_FEATURE_INODE_CRTIME		0x00000100
210 #define F2FS_FEATURE_LOST_FOUND			0x00000200
211 #define F2FS_FEATURE_VERITY			0x00000400
212 #define F2FS_FEATURE_SB_CHKSUM			0x00000800
213 #define F2FS_FEATURE_CASEFOLD			0x00001000
214 #define F2FS_FEATURE_COMPRESSION		0x00002000
215 #define F2FS_FEATURE_RO				0x00004000
216 
217 #define __F2FS_HAS_FEATURE(raw_super, mask)				\
218 	((raw_super->feature & cpu_to_le32(mask)) != 0)
219 #define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
220 
221 /*
222  * Default values for user and/or group using reserved blocks
223  */
224 #define	F2FS_DEF_RESUID		0
225 #define	F2FS_DEF_RESGID		0
226 
227 /*
228  * For checkpoint manager
229  */
230 enum {
231 	NAT_BITMAP,
232 	SIT_BITMAP
233 };
234 
235 #define	CP_UMOUNT	0x00000001
236 #define	CP_FASTBOOT	0x00000002
237 #define	CP_SYNC		0x00000004
238 #define	CP_RECOVERY	0x00000008
239 #define	CP_DISCARD	0x00000010
240 #define CP_TRIMMED	0x00000020
241 #define CP_PAUSE	0x00000040
242 #define CP_RESIZE	0x00000080
243 
244 #define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
245 #define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
246 #define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
247 #define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
248 #define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
249 #define DEF_CP_INTERVAL			60	/* 60 secs */
250 #define DEF_IDLE_INTERVAL		5	/* 5 secs */
251 #define DEF_DISABLE_INTERVAL		5	/* 5 secs */
252 #define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
253 #define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */
254 
255 struct cp_control {
256 	int reason;
257 	__u64 trim_start;
258 	__u64 trim_end;
259 	__u64 trim_minlen;
260 };
261 
262 /*
263  * indicate meta/data type
264  */
265 enum {
266 	META_CP,
267 	META_NAT,
268 	META_SIT,
269 	META_SSA,
270 	META_MAX,
271 	META_POR,
272 	DATA_GENERIC,		/* check range only */
273 	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
274 	DATA_GENERIC_ENHANCE_READ,	/*
275 					 * strong check on range and segment
276 					 * bitmap but no warning due to race
277 					 * condition of read on truncated area
278 					 * by extent_cache
279 					 */
280 	DATA_GENERIC_ENHANCE_UPDATE,	/*
281 					 * strong check on range and segment
282 					 * bitmap for update case
283 					 */
284 	META_GENERIC,
285 };
286 
287 /* for the list of ino */
288 enum {
289 	ORPHAN_INO,		/* for orphan ino list */
290 	APPEND_INO,		/* for append ino list */
291 	UPDATE_INO,		/* for update ino list */
292 	TRANS_DIR_INO,		/* for transactions dir ino list */
293 	XATTR_DIR_INO,		/* for xattr updated dir ino list */
294 	FLUSH_INO,		/* for multiple device flushing */
295 	MAX_INO_ENTRY,		/* max. list */
296 };
297 
298 struct ino_entry {
299 	struct list_head list;		/* list head */
300 	nid_t ino;			/* inode number */
301 	unsigned int dirty_device;	/* dirty device bitmap */
302 };
303 
304 /* for the list of inodes to be GCed */
305 struct inode_entry {
306 	struct list_head list;	/* list head */
307 	struct inode *inode;	/* vfs inode pointer */
308 };
309 
310 struct fsync_node_entry {
311 	struct list_head list;	/* list head */
312 	struct page *page;	/* warm node page pointer */
313 	unsigned int seq_id;	/* sequence id */
314 };
315 
316 struct ckpt_req {
317 	struct completion wait;		/* completion for checkpoint done */
318 	struct llist_node llnode;	/* llist_node to be linked in wait queue */
319 	int ret;			/* return code of checkpoint */
320 	ktime_t queue_time;		/* request queued time */
321 };
322 
323 struct ckpt_req_control {
324 	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
325 	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
326 	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
327 	atomic_t issued_ckpt;		/* # of actually issued ckpts */
328 	atomic_t total_ckpt;		/* # of total ckpts */
329 	atomic_t queued_ckpt;		/* # of queued ckpts */
330 	struct llist_head issue_list;	/* list for command issue */
331 	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
332 	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
333 	unsigned int peak_time;		/* peak wait time in msec until now */
334 };
335 
336 /* for the bitmap indicate blocks to be discarded */
337 struct discard_entry {
338 	struct list_head list;	/* list head */
339 	block_t start_blkaddr;	/* start blockaddr of current segment */
340 	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
341 };
342 
343 /* minimum discard granularity, unit: block count */
344 #define MIN_DISCARD_GRANULARITY		1
345 /* default discard granularity of inner discard thread, unit: block count */
346 #define DEFAULT_DISCARD_GRANULARITY		16
347 /* default maximum discard granularity of ordered discard, unit: block count */
348 #define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY	16
349 
350 /* max discard pend list number */
351 #define MAX_PLIST_NUM		512
352 #define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
353 					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
354 
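/*
 * Example index selection for pend_list[]:
 *	plist_idx(1)    == 0	(single-block discards)
 *	plist_idx(16)   == 15
 *	plist_idx(512)  == 511
 *	plist_idx(4096) == 511	(every request >= MAX_PLIST_NUM blocks
 *				 shares the last list)
 */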
355 enum {
356 	D_PREP,			/* initial */
357 	D_PARTIAL,		/* partially submitted */
358 	D_SUBMIT,		/* all submitted */
359 	D_DONE,			/* finished */
360 };
361 
362 struct discard_info {
363 	block_t lstart;			/* logical start address */
364 	block_t len;			/* length */
365 	block_t start;			/* actual start address in dev */
366 };
367 
368 struct discard_cmd {
369 	struct rb_node rb_node;		/* rb node located in rb-tree */
370 	struct discard_info di;		/* discard info */
371 	struct list_head list;		/* command list */
372 	struct completion wait;		/* completion */
373 	struct block_device *bdev;	/* bdev */
374 	unsigned short ref;		/* reference count */
375 	unsigned char state;		/* state */
376 	unsigned char queued;		/* queued discard */
377 	int error;			/* bio error */
378 	spinlock_t lock;		/* for state/bio_ref updating */
379 	unsigned short bio_ref;		/* bio reference count */
380 };
381 
382 enum {
383 	DPOLICY_BG,
384 	DPOLICY_FORCE,
385 	DPOLICY_FSTRIM,
386 	DPOLICY_UMOUNT,
387 	MAX_DPOLICY,
388 };
389 
390 enum {
391 	DPOLICY_IO_AWARE_DISABLE,	/* force to not be aware of IO */
392 	DPOLICY_IO_AWARE_ENABLE,	/* force to be aware of IO */
393 	DPOLICY_IO_AWARE_MAX,
394 };
395 
396 struct discard_policy {
397 	int type;			/* type of discard */
398 	unsigned int min_interval;	/* used for candidates exist */
399 	unsigned int mid_interval;	/* used for device busy */
400 	unsigned int max_interval;	/* used for candidates not exist */
401 	unsigned int max_requests;	/* # of discards issued per round */
402 	unsigned int io_aware_gran;	/* minimum granularity at which discards are not I/O-aware */
403 	bool io_aware;			/* issue discard in idle time */
404 	bool sync;			/* submit discard with REQ_SYNC flag */
405 	bool ordered;			/* issue discard by lba order */
406 	bool timeout;			/* discard timeout for put_super */
407 	unsigned int granularity;	/* discard granularity */
408 };
409 
410 struct discard_cmd_control {
411 	struct task_struct *f2fs_issue_discard;	/* discard thread */
412 	struct list_head entry_list;		/* 4KB discard entry list */
413 	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
414 	struct list_head wait_list;		/* store on-flushing entries */
415 	struct list_head fstrim_list;		/* in-flight discard from fstrim */
416 	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
417 	struct mutex cmd_lock;
418 	unsigned int nr_discards;		/* # of discards in the list */
419 	unsigned int max_discards;		/* max. discards to be issued */
420 	unsigned int max_discard_request;	/* max. discard request per round */
421 	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
422 	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
423 	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
424 	unsigned int discard_io_aware_gran; /* minimum discard granularity that is not I/O-aware */
425 	unsigned int discard_urgent_util;	/* utilization above which discards are issued proactively */
426 	unsigned int discard_granularity;	/* discard granularity */
427 	unsigned int max_ordered_discard;	/* maximum discard granularity issued by lba order */
428 	unsigned int discard_io_aware;		/* io_aware policy */
429 	unsigned int undiscard_blks;		/* # of undiscard blocks */
430 	unsigned int next_pos;			/* next discard position */
431 	atomic_t issued_discard;		/* # of issued discard */
432 	atomic_t queued_discard;		/* # of queued discard */
433 	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
434 	struct rb_root_cached root;		/* root of discard rb-tree */
435 	bool rbtree_check;			/* config for consistency check */
436 	bool discard_wake;			/* to wake up discard thread */
437 };
438 
439 /* for the list of fsync inodes, used only during recovery */
440 struct fsync_inode_entry {
441 	struct list_head list;	/* list head */
442 	struct inode *inode;	/* vfs inode pointer */
443 	block_t blkaddr;	/* block address locating the last fsync */
444 	block_t last_dentry;	/* block address locating the last dentry */
445 };
446 
447 #define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
448 #define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))
449 
450 #define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
451 #define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
452 #define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
453 #define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)
454 
455 #define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
456 #define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
457 
458 static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
459 {
460 	int before = nats_in_cursum(journal);
461 
462 	journal->n_nats = cpu_to_le16(before + i);
463 	return before;
464 }
465 
466 static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
467 {
468 	int before = sits_in_cursum(journal);
469 
470 	journal->n_sits = cpu_to_le16(before + i);
471 	return before;
472 }
473 
474 static inline bool __has_cursum_space(struct f2fs_journal *journal,
475 							int size, int type)
476 {
477 	if (type == NAT_JOURNAL)
478 		return size <= MAX_NAT_JENTRIES(journal);
479 	return size <= MAX_SIT_JENTRIES(journal);
480 }
481 
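/*
 * Usage sketch (hypothetical call site): check remaining journal room
 * before packing dirty entries into the current summary block.
 *
 *	if (__has_cursum_space(journal, nr_dirty, NAT_JOURNAL)) {
 *		// room left: append entries, then account for them
 *		update_nats_in_cursum(journal, nr_dirty);
 *	} else {
 *		// journal full: flush entries to their NAT blocks instead
 *	}
 */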
482 /* for inline stuff */
483 #define DEF_INLINE_RESERVED_SIZE	1
484 static inline int get_extra_isize(struct inode *inode);
485 static inline int get_inline_xattr_addrs(struct inode *inode);
486 #define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
487 				(CUR_ADDRS_PER_INODE(inode) -		\
488 				get_inline_xattr_addrs(inode) -	\
489 				DEF_INLINE_RESERVED_SIZE))
490 
491 /* for inline dir */
492 #define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
493 				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
494 				BITS_PER_BYTE + 1))
495 #define INLINE_DENTRY_BITMAP_SIZE(inode) \
496 	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
497 #define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
498 				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
499 				NR_INLINE_DENTRY(inode) + \
500 				INLINE_DENTRY_BITMAP_SIZE(inode)))
501 
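/*
 * Worked example, assuming the common 4KB-block defaults (923 block
 * addresses in the inode, 50 inline-xattr slots, SIZE_OF_DIR_ENTRY == 11,
 * F2FS_SLOT_LEN == 8 -- these figures are defaults, not guarantees):
 *
 *	MAX_INLINE_DATA           = 4 * (923 - 50 - 1)            = 3488 bytes
 *	NR_INLINE_DENTRY          = 3488 * 8 / ((11 + 8) * 8 + 1) = 182 entries
 *	INLINE_DENTRY_BITMAP_SIZE = DIV_ROUND_UP(182, 8)          = 23 bytes
 *	INLINE_RESERVED_SIZE      = 3488 - (19 * 182 + 23)        = 7 bytes
 */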
502 /*
503  * For INODE and NODE manager
504  */
505 /* for directory operations */
506 
507 struct f2fs_filename {
508 	/*
509 	 * The filename the user specified.  This is NULL for some
510 	 * filesystem-internal operations, e.g. converting an inline directory
511 	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
512 	 */
513 	const struct qstr *usr_fname;
514 
515 	/*
516 	 * The on-disk filename.  For encrypted directories, this is encrypted.
517 	 * This may be NULL for lookups in an encrypted dir without the key.
518 	 */
519 	struct fscrypt_str disk_name;
520 
521 	/* The dirhash of this filename */
522 	f2fs_hash_t hash;
523 
524 #ifdef CONFIG_FS_ENCRYPTION
525 	/*
526 	 * For lookups in encrypted directories: either the buffer backing
527 	 * disk_name, or a buffer that holds the decoded no-key name.
528 	 */
529 	struct fscrypt_str crypto_buf;
530 #endif
531 #if IS_ENABLED(CONFIG_UNICODE)
532 	/*
533 	 * For casefolded directories: the casefolded name, but it's left NULL
534 	 * if the original name is not valid Unicode, if the original name is
535 	 * "." or "..", if the directory is both casefolded and encrypted and
536 	 * its encryption key is unavailable, or if the filesystem is doing an
537 	 * internal operation where usr_fname is also NULL.  In all these cases
538 	 * we fall back to treating the name as an opaque byte sequence.
539 	 */
540 	struct qstr cf_name;
541 #endif
542 };
543 
544 struct f2fs_dentry_ptr {
545 	struct inode *inode;
546 	void *bitmap;
547 	struct f2fs_dir_entry *dentry;
548 	__u8 (*filename)[F2FS_SLOT_LEN];
549 	int max;
550 	int nr_bitmap;
551 };
552 
553 static inline void make_dentry_ptr_block(struct inode *inode,
554 		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
555 {
556 	d->inode = inode;
557 	d->max = NR_DENTRY_IN_BLOCK;
558 	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
559 	d->bitmap = t->dentry_bitmap;
560 	d->dentry = t->dentry;
561 	d->filename = t->filename;
562 }
563 
564 static inline void make_dentry_ptr_inline(struct inode *inode,
565 					struct f2fs_dentry_ptr *d, void *t)
566 {
567 	int entry_cnt = NR_INLINE_DENTRY(inode);
568 	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
569 	int reserved_size = INLINE_RESERVED_SIZE(inode);
570 
571 	d->inode = inode;
572 	d->max = entry_cnt;
573 	d->nr_bitmap = bitmap_size;
574 	d->bitmap = t;
575 	d->dentry = t + bitmap_size + reserved_size;
576 	d->filename = t + bitmap_size + reserved_size +
577 					SIZE_OF_DIR_ENTRY * entry_cnt;
578 }
579 
580 /*
581  * XATTR_NODE_OFFSET stores xattrs in one node block per file, keeping -1
582  * as its node offset to distinguish it from index node blocks.
583  * But some bits are used to mark the node block.
584  */
585 #define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
586 				>> OFFSET_BIT_SHIFT)
587 enum {
588 	ALLOC_NODE,			/* allocate a new node page if needed */
589 	LOOKUP_NODE,			/* look up a node without readahead */
590 	LOOKUP_NODE_RA,			/*
591 					 * look up a node with readahead called
592 					 * by get_data_block.
593 					 */
594 };
595 
596 #define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */
597 
598 /* congestion wait timeout value, default: 20ms */
599 #define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))
600 
601 /* maximum retry quota flush count */
602 #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8
603 
604 /* maximum retry of EIO'ed page */
605 #define MAX_RETRY_PAGE_EIO			100
606 
607 #define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */
608 
609 #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
610 
611 /* dirty segments threshold for triggering CP */
612 #define DEFAULT_DIRTY_THRESHOLD		4
613 
614 #define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
615 #define RECOVERY_MIN_RA_BLOCKS		1
616 
617 #define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */
618 
619 /* for in-memory extent cache entry */
620 #define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
621 
622 /* number of extent info in extent cache we try to shrink */
623 #define READ_EXTENT_CACHE_SHRINK_NUMBER	128
624 
625 /* number of age extent info in extent cache we try to shrink */
626 #define AGE_EXTENT_CACHE_SHRINK_NUMBER	128
627 #define LAST_AGE_WEIGHT			30
628 #define SAME_AGE_REGION			1024
629 
630 /*
631  * Define data block with age less than 1GB as hot data
632  * define data block with age less than 10GB but more than 1GB as warm data
633  */
634 #define DEF_HOT_DATA_AGE_THRESHOLD	262144
635 #define DEF_WARM_DATA_AGE_THRESHOLD	2621440
636 
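/*
 * The thresholds above are block counts; assuming 4KB blocks:
 *	262144 blocks  * 4KB = 1GB   (hot/warm boundary)
 *	2621440 blocks * 4KB = 10GB  (warm/cold boundary)
 */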
637 /* extent cache type */
638 enum extent_type {
639 	EX_READ,
640 	EX_BLOCK_AGE,
641 	NR_EXTENT_CACHES,
642 };
643 
644 struct extent_info {
645 	unsigned int fofs;		/* start offset in a file */
646 	unsigned int len;		/* length of the extent */
647 	union {
648 		/* read extent_cache */
649 		struct {
650 			/* start block address of the extent */
651 			block_t blk;
652 #ifdef CONFIG_F2FS_FS_COMPRESSION
653 			/* physical extent length of compressed blocks */
654 			unsigned int c_len;
655 #endif
656 		};
657 		/* block age extent_cache */
658 		struct {
659 			/* block age of the extent */
660 			unsigned long long age;
661 			/* last total blocks allocated */
662 			unsigned long long last_blocks;
663 		};
664 	};
665 };
666 
667 struct extent_node {
668 	struct rb_node rb_node;		/* rb node located in rb-tree */
669 	struct extent_info ei;		/* extent info */
670 	struct list_head list;		/* node in global extent list of sbi */
671 	struct extent_tree *et;		/* extent tree pointer */
672 };
673 
674 struct extent_tree {
675 	nid_t ino;			/* inode number */
676 	enum extent_type type;		/* keep the extent tree type */
677 	struct rb_root_cached root;	/* root of extent info rb-tree */
678 	struct extent_node *cached_en;	/* recently accessed extent node */
679 	struct list_head list;		/* to be used by sbi->zombie_list */
680 	rwlock_t lock;			/* protect extent info rb-tree */
681 	atomic_t node_cnt;		/* # of extent nodes in rb-tree */
682 	bool largest_updated;		/* largest extent updated */
683 	struct extent_info largest;	/* largest cached extent for EX_READ */
684 };
685 
686 struct extent_tree_info {
687 	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
688 	struct mutex extent_tree_lock;	/* locking extent radix tree */
689 	struct list_head extent_list;		/* lru list for shrinker */
690 	spinlock_t extent_lock;			/* locking extent lru list */
691 	atomic_t total_ext_tree;		/* extent tree count */
692 	struct list_head zombie_list;		/* extent zombie tree list */
693 	atomic_t total_zombie_tree;		/* extent zombie tree count */
694 	atomic_t total_ext_node;		/* extent info count */
695 };
696 
697 /*
698  * State of block returned by f2fs_map_blocks.
699  */
700 #define F2FS_MAP_NEW		(1U << 0)
701 #define F2FS_MAP_MAPPED		(1U << 1)
702 #define F2FS_MAP_DELALLOC	(1U << 2)
703 #define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
704 				F2FS_MAP_DELALLOC)
705 
706 struct f2fs_map_blocks {
707 	struct block_device *m_bdev;	/* for multi-device dio */
708 	block_t m_pblk;
709 	block_t m_lblk;
710 	unsigned int m_len;
711 	unsigned int m_flags;
712 	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
713 	pgoff_t *m_next_extent;		/* point to next possible extent */
714 	int m_seg_type;
715 	bool m_may_create;		/* indicate it is from write path */
716 	bool m_multidev_dio;		/* indicate it allows multi-device dio */
717 };
718 
719 /* for flag in get_data_block */
720 enum {
721 	F2FS_GET_BLOCK_DEFAULT,
722 	F2FS_GET_BLOCK_FIEMAP,
723 	F2FS_GET_BLOCK_BMAP,
724 	F2FS_GET_BLOCK_DIO,
725 	F2FS_GET_BLOCK_PRE_DIO,
726 	F2FS_GET_BLOCK_PRE_AIO,
727 	F2FS_GET_BLOCK_PRECACHE,
728 };
729 
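/*
 * Usage sketch (hypothetical helper; the real f2fs_map_blocks() entry
 * point is declared later in this header): a lookup fills the request
 * and inspects m_flags on return.
 *
 *	struct f2fs_map_blocks map = {};
 *
 *	map.m_lblk = lblk;		// first logical block to map
 *	map.m_len = len;		// number of blocks requested
 *	map.m_may_create = false;	// pure lookup, no allocation
 *	// on success with F2FS_MAP_MAPPED set, m_pblk/m_len/m_bdev
 *	// describe the physical extent backing [lblk, lblk + len)
 */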
730 /*
731  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
732  */
733 #define FADVISE_COLD_BIT	0x01
734 #define FADVISE_LOST_PINO_BIT	0x02
735 #define FADVISE_ENCRYPT_BIT	0x04
736 #define FADVISE_ENC_NAME_BIT	0x08
737 #define FADVISE_KEEP_SIZE_BIT	0x10
738 #define FADVISE_HOT_BIT		0x20
739 #define FADVISE_VERITY_BIT	0x40
740 #define FADVISE_TRUNC_BIT	0x80
741 
742 #define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)
743 
744 #define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
745 #define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
746 #define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
747 
748 #define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
749 #define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
750 #define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
751 
752 #define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
753 #define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
754 
755 #define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
756 #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
757 
758 #define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
759 #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
760 
761 #define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
762 #define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
763 #define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
764 
765 #define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
766 #define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)
767 
768 #define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
769 #define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
770 #define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)
771 
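/*
 * Usage sketch: i_advise hints are manipulated only through the wrappers
 * above (is_file()/set_file()/clear_file() are defined later in this
 * header), e.g. marking a multimedia file cold so GC leaves it alone:
 *
 *	if (!file_is_cold(inode))
 *		file_set_cold(inode);	// sets FADVISE_COLD_BIT in i_advise
 */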
772 #define DEF_DIR_LEVEL		0
773 
774 /* used for f2fs_inode_info->flags */
775 enum {
776 	FI_NEW_INODE,		/* indicate newly allocated inode */
777 	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
778 	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
779 	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
780 	FI_INC_LINK,		/* need to increment i_nlink */
781 	FI_ACL_MODE,		/* indicate acl mode */
782 	FI_NO_ALLOC,		/* should not allocate any blocks */
783 	FI_FREE_NID,		/* free allocated nid */
784 	FI_NO_EXTENT,		/* not to use the extent cache */
785 	FI_INLINE_XATTR,	/* used for inline xattr */
786 	FI_INLINE_DATA,		/* used for inline data */
787 	FI_INLINE_DENTRY,	/* used for inline dentry */
788 	FI_APPEND_WRITE,	/* inode has appended data */
789 	FI_UPDATE_WRITE,	/* inode has in-place-update data */
790 	FI_NEED_IPU,		/* used for ipu per file */
791 	FI_ATOMIC_FILE,		/* indicate atomic file */
792 	FI_DATA_EXIST,		/* indicate data exists */
793 	FI_SKIP_WRITES,		/* should skip data page writeback */
794 	FI_OPU_WRITE,		/* used for opu per file */
795 	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
796 	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
797 	FI_HOT_DATA,		/* indicate file is hot */
798 	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
799 	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
800 	FI_PIN_FILE,		/* indicate file should not be gced */
801 	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
802 	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
803 	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
804 	FI_MMAP_FILE,		/* indicate file was mmapped */
805 	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
806 	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
807 	FI_ALIGNED_WRITE,	/* enable aligned write */
808 	FI_COW_FILE,		/* indicate COW file */
809 	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
810 	FI_ATOMIC_DIRTIED,	/* indicate atomic file is dirtied */
811 	FI_ATOMIC_REPLACE,	/* indicate atomic replace */
812 	FI_OPENED_FILE,		/* indicate file has been opened */
813 	FI_MAX,			/* max flag, never be used */
814 };
815 
816 struct f2fs_inode_info {
817 	struct inode vfs_inode;		/* serve a vfs inode */
818 	unsigned long i_flags;		/* keep an inode flags for ioctl */
819 	unsigned char i_advise;		/* use to give file attribute hints */
820 	unsigned char i_dir_level;	/* use for dentry level for large dir */
821 	union {
822 		unsigned int i_current_depth;	/* only for directory depth */
823 		unsigned short i_gc_failures;	/* for gc failure statistic */
824 	};
825 	unsigned int i_pino;		/* parent inode number */
826 	umode_t i_acl_mode;		/* keep file acl mode temporarily */
827 
828 	/* Fields below are used internally by f2fs */
829 	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
830 	struct f2fs_rwsem i_sem;	/* protect fi info */
831 	atomic_t dirty_pages;		/* # of dirty pages */
832 	f2fs_hash_t chash;		/* hash value of given file name */
833 	unsigned int clevel;		/* maximum level of given file name */
834 	struct task_struct *task;	/* lookup and create consistency */
835 	struct task_struct *cp_task;	/* separate cp/wb IO stats */
836 	struct task_struct *wb_task;	/* indicate inode is in context of writeback */
837 	nid_t i_xattr_nid;		/* node id that contains xattrs */
838 	loff_t	last_disk_size;		/* lastly written file size */
839 	spinlock_t i_size_lock;		/* protect last_disk_size */
840 
841 #ifdef CONFIG_QUOTA
842 	struct dquot __rcu *i_dquot[MAXQUOTAS];
843 
844 	/* quota space reservation, managed internally by quota code */
845 	qsize_t i_reserved_quota;
846 #endif
847 	struct list_head dirty_list;	/* dirty list for dirs and files */
848 	struct list_head gdirty_list;	/* linked in global dirty list */
849 	struct task_struct *atomic_write_task;	/* store atomic write task */
850 	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
851 					/* cached extent_tree entry */
852 	union {
853 		struct inode *cow_inode;	/* copy-on-write inode for atomic write */
854 		struct inode *atomic_inode;
855 					/* point to atomic_inode, available only for cow_inode */
856 	};
857 
858 	/* avoid racing between foreground op and gc */
859 	struct f2fs_rwsem i_gc_rwsem[2];
860 	struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */
861 
862 	int i_extra_isize;		/* size of extra space located in i_addr */
863 	kprojid_t i_projid;		/* id for project quota */
864 	int i_inline_xattr_size;	/* inline xattr size */
865 	struct timespec64 i_crtime;	/* inode creation time */
866 	struct timespec64 i_disk_time[3];/* inode disk times */
867 
868 	/* for file compress */
869 	atomic_t i_compr_blocks;		/* # of compressed blocks */
870 	unsigned char i_compress_algorithm;	/* algorithm type */
871 	unsigned char i_log_cluster_size;	/* log of cluster size */
872 	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
873 	unsigned char i_compress_flag;		/* compress flag */
874 	unsigned int i_cluster_size;		/* cluster size */
875 
876 	unsigned int atomic_write_cnt;
877 	loff_t original_i_size;		/* original i_size before atomic write */
878 };
879 
880 static inline void get_read_extent_info(struct extent_info *ext,
881 					struct f2fs_extent *i_ext)
882 {
883 	ext->fofs = le32_to_cpu(i_ext->fofs);
884 	ext->blk = le32_to_cpu(i_ext->blk);
885 	ext->len = le32_to_cpu(i_ext->len);
886 }
887 
888 static inline void set_raw_read_extent(struct extent_info *ext,
889 					struct f2fs_extent *i_ext)
890 {
891 	i_ext->fofs = cpu_to_le32(ext->fofs);
892 	i_ext->blk = cpu_to_le32(ext->blk);
893 	i_ext->len = cpu_to_le32(ext->len);
894 }
895 
896 static inline bool __is_discard_mergeable(struct discard_info *back,
897 			struct discard_info *front, unsigned int max_len)
898 {
899 	return (back->lstart + back->len == front->lstart) &&
900 		(back->len + front->len <= max_len);
901 }
902 
903 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
904 			struct discard_info *back, unsigned int max_len)
905 {
906 	return __is_discard_mergeable(back, cur, max_len);
907 }
908 
909 static inline bool __is_discard_front_mergeable(struct discard_info *cur,
910 			struct discard_info *front, unsigned int max_len)
911 {
912 	return __is_discard_mergeable(cur, front, max_len);
913 }
914 
915 /*
916  * For free nid management
917  */
918 enum nid_state {
919 	FREE_NID,		/* newly added to free nid list */
920 	PREALLOC_NID,		/* it is preallocated */
921 	MAX_NID_STATE,
922 };
923 
924 enum nat_state {
925 	TOTAL_NAT,
926 	DIRTY_NAT,
927 	RECLAIMABLE_NAT,
928 	MAX_NAT_STATE,
929 };
930 
931 struct f2fs_nm_info {
932 	block_t nat_blkaddr;		/* base disk address of NAT */
933 	nid_t max_nid;			/* maximum possible node ids */
934 	nid_t available_nids;		/* # of available node ids */
935 	nid_t next_scan_nid;		/* the next nid to be scanned */
936 	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
937 	unsigned int ram_thresh;	/* control the memory footprint */
938 	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
939 	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */
940 
941 	/* NAT cache management */
942 	struct radix_tree_root nat_root;/* root of the nat entry cache */
943 	struct radix_tree_root nat_set_root;/* root of the nat set cache */
944 	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
945 	struct list_head nat_entries;	/* cached nat entry list (clean) */
946 	spinlock_t nat_list_lock;	/* protect clean nat entry list */
947 	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
948 	unsigned int nat_blocks;	/* # of nat blocks */
949 
950 	/* free node ids management */
951 	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
952 	struct list_head free_nid_list;		/* list for free nids excluding preallocated nids */
953 	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
954 	spinlock_t nid_list_lock;	/* protect nid lists ops */
955 	struct mutex build_lock;	/* lock for build free nids */
956 	unsigned char **free_nid_bitmap;
957 	unsigned char *nat_block_bitmap;
958 	unsigned short *free_nid_count;	/* free nid count of NAT block */
959 
960 	/* for checkpoint */
961 	char *nat_bitmap;		/* NAT bitmap pointer */
962 
963 	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
964 	unsigned char *nat_bits;	/* NAT bits blocks */
965 	unsigned char *full_nat_bits;	/* full NAT pages */
966 	unsigned char *empty_nat_bits;	/* empty NAT pages */
967 #ifdef CONFIG_F2FS_CHECK_FS
968 	char *nat_bitmap_mir;		/* NAT bitmap mirror */
969 #endif
970 	int bitmap_size;		/* bitmap size */
971 };
972 
973 /*
974  * this structure is used as one of function parameters.
975  * all the information are dedicated to a given direct node block determined
976  * by the data offset in a file.
977  */
978 struct dnode_of_data {
979 	struct inode *inode;		/* vfs inode pointer */
980 	struct page *inode_page;	/* its inode page, NULL is possible */
981 	struct page *node_page;		/* cached direct node page */
982 	nid_t nid;			/* node id of the direct node block */
983 	unsigned int ofs_in_node;	/* data offset in the node page */
984 	bool inode_page_locked;		/* inode page is locked or not */
985 	bool node_changed;		/* is node block changed */
986 	char cur_level;			/* level of hole node page */
987 	char max_level;			/* level of current page located */
988 	block_t	data_blkaddr;		/* block address of the node block */
989 };
990 
991 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
992 		struct page *ipage, struct page *npage, nid_t nid)
993 {
994 	memset(dn, 0, sizeof(*dn));
995 	dn->inode = inode;
996 	dn->inode_page = ipage;
997 	dn->node_page = npage;
998 	dn->nid = nid;
999 }
1000 
1001 /*
1002  * For SIT manager
1003  *
1004  * By default, there are 6 active log areas across the whole main area.
1005  * When considering hot and cold data separation to reduce cleaning overhead,
1006  * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
1007  * respectively.
1008  * In the current design, you should not change the numbers intentionally.
1009  * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
1010  * logs individually according to the underlying devices. (default: 6)
1011  * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
1012  * data and 8 for node logs.
1013  */
1014 #define	NR_CURSEG_DATA_TYPE	(3)
1015 #define NR_CURSEG_NODE_TYPE	(3)
1016 #define NR_CURSEG_INMEM_TYPE	(2)
1017 #define NR_CURSEG_RO_TYPE	(2)
1018 #define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
1019 #define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
1020 
1021 enum {
1022 	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
1023 	CURSEG_WARM_DATA,	/* data blocks */
1024 	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
1025 	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
1026 	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
1027 	CURSEG_COLD_NODE,	/* indirect node blocks */
1028 	NR_PERSISTENT_LOG,	/* number of persistent log */
1029 	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
1030 				/* pinned file that needs consecutive block address */
1031 	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
1032 	NO_CHECK_TYPE,		/* number of persistent & inmem log */
1033 };
1034 
1035 struct flush_cmd {
1036 	struct completion wait;
1037 	struct llist_node llnode;
1038 	nid_t ino;
1039 	int ret;
1040 };
1041 
1042 struct flush_cmd_control {
1043 	struct task_struct *f2fs_issue_flush;	/* flush thread */
1044 	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
1045 	atomic_t issued_flush;			/* # of issued flushes */
1046 	atomic_t queued_flush;			/* # of queued flushes */
1047 	struct llist_head issue_list;		/* list for command issue */
1048 	struct llist_node *dispatch_list;	/* list for command dispatch */
1049 };
1050 
1051 struct f2fs_sm_info {
1052 	struct sit_info *sit_info;		/* whole segment information */
1053 	struct free_segmap_info *free_info;	/* free segment information */
1054 	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
1055 	struct curseg_info *curseg_array;	/* active segment information */
1056 
1057 	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */
1058 
1059 	block_t seg0_blkaddr;		/* block address of 0'th segment */
1060 	block_t main_blkaddr;		/* start block address of main area */
1061 	block_t ssa_blkaddr;		/* start block address of SSA area */
1062 
1063 	unsigned int segment_count;	/* total # of segments */
1064 	unsigned int main_segments;	/* # of segments in main area */
1065 	unsigned int reserved_segments;	/* # of reserved segments */
1066 	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
1067 	unsigned int ovp_segments;	/* # of overprovision segments */
1068 
1069 	/* a threshold to reclaim prefree segments */
1070 	unsigned int rec_prefree_segments;
1071 
1072 	struct list_head sit_entry_set;	/* sit entry set list */
1073 
1074 	unsigned int ipu_policy;	/* in-place-update policy */
1075 	unsigned int min_ipu_util;	/* in-place-update threshold */
1076 	unsigned int min_fsync_blocks;	/* threshold for fsync */
1077 	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
1078 	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
1079 	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */
1080 
1081 	/* for flush command control */
1082 	struct flush_cmd_control *fcc_info;
1083 
1084 	/* for discard command control */
1085 	struct discard_cmd_control *dcc_info;
1086 };
1087 
1088 /*
1089  * For superblock
1090  */
1091 /*
1092  * COUNT_TYPE for monitoring
1093  *
1094  * f2fs monitors the number of several block types such as on-writeback,
1095  * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
1096  */
1097 #define WB_DATA_TYPE(p, f)			\
1098 	(f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
1099 enum count_type {
1100 	F2FS_DIRTY_DENTS,
1101 	F2FS_DIRTY_DATA,
1102 	F2FS_DIRTY_QDATA,
1103 	F2FS_DIRTY_NODES,
1104 	F2FS_DIRTY_META,
1105 	F2FS_DIRTY_IMETA,
1106 	F2FS_WB_CP_DATA,
1107 	F2FS_WB_DATA,
1108 	F2FS_RD_DATA,
1109 	F2FS_RD_NODE,
1110 	F2FS_RD_META,
1111 	F2FS_DIO_WRITE,
1112 	F2FS_DIO_READ,
1113 	NR_COUNT_TYPE,
1114 };
1115 
1116 /*
1117  * Below are the page types of bios used in submit_bio().
1118  * The available types are:
1119  * DATA			User data pages. Written in async mode.
1120  * NODE			Node pages. Written in async mode.
1121  * META			FS metadata pages such as SIT, NAT, CP.
1122  * NR_PAGE_TYPE		The number of page types.
1123  * META_FLUSH		Make sure the previous pages are written
1124  *			while waiting for the bio's completion.
1125  * ...			Can only be used with META.
1126  */
1127 #define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
1128 #define PAGE_TYPE_ON_MAIN(type)	((type) == DATA || (type) == NODE)
1129 enum page_type {
1130 	DATA = 0,
1131 	NODE = 1,	/* should not change this */
1132 	META,
1133 	NR_PAGE_TYPE,
1134 	META_FLUSH,
1135 	IPU,		/* the below types are used by tracepoints only. */
1136 	OPU,
1137 };
1138 
1139 enum temp_type {
1140 	HOT = 0,	/* must be zero for meta bio */
1141 	WARM,
1142 	COLD,
1143 	NR_TEMP_TYPE,
1144 };
1145 
1146 enum need_lock_type {
1147 	LOCK_REQ = 0,
1148 	LOCK_DONE,
1149 	LOCK_RETRY,
1150 };
1151 
1152 enum cp_reason_type {
1153 	CP_NO_NEEDED,
1154 	CP_NON_REGULAR,
1155 	CP_COMPRESSED,
1156 	CP_HARDLINK,
1157 	CP_SB_NEED_CP,
1158 	CP_WRONG_PINO,
1159 	CP_NO_SPC_ROLL,
1160 	CP_NODE_NEED_CP,
1161 	CP_FASTBOOT_MODE,
1162 	CP_SPEC_LOG_NUM,
1163 	CP_RECOVER_DIR,
1164 	CP_XATTR_DIR,
1165 };
1166 
1167 enum iostat_type {
1168 	/* WRITE IO */
1169 	APP_DIRECT_IO,			/* app direct write IOs */
1170 	APP_BUFFERED_IO,		/* app buffered write IOs */
1171 	APP_WRITE_IO,			/* app write IOs */
1172 	APP_MAPPED_IO,			/* app mapped IOs */
1173 	APP_BUFFERED_CDATA_IO,		/* app buffered write IOs on compressed file */
1174 	APP_MAPPED_CDATA_IO,		/* app mapped write IOs on compressed file */
1175 	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
1176 	FS_CDATA_IO,			/* data IOs from kworker/fsync/reclaimer on compressed file */
1177 	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
1178 	FS_META_IO,			/* meta IOs from kworker/reclaimer */
1179 	FS_GC_DATA_IO,			/* data IOs from foreground gc */
1180 	FS_GC_NODE_IO,			/* node IOs from foreground gc */
1181 	FS_CP_DATA_IO,			/* data IOs from checkpoint */
1182 	FS_CP_NODE_IO,			/* node IOs from checkpoint */
1183 	FS_CP_META_IO,			/* meta IOs from checkpoint */
1184 
1185 	/* READ IO */
1186 	APP_DIRECT_READ_IO,		/* app direct read IOs */
1187 	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
1188 	APP_READ_IO,			/* app read IOs */
1189 	APP_MAPPED_READ_IO,		/* app mapped read IOs */
1190 	APP_BUFFERED_CDATA_READ_IO,	/* app buffered read IOs on compressed file  */
1191 	APP_MAPPED_CDATA_READ_IO,	/* app mapped read IOs on compressed file  */
1192 	FS_DATA_READ_IO,		/* data read IOs */
1193 	FS_GDATA_READ_IO,		/* data read IOs from background gc */
1194 	FS_CDATA_READ_IO,		/* compressed data read IOs */
1195 	FS_NODE_READ_IO,		/* node read IOs */
1196 	FS_META_READ_IO,		/* meta read IOs */
1197 
1198 	/* other */
1199 	FS_DISCARD_IO,			/* discard */
1200 	FS_FLUSH_IO,			/* flush */
1201 	FS_ZONE_RESET_IO,		/* zone reset */
1202 	NR_IO_TYPE,
1203 };
1204 
1205 struct f2fs_io_info {
1206 	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
1207 	nid_t ino;		/* inode number */
1208 	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
1209 	enum temp_type temp;	/* contains HOT/WARM/COLD */
1210 	enum req_op op;		/* contains REQ_OP_ */
1211 	blk_opf_t op_flags;	/* req_flag_bits */
1212 	block_t new_blkaddr;	/* new block address to be written */
1213 	block_t old_blkaddr;	/* old block address before Cow */
1214 	struct page *page;	/* page to be written */
1215 	struct page *encrypted_page;	/* encrypted page */
1216 	struct page *compressed_page;	/* compressed page */
1217 	struct list_head list;		/* serialize IOs */
1218 	unsigned int compr_blocks;	/* # of compressed block addresses */
1219 	unsigned int need_lock:8;	/* indicate we need to lock cp_rwsem */
1220 	unsigned int version:8;		/* version of the node */
1221 	unsigned int submitted:1;	/* indicate IO submission */
1222 	unsigned int in_list:1;		/* indicate fio is in io_list */
1223 	unsigned int is_por:1;		/* indicate IO is from recovery or not */
1224 	unsigned int encrypted:1;	/* indicate file is encrypted */
1225 	unsigned int meta_gc:1;		/* require meta inode GC */
1226 	enum iostat_type io_type;	/* io type */
1227 	struct writeback_control *io_wbc; /* writeback control */
1228 	struct bio **bio;		/* bio for ipu */
1229 	sector_t *last_block;		/* last block number in bio */
1230 };
1231 
1232 struct bio_entry {
1233 	struct bio *bio;
1234 	struct list_head list;
1235 };
1236 
1237 #define is_read_io(rw) ((rw) == READ)
1238 struct f2fs_bio_info {
1239 	struct f2fs_sb_info *sbi;	/* f2fs superblock */
1240 	struct bio *bio;		/* bios to merge */
1241 	sector_t last_block_in_bio;	/* last block number */
1242 	struct f2fs_io_info fio;	/* store buffered io info. */
1243 #ifdef CONFIG_BLK_DEV_ZONED
1244 	struct completion zone_wait;	/* condition value for the previous open zone to close */
1245 	struct bio *zone_pending_bio;	/* pending bio for the previous zone */
1246 	void *bi_private;		/* previous bi_private for pending bio */
1247 #endif
1248 	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
1249 	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
1250 	struct list_head io_list;	/* track fios */
1251 	struct list_head bio_list;	/* bio entry list head */
1252 	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
1253 };
1254 
1255 #define FDEV(i)				(sbi->devs[i])
1256 #define RDEV(i)				(raw_super->devs[i])
1257 struct f2fs_dev_info {
1258 	struct file *bdev_file;
1259 	struct block_device *bdev;
1260 	char path[MAX_PATH_LEN];
1261 	unsigned int total_segments;
1262 	block_t start_blk;
1263 	block_t end_blk;
1264 #ifdef CONFIG_BLK_DEV_ZONED
1265 	unsigned int nr_blkz;		/* Total number of zones */
1266 	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
1267 #endif
1268 };
1269 
1270 enum inode_type {
1271 	DIR_INODE,			/* for dirty dir inode */
1272 	FILE_INODE,			/* for dirty regular/symlink inode */
1273 	DIRTY_META,			/* for all dirtied inode metadata */
1274 	NR_INODE_TYPE,
1275 };
1276 
1277 /* for inner inode cache management */
1278 struct inode_management {
1279 	struct radix_tree_root ino_root;	/* ino entry array */
1280 	spinlock_t ino_lock;			/* for ino entry lock */
1281 	struct list_head ino_list;		/* inode list head */
1282 	unsigned long ino_num;			/* number of entries */
1283 };
1284 
1285 /* for GC_AT */
1286 struct atgc_management {
1287 	bool atgc_enabled;			/* ATGC is enabled or not */
1288 	struct rb_root_cached root;		/* root of victim rb-tree */
1289 	struct list_head victim_list;		/* linked with all victim entries */
1290 	unsigned int victim_count;		/* victim count in rb-tree */
1291 	unsigned int candidate_ratio;		/* candidate ratio */
1292 	unsigned int max_candidate_count;	/* max candidate count */
1293 	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
1294 	unsigned long long age_threshold;	/* age threshold */
1295 };
1296 
1297 struct f2fs_gc_control {
1298 	unsigned int victim_segno;	/* target victim segment number */
1299 	int init_gc_type;		/* FG_GC or BG_GC */
1300 	bool no_bg_gc;			/* check the space and stop bg_gc */
1301 	bool should_migrate_blocks;	/* should migrate blocks */
1302 	bool err_gc_skipped;		/* return EAGAIN if GC skipped */
1303 	bool one_time;			/* require one time GC in one migration unit */
1304 	unsigned int nr_free_secs;	/* # of free sections to do GC */
1305 };
1306 
1307 /*
1308  * For s_flag in struct f2fs_sb_info
1309  * Modification on enum should be synchronized with s_flag array
1310  */
1311 enum {
1312 	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
1313 	SBI_IS_CLOSE,				/* specify unmounting */
1314 	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
1315 	SBI_POR_DOING,				/* recovery is doing or not */
1316 	SBI_NEED_SB_WRITE,			/* need to recover superblock */
1317 	SBI_NEED_CP,				/* need to checkpoint */
1318 	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
1319 	SBI_IS_RECOVERED,			/* recovered orphan/data */
1320 	SBI_CP_DISABLED,			/* CP was disabled last mount */
1321 	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
1322 	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
1323 	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
1324 	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
1325 	SBI_IS_RESIZEFS,			/* resizefs is in process */
1326 	SBI_IS_FREEZING,			/* freezefs is in process */
1327 	SBI_IS_WRITABLE,			/* remove ro mount option transiently */
1328 	MAX_SBI_FLAG,
1329 };
1330 
1331 enum {
1332 	CP_TIME,
1333 	REQ_TIME,
1334 	DISCARD_TIME,
1335 	GC_TIME,
1336 	DISABLE_TIME,
1337 	UMOUNT_DISCARD_TIMEOUT,
1338 	MAX_TIME,
1339 };
1340 
1341 /* Note that you need to keep synchronization with this gc_mode_names array */
1342 enum {
1343 	GC_NORMAL,
1344 	GC_IDLE_CB,
1345 	GC_IDLE_GREEDY,
1346 	GC_IDLE_AT,
1347 	GC_URGENT_HIGH,
1348 	GC_URGENT_LOW,
1349 	GC_URGENT_MID,
1350 	MAX_GC_MODE,
1351 };
1352 
1353 enum {
1354 	BGGC_MODE_ON,		/* background gc is on */
1355 	BGGC_MODE_OFF,		/* background gc is off */
1356 	BGGC_MODE_SYNC,		/*
1357 				 * background gc is on, migrating blocks
1358 				 * like foreground gc
1359 				 */
1360 };
1361 
1362 enum {
1363 	FS_MODE_ADAPTIVE,		/* use both lfs/ssr allocation */
1364 	FS_MODE_LFS,			/* use lfs allocation only */
1365 	FS_MODE_FRAGMENT_SEG,		/* segment fragmentation mode */
1366 	FS_MODE_FRAGMENT_BLK,		/* block fragmentation mode */
1367 };
1368 
1369 enum {
1370 	ALLOC_MODE_DEFAULT,	/* stay default */
1371 	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
1372 };
1373 
1374 enum fsync_mode {
1375 	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
1376 	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
1377 	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
1378 };
1379 
1380 enum {
1381 	COMPR_MODE_FS,		/*
1382 				 * automatically compress files that
1383 				 * have compression enabled
1384 				 */
1385 	COMPR_MODE_USER,	/*
1386 				 * automatic compression is disabled;
1387 				 * the user controls file compression
1388 				 * using ioctls
1389 				 */
1390 };
1391 
1392 enum {
1393 	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
1394 	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
1395 	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
1396 };
1397 
1398 enum {
1399 	MEMORY_MODE_NORMAL,	/* memory mode for normal devices */
1400 	MEMORY_MODE_LOW,	/* memory mode for low memory devices */
1401 };
1402 
1403 enum errors_option {
1404 	MOUNT_ERRORS_READONLY,	/* remount fs ro on errors */
1405 	MOUNT_ERRORS_CONTINUE,	/* continue on errors */
1406 	MOUNT_ERRORS_PANIC,	/* panic on errors */
1407 };
1408 
1409 enum {
1410 	BACKGROUND,
1411 	FOREGROUND,
1412 	MAX_CALL_TYPE,
1413 	TOTAL_CALL = FOREGROUND,
1414 };
1415 
1416 static inline int f2fs_test_bit(unsigned int nr, char *addr);
1417 static inline void f2fs_set_bit(unsigned int nr, char *addr);
1418 static inline void f2fs_clear_bit(unsigned int nr, char *addr);
1419 
1420 /*
1421  * Layout of f2fs page.private:
1422  *
1423  * Layout A: lowest bit should be 1
1424  * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
1425  * bit 0	PAGE_PRIVATE_NOT_POINTER
1426  * bit 1	PAGE_PRIVATE_ONGOING_MIGRATION
1427  * bit 2	PAGE_PRIVATE_INLINE_INODE
1428  * bit 3	PAGE_PRIVATE_REF_RESOURCE
1429  * bit 4	PAGE_PRIVATE_ATOMIC_WRITE
1430  * bit 5-	f2fs private data
1431  *
1432  * Layout B: lowest bit should be 0
1433  * page.private is a wrapped pointer.
1434  */
1435 enum {
1436 	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
1437 	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
1438 	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
1439 	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
1440 	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
1441 	PAGE_PRIVATE_MAX
1442 };
1443 
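/*
 * Sketch of a layout check (the real accessors are generated by macros
 * later in this header; this standalone form is illustrative only):
 *
 *	static bool example_page_private_flag(struct page *page, int bit)
 *	{
 *		// layout B: private is a wrapped pointer, no flag bits
 *		if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)))
 *			return false;
 *		return test_bit(bit, &page_private(page));	// layout A
 *	}
 */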
1444 /* For compression */
1445 enum compress_algorithm_type {
1446 	COMPRESS_LZO,
1447 	COMPRESS_LZ4,
1448 	COMPRESS_ZSTD,
1449 	COMPRESS_LZORLE,
1450 	COMPRESS_MAX,
1451 };
1452 
1453 enum compress_flag {
1454 	COMPRESS_CHKSUM,
1455 	COMPRESS_MAX_FLAG,
1456 };
1457 
1458 #define	COMPRESS_WATERMARK			20
1459 #define	COMPRESS_PERCENT			20
1460 
1461 #define COMPRESS_DATA_RESERVED_SIZE		4
1462 struct compress_data {
1463 	__le32 clen;			/* compressed data size */
1464 	__le32 chksum;			/* compressed data chksum */
1465 	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
1466 	u8 cdata[];			/* compressed data */
1467 };
1468 
1469 #define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))
1470 
1471 #define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
1472 
1473 #define F2FS_ZSTD_DEFAULT_CLEVEL	1
1474 
1475 #define	COMPRESS_LEVEL_OFFSET	8
1476 
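/*
 * Worked example: COMPRESS_HEADER_SIZE is 4 + 4 + 4*4 = 24 bytes ahead of
 * cdata[]. COMPRESS_LEVEL_OFFSET keeps the compress level in the high bits
 * of a packed flag word, leaving the low byte for flag bits, e.g.
 * (values illustrative):
 *	flag = (3 << COMPRESS_LEVEL_OFFSET) | BIT(COMPRESS_CHKSUM);
 */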
1477 /* compress context */
1478 struct compress_ctx {
1479 	struct inode *inode;		/* inode the context belong to */
1480 	pgoff_t cluster_idx;		/* cluster index number */
1481 	unsigned int cluster_size;	/* page count in cluster */
1482 	unsigned int log_cluster_size;	/* log of cluster size */
1483 	struct page **rpages;		/* pages store raw data in cluster */
1484 	unsigned int nr_rpages;		/* total page number in rpages */
1485 	struct page **cpages;		/* pages store compressed data in cluster */
1486 	unsigned int nr_cpages;		/* total page number in cpages */
1487 	unsigned int valid_nr_cpages;	/* valid page number in cpages */
1488 	void *rbuf;			/* virtual mapped address on rpages */
1489 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1490 	size_t rlen;			/* valid data length in rbuf */
1491 	size_t clen;			/* valid data length in cbuf */
1492 	void *private;			/* payload buffer for specified compression algorithm */
1493 	void *private2;			/* extra payload buffer */
1494 };
1495 
1496 /* compress context for write IO path */
1497 struct compress_io_ctx {
1498 	u32 magic;			/* magic number to indicate page is compressed */
1499 	struct inode *inode;		/* inode the context belongs to */
1500 	struct page **rpages;		/* pages store raw data in cluster */
1501 	unsigned int nr_rpages;		/* total page number in rpages */
1502 	atomic_t pending_pages;		/* in-flight compressed page count */
1503 };
1504 
1505 /* Context for decompressing one cluster on the read IO path */
1506 struct decompress_io_ctx {
1507 	u32 magic;			/* magic number to indicate page is compressed */
1508 	struct inode *inode;		/* inode the context belongs to */
1509 	pgoff_t cluster_idx;		/* cluster index number */
1510 	unsigned int cluster_size;	/* page count in cluster */
1511 	unsigned int log_cluster_size;	/* log of cluster size */
1512 	struct page **rpages;		/* pages store raw data in cluster */
1513 	unsigned int nr_rpages;		/* total page number in rpages */
1514 	struct page **cpages;		/* pages store compressed data in cluster */
1515 	unsigned int nr_cpages;		/* total page number in cpages */
1516 	struct page **tpages;		/* temp pages to pad holes in cluster */
1517 	void *rbuf;			/* virtual mapped address on rpages */
1518 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1519 	size_t rlen;			/* valid data length in rbuf */
1520 	size_t clen;			/* valid data length in cbuf */
1521 
1522 	/*
1523 	 * The number of compressed pages remaining to be read in this cluster.
1524 	 * This is initially nr_cpages.  It is decremented by 1 each time a page
1525 	 * has been read (or failed to be read).  When it reaches 0, the cluster
1526 	 * is decompressed (or an error is reported).
1527 	 *
1528 	 * If an error occurs before all the pages have been submitted for I/O,
1529 	 * then this will never reach 0.  In this case the I/O submitter is
1530 	 * responsible for calling f2fs_decompress_end_io() instead.
1531 	 */
1532 	atomic_t remaining_pages;
1533 
1534 	/*
1535 	 * Number of references to this decompress_io_ctx.
1536 	 *
1537 	 * One reference is held for I/O completion.  This reference is dropped
1538 	 * after the pagecache pages are updated and unlocked -- either after
1539 	 * decompression (and verity if enabled), or after an error.
1540 	 *
1541 	 * In addition, each compressed page holds a reference while it is in a
1542 	 * bio.  These references are necessary to prevent compressed pages from
1543 	 * being freed while they are still in a bio.
1544 	 */
1545 	refcount_t refcnt;
1546 
1547 	bool failed;			/* IO error occurred before decompression? */
1548 	bool need_verity;		/* need fs-verity verification after decompression? */
1549 	void *private;			/* payload buffer for specified decompression algorithm */
1550 	void *private2;			/* extra payload buffer */
1551 	struct work_struct verity_work;	/* work to verify the decompressed pages */
1552 	struct work_struct free_work;	/* work to free this structure later */
1553 };
1554 
1555 #define NULL_CLUSTER			((unsigned int)(~0))
1556 #define MIN_COMPRESS_LOG_SIZE		2
1557 #define MAX_COMPRESS_LOG_SIZE		8
1558 #define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
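/*
 * For instance, with 4KB pages the supported cluster sizes span
 * MAX_COMPRESS_WINDOW_SIZE(2) == 16KB up to MAX_COMPRESS_WINDOW_SIZE(8)
 * == 1MB, matching MIN/MAX_COMPRESS_LOG_SIZE above.
 */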
1559 
1560 struct f2fs_sb_info {
1561 	struct super_block *sb;			/* pointer to VFS super block */
1562 	struct proc_dir_entry *s_proc;		/* proc entry */
1563 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
1564 	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
1565 	int valid_super_block;			/* valid super block no */
1566 	unsigned long s_flag;				/* flags for sbi */
1567 	struct mutex writepages;		/* mutex for writepages() */
1568 
1569 #ifdef CONFIG_BLK_DEV_ZONED
1570 	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
1571 	unsigned int max_open_zones;		/* max open zone resources of the zoned device */
1572 	/* For adjusting the priority write position of data on zoned UFS */
1573 	unsigned int blkzone_alloc_policy;
1574 #endif
1575 
1576 	/* for node-related operations */
1577 	struct f2fs_nm_info *nm_info;		/* node manager */
1578 	struct inode *node_inode;		/* cache node blocks */
1579 
1580 	/* for segment-related operations */
1581 	struct f2fs_sm_info *sm_info;		/* segment manager */
1582 
1583 	/* for bio operations */
1584 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
1585 	/* keep migration IO order for LFS mode */
1586 	struct f2fs_rwsem io_order_lock;
1587 	pgoff_t page_eio_ofs[NR_PAGE_TYPE];	/* EIO page offset */
1588 	int page_eio_cnt[NR_PAGE_TYPE];		/* EIO count */
1589 
1590 	/* for checkpoint */
1591 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
1592 	int cur_cp_pack;			/* current cp pack (1 or 2) */
1593 	spinlock_t cp_lock;			/* for flag in ckpt */
1594 	struct inode *meta_inode;		/* cache meta blocks */
1595 	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
1596 	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
1597 	struct f2fs_rwsem node_write;		/* locking node writes */
1598 	struct f2fs_rwsem node_change;	/* locking node change */
1599 	wait_queue_head_t cp_wait;
1600 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
1601 	long interval_time[MAX_TIME];		/* to store thresholds */
1602 	struct ckpt_req_control cprc_info;	/* for checkpoint request control */
1603 
1604 	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */
1605 
1606 	spinlock_t fsync_node_lock;		/* for node entry lock */
1607 	struct list_head fsync_node_list;	/* node list head */
1608 	unsigned int fsync_seg_id;		/* sequence id */
1609 	unsigned int fsync_node_num;		/* number of node entries */
1610 
1611 	/* for orphan inode, use 0'th array */
1612 	unsigned int max_orphans;		/* max orphan inodes */
1613 
1614 	/* for inode management */
1615 	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
1616 	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
1617 	struct mutex flush_lock;		/* for flush exclusion */
1618 
1619 	/* for extent tree cache */
1620 	struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
1621 	atomic64_t allocated_data_blocks;	/* for block age extent_cache */
1622 
1623 	/* The thresholds used for hot and warm data separation */
1624 	unsigned int hot_data_age_threshold;
1625 	unsigned int warm_data_age_threshold;
1626 	unsigned int last_age_weight;
1627 
1628 	/* basic filesystem units */
1629 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
1630 	unsigned int log_blocksize;		/* log2 block size */
1631 	unsigned int blocksize;			/* block size */
1632 	unsigned int root_ino_num;		/* root inode number */
1633 	unsigned int node_ino_num;		/* node inode number */
1634 	unsigned int meta_ino_num;		/* meta inode number */
1635 	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
1636 	unsigned int blocks_per_seg;		/* blocks per segment */
1637 	unsigned int unusable_blocks_per_sec;	/* unusable blocks per section */
1638 	unsigned int segs_per_sec;		/* segments per section */
1639 	unsigned int secs_per_zone;		/* sections per zone */
1640 	unsigned int total_sections;		/* total section count */
1641 	unsigned int total_node_count;		/* total node block count */
1642 	unsigned int total_valid_node_count;	/* valid node block count */
1643 	int dir_level;				/* directory level */
1644 	bool readdir_ra;			/* readahead inode in readdir */
1645 	u64 max_io_bytes;			/* max io bytes to merge IOs */
1646 
1647 	block_t user_block_count;		/* # of user blocks */
1648 	block_t total_valid_block_count;	/* # of valid blocks */
1649 	block_t discard_blks;			/* discard command candidates */
1650 	block_t last_valid_block_count;		/* for recovery */
1651 	block_t reserved_blocks;		/* configurable reserved blocks */
1652 	block_t current_reserved_blocks;	/* current reserved blocks */
1653 
1654 	/* Additional tracking for no checkpoint mode */
1655 	block_t unusable_block_count;		/* # of blocks saved by last cp */
1656 
1657 	unsigned int nquota_files;		/* # of quota sysfile */
1658 	struct f2fs_rwsem quota_sem;		/* blocking cp for flags */
1659 
1660 	/* # of pages, see count_type */
1661 	atomic_t nr_pages[NR_COUNT_TYPE];
1662 	/* # of allocated blocks */
1663 	struct percpu_counter alloc_valid_block_count;
1664 	/* # of node block writes as roll forward recovery */
1665 	struct percpu_counter rf_node_block_count;
1666 
1667 	/* writeback control */
1668 	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */
1669 
1670 	/* valid inode count */
1671 	struct percpu_counter total_valid_inode_count;
1672 
1673 	struct f2fs_mount_info mount_opt;	/* mount options */
1674 
1675 	/* for cleaning operations */
1676 	struct f2fs_rwsem gc_lock;		/*
1677 						 * semaphore for GC; avoids races
1678 						 * between GC threads and between GC and CP
1679 						 */
1680 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
1681 	struct atgc_management am;		/* atgc management */
1682 	unsigned int cur_victim_sec;		/* current victim section num */
1683 	unsigned int gc_mode;			/* current GC state */
1684 	unsigned int next_victim_seg[2];	/* next segment in victim section */
1685 	spinlock_t gc_remaining_trials_lock;
1686 	/* remaining trial count for GC_URGENT_* and GC_IDLE_* */
1687 	unsigned int gc_remaining_trials;
1688 
1689 	/* for skip statistic */
1690 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
1691 
1692 	/* threshold for gc trials on pinned files */
1693 	unsigned short gc_pin_file_threshold;
1694 	struct f2fs_rwsem pin_sem;
1695 
1696 	/* maximum # of trials to find a victim segment for SSR and GC */
1697 	unsigned int max_victim_search;
1698 	/* migration granularity of garbage collection, unit: segment */
1699 	unsigned int migration_granularity;
1700 	/* migration window granularity of garbage collection, unit: segment */
1701 	unsigned int migration_window_granularity;
1702 
1703 	/*
1704 	 * for stat information.
1705 	 * one is for the LFS mode, and the other is for the SSR mode.
1706 	 */
1707 #ifdef CONFIG_F2FS_STAT_FS
1708 	struct f2fs_stat_info *stat_info;	/* FS status information */
1709 	atomic_t meta_count[META_MAX];		/* # of meta blocks */
1710 	unsigned int segment_count[2];		/* # of allocated segments */
1711 	unsigned int block_count[2];		/* # of allocated blocks */
1712 	atomic_t inplace_count;		/* # of inplace update */
1713 	/* # of lookup extent cache */
1714 	atomic64_t total_hit_ext[NR_EXTENT_CACHES];
1715 	/* # of hit rbtree extent node */
1716 	atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
1717 	/* # of hit cached extent node */
1718 	atomic64_t read_hit_cached[NR_EXTENT_CACHES];
1719 	/* # of hit largest extent node in read extent cache */
1720 	atomic64_t read_hit_largest;
1721 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
1722 	atomic_t inline_inode;			/* # of inline_data inodes */
1723 	atomic_t inline_dir;			/* # of inline_dentry inodes */
1724 	atomic_t compr_inode;			/* # of compressed inodes */
1725 	atomic64_t compr_blocks;		/* # of compressed blocks */
1726 	atomic_t swapfile_inode;		/* # of swapfile inodes */
1727 	atomic_t atomic_files;			/* # of opened atomic file */
1728 	atomic_t max_aw_cnt;			/* max # of atomic writes */
1729 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
1730 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
1731 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
1732 	atomic_t cp_call_count[MAX_CALL_TYPE];	/* # of cp call */
1733 #endif
1734 	spinlock_t stat_lock;			/* lock for stat operations */
1735 
1736 	/* to attach REQ_META|REQ_FUA flags */
1737 	unsigned int data_io_flag;
1738 	unsigned int node_io_flag;
1739 
1740 	/* For sysfs support */
1741 	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
1742 	struct completion s_kobj_unregister;
1743 
1744 	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
1745 	struct completion s_stat_kobj_unregister;
1746 
1747 	struct kobject s_feature_list_kobj;		/* /sys/fs/f2fs/<devname>/feature_list */
1748 	struct completion s_feature_list_kobj_unregister;
1749 
1750 	/* For shrinker support */
1751 	struct list_head s_list;
1752 	struct mutex umount_mutex;
1753 	unsigned int shrinker_run_no;
1754 
1755 	/* For multi devices */
1756 	int s_ndevs;				/* number of devices */
1757 	struct f2fs_dev_info *devs;		/* for device list */
1758 	unsigned int dirty_device;		/* for checkpoint data flush */
1759 	spinlock_t dev_lock;			/* protect dirty_device */
1760 	bool aligned_blksize;			/* all devices have the same logical blksize */
1761 
1762 	/* For write statistics */
1763 	u64 sectors_written_start;
1764 	u64 kbytes_written;
1765 
1766 	/* Reference to checksum algorithm driver via cryptoapi */
1767 	struct crypto_shash *s_chksum_driver;
1768 
1769 	/* Precomputed FS UUID checksum for seeding other checksums */
1770 	__u32 s_chksum_seed;
1771 
1772 	struct workqueue_struct *post_read_wq;	/* post read workqueue */
1773 
1774 	/*
1775 	 * If we are in irq context, let's update error information into
1776 	 * on-disk superblock in the work.
1777 	 */
1778 	struct work_struct s_error_work;
1779 	unsigned char errors[MAX_F2FS_ERRORS];		/* error flags */
1780 	unsigned char stop_reason[MAX_STOP_REASON];	/* stop reason */
1781 	spinlock_t error_lock;			/* protect errors/stop_reason array */
1782 	bool error_dirty;			/* superblock error info is dirty */
1783 
1784 	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
1785 	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
1786 
1787 	/* For reclaimed segs statistics per each GC mode */
1788 	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
1789 	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */
1790 
1791 	unsigned long seq_file_ra_mul;		/* multiplier for ra_pages of seq. files in fadvise */
1792 
1793 	int max_fragment_chunk;			/* max chunk size for block fragmentation mode */
1794 	int max_fragment_hole;			/* max hole size for block fragmentation mode */
1795 
1796 	/* For atomic write statistics */
1797 	atomic64_t current_atomic_write;
1798 	s64 peak_atomic_write;
1799 	u64 committed_atomic_block;
1800 	u64 revoked_atomic_block;
1801 
1802 #ifdef CONFIG_F2FS_FS_COMPRESSION
1803 	struct kmem_cache *page_array_slab;	/* page array entry */
1804 	unsigned int page_array_slab_size;	/* default page array slab size */
1805 
1806 	/* For runtime compression statistics */
1807 	u64 compr_written_block;
1808 	u64 compr_saved_block;
1809 	u32 compr_new_inode;
1810 
1811 	/* For compressed block cache */
1812 	struct inode *compress_inode;		/* cache compressed blocks */
1813 	unsigned int compress_percent;		/* cache page percentage */
1814 	unsigned int compress_watermark;	/* cache page watermark */
1815 	atomic_t compress_page_hit;		/* cache hit count */
1816 #endif
1817 
1818 #ifdef CONFIG_F2FS_IOSTAT
1819 	/* For app/fs IO statistics */
1820 	spinlock_t iostat_lock;
1821 	unsigned long long iostat_count[NR_IO_TYPE];
1822 	unsigned long long iostat_bytes[NR_IO_TYPE];
1823 	unsigned long long prev_iostat_bytes[NR_IO_TYPE];
1824 	bool iostat_enable;
1825 	unsigned long iostat_next_period;
1826 	unsigned int iostat_period_ms;
1827 
1828 	/* For io latency related statistics info in one iostat period */
1829 	spinlock_t iostat_lat_lock;
1830 	struct iostat_lat_info *iostat_io_lat;
1831 #endif
1832 };
1833 
1834 /* Definitions to access f2fs_sb_info */
1835 #define SEGS_TO_BLKS(sbi, segs)					\
1836 		((segs) << (sbi)->log_blocks_per_seg)
1837 #define BLKS_TO_SEGS(sbi, blks)					\
1838 		((blks) >> (sbi)->log_blocks_per_seg)
1839 
1840 #define BLKS_PER_SEG(sbi)	((sbi)->blocks_per_seg)
1841 #define BLKS_PER_SEC(sbi)	(SEGS_TO_BLKS(sbi, (sbi)->segs_per_sec))
1842 #define SEGS_PER_SEC(sbi)	((sbi)->segs_per_sec)
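/*
 * These are plain shift conversions; e.g. assuming the common 2MB segment
 * (512 blocks of 4KB, so log_blocks_per_seg == 9):
 *
 *	SEGS_TO_BLKS(sbi, 4)    == 4 << 9    == 2048 blocks
 *	BLKS_TO_SEGS(sbi, 2048) == 2048 >> 9 == 4 segments
 */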
1843 
1844 __printf(3, 4)
1845 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...);
1846 
1847 #define f2fs_err(sbi, fmt, ...)						\
1848 	f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__)
1849 #define f2fs_warn(sbi, fmt, ...)					\
1850 	f2fs_printk(sbi, false, KERN_WARNING fmt, ##__VA_ARGS__)
1851 #define f2fs_notice(sbi, fmt, ...)					\
1852 	f2fs_printk(sbi, false, KERN_NOTICE fmt, ##__VA_ARGS__)
1853 #define f2fs_info(sbi, fmt, ...)					\
1854 	f2fs_printk(sbi, false, KERN_INFO fmt, ##__VA_ARGS__)
1855 #define f2fs_debug(sbi, fmt, ...)					\
1856 	f2fs_printk(sbi, false, KERN_DEBUG fmt, ##__VA_ARGS__)
1857 
1858 #define f2fs_err_ratelimited(sbi, fmt, ...)				\
1859 	f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__)
1860 #define f2fs_warn_ratelimited(sbi, fmt, ...)				\
1861 	f2fs_printk(sbi, true, KERN_WARNING fmt, ##__VA_ARGS__)
1862 #define f2fs_info_ratelimited(sbi, fmt, ...)				\
1863 	f2fs_printk(sbi, true, KERN_INFO fmt, ##__VA_ARGS__)
1864 
1865 #ifdef CONFIG_F2FS_FAULT_INJECTION
1866 #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__,	\
1867 									__builtin_return_address(0))
1868 static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
1869 				const char *func, const char *parent_func)
1870 {
1871 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1872 
1873 	if (!ffi->inject_rate)
1874 		return false;
1875 
1876 	if (!IS_FAULT_SET(ffi, type))
1877 		return false;
1878 
1879 	atomic_inc(&ffi->inject_ops);
1880 	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1881 		atomic_set(&ffi->inject_ops, 0);
1882 		f2fs_info_ratelimited(sbi, "inject %s in %s of %pS",
1883 				f2fs_fault_name[type], func, parent_func);
1884 		return true;
1885 	}
1886 	return false;
1887 }
1888 #else
1889 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1890 {
1891 	return false;
1892 }
1893 #endif
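/*
 * Typical call pattern (illustrative): allocation paths bail out early when
 * a fault fires, e.g.
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC))
 *		return NULL;
 *
 * which compiles to a constant false when CONFIG_F2FS_FAULT_INJECTION is off.
 */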
1894 
1895 /*
1896  * Test if the mounted volume is a multi-device volume.
1897  *   - For a single regular disk volume, sbi->s_ndevs is 0.
1898  *   - For a single zoned disk volume, sbi->s_ndevs is 1.
1899  *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1900  */
1901 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1902 {
1903 	return sbi->s_ndevs > 1;
1904 }
1905 
1906 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1907 {
1908 	unsigned long now = jiffies;
1909 
1910 	sbi->last_time[type] = now;
1911 
1912 	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1913 	if (type == REQ_TIME) {
1914 		sbi->last_time[DISCARD_TIME] = now;
1915 		sbi->last_time[GC_TIME] = now;
1916 	}
1917 }
1918 
1919 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1920 {
1921 	unsigned long interval = sbi->interval_time[type] * HZ;
1922 
1923 	return time_after(jiffies, sbi->last_time[type] + interval);
1924 }
1925 
1926 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1927 						int type)
1928 {
1929 	unsigned long interval = sbi->interval_time[type] * HZ;
1930 	unsigned int wait_ms = 0;
1931 	long delta;
1932 
1933 	delta = (sbi->last_time[type] + interval) - jiffies;
1934 	if (delta > 0)
1935 		wait_ms = jiffies_to_msecs(delta);
1936 
1937 	return wait_ms;
1938 }
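/*
 * A background thread can use the above to sleep just long enough for the
 * configured interval to elapse; a hedged sketch:
 *
 *	unsigned int wait_ms = f2fs_time_to_wait(sbi, GC_TIME);
 *
 *	if (wait_ms)
 *		msleep(wait_ms);
 */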
1939 
1940 /*
1941  * Inline functions
1942  */
1943 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1944 			      const void *address, unsigned int length)
1945 {
1946 	struct {
1947 		struct shash_desc shash;
1948 		char ctx[4];
1949 	} desc;
1950 	int err;
1951 
1952 	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1953 
1954 	desc.shash.tfm = sbi->s_chksum_driver;
1955 	*(u32 *)desc.ctx = crc;
1956 
1957 	err = crypto_shash_update(&desc.shash, address, length);
1958 	BUG_ON(err);
1959 
1960 	return *(u32 *)desc.ctx;
1961 }
1962 
1963 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1964 			   unsigned int length)
1965 {
1966 	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1967 }
1968 
1969 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1970 				  void *buf, size_t buf_size)
1971 {
1972 	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1973 }
1974 
1975 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1976 			      const void *address, unsigned int length)
1977 {
1978 	return __f2fs_crc32(sbi, crc, address, length);
1979 }
1980 
1981 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1982 {
1983 	return container_of(inode, struct f2fs_inode_info, vfs_inode);
1984 }
1985 
1986 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1987 {
1988 	return sb->s_fs_info;
1989 }
1990 
1991 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1992 {
1993 	return F2FS_SB(inode->i_sb);
1994 }
1995 
1996 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1997 {
1998 	return F2FS_I_SB(mapping->host);
1999 }
2000 
2001 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
2002 {
2003 	return F2FS_M_SB(page_file_mapping(page));
2004 }
2005 
2006 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
2007 {
2008 	return (struct f2fs_super_block *)(sbi->raw_super);
2009 }
2010 
2011 static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio,
2012 								pgoff_t index)
2013 {
2014 	pgoff_t idx_in_folio = index % (1 << folio_order(folio));
2015 
2016 	return (struct f2fs_super_block *)
2017 		(page_address(folio_page(folio, idx_in_folio)) +
2018 						F2FS_SUPER_OFFSET);
2019 }
2020 
2021 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
2022 {
2023 	return (struct f2fs_checkpoint *)(sbi->ckpt);
2024 }
2025 
2026 static inline struct f2fs_node *F2FS_NODE(struct page *page)
2027 {
2028 	return (struct f2fs_node *)page_address(page);
2029 }
2030 
2031 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
2032 {
2033 	return &((struct f2fs_node *)page_address(page))->i;
2034 }
2035 
2036 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
2037 {
2038 	return (struct f2fs_nm_info *)(sbi->nm_info);
2039 }
2040 
2041 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
2042 {
2043 	return (struct f2fs_sm_info *)(sbi->sm_info);
2044 }
2045 
2046 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
2047 {
2048 	return (struct sit_info *)(SM_I(sbi)->sit_info);
2049 }
2050 
2051 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
2052 {
2053 	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
2054 }
2055 
2056 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
2057 {
2058 	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
2059 }
2060 
2061 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
2062 {
2063 	return sbi->meta_inode->i_mapping;
2064 }
2065 
2066 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
2067 {
2068 	return sbi->node_inode->i_mapping;
2069 }
2070 
2071 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
2072 {
2073 	return test_bit(type, &sbi->s_flag);
2074 }
2075 
2076 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2077 {
2078 	set_bit(type, &sbi->s_flag);
2079 }
2080 
2081 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2082 {
2083 	clear_bit(type, &sbi->s_flag);
2084 }
2085 
2086 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
2087 {
2088 	return le64_to_cpu(cp->checkpoint_ver);
2089 }
2090 
2091 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
2092 {
2093 	if (type < F2FS_MAX_QUOTAS)
2094 		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
2095 	return 0;
2096 }
2097 
2098 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
2099 {
2100 	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
2101 	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
2102 }
2103 
2104 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2105 {
2106 	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2107 
2108 	return ckpt_flags & f;
2109 }
2110 
2111 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2112 {
2113 	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
2114 }
2115 
2116 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2117 {
2118 	unsigned int ckpt_flags;
2119 
2120 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2121 	ckpt_flags |= f;
2122 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2123 }
2124 
2125 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2126 {
2127 	unsigned long flags;
2128 
2129 	spin_lock_irqsave(&sbi->cp_lock, flags);
2130 	__set_ckpt_flags(F2FS_CKPT(sbi), f);
2131 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2132 }
2133 
2134 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2135 {
2136 	unsigned int ckpt_flags;
2137 
2138 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2139 	ckpt_flags &= (~f);
2140 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2141 }
2142 
2143 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2144 {
2145 	unsigned long flags;
2146 
2147 	spin_lock_irqsave(&sbi->cp_lock, flags);
2148 	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
2149 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2150 }
2151 
2152 #define init_f2fs_rwsem(sem)					\
2153 do {								\
2154 	static struct lock_class_key __key;			\
2155 								\
2156 	__init_f2fs_rwsem((sem), #sem, &__key);			\
2157 } while (0)
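/*
 * The per-call-site static lock_class_key above gives each init_f2fs_rwsem()
 * user its own lockdep class, so lockdep can tell the many f2fs_rwsem
 * instances (cp_rwsem, gc_lock, ...) apart.
 */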
2158 
2159 static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
2160 		const char *sem_name, struct lock_class_key *key)
2161 {
2162 	__init_rwsem(&sem->internal_rwsem, sem_name, key);
2163 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2164 	init_waitqueue_head(&sem->read_waiters);
2165 #endif
2166 }
2167 
2168 static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
2169 {
2170 	return rwsem_is_locked(&sem->internal_rwsem);
2171 }
2172 
2173 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
2174 {
2175 	return rwsem_is_contended(&sem->internal_rwsem);
2176 }
2177 
2178 static inline void f2fs_down_read(struct f2fs_rwsem *sem)
2179 {
2180 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2181 	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
2182 #else
2183 	down_read(&sem->internal_rwsem);
2184 #endif
2185 }
2186 
2187 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
2188 {
2189 	return down_read_trylock(&sem->internal_rwsem);
2190 }
2191 
2192 static inline void f2fs_up_read(struct f2fs_rwsem *sem)
2193 {
2194 	up_read(&sem->internal_rwsem);
2195 }
2196 
2197 static inline void f2fs_down_write(struct f2fs_rwsem *sem)
2198 {
2199 	down_write(&sem->internal_rwsem);
2200 }
2201 
2202 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2203 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
2204 {
2205 	down_read_nested(&sem->internal_rwsem, subclass);
2206 }
2207 
2208 static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
2209 {
2210 	down_write_nested(&sem->internal_rwsem, subclass);
2211 }
2212 #else
2213 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
2214 #define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
2215 #endif
2216 
2217 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
2218 {
2219 	return down_write_trylock(&sem->internal_rwsem);
2220 }
2221 
2222 static inline void f2fs_up_write(struct f2fs_rwsem *sem)
2223 {
2224 	up_write(&sem->internal_rwsem);
2225 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2226 	wake_up_all(&sem->read_waiters);
2227 #endif
2228 }
2229 
2230 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
2231 {
2232 	f2fs_down_read(&sbi->cp_rwsem);
2233 }
2234 
2235 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
2236 {
2237 	if (time_to_inject(sbi, FAULT_LOCK_OP))
2238 		return 0;
2239 	return f2fs_down_read_trylock(&sbi->cp_rwsem);
2240 }
2241 
2242 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
2243 {
2244 	f2fs_up_read(&sbi->cp_rwsem);
2245 }
2246 
2247 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
2248 {
2249 	f2fs_down_write(&sbi->cp_rwsem);
2250 }
2251 
2252 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
2253 {
2254 	f2fs_up_write(&sbi->cp_rwsem);
2255 }
2256 
2257 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
2258 {
2259 	int reason = CP_SYNC;
2260 
2261 	if (test_opt(sbi, FASTBOOT))
2262 		reason = CP_FASTBOOT;
2263 	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2264 		reason = CP_UMOUNT;
2265 	return reason;
2266 }
2267 
2268 static inline bool __remain_node_summaries(int reason)
2269 {
2270 	return (reason & (CP_UMOUNT | CP_FASTBOOT));
2271 }
2272 
2273 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
2274 {
2275 	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
2276 			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
2277 }
2278 
2279 /*
2280  * Check whether the inode has blocks or not
2281  */
2282 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
2283 {
2284 	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2285 
2286 	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
2287 }
2288 
2289 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2290 {
2291 	return ofs == XATTR_NODE_OFFSET;
2292 }
2293 
2294 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2295 					struct inode *inode, bool cap)
2296 {
2297 	if (!inode)
2298 		return true;
2299 	if (!test_opt(sbi, RESERVE_ROOT))
2300 		return false;
2301 	if (IS_NOQUOTA(inode))
2302 		return true;
2303 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2304 		return true;
2305 	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2306 					in_group_p(F2FS_OPTION(sbi).s_resgid))
2307 		return true;
2308 	if (cap && capable(CAP_SYS_RESOURCE))
2309 		return true;
2310 	return false;
2311 }
2312 
2313 static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
2314 						struct inode *inode, bool cap)
2315 {
2316 	block_t avail_user_block_count;
2317 
2318 	avail_user_block_count = sbi->user_block_count -
2319 					sbi->current_reserved_blocks;
2320 
2321 	if (!__allow_reserved_blocks(sbi, inode, cap))
2322 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2323 
2324 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2325 		if (avail_user_block_count > sbi->unusable_block_count)
2326 			avail_user_block_count -= sbi->unusable_block_count;
2327 		else
2328 			avail_user_block_count = 0;
2329 	}
2330 
2331 	return avail_user_block_count;
2332 }
2333 
2334 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2335 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2336 				 struct inode *inode, blkcnt_t *count, bool partial)
2337 {
2338 	long long diff = 0, release = 0;
2339 	block_t avail_user_block_count;
2340 	int ret;
2341 
2342 	ret = dquot_reserve_block(inode, *count);
2343 	if (ret)
2344 		return ret;
2345 
2346 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2347 		release = *count;
2348 		goto release_quota;
2349 	}
2350 
2351 	/*
2352 	 * let's increase this prior to the actual block count change so that
2353 	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
2354 	 */
2355 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2356 
2357 	spin_lock(&sbi->stat_lock);
2358 
2359 	avail_user_block_count = get_available_block_count(sbi, inode, true);
2360 	diff = (long long)sbi->total_valid_block_count + *count -
2361 						avail_user_block_count;
2362 	if (unlikely(diff > 0)) {
2363 		if (!partial) {
2364 			spin_unlock(&sbi->stat_lock);
2365 			release = *count;
2366 			goto enospc;
2367 		}
2368 		if (diff > *count)
2369 			diff = *count;
2370 		*count -= diff;
2371 		release = diff;
2372 		if (!*count) {
2373 			spin_unlock(&sbi->stat_lock);
2374 			goto enospc;
2375 		}
2376 	}
2377 	sbi->total_valid_block_count += (block_t)(*count);
2378 
2379 	spin_unlock(&sbi->stat_lock);
2380 
2381 	if (unlikely(release)) {
2382 		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2383 		dquot_release_reservation_block(inode, release);
2384 	}
2385 	f2fs_i_blocks_write(inode, *count, true, true);
2386 	return 0;
2387 
2388 enospc:
2389 	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2390 release_quota:
2391 	dquot_release_reservation_block(inode, release);
2392 	return -ENOSPC;
2393 }
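/*
 * A hedged usage sketch (names illustrative): callers reserve a run of
 * blocks up front and may accept a trimmed reservation:
 *
 *	blkcnt_t count = needed;
 *	int err = inc_valid_block_count(sbi, inode, &count, true);
 *
 *	if (err)
 *		return err;	// quota failure or -ENOSPC
 *	// with partial == true, count may now be less than needed
 */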
2394 
2395 #define PAGE_PRIVATE_GET_FUNC(name, flagname) \
2396 static inline bool page_private_##name(struct page *page) \
2397 { \
2398 	return PagePrivate(page) && \
2399 		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
2400 		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2401 }
2402 
2403 #define PAGE_PRIVATE_SET_FUNC(name, flagname) \
2404 static inline void set_page_private_##name(struct page *page) \
2405 { \
2406 	if (!PagePrivate(page)) \
2407 		attach_page_private(page, (void *)0); \
2408 	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
2409 	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2410 }
2411 
2412 #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
2413 static inline void clear_page_private_##name(struct page *page) \
2414 { \
2415 	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2416 	if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
2417 		detach_page_private(page); \
2418 }
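/*
 * Each invocation below stamps out a tiny helper; for example,
 * PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE) expands to
 *
 *	static inline bool page_private_inline(struct page *page)
 *
 * which tests PAGE_PRIVATE_NOT_POINTER plus PAGE_PRIVATE_INLINE_INODE on a
 * PagePrivate page.
 */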
2419 
2420 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
2421 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
2422 PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
2423 PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
2424 
2425 PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
2426 PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
2427 PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
2428 PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
2429 
2430 PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
2431 PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
2432 PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
2433 PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
2434 
2435 static inline unsigned long get_page_private_data(struct page *page)
2436 {
2437 	unsigned long data = page_private(page);
2438 
2439 	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
2440 		return 0;
2441 	return data >> PAGE_PRIVATE_MAX;
2442 }
2443 
2444 static inline void set_page_private_data(struct page *page, unsigned long data)
2445 {
2446 	if (!PagePrivate(page))
2447 		attach_page_private(page, (void *)0);
2448 	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
2449 	page_private(page) |= data << PAGE_PRIVATE_MAX;
2450 }
2451 
2452 static inline void clear_page_private_data(struct page *page)
2453 {
2454 	page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
2455 	if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER))
2456 		detach_page_private(page);
2457 }
2458 
2459 static inline void clear_page_private_all(struct page *page)
2460 {
2461 	clear_page_private_data(page);
2462 	clear_page_private_reference(page);
2463 	clear_page_private_gcing(page);
2464 	clear_page_private_inline(page);
2465 	clear_page_private_atomic(page);
2466 
2467 	f2fs_bug_on(F2FS_P_SB(page), page_private(page));
2468 }
2469 
2470 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2471 						struct inode *inode,
2472 						block_t count)
2473 {
2474 	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2475 
2476 	spin_lock(&sbi->stat_lock);
2477 	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2478 	sbi->total_valid_block_count -= (block_t)count;
2479 	if (sbi->reserved_blocks &&
2480 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2481 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2482 					sbi->current_reserved_blocks + count);
2483 	spin_unlock(&sbi->stat_lock);
2484 	if (unlikely(inode->i_blocks < sectors)) {
2485 		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2486 			  inode->i_ino,
2487 			  (unsigned long long)inode->i_blocks,
2488 			  (unsigned long long)sectors);
2489 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2490 		return;
2491 	}
2492 	f2fs_i_blocks_write(inode, count, false, true);
2493 }
2494 
2495 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2496 {
2497 	atomic_inc(&sbi->nr_pages[count_type]);
2498 
2499 	if (count_type == F2FS_DIRTY_DENTS ||
2500 			count_type == F2FS_DIRTY_NODES ||
2501 			count_type == F2FS_DIRTY_META ||
2502 			count_type == F2FS_DIRTY_QDATA ||
2503 			count_type == F2FS_DIRTY_IMETA)
2504 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2505 }
2506 
2507 static inline void inode_inc_dirty_pages(struct inode *inode)
2508 {
2509 	atomic_inc(&F2FS_I(inode)->dirty_pages);
2510 	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2511 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2512 	if (IS_NOQUOTA(inode))
2513 		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2514 }
2515 
2516 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2517 {
2518 	atomic_dec(&sbi->nr_pages[count_type]);
2519 }
2520 
2521 static inline void inode_dec_dirty_pages(struct inode *inode)
2522 {
2523 	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2524 			!S_ISLNK(inode->i_mode))
2525 		return;
2526 
2527 	atomic_dec(&F2FS_I(inode)->dirty_pages);
2528 	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2529 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2530 	if (IS_NOQUOTA(inode))
2531 		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2532 }
2533 
2534 static inline void inc_atomic_write_cnt(struct inode *inode)
2535 {
2536 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2537 	struct f2fs_inode_info *fi = F2FS_I(inode);
2538 	u64 current_write;
2539 
2540 	fi->atomic_write_cnt++;
2541 	atomic64_inc(&sbi->current_atomic_write);
2542 	current_write = atomic64_read(&sbi->current_atomic_write);
2543 	if (current_write > sbi->peak_atomic_write)
2544 		sbi->peak_atomic_write = current_write;
2545 }
2546 
2547 static inline void release_atomic_write_cnt(struct inode *inode)
2548 {
2549 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2550 	struct f2fs_inode_info *fi = F2FS_I(inode);
2551 
2552 	atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write);
2553 	fi->atomic_write_cnt = 0;
2554 }
2555 
2556 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2557 {
2558 	return atomic_read(&sbi->nr_pages[count_type]);
2559 }
2560 
2561 static inline int get_dirty_pages(struct inode *inode)
2562 {
2563 	return atomic_read(&F2FS_I(inode)->dirty_pages);
2564 }
2565 
2566 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2567 {
2568 	return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1,
2569 							BLKS_PER_SEC(sbi));
2570 }
2571 
2572 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2573 {
2574 	return sbi->total_valid_block_count;
2575 }
2576 
2577 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2578 {
2579 	return sbi->discard_blks;
2580 }
2581 
2582 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2583 {
2584 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2585 
2586 	/* return NAT or SIT bitmap */
2587 	if (flag == NAT_BITMAP)
2588 		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2589 	else if (flag == SIT_BITMAP)
2590 		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2591 
2592 	return 0;
2593 }
2594 
2595 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2596 {
2597 	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2598 }
2599 
2600 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2601 {
2602 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2603 	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2604 	int offset;
2605 
2606 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2607 		offset = (flag == SIT_BITMAP) ?
2608 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2609 		/*
2610 		 * if the large_nat_bitmap feature is enabled, leave room for the
2611 		 * checksum that protects all nat/sit bitmaps.
2612 		 */
2613 		return tmp_ptr + offset + sizeof(__le32);
2614 	}
2615 
2616 	if (__cp_payload(sbi) > 0) {
2617 		if (flag == NAT_BITMAP)
2618 			return tmp_ptr;
2619 		else
2620 			return (unsigned char *)ckpt + F2FS_BLKSIZE;
2621 	} else {
2622 		offset = (flag == NAT_BITMAP) ?
2623 			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2624 		return tmp_ptr + offset;
2625 	}
2626 }
2627 
2628 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2629 {
2630 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2631 
2632 	if (sbi->cur_cp_pack == 2)
2633 		start_addr += BLKS_PER_SEG(sbi);
2634 	return start_addr;
2635 }
2636 
2637 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2638 {
2639 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2640 
2641 	if (sbi->cur_cp_pack == 1)
2642 		start_addr += BLKS_PER_SEG(sbi);
2643 	return start_addr;
2644 }
2645 
2646 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2647 {
2648 	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2649 }
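/*
 * Checkpoint packs ping-pong between two segments starting at cp_blkaddr:
 * pack #1 occupies the first segment and pack #2 the next one. For example,
 * with cur_cp_pack == 2, __start_cp_addr() returns
 * cp_blkaddr + BLKS_PER_SEG(sbi), while __start_cp_next_addr() points back
 * at pack #1.
 */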
2650 
2651 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2652 {
2653 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2654 }
2655 
2656 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
2657 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2658 					struct inode *inode, bool is_inode)
2659 {
2660 	block_t	valid_block_count;
2661 	unsigned int valid_node_count;
2662 	unsigned int avail_user_block_count;
2663 	int err;
2664 
2665 	if (is_inode) {
2666 		if (inode) {
2667 			err = dquot_alloc_inode(inode);
2668 			if (err)
2669 				return err;
2670 		}
2671 	} else {
2672 		err = dquot_reserve_block(inode, 1);
2673 		if (err)
2674 			return err;
2675 	}
2676 
2677 	if (time_to_inject(sbi, FAULT_BLOCK))
2678 		goto enospc;
2679 
2680 	spin_lock(&sbi->stat_lock);
2681 
2682 	valid_block_count = sbi->total_valid_block_count + 1;
2683 	avail_user_block_count = get_available_block_count(sbi, inode, false);
2684 
2685 	if (unlikely(valid_block_count > avail_user_block_count)) {
2686 		spin_unlock(&sbi->stat_lock);
2687 		goto enospc;
2688 	}
2689 
2690 	valid_node_count = sbi->total_valid_node_count + 1;
2691 	if (unlikely(valid_node_count > sbi->total_node_count)) {
2692 		spin_unlock(&sbi->stat_lock);
2693 		goto enospc;
2694 	}
2695 
2696 	sbi->total_valid_node_count++;
2697 	sbi->total_valid_block_count++;
2698 	spin_unlock(&sbi->stat_lock);
2699 
2700 	if (inode) {
2701 		if (is_inode)
2702 			f2fs_mark_inode_dirty_sync(inode, true);
2703 		else
2704 			f2fs_i_blocks_write(inode, 1, true, true);
2705 	}
2706 
2707 	percpu_counter_inc(&sbi->alloc_valid_block_count);
2708 	return 0;
2709 
2710 enospc:
2711 	if (is_inode) {
2712 		if (inode)
2713 			dquot_free_inode(inode);
2714 	} else {
2715 		dquot_release_reservation_block(inode, 1);
2716 	}
2717 	return -ENOSPC;
2718 }
2719 
2720 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2721 					struct inode *inode, bool is_inode)
2722 {
2723 	spin_lock(&sbi->stat_lock);
2724 
2725 	if (unlikely(!sbi->total_valid_block_count ||
2726 			!sbi->total_valid_node_count)) {
2727 		f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
2728 			  sbi->total_valid_block_count,
2729 			  sbi->total_valid_node_count);
2730 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2731 	} else {
2732 		sbi->total_valid_block_count--;
2733 		sbi->total_valid_node_count--;
2734 	}
2735 
2736 	if (sbi->reserved_blocks &&
2737 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2738 		sbi->current_reserved_blocks++;
2739 
2740 	spin_unlock(&sbi->stat_lock);
2741 
2742 	if (is_inode) {
2743 		dquot_free_inode(inode);
2744 	} else {
2745 		if (unlikely(inode->i_blocks == 0)) {
2746 			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2747 				  inode->i_ino,
2748 				  (unsigned long long)inode->i_blocks);
2749 			set_sbi_flag(sbi, SBI_NEED_FSCK);
2750 			return;
2751 		}
2752 		f2fs_i_blocks_write(inode, 1, false, true);
2753 	}
2754 }
2755 
2756 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2757 {
2758 	return sbi->total_valid_node_count;
2759 }
2760 
2761 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2762 {
2763 	percpu_counter_inc(&sbi->total_valid_inode_count);
2764 }
2765 
2766 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2767 {
2768 	percpu_counter_dec(&sbi->total_valid_inode_count);
2769 }
2770 
2771 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2772 {
2773 	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2774 }
2775 
2776 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2777 						pgoff_t index, bool for_write)
2778 {
2779 	struct page *page;
2780 	unsigned int flags;
2781 
2782 	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2783 		if (!for_write)
2784 			page = find_get_page_flags(mapping, index,
2785 							FGP_LOCK | FGP_ACCESSED);
2786 		else
2787 			page = find_lock_page(mapping, index);
2788 		if (page)
2789 			return page;
2790 
2791 		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
2792 			return NULL;
2793 	}
2794 
2795 	if (!for_write)
2796 		return grab_cache_page(mapping, index);
2797 
2798 	flags = memalloc_nofs_save();
2799 	page = grab_cache_page_write_begin(mapping, index);
2800 	memalloc_nofs_restore(flags);
2801 
2802 	return page;
2803 }
2804 
2805 static inline struct page *f2fs_pagecache_get_page(
2806 				struct address_space *mapping, pgoff_t index,
2807 				fgf_t fgp_flags, gfp_t gfp_mask)
2808 {
2809 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
2810 		return NULL;
2811 
2812 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2813 }
2814 
2815 static inline void f2fs_put_page(struct page *page, int unlock)
2816 {
2817 	if (!page)
2818 		return;
2819 
2820 	if (unlock) {
2821 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2822 		unlock_page(page);
2823 	}
2824 	put_page(page);
2825 }
2826 
2827 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2828 {
2829 	if (dn->node_page)
2830 		f2fs_put_page(dn->node_page, 1);
2831 	if (dn->inode_page && dn->node_page != dn->inode_page)
2832 		f2fs_put_page(dn->inode_page, 0);
2833 	dn->node_page = NULL;
2834 	dn->inode_page = NULL;
2835 }
2836 
2837 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2838 					size_t size)
2839 {
2840 	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2841 }
2842 
2843 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
2844 						gfp_t flags)
2845 {
2846 	void *entry;
2847 
2848 	entry = kmem_cache_alloc(cachep, flags);
2849 	if (!entry)
2850 		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2851 	return entry;
2852 }
2853 
2854 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2855 			gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
2856 {
2857 	if (nofail)
2858 		return f2fs_kmem_cache_alloc_nofail(cachep, flags);
2859 
2860 	if (time_to_inject(sbi, FAULT_SLAB_ALLOC))
2861 		return NULL;
2862 
2863 	return kmem_cache_alloc(cachep, flags);
2864 }
2865 
2866 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2867 {
2868 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2869 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2870 		get_pages(sbi, F2FS_WB_CP_DATA) ||
2871 		get_pages(sbi, F2FS_DIO_READ) ||
2872 		get_pages(sbi, F2FS_DIO_WRITE))
2873 		return true;
2874 
2875 	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2876 			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2877 		return true;
2878 
2879 	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2880 			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2881 		return true;
2882 	return false;
2883 }
2884 
2885 static inline bool is_inflight_read_io(struct f2fs_sb_info *sbi)
2886 {
2887 	return get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_DIO_READ);
2888 }
2889 
2890 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2891 {
2892 	bool zoned_gc = (type == GC_TIME &&
2893 			F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_BLKZONED));
2894 
2895 	if (sbi->gc_mode == GC_URGENT_HIGH)
2896 		return true;
2897 
2898 	if (zoned_gc) {
2899 		if (is_inflight_read_io(sbi))
2900 			return false;
2901 	} else {
2902 		if (is_inflight_io(sbi, type))
2903 			return false;
2904 	}
2905 
2906 	if (sbi->gc_mode == GC_URGENT_MID)
2907 		return true;
2908 
2909 	if (sbi->gc_mode == GC_URGENT_LOW &&
2910 			(type == DISCARD_TIME || type == GC_TIME))
2911 		return true;
2912 
2913 	if (zoned_gc)
2914 		return true;
2915 
2916 	return f2fs_time_over(sbi, type);
2917 }
2918 
2919 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2920 				unsigned long index, void *item)
2921 {
2922 	while (radix_tree_insert(root, index, item))
2923 		cond_resched();
2924 }
2925 
2926 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
2927 
2928 static inline bool IS_INODE(struct page *page)
2929 {
2930 	struct f2fs_node *p = F2FS_NODE(page);
2931 
2932 	return RAW_IS_INODE(p);
2933 }
2934 
2935 static inline int offset_in_addr(struct f2fs_inode *i)
2936 {
2937 	return (i->i_inline & F2FS_EXTRA_ATTR) ?
2938 			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2939 }
2940 
2941 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2942 {
2943 	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2944 }
2945 
2946 static inline int f2fs_has_extra_attr(struct inode *inode);
2947 static inline unsigned int get_dnode_base(struct inode *inode,
2948 					struct page *node_page)
2949 {
2950 	if (!IS_INODE(node_page))
2951 		return 0;
2952 
2953 	return inode ? get_extra_isize(inode) :
2954 			offset_in_addr(&F2FS_NODE(node_page)->i);
2955 }
2956 
2957 static inline __le32 *get_dnode_addr(struct inode *inode,
2958 					struct page *node_page)
2959 {
2960 	return blkaddr_in_node(F2FS_NODE(node_page)) +
2961 			get_dnode_base(inode, node_page);
2962 }
2963 
2964 static inline block_t data_blkaddr(struct inode *inode,
2965 			struct page *node_page, unsigned int offset)
2966 {
2967 	return le32_to_cpu(*(get_dnode_addr(inode, node_page) + offset));
2968 }
2969 
2970 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2971 {
2972 	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2973 }
2974 
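/*
 * Big-endian style bitmap helpers: bit 0 is the most-significant bit of
 * byte 0, matching f2fs's on-disk bitmap layout. For example, nr = 9
 * selects mask 0x40 in addr[1].
 */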
2975 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2976 {
2977 	int mask;
2978 
2979 	addr += (nr >> 3);
2980 	mask = BIT(7 - (nr & 0x07));
2981 	return mask & *addr;
2982 }
2983 
2984 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2985 {
2986 	int mask;
2987 
2988 	addr += (nr >> 3);
2989 	mask = BIT(7 - (nr & 0x07));
2990 	*addr |= mask;
2991 }
2992 
2993 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2994 {
2995 	int mask;
2996 
2997 	addr += (nr >> 3);
2998 	mask = BIT(7 - (nr & 0x07));
2999 	*addr &= ~mask;
3000 }
3001 
3002 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
3003 {
3004 	int mask;
3005 	int ret;
3006 
3007 	addr += (nr >> 3);
3008 	mask = BIT(7 - (nr & 0x07));
3009 	ret = mask & *addr;
3010 	*addr |= mask;
3011 	return ret;
3012 }
3013 
3014 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
3015 {
3016 	int mask;
3017 	int ret;
3018 
3019 	addr += (nr >> 3);
3020 	mask = BIT(7 - (nr & 0x07));
3021 	ret = mask & *addr;
3022 	*addr &= ~mask;
3023 	return ret;
3024 }
3025 
3026 static inline void f2fs_change_bit(unsigned int nr, char *addr)
3027 {
3028 	int mask;
3029 
3030 	addr += (nr >> 3);
3031 	mask = BIT(7 - (nr & 0x07));
3032 	*addr ^= mask;
3033 }
3034 
3035 /*
3036  * On-disk inode flags (f2fs_inode::i_flags)
3037  */
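/* These values match the generic FS_*_FL flags, hence the sparse numbering. */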
3038 #define F2FS_COMPR_FL			0x00000004 /* Compress file */
3039 #define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
3040 #define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
3041 #define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
3042 #define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
3043 #define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
3044 #define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
3045 #define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
3046 #define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
3047 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
3048 #define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */
3049 
3050 #define F2FS_QUOTA_DEFAULT_FL		(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL)
3051 
3052 /* Flags that should be inherited by new inodes from their parent. */
3053 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
3054 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
3055 			   F2FS_CASEFOLD_FL)
3056 
3057 /* Flags that are appropriate for regular files (all but dir-specific ones). */
3058 #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
3059 				F2FS_CASEFOLD_FL))
3060 
3061 /* Flags appropriate for inodes that are neither directories nor regular files. */
3062 #define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
3063 
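/*
 * Drop the flags that do not apply to the given file type: directories
 * keep everything, regular files lose the directory-only flags, and other
 * inode types keep only NODUMP and NOATIME.
 */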
3064 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
3065 {
3066 	if (S_ISDIR(mode))
3067 		return flags;
3068 	else if (S_ISREG(mode))
3069 		return flags & F2FS_REG_FLMASK;
3070 	else
3071 		return flags & F2FS_OTHER_FLMASK;
3072 }
3073 
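/*
 * Dirty the inode only for flag transitions that must reach disk: for the
 * inline/new-inode flags only a clear is propagated (a plain set returns
 * early), while FI_DATA_EXIST, FI_PIN_FILE and FI_COMPRESS_RELEASED dirty
 * the inode on both set and clear.
 */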
3074 static inline void __mark_inode_dirty_flag(struct inode *inode,
3075 						int flag, bool set)
3076 {
3077 	switch (flag) {
3078 	case FI_INLINE_XATTR:
3079 	case FI_INLINE_DATA:
3080 	case FI_INLINE_DENTRY:
3081 	case FI_NEW_INODE:
3082 		if (set)
3083 			return;
3084 		fallthrough;
3085 	case FI_DATA_EXIST:
3086 	case FI_PIN_FILE:
3087 	case FI_COMPRESS_RELEASED:
3088 		f2fs_mark_inode_dirty_sync(inode, true);
3089 	}
3090 }
3091 
3092 static inline void set_inode_flag(struct inode *inode, int flag)
3093 {
3094 	set_bit(flag, F2FS_I(inode)->flags);
3095 	__mark_inode_dirty_flag(inode, flag, true);
3096 }
3097 
3098 static inline int is_inode_flag_set(struct inode *inode, int flag)
3099 {
3100 	return test_bit(flag, F2FS_I(inode)->flags);
3101 }
3102 
3103 static inline void clear_inode_flag(struct inode *inode, int flag)
3104 {
3105 	clear_bit(flag, F2FS_I(inode)->flags);
3106 	__mark_inode_dirty_flag(inode, flag, false);
3107 }
3108 
3109 static inline bool f2fs_verity_in_progress(struct inode *inode)
3110 {
3111 	return IS_ENABLED(CONFIG_FS_VERITY) &&
3112 	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
3113 }
3114 
3115 static inline void set_acl_inode(struct inode *inode, umode_t mode)
3116 {
3117 	F2FS_I(inode)->i_acl_mode = mode;
3118 	set_inode_flag(inode, FI_ACL_MODE);
3119 	f2fs_mark_inode_dirty_sync(inode, false);
3120 }
3121 
3122 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
3123 {
3124 	if (inc)
3125 		inc_nlink(inode);
3126 	else
3127 		drop_nlink(inode);
3128 	f2fs_mark_inode_dirty_sync(inode, true);
3129 }
3130 
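/*
 * Charge (or release) quota for @diff blocks and dirty the inode; a clean
 * (or already auto-recoverable) inode is flagged FI_AUTO_RECOVER so that a
 * later fsync may skip the inode write (see f2fs_skip_inode_update()).
 */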
3131 static inline void f2fs_i_blocks_write(struct inode *inode,
3132 					block_t diff, bool add, bool claim)
3133 {
3134 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3135 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3136 
3137 	/* add == true with claim == true must pair with a prior dquot_reserve_block() */
3138 	if (add) {
3139 		if (claim)
3140 			dquot_claim_block(inode, diff);
3141 		else
3142 			dquot_alloc_block_nofail(inode, diff);
3143 	} else {
3144 		dquot_free_block(inode, diff);
3145 	}
3146 
3147 	f2fs_mark_inode_dirty_sync(inode, true);
3148 	if (clean || recover)
3149 		set_inode_flag(inode, FI_AUTO_RECOVER);
3150 }
3151 
3152 static inline bool f2fs_is_atomic_file(struct inode *inode);
3153 
3154 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
3155 {
3156 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3157 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3158 
3159 	if (i_size_read(inode) == i_size)
3160 		return;
3161 
3162 	i_size_write(inode, i_size);
3163 
3164 	if (f2fs_is_atomic_file(inode))
3165 		return;
3166 
3167 	f2fs_mark_inode_dirty_sync(inode, true);
3168 	if (clean || recover)
3169 		set_inode_flag(inode, FI_AUTO_RECOVER);
3170 }
3171 
3172 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
3173 {
3174 	F2FS_I(inode)->i_current_depth = depth;
3175 	f2fs_mark_inode_dirty_sync(inode, true);
3176 }
3177 
3178 static inline void f2fs_i_gc_failures_write(struct inode *inode,
3179 					unsigned int count)
3180 {
3181 	F2FS_I(inode)->i_gc_failures = count;
3182 	f2fs_mark_inode_dirty_sync(inode, true);
3183 }
3184 
3185 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
3186 {
3187 	F2FS_I(inode)->i_xattr_nid = xnid;
3188 	f2fs_mark_inode_dirty_sync(inode, true);
3189 }
3190 
3191 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
3192 {
3193 	F2FS_I(inode)->i_pino = pino;
3194 	f2fs_mark_inode_dirty_sync(inode, true);
3195 }
3196 
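/*
 * get_inline_info() and set_raw_inline() mirror each other: the former
 * loads the on-disk i_inline bits into the in-memory inode flags, the
 * latter writes the current flags back into the raw inode.
 */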
3197 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
3198 {
3199 	struct f2fs_inode_info *fi = F2FS_I(inode);
3200 
3201 	if (ri->i_inline & F2FS_INLINE_XATTR)
3202 		set_bit(FI_INLINE_XATTR, fi->flags);
3203 	if (ri->i_inline & F2FS_INLINE_DATA)
3204 		set_bit(FI_INLINE_DATA, fi->flags);
3205 	if (ri->i_inline & F2FS_INLINE_DENTRY)
3206 		set_bit(FI_INLINE_DENTRY, fi->flags);
3207 	if (ri->i_inline & F2FS_DATA_EXIST)
3208 		set_bit(FI_DATA_EXIST, fi->flags);
3209 	if (ri->i_inline & F2FS_EXTRA_ATTR)
3210 		set_bit(FI_EXTRA_ATTR, fi->flags);
3211 	if (ri->i_inline & F2FS_PIN_FILE)
3212 		set_bit(FI_PIN_FILE, fi->flags);
3213 	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
3214 		set_bit(FI_COMPRESS_RELEASED, fi->flags);
3215 }
3216 
3217 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
3218 {
3219 	ri->i_inline = 0;
3220 
3221 	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
3222 		ri->i_inline |= F2FS_INLINE_XATTR;
3223 	if (is_inode_flag_set(inode, FI_INLINE_DATA))
3224 		ri->i_inline |= F2FS_INLINE_DATA;
3225 	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
3226 		ri->i_inline |= F2FS_INLINE_DENTRY;
3227 	if (is_inode_flag_set(inode, FI_DATA_EXIST))
3228 		ri->i_inline |= F2FS_DATA_EXIST;
3229 	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
3230 		ri->i_inline |= F2FS_EXTRA_ATTR;
3231 	if (is_inode_flag_set(inode, FI_PIN_FILE))
3232 		ri->i_inline |= F2FS_PIN_FILE;
3233 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
3234 		ri->i_inline |= F2FS_COMPRESS_RELEASED;
3235 }
3236 
3237 static inline int f2fs_has_extra_attr(struct inode *inode)
3238 {
3239 	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
3240 }
3241 
3242 static inline int f2fs_has_inline_xattr(struct inode *inode)
3243 {
3244 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
3245 }
3246 
3247 static inline int f2fs_compressed_file(struct inode *inode)
3248 {
3249 	return S_ISREG(inode->i_mode) &&
3250 		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
3251 }
3252 
3253 static inline bool f2fs_need_compress_data(struct inode *inode)
3254 {
3255 	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;
3256 
3257 	if (!f2fs_compressed_file(inode))
3258 		return false;
3259 
3260 	if (compress_mode == COMPR_MODE_FS)
3261 		return true;
3262 	else if (compress_mode == COMPR_MODE_USER &&
3263 			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
3264 		return true;
3265 
3266 	return false;
3267 }
3268 
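/*
 * Number of usable block-address slots in an inode or direct-node page.
 * For compressed files this is rounded down to a whole number of clusters
 * so that a cluster never straddles two node pages.
 */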
3269 static inline unsigned int addrs_per_page(struct inode *inode,
3270 							bool is_inode)
3271 {
3272 	unsigned int addrs = is_inode ? (CUR_ADDRS_PER_INODE(inode) -
3273 			get_inline_xattr_addrs(inode)) : DEF_ADDRS_PER_BLOCK;
3274 
3275 	if (f2fs_compressed_file(inode))
3276 		return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
3277 	return addrs;
3278 }
3279 
3280 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
3281 {
3282 	struct f2fs_inode *ri = F2FS_INODE(page);
3283 
3284 	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
3285 					get_inline_xattr_addrs(inode)]);
3286 }
3287 
3288 static inline int inline_xattr_size(struct inode *inode)
3289 {
3290 	if (f2fs_has_inline_xattr(inode))
3291 		return get_inline_xattr_addrs(inode) * sizeof(__le32);
3292 	return 0;
3293 }
3294 
3295 /*
3296  * Notice: checking the inline_data flag without holding the inode page lock
3297  * is unsafe; f2fs_convert_inline_page() can clear it at any time.
3298  */
3299 static inline int f2fs_has_inline_data(struct inode *inode)
3300 {
3301 	return is_inode_flag_set(inode, FI_INLINE_DATA);
3302 }
3303 
3304 static inline int f2fs_exist_data(struct inode *inode)
3305 {
3306 	return is_inode_flag_set(inode, FI_DATA_EXIST);
3307 }
3308 
3309 static inline int f2fs_is_mmap_file(struct inode *inode)
3310 {
3311 	return is_inode_flag_set(inode, FI_MMAP_FILE);
3312 }
3313 
3314 static inline bool f2fs_is_pinned_file(struct inode *inode)
3315 {
3316 	return is_inode_flag_set(inode, FI_PIN_FILE);
3317 }
3318 
3319 static inline bool f2fs_is_atomic_file(struct inode *inode)
3320 {
3321 	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
3322 }
3323 
3324 static inline bool f2fs_is_cow_file(struct inode *inode)
3325 {
3326 	return is_inode_flag_set(inode, FI_COW_FILE);
3327 }
3328 
3329 static inline void *inline_data_addr(struct inode *inode, struct page *page)
3330 {
3331 	__le32 *addr = get_dnode_addr(inode, page);
3332 
3333 	return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
3334 }
3335 
3336 static inline int f2fs_has_inline_dentry(struct inode *inode)
3337 {
3338 	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
3339 }
3340 
3341 static inline int is_file(struct inode *inode, int type)
3342 {
3343 	return F2FS_I(inode)->i_advise & type;
3344 }
3345 
3346 static inline void set_file(struct inode *inode, int type)
3347 {
3348 	if (is_file(inode, type))
3349 		return;
3350 	F2FS_I(inode)->i_advise |= type;
3351 	f2fs_mark_inode_dirty_sync(inode, true);
3352 }
3353 
3354 static inline void clear_file(struct inode *inode, int type)
3355 {
3356 	if (!is_file(inode, type))
3357 		return;
3358 	F2FS_I(inode)->i_advise &= ~type;
3359 	f2fs_mark_inode_dirty_sync(inode, true);
3360 }
3361 
3362 static inline bool f2fs_is_time_consistent(struct inode *inode)
3363 {
3364 	struct timespec64 ts = inode_get_atime(inode);
3365 
3366 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts))
3367 		return false;
3368 	ts = inode_get_ctime(inode);
3369 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts))
3370 		return false;
3371 	ts = inode_get_mtime(inode);
3372 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts))
3373 		return false;
3374 	return true;
3375 }
3376 
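/*
 * Whether fsync may skip writing the inode block. For datasync it is
 * enough that the inode is not on the dirty-meta list; otherwise the inode
 * must be auto-recoverable (FI_AUTO_RECOVER, no forced i_size keep),
 * page-aligned in size, time-consistent with the on-disk copy, and its
 * last written size must match i_size.
 */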
3377 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
3378 {
3379 	bool ret;
3380 
3381 	if (dsync) {
3382 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3383 
3384 		spin_lock(&sbi->inode_lock[DIRTY_META]);
3385 		ret = list_empty(&F2FS_I(inode)->gdirty_list);
3386 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
3387 		return ret;
3388 	}
3389 	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
3390 			file_keep_isize(inode) ||
3391 			i_size_read(inode) & ~PAGE_MASK)
3392 		return false;
3393 
3394 	if (!f2fs_is_time_consistent(inode))
3395 		return false;
3396 
3397 	spin_lock(&F2FS_I(inode)->i_size_lock);
3398 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3399 	spin_unlock(&F2FS_I(inode)->i_size_lock);
3400 
3401 	return ret;
3402 }
3403 
3404 static inline bool f2fs_readonly(struct super_block *sb)
3405 {
3406 	return sb_rdonly(sb);
3407 }
3408 
3409 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
3410 {
3411 	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
3412 }
3413 
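/*
 * The allocation helpers below consult fault injection first: with
 * CONFIG_F2FS_FAULT_INJECTION enabled they can return NULL even when the
 * underlying allocator would have succeeded.
 */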
3414 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
3415 					size_t size, gfp_t flags)
3416 {
3417 	if (time_to_inject(sbi, FAULT_KMALLOC))
3418 		return NULL;
3419 
3420 	return kmalloc(size, flags);
3421 }
3422 
3423 static inline void *f2fs_getname(struct f2fs_sb_info *sbi)
3424 {
3425 	if (time_to_inject(sbi, FAULT_KMALLOC))
3426 		return NULL;
3427 
3428 	return __getname();
3429 }
3430 
3431 static inline void f2fs_putname(char *buf)
3432 {
3433 	__putname(buf);
3434 }
3435 
3436 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3437 					size_t size, gfp_t flags)
3438 {
3439 	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3440 }
3441 
3442 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3443 					size_t size, gfp_t flags)
3444 {
3445 	if (time_to_inject(sbi, FAULT_KVMALLOC))
3446 		return NULL;
3447 
3448 	return kvmalloc(size, flags);
3449 }
3450 
3451 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3452 					size_t size, gfp_t flags)
3453 {
3454 	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3455 }
3456 
3457 static inline int get_extra_isize(struct inode *inode)
3458 {
3459 	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3460 }
3461 
3462 static inline int get_inline_xattr_addrs(struct inode *inode)
3463 {
3464 	return F2FS_I(inode)->i_inline_xattr_size;
3465 }
3466 
3467 #define f2fs_get_inode_mode(i) \
3468 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3469 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3470 
3471 #define F2FS_MIN_EXTRA_ATTR_SIZE		(sizeof(__le32))
3472 
3473 #define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
3474 	(offsetof(struct f2fs_inode, i_extra_end) -	\
3475 	offsetof(struct f2fs_inode, i_extra_isize))	\
3476 
3477 #define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
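/*
 * True when @field ends within the on-disk inode, i.e. inside the base
 * attributes plus the first @extra_isize bytes of the extra area; used
 * e.g. as F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid).
 */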
3478 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
3479 		((offsetof(typeof(*(f2fs_inode)), field) +	\
3480 		sizeof((f2fs_inode)->field))			\
3481 		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\
3482 
3483 #define __is_large_section(sbi)		(SEGS_PER_SEC(sbi) > 1)
3484 
3485 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3486 
3487 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3488 					block_t blkaddr, int type);
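/*
 * Warning-only variant of f2fs_is_valid_blkaddr(): an invalid address is
 * logged and fsck is suggested, but the caller proceeds.
 */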
3489 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3490 					block_t blkaddr, int type)
3491 {
3492 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
3493 		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3494 			 blkaddr, type);
3495 }
3496 
3497 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3498 {
3499 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3500 			blkaddr == COMPRESS_ADDR)
3501 		return false;
3502 	return true;
3503 }
3504 
3505 /*
3506  * file.c
3507  */
3508 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3509 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3510 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3511 int f2fs_truncate(struct inode *inode);
3512 int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
3513 		 struct kstat *stat, u32 request_mask, unsigned int flags);
3514 int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
3515 		 struct iattr *attr);
3516 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3517 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3518 int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
3519 						bool readonly, bool need_lock);
3520 int f2fs_precache_extents(struct inode *inode);
3521 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
3522 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3523 		      struct dentry *dentry, struct fileattr *fa);
3524 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3525 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3526 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3527 int f2fs_pin_file_control(struct inode *inode, bool inc);
3528 
3529 /*
3530  * inode.c
3531  */
3532 void f2fs_set_inode_flags(struct inode *inode);
3533 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3534 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3535 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3536 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3537 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3538 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3539 void f2fs_update_inode_page(struct inode *inode);
3540 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3541 void f2fs_evict_inode(struct inode *inode);
3542 void f2fs_handle_failed_inode(struct inode *inode);
3543 
3544 /*
3545  * namei.c
3546  */
3547 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3548 							bool hot, bool set);
3549 struct dentry *f2fs_get_parent(struct dentry *child);
3550 int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3551 		     struct inode **new_inode);
3552 
3553 /*
3554  * dir.c
3555  */
3556 #if IS_ENABLED(CONFIG_UNICODE)
3557 int f2fs_init_casefolded_name(const struct inode *dir,
3558 			      struct f2fs_filename *fname);
3559 void f2fs_free_casefolded_name(struct f2fs_filename *fname);
3560 #else
3561 static inline int f2fs_init_casefolded_name(const struct inode *dir,
3562 					    struct f2fs_filename *fname)
3563 {
3564 	return 0;
3565 }
3566 
3567 static inline void f2fs_free_casefolded_name(struct f2fs_filename *fname)
3568 {
3569 }
3570 #endif /* CONFIG_UNICODE */
3571 
3572 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3573 			int lookup, struct f2fs_filename *fname);
3574 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3575 			struct f2fs_filename *fname);
3576 void f2fs_free_filename(struct f2fs_filename *fname);
3577 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3578 			const struct f2fs_filename *fname, int *max_slots);
3579 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3580 			unsigned int start_pos, struct fscrypt_str *fstr);
3581 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3582 			struct f2fs_dentry_ptr *d);
3583 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3584 			const struct f2fs_filename *fname, struct page *dpage);
3585 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3586 			unsigned int current_depth);
3587 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3588 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3589 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3590 					 const struct f2fs_filename *fname,
3591 					 struct page **res_page);
3592 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3593 			const struct qstr *child, struct page **res_page);
3594 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3595 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3596 			struct page **page);
3597 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3598 			struct page *page, struct inode *inode);
3599 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3600 			  const struct f2fs_filename *fname);
3601 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3602 			const struct fscrypt_str *name, f2fs_hash_t name_hash,
3603 			unsigned int bit_pos);
3604 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3605 			struct inode *inode, nid_t ino, umode_t mode);
3606 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3607 			struct inode *inode, nid_t ino, umode_t mode);
3608 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3609 			struct inode *inode, nid_t ino, umode_t mode);
3610 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3611 			struct inode *dir, struct inode *inode);
3612 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
3613 					struct f2fs_filename *fname);
3614 bool f2fs_empty_dir(struct inode *dir);
3615 
3616 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3617 {
3618 	if (fscrypt_is_nokey_name(dentry))
3619 		return -ENOKEY;
3620 	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3621 				inode, inode->i_ino, inode->i_mode);
3622 }
3623 
3624 /*
3625  * super.c
3626  */
3627 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3628 void f2fs_inode_synced(struct inode *inode);
3629 int f2fs_dquot_initialize(struct inode *inode);
3630 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3631 int f2fs_quota_sync(struct super_block *sb, int type);
3632 loff_t max_file_blocks(struct inode *inode);
3633 void f2fs_quota_off_umount(struct super_block *sb);
3634 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
3635 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
3636 							bool irq_context);
3637 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
3638 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error);
3639 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3640 int f2fs_sync_fs(struct super_block *sb, int sync);
3641 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
3642 
3643 /*
3644  * hash.c
3645  */
3646 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
3647 
3648 /*
3649  * node.c
3650  */
3651 struct node_info;
3652 
3653 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3654 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3655 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3656 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3657 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3658 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3659 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3660 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3661 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3662 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3663 				struct node_info *ni, bool checkpoint_context);
3664 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3665 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3666 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3667 int f2fs_truncate_xattr_node(struct inode *inode);
3668 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3669 					unsigned int seq_id);
3670 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
3671 int f2fs_remove_inode_page(struct inode *inode);
3672 struct page *f2fs_new_inode_page(struct inode *inode);
3673 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3674 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3675 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3676 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3677 int f2fs_move_node_page(struct page *node_page, int gc_type);
3678 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3679 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3680 			struct writeback_control *wbc, bool atomic,
3681 			unsigned int *seq_id);
3682 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3683 			struct writeback_control *wbc,
3684 			bool do_balance, enum iostat_type io_type);
3685 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3686 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3687 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3688 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3689 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3690 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3691 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3692 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3693 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3694 			unsigned int segno, struct f2fs_summary_block *sum);
3695 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
3696 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3697 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3698 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3699 int __init f2fs_create_node_manager_caches(void);
3700 void f2fs_destroy_node_manager_caches(void);
3701 
3702 /*
3703  * segment.c
3704  */
3705 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3706 int f2fs_commit_atomic_write(struct inode *inode);
3707 void f2fs_abort_atomic_write(struct inode *inode, bool clean);
3708 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3709 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3710 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3711 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3712 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3713 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3714 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3715 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3716 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
3717 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3718 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3719 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3720 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3721 					struct cp_control *cpc);
3722 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3723 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3724 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3725 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3726 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3727 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
3728 int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
3729 int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi);
3730 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
3731 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
3732 int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3733 					unsigned int start, unsigned int end);
3734 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
3735 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi);
3736 int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3737 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3738 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3739 					struct cp_control *cpc);
3740 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3741 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3742 					block_t blk_addr);
3743 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
3744 						enum iostat_type io_type);
3745 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3746 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3747 			struct f2fs_io_info *fio);
3748 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3749 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3750 			block_t old_blkaddr, block_t new_blkaddr,
3751 			bool recover_curseg, bool recover_newaddr,
3752 			bool from_gc);
3753 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3754 			block_t old_addr, block_t new_addr,
3755 			unsigned char version, bool recover_curseg,
3756 			bool recover_newaddr);
3757 int f2fs_get_segment_temp(int seg_type);
3758 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3759 			block_t old_blkaddr, block_t *new_blkaddr,
3760 			struct f2fs_summary *sum, int type,
3761 			struct f2fs_io_info *fio);
3762 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3763 					block_t blkaddr, unsigned int blkcnt);
3764 void f2fs_wait_on_page_writeback(struct page *page,
3765 			enum page_type type, bool ordered, bool locked);
3766 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3767 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3768 								block_t len);
3769 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3770 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3771 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3772 			unsigned int val, int alloc);
3773 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3774 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3775 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3776 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3777 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3778 int __init f2fs_create_segment_manager_caches(void);
3779 void f2fs_destroy_segment_manager_caches(void);
3780 int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint);
3781 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3782 			enum page_type type, enum temp_type temp);
3783 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi);
3784 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
3785 			unsigned int segno);
3786 
3787 #define DEF_FRAGMENT_SIZE	4
3788 #define MIN_FRAGMENT_SIZE	1
3789 #define MAX_FRAGMENT_SIZE	512
3790 
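/*
 * True in the allocation-fragmentation test modes, where segment or block
 * allocation is deliberately randomized within the fragment-size bounds
 * above.
 */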
3791 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
3792 {
3793 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
3794 		F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
3795 }
3796 
3797 /*
3798  * checkpoint.c
3799  */
3800 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
3801 							unsigned char reason);
3802 void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
3803 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3804 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3805 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
3806 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3807 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3808 					block_t blkaddr, int type);
3809 bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
3810 					block_t blkaddr, int type);
3811 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3812 			int type, bool sync);
3813 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
3814 							unsigned int ra_blocks);
3815 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3816 			long nr_to_write, enum iostat_type io_type);
3817 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3818 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3819 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3820 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3821 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3822 					unsigned int devidx, int type);
3823 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3824 					unsigned int devidx, int type);
3825 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3826 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3827 void f2fs_add_orphan_inode(struct inode *inode);
3828 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3829 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3830 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3831 void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
3832 void f2fs_remove_dirty_inode(struct inode *inode);
3833 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
3834 								bool from_cp);
3835 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3836 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
3837 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3838 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3839 int __init f2fs_create_checkpoint_caches(void);
3840 void f2fs_destroy_checkpoint_caches(void);
3841 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
3842 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
3843 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
3844 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
3845 
3846 /*
3847  * data.c
3848  */
3849 int __init f2fs_init_bioset(void);
3850 void f2fs_destroy_bioset(void);
3851 bool f2fs_is_cp_guaranteed(struct page *page);
3852 int f2fs_init_bio_entry_cache(void);
3853 void f2fs_destroy_bio_entry_cache(void);
3854 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
3855 			  enum page_type type);
3856 int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
3857 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3858 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3859 				struct inode *inode, struct page *page,
3860 				nid_t ino, enum page_type type);
3861 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3862 					struct bio **bio, struct page *page);
3863 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3864 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3865 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3866 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3867 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3868 		block_t blk_addr, sector_t *sector);
3869 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3870 void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3871 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3872 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3873 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3874 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
3875 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3876 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3877 			blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
3878 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
3879 							pgoff_t *next_pgofs);
3880 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3881 			bool for_write);
3882 struct page *f2fs_get_new_data_page(struct inode *inode,
3883 			struct page *ipage, pgoff_t index, bool new_i_size);
3884 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3885 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
3886 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3887 			u64 start, u64 len);
3888 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3889 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3890 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3891 int f2fs_write_single_data_page(struct folio *folio, int *submitted,
3892 				struct bio **bio, sector_t *last_block,
3893 				struct writeback_control *wbc,
3894 				enum iostat_type io_type,
3895 				int compr_blocks, bool allow_balance);
3896 void f2fs_write_failed(struct inode *inode, loff_t to);
3897 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
3898 bool f2fs_release_folio(struct folio *folio, gfp_t wait);
3899 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3900 void f2fs_clear_page_cache_dirty_tag(struct folio *folio);
3901 int f2fs_init_post_read_processing(void);
3902 void f2fs_destroy_post_read_processing(void);
3903 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3904 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
3905 extern const struct iomap_ops f2fs_iomap_ops;
3906 
3907 /*
3908  * gc.c
3909  */
3910 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3911 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3912 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3913 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
3914 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3915 int f2fs_gc_range(struct f2fs_sb_info *sbi,
3916 		unsigned int start_seg, unsigned int end_seg,
3917 		bool dry_run, unsigned int dry_run_sections);
3918 int f2fs_resize_fs(struct file *filp, __u64 block_count);
3919 int __init f2fs_create_garbage_collection_cache(void);
3920 void f2fs_destroy_garbage_collection_cache(void);
3921 /* victim selection function for cleaning and SSR */
3922 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
3923 			int gc_type, int type, char alloc_mode,
3924 			unsigned long long age, bool one_time);
3925 
3926 /*
3927  * recovery.c
3928  */
3929 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3930 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
3931 int __init f2fs_create_recovery_cache(void);
3932 void f2fs_destroy_recovery_cache(void);
3933 
3934 /*
3935  * debug.c
3936  */
3937 #ifdef CONFIG_F2FS_STAT_FS
3938 struct f2fs_stat_info {
3939 	struct list_head stat_list;
3940 	struct f2fs_sb_info *sbi;
3941 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3942 	int main_area_segs, main_area_sections, main_area_zones;
3943 	unsigned long long hit_cached[NR_EXTENT_CACHES];
3944 	unsigned long long hit_rbtree[NR_EXTENT_CACHES];
3945 	unsigned long long total_ext[NR_EXTENT_CACHES];
3946 	unsigned long long hit_total[NR_EXTENT_CACHES];
3947 	int ext_tree[NR_EXTENT_CACHES];
3948 	int zombie_tree[NR_EXTENT_CACHES];
3949 	int ext_node[NR_EXTENT_CACHES];
3950 	/* to count memory footprint */
3951 	unsigned long long ext_mem[NR_EXTENT_CACHES];
3952 	/* for read extent cache */
3953 	unsigned long long hit_largest;
3954 	/* for block age extent cache */
3955 	unsigned long long allocated_data_blocks;
3956 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3957 	int ndirty_data, ndirty_qdata;
3958 	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3959 	int nats, dirty_nats, sits, dirty_sits;
3960 	int free_nids, avail_nids, alloc_nids;
3961 	int total_count, utilization;
3962 	int nr_wb_cp_data, nr_wb_data;
3963 	int nr_rd_data, nr_rd_node, nr_rd_meta;
3964 	int nr_dio_read, nr_dio_write;
3965 	unsigned int io_skip_bggc, other_skip_bggc;
3966 	int nr_flushing, nr_flushed, flush_list_empty;
3967 	int nr_discarding, nr_discarded;
3968 	int nr_discard_cmd;
3969 	unsigned int undiscard_blks;
3970 	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
3971 	unsigned int cur_ckpt_time, peak_ckpt_time;
3972 	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3973 	int compr_inode, swapfile_inode;
3974 	unsigned long long compr_blocks;
3975 	int aw_cnt, max_aw_cnt;
3976 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3977 	unsigned int bimodal, avg_vblocks;
3978 	int util_free, util_valid, util_invalid;
3979 	int rsvd_segs, overp_segs;
3980 	int dirty_count, node_pages, meta_pages, compress_pages;
3981 	int compress_page_hit;
3982 	int prefree_count, free_segs, free_secs;
3983 	int cp_call_count[MAX_CALL_TYPE], cp_count;
3984 	int gc_call_count[MAX_CALL_TYPE];
3985 	int gc_segs[2][2];
3986 	int gc_secs[2][2];
3987 	int tot_blks, data_blks, node_blks;
3988 	int bg_data_blks, bg_node_blks;
3989 	int curseg[NR_CURSEG_TYPE];
3990 	int cursec[NR_CURSEG_TYPE];
3991 	int curzone[NR_CURSEG_TYPE];
3992 	unsigned int dirty_seg[NR_CURSEG_TYPE];
3993 	unsigned int full_seg[NR_CURSEG_TYPE];
3994 	unsigned int valid_blks[NR_CURSEG_TYPE];
3995 
3996 	unsigned int meta_count[META_MAX];
3997 	unsigned int segment_count[2];
3998 	unsigned int block_count[2];
3999 	unsigned int inplace_count;
4000 	unsigned long long base_mem, cache_mem, page_mem;
4001 };
4002 
4003 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
4004 {
4005 	return (struct f2fs_stat_info *)sbi->stat_info;
4006 }
4007 
4008 #define stat_inc_cp_call_count(sbi, foreground)				\
4009 		atomic_inc(&sbi->cp_call_count[(foreground)])
4010 #define stat_inc_cp_count(sbi)		(F2FS_STAT(sbi)->cp_count++)
4011 #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
4012 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
4013 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
4014 #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
4015 #define stat_inc_total_hit(sbi, type)		(atomic64_inc(&(sbi)->total_hit_ext[type]))
4016 #define stat_inc_rbtree_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_rbtree[type]))
4017 #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
4018 #define stat_inc_cached_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_cached[type]))
4019 #define stat_inc_inline_xattr(inode)					\
4020 	do {								\
4021 		if (f2fs_has_inline_xattr(inode))			\
4022 			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
4023 	} while (0)
4024 #define stat_dec_inline_xattr(inode)					\
4025 	do {								\
4026 		if (f2fs_has_inline_xattr(inode))			\
4027 			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
4028 	} while (0)
4029 #define stat_inc_inline_inode(inode)					\
4030 	do {								\
4031 		if (f2fs_has_inline_data(inode))			\
4032 			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
4033 	} while (0)
4034 #define stat_dec_inline_inode(inode)					\
4035 	do {								\
4036 		if (f2fs_has_inline_data(inode))			\
4037 			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
4038 	} while (0)
4039 #define stat_inc_inline_dir(inode)					\
4040 	do {								\
4041 		if (f2fs_has_inline_dentry(inode))			\
4042 			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
4043 	} while (0)
4044 #define stat_dec_inline_dir(inode)					\
4045 	do {								\
4046 		if (f2fs_has_inline_dentry(inode))			\
4047 			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
4048 	} while (0)
4049 #define stat_inc_compr_inode(inode)					\
4050 	do {								\
4051 		if (f2fs_compressed_file(inode))			\
4052 			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
4053 	} while (0)
4054 #define stat_dec_compr_inode(inode)					\
4055 	do {								\
4056 		if (f2fs_compressed_file(inode))			\
4057 			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
4058 	} while (0)
4059 #define stat_add_compr_blocks(inode, blocks)				\
4060 		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
4061 #define stat_sub_compr_blocks(inode, blocks)				\
4062 		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
4063 #define stat_inc_swapfile_inode(inode)					\
4064 		(atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
4065 #define stat_dec_swapfile_inode(inode)					\
4066 		(atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
4067 #define stat_inc_atomic_inode(inode)					\
4068 			(atomic_inc(&F2FS_I_SB(inode)->atomic_files))
4069 #define stat_dec_atomic_inode(inode)					\
4070 			(atomic_dec(&F2FS_I_SB(inode)->atomic_files))
4071 #define stat_inc_meta_count(sbi, blkaddr)				\
4072 	do {								\
4073 		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
4074 			atomic_inc(&(sbi)->meta_count[META_CP]);	\
4075 		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
4076 			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
4077 		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
4078 			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
4079 		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
4080 			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
4081 	} while (0)
4082 #define stat_inc_seg_type(sbi, curseg)					\
4083 		((sbi)->segment_count[(curseg)->alloc_type]++)
4084 #define stat_inc_block_count(sbi, curseg)				\
4085 		((sbi)->block_count[(curseg)->alloc_type]++)
4086 #define stat_inc_inplace_blocks(sbi)					\
4087 		(atomic_inc(&(sbi)->inplace_count))
4088 #define stat_update_max_atomic_write(inode)				\
4089 	do {								\
4090 		int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files);	\
4091 		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
4092 		if (cur > max)						\
4093 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
4094 	} while (0)
4095 #define stat_inc_gc_call_count(sbi, foreground)				\
4096 		(F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
4097 #define stat_inc_gc_sec_count(sbi, type, gc_type)			\
4098 		(F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
4099 #define stat_inc_gc_seg_count(sbi, type, gc_type)			\
4100 		(F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)
4101 
4102 #define stat_inc_tot_blk_count(si, blks)				\
4103 	((si)->tot_blks += (blks))
4104 
4105 #define stat_inc_data_blk_count(sbi, blks, gc_type)			\
4106 	do {								\
4107 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
4108 		stat_inc_tot_blk_count(si, blks);			\
4109 		si->data_blks += (blks);				\
4110 		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
4111 	} while (0)
4112 
4113 #define stat_inc_node_blk_count(sbi, blks, gc_type)			\
4114 	do {								\
4115 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
4116 		stat_inc_tot_blk_count(si, blks);			\
4117 		si->node_blks += (blks);				\
4118 		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
4119 	} while (0)
4120 
4121 int f2fs_build_stats(struct f2fs_sb_info *sbi);
4122 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
4123 void __init f2fs_create_root_stats(void);
4124 void f2fs_destroy_root_stats(void);
4125 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
4126 #else
4127 #define stat_inc_cp_call_count(sbi, foreground)		do { } while (0)
4128 #define stat_inc_cp_count(sbi)				do { } while (0)
4129 #define stat_io_skip_bggc_count(sbi)			do { } while (0)
4130 #define stat_other_skip_bggc_count(sbi)			do { } while (0)
4131 #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
4132 #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
4133 #define stat_inc_total_hit(sbi, type)			do { } while (0)
4134 #define stat_inc_rbtree_node_hit(sbi, type)		do { } while (0)
4135 #define stat_inc_largest_node_hit(sbi)			do { } while (0)
4136 #define stat_inc_cached_node_hit(sbi, type)		do { } while (0)
4137 #define stat_inc_inline_xattr(inode)			do { } while (0)
4138 #define stat_dec_inline_xattr(inode)			do { } while (0)
4139 #define stat_inc_inline_inode(inode)			do { } while (0)
4140 #define stat_dec_inline_inode(inode)			do { } while (0)
4141 #define stat_inc_inline_dir(inode)			do { } while (0)
4142 #define stat_dec_inline_dir(inode)			do { } while (0)
4143 #define stat_inc_compr_inode(inode)			do { } while (0)
4144 #define stat_dec_compr_inode(inode)			do { } while (0)
4145 #define stat_add_compr_blocks(inode, blocks)		do { } while (0)
4146 #define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
4147 #define stat_inc_swapfile_inode(inode)			do { } while (0)
4148 #define stat_dec_swapfile_inode(inode)			do { } while (0)
4149 #define stat_inc_atomic_inode(inode)			do { } while (0)
4150 #define stat_dec_atomic_inode(inode)			do { } while (0)
4151 #define stat_update_max_atomic_write(inode)		do { } while (0)
4152 #define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
4153 #define stat_inc_seg_type(sbi, curseg)			do { } while (0)
4154 #define stat_inc_block_count(sbi, curseg)		do { } while (0)
4155 #define stat_inc_inplace_blocks(sbi)			do { } while (0)
4156 #define stat_inc_gc_call_count(sbi, foreground)		do { } while (0)
4157 #define stat_inc_gc_sec_count(sbi, type, gc_type)	do { } while (0)
4158 #define stat_inc_gc_seg_count(sbi, type, gc_type)	do { } while (0)
4159 #define stat_inc_tot_blk_count(si, blks)		do { } while (0)
4160 #define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
4161 #define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)
4162 
4163 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
4164 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
4165 static inline void __init f2fs_create_root_stats(void) { }
4166 static inline void f2fs_destroy_root_stats(void) { }
4167 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
4168 #endif
4169 
4170 extern const struct file_operations f2fs_dir_operations;
4171 extern const struct file_operations f2fs_file_operations;
4172 extern const struct inode_operations f2fs_file_inode_operations;
4173 extern const struct address_space_operations f2fs_dblock_aops;
4174 extern const struct address_space_operations f2fs_node_aops;
4175 extern const struct address_space_operations f2fs_meta_aops;
4176 extern const struct inode_operations f2fs_dir_inode_operations;
4177 extern const struct inode_operations f2fs_symlink_inode_operations;
4178 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
4179 extern const struct inode_operations f2fs_special_inode_operations;
4180 extern struct kmem_cache *f2fs_inode_entry_slab;
4181 
4182 /*
4183  * inline.c
4184  */
4185 bool f2fs_may_inline_data(struct inode *inode);
4186 bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage);
4187 bool f2fs_may_inline_dentry(struct inode *inode);
4188 void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage);
4189 void f2fs_truncate_inline_inode(struct inode *inode,
4190 						struct page *ipage, u64 from);
4191 int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
4192 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
4193 int f2fs_convert_inline_inode(struct inode *inode);
4194 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
4195 int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
4196 int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
4197 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
4198 					const struct f2fs_filename *fname,
4199 					struct page **res_page);
4200 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
4201 			struct page *ipage);
4202 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
4203 			struct inode *inode, nid_t ino, umode_t mode);
4204 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
4205 				struct page *page, struct inode *dir,
4206 				struct inode *inode);
4207 bool f2fs_empty_inline_dir(struct inode *dir);
4208 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
4209 			struct fscrypt_str *fstr);
4210 int f2fs_inline_data_fiemap(struct inode *inode,
4211 			struct fiemap_extent_info *fieinfo,
4212 			__u64 start, __u64 len);
4213 
4214 /*
4215  * shrinker.c
4216  */
4217 unsigned long f2fs_shrink_count(struct shrinker *shrink,
4218 			struct shrink_control *sc);
4219 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
4220 			struct shrink_control *sc);
4221 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
4222 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
4223 
4224 /*
4225  * extent_cache.c
4226  */
4227 bool sanity_check_extent_cache(struct inode *inode, struct page *ipage);
4228 void f2fs_init_extent_tree(struct inode *inode);
4229 void f2fs_drop_extent_tree(struct inode *inode);
4230 void f2fs_destroy_extent_node(struct inode *inode);
4231 void f2fs_destroy_extent_tree(struct inode *inode);
4232 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
4233 int __init f2fs_create_extent_cache(void);
4234 void f2fs_destroy_extent_cache(void);
4235 
4236 /* read extent cache ops */
4237 void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
4238 bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
4239 			struct extent_info *ei);
4240 bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
4241 			block_t *blkaddr);
4242 void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
4243 void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
4244 			pgoff_t fofs, block_t blkaddr, unsigned int len);
4245 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
4246 			int nr_shrink);
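/*
 * Editor's note: an illustrative lookup-before-walk flow, not part of
 * f2fs.h. A read path can consult the cached extent first and fall back
 * to the node-page walk only on a miss; "pgofs" is the file offset in
 * pages and "ei" mirrors the out-parameter above.
 *
 *	struct extent_info ei;
 *
 *	if (f2fs_lookup_read_extent_cache(inode, pgofs, &ei)) {
 *		hit: the block address is ei.blk + (pgofs - ei.fofs)
 *	} else {
 *		miss: resolve through the dnode, then call
 *		f2fs_update_read_extent_cache() to refill the tree
 *	}
 */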
4247 
4248 /* block age extent cache ops */
4249 void f2fs_init_age_extent_tree(struct inode *inode);
4250 bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
4251 			struct extent_info *ei);
4252 void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
4253 void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
4254 			pgoff_t fofs, unsigned int len);
4255 unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
4256 			int nr_shrink);
4257 
4258 /*
4259  * sysfs.c
4260  */
4261 #define MIN_RA_MUL	2
4262 #define MAX_RA_MUL	256
4263 
4264 int __init f2fs_init_sysfs(void);
4265 void f2fs_exit_sysfs(void);
4266 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
4267 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
4268 
4269 /* verity.c */
4270 extern const struct fsverity_operations f2fs_verityops;
4271 
4272 /*
4273  * crypto support
4274  */
4275 static inline bool f2fs_encrypted_file(struct inode *inode)
4276 {
4277 	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
4278 }
4279 
4280 static inline void f2fs_set_encrypted_inode(struct inode *inode)
4281 {
4282 #ifdef CONFIG_FS_ENCRYPTION
4283 	file_set_encrypt(inode);
4284 	f2fs_set_inode_flags(inode);
4285 #endif
4286 }
4287 
4288 /*
4289  * Returns true if reads of the inode's data need a postprocessing
4290  * step, such as decryption, fs-verity verification, or decompression.
4291  */
4292 static inline bool f2fs_post_read_required(struct inode *inode)
4293 {
4294 	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
4295 		f2fs_compressed_file(inode);
4296 }
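/*
 * Editor's note: a minimal sketch of the intended use, assuming a read
 * completion path; it is illustrative rather than the actual data.c
 * logic. When the predicate is true, the bio must be routed through the
 * post-read processing steps instead of being completed in place:
 *
 *	if (f2fs_post_read_required(inode))
 *		defer completion to the post-read workqueue
 *	else
 *		folio_mark_uptodate(folio);
 */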
4297 
4298 static inline bool f2fs_used_in_atomic_write(struct inode *inode)
4299 {
4300 	return f2fs_is_atomic_file(inode) || f2fs_is_cow_file(inode);
4301 }
4302 
4303 static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
4304 {
4305 	return f2fs_post_read_required(inode) || f2fs_used_in_atomic_write(inode);
4306 }
4307 
4308 /*
4309  * compress.c
4310  */
4311 #ifdef CONFIG_F2FS_FS_COMPRESSION
4312 enum cluster_check_type {
4313 	CLUSTER_IS_COMPR,   /* check whether the cluster is compressed */
4314 	CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
4315 	CLUSTER_RAW_BLKS    /* return # of raw blocks in a cluster */
4316 };
4317 bool f2fs_is_compressed_page(struct page *page);
4318 struct page *f2fs_compress_control_page(struct page *page);
4319 int f2fs_prepare_compress_overwrite(struct inode *inode,
4320 			struct page **pagep, pgoff_t index, void **fsdata);
4321 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
4322 					pgoff_t index, unsigned copied);
4323 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
4324 void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
4325 bool f2fs_is_compress_backend_ready(struct inode *inode);
4326 bool f2fs_is_compress_level_valid(int alg, int lvl);
4327 int __init f2fs_init_compress_mempool(void);
4328 void f2fs_destroy_compress_mempool(void);
4329 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
4330 void f2fs_end_read_compressed_page(struct page *page, bool failed,
4331 				block_t blkaddr, bool in_task);
4332 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
4333 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
4334 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
4335 				int index, int nr_pages, bool uptodate);
4336 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
4337 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio);
4338 int f2fs_write_multi_pages(struct compress_ctx *cc,
4339 						int *submitted,
4340 						struct writeback_control *wbc,
4341 						enum iostat_type io_type);
4342 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
4343 bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
4344 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
4345 				pgoff_t fofs, block_t blkaddr,
4346 				unsigned int llen, unsigned int c_len);
4347 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
4348 				unsigned nr_pages, sector_t *last_block_in_bio,
4349 				struct readahead_control *rac, bool for_write);
4350 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
4351 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
4352 				bool in_task);
4353 void f2fs_put_page_dic(struct page *page, bool in_task);
4354 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
4355 						unsigned int ofs_in_node);
4356 int f2fs_init_compress_ctx(struct compress_ctx *cc);
4357 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
4358 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
4359 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
4360 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
4361 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
4362 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
4363 int __init f2fs_init_compress_cache(void);
4364 void f2fs_destroy_compress_cache(void);
4365 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
4366 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
4367 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4368 						nid_t ino, block_t blkaddr);
4369 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4370 								block_t blkaddr);
4371 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
4372 #define inc_compr_inode_stat(inode)					\
4373 	do {								\
4374 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4375 		sbi->compr_new_inode++;					\
4376 	} while (0)
4377 #define add_compr_block_stat(inode, blocks)				\
4378 	do {								\
4379 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4380 		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
4381 		sbi->compr_written_block += blocks;			\
4382 		sbi->compr_saved_block += diff;				\
4383 	} while (0)
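/*
 * Editor's note: a worked example of the macro above. With a 16-block
 * cluster that compressed down to 4 blocks, add_compr_block_stat(inode, 4)
 * computes diff = 16 - 4 = 12, so compr_written_block grows by 4 and
 * compr_saved_block by 12; the second counter tracks the space saved.
 */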
4384 #else
4385 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
4386 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
4387 {
4388 	if (!f2fs_compressed_file(inode))
4389 		return true;
4390 	/* compression is not supported in this build */
4391 	return false;
4392 }
4393 static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
4394 static inline struct page *f2fs_compress_control_page(struct page *page)
4395 {
4396 	WARN_ON_ONCE(1);
4397 	return ERR_PTR(-EINVAL);
4398 }
4399 static inline int __init f2fs_init_compress_mempool(void) { return 0; }
4400 static inline void f2fs_destroy_compress_mempool(void) { }
4401 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
4402 				bool in_task) { }
4403 static inline void f2fs_end_read_compressed_page(struct page *page,
4404 				bool failed, block_t blkaddr, bool in_task)
4405 {
4406 	WARN_ON_ONCE(1);
4407 }
4408 static inline void f2fs_put_page_dic(struct page *page, bool in_task)
4409 {
4410 	WARN_ON_ONCE(1);
4411 }
4412 static inline unsigned int f2fs_cluster_blocks_are_contiguous(
4413 			struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
4414 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
4415 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
4416 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
4417 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
4418 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
4419 static inline int __init f2fs_init_compress_cache(void) { return 0; }
4420 static inline void f2fs_destroy_compress_cache(void) { }
4421 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
4422 				block_t blkaddr) { }
4423 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
4424 				struct page *page, nid_t ino, block_t blkaddr) { }
4425 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
4426 				struct page *page, block_t blkaddr) { return false; }
4427 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
4428 							nid_t ino) { }
4429 #define inc_compr_inode_stat(inode)		do { } while (0)
4430 static inline int f2fs_is_compressed_cluster(
4431 				struct inode *inode,
4432 				pgoff_t index) { return 0; }
4433 static inline bool f2fs_is_sparse_cluster(
4434 				struct inode *inode,
4435 				pgoff_t index) { return true; }
4436 static inline void f2fs_update_read_extent_tree_range_compressed(
4437 				struct inode *inode,
4438 				pgoff_t fofs, block_t blkaddr,
4439 				unsigned int llen, unsigned int c_len) { }
4440 #endif
4441 
4442 static inline int set_compress_context(struct inode *inode)
4443 {
4444 #ifdef CONFIG_F2FS_FS_COMPRESSION
4445 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4446 	struct f2fs_inode_info *fi = F2FS_I(inode);
4447 
4448 	fi->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm;
4449 	fi->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size;
4450 	fi->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ?
4451 					BIT(COMPRESS_CHKSUM) : 0;
4452 	fi->i_cluster_size = BIT(fi->i_log_cluster_size);
4453 	if ((fi->i_compress_algorithm == COMPRESS_LZ4 ||
4454 		fi->i_compress_algorithm == COMPRESS_ZSTD) &&
4455 			F2FS_OPTION(sbi).compress_level)
4456 		fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
4457 	fi->i_flags |= F2FS_COMPR_FL;
4458 	set_inode_flag(inode, FI_COMPRESSED_FILE);
4459 	stat_inc_compr_inode(inode);
4460 	inc_compr_inode_stat(inode);
4461 	f2fs_mark_inode_dirty_sync(inode, true);
4462 	return 0;
4463 #else
4464 	return -EOPNOTSUPP;
4465 #endif
4466 }
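/*
 * Editor's note: an illustrative call site, not part of f2fs.h. Because
 * set_compress_context() stamps per-inode compression parameters, a
 * hypothetical ioctl handler would gate it on the superblock feature and
 * on the file not yet owning data blocks:
 *
 *	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
 *		return -EOPNOTSUPP;
 *	if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
 *		return -EINVAL;
 *	return set_compress_context(inode);
 */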
4467 
4468 static inline bool f2fs_disable_compressed_file(struct inode *inode)
4469 {
4470 	struct f2fs_inode_info *fi = F2FS_I(inode);
4471 
4472 	f2fs_down_write(&fi->i_sem);
4473 
4474 	if (!f2fs_compressed_file(inode)) {
4475 		f2fs_up_write(&fi->i_sem);
4476 		return true;
4477 	}
4478 	if (f2fs_is_mmap_file(inode) ||
4479 		(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
4480 		f2fs_up_write(&fi->i_sem);
4481 		return false;
4482 	}
4483 
4484 	fi->i_flags &= ~F2FS_COMPR_FL;
4485 	stat_dec_compr_inode(inode);
4486 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
4487 	f2fs_mark_inode_dirty_sync(inode, true);
4488 
4489 	f2fs_up_write(&fi->i_sem);
4490 	return true;
4491 }
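/*
 * Editor's note: the guard order above matters; compression may only be
 * dropped while no compressed payload can exist, hence the bail-out for
 * mmap'ed files and for regular files that already own blocks. A caller
 * sketch (illustrative):
 *
 *	if (!f2fs_disable_compressed_file(inode))
 *		return -EINVAL;
 */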
4492 
4493 #define F2FS_FEATURE_FUNCS(name, flagname) \
4494 static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
4495 { \
4496 	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
4497 }
4498 
4499 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
4500 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
4501 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
4502 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
4503 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
4504 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
4505 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
4506 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
4507 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
4508 F2FS_FEATURE_FUNCS(verity, VERITY);
4509 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
4510 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
4511 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
4512 F2FS_FEATURE_FUNCS(readonly, RO);
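/*
 * Editor's note: each line above expands through F2FS_FEATURE_FUNCS into
 * a trivial predicate, e.g. F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) becomes
 *
 *	static inline bool f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 *
 * so callers test on-disk feature bits without touching the raw bitmap.
 */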
4513 
4514 #ifdef CONFIG_BLK_DEV_ZONED
4515 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
4516 				    block_t blkaddr)
4517 {
4518 	unsigned int zno = blkaddr / sbi->blocks_per_blkz;
4519 
4520 	return test_bit(zno, FDEV(devi).blkz_seq);
4521 }
4522 #endif
4523 
4524 static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi,
4525 				  struct block_device *bdev)
4526 {
4527 	int i;
4528 
4529 	if (!f2fs_is_multi_device(sbi))
4530 		return 0;
4531 
4532 	for (i = 0; i < sbi->s_ndevs; i++)
4533 		if (FDEV(i).bdev == bdev)
4534 			return i;
4535 
4536 	WARN_ON(1);
4537 	return -1;
4538 }
4539 
4540 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
4541 {
4542 	return f2fs_sb_has_blkzoned(sbi);
4543 }
4544 
4545 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
4546 {
4547 	return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
4548 }
4549 
4550 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
4551 {
4552 	int i;
4553 
4554 	if (!f2fs_is_multi_device(sbi))
4555 		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
4556 
4557 	for (i = 0; i < sbi->s_ndevs; i++)
4558 		if (f2fs_bdev_support_discard(FDEV(i).bdev))
4559 			return true;
4560 	return false;
4561 }
4562 
4563 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
4564 {
4565 	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
4566 					f2fs_hw_should_discard(sbi);
4567 }
4568 
4569 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
4570 {
4571 	int i;
4572 
4573 	if (!f2fs_is_multi_device(sbi))
4574 		return bdev_read_only(sbi->sb->s_bdev);
4575 
4576 	for (i = 0; i < sbi->s_ndevs; i++)
4577 		if (bdev_read_only(FDEV(i).bdev))
4578 			return true;
4579 	return false;
4580 }
4581 
4582 static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi)
4583 {
4584 	return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi);
4585 }
4586 
4587 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
4588 {
4589 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
4590 }
4591 
4592 static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi,
4593 					  block_t blkaddr)
4594 {
4595 	if (f2fs_sb_has_blkzoned(sbi)) {
4596 		int devi = f2fs_target_device_index(sbi, blkaddr);
4597 
4598 		return !bdev_is_zoned(FDEV(devi).bdev);
4599 	}
4600 	return true;
4601 }
4602 
4603 static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
4604 {
4605 	return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
4606 }
4607 
4608 static inline bool f2fs_may_compress(struct inode *inode)
4609 {
4610 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
4611 		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
4612 		f2fs_is_mmap_file(inode))
4613 		return false;
4614 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
4615 }
4616 
4617 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
4618 						u64 blocks, bool add)
4619 {
4620 	struct f2fs_inode_info *fi = F2FS_I(inode);
4621 	int diff = fi->i_cluster_size - blocks;
4622 
4623 	/* don't update i_compr_blocks if saved blocks were released */
4624 	if (!add && !atomic_read(&fi->i_compr_blocks))
4625 		return;
4626 
4627 	if (add) {
4628 		atomic_add(diff, &fi->i_compr_blocks);
4629 		stat_add_compr_blocks(inode, diff);
4630 	} else {
4631 		atomic_sub(diff, &fi->i_compr_blocks);
4632 		stat_sub_compr_blocks(inode, diff);
4633 	}
4634 	f2fs_mark_inode_dirty_sync(inode, true);
4635 }
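/*
 * Editor's note: a worked example. With i_cluster_size = 4 and a cluster
 * that compressed into a single block, blocks = 1 and diff = 3, so
 * f2fs_i_compr_blocks_update(inode, 1, true) raises i_compr_blocks by 3:
 * the counter accumulates blocks saved per cluster, not blocks written.
 */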
4636 
4637 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
4638 								int flag)
4639 {
4640 	if (!f2fs_is_multi_device(sbi))
4641 		return false;
4642 	if (flag != F2FS_GET_BLOCK_DIO)
4643 		return false;
4644 	return sbi->aligned_blksize;
4645 }
4646 
4647 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
4648 {
4649 	return fsverity_active(inode) &&
4650 	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
4651 }
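/*
 * Editor's note: a worked example. With i_size = 10000 and a 4096-byte
 * PAGE_SIZE, DIV_ROUND_UP() yields 3, so only page indexes 0..2 are
 * verified; pages wholly beyond EOF bypass fsverity even when active.
 */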
4652 
4653 #ifdef CONFIG_F2FS_FAULT_INJECTION
4654 extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
4655 							unsigned long type);
4656 #else
4657 static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
4658 					unsigned long rate, unsigned long type)
4659 {
4660 	return 0;
4661 }
4662 #endif
4663 
4664 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
4665 {
4666 #ifdef CONFIG_QUOTA
4667 	if (f2fs_sb_has_quota_ino(sbi))
4668 		return true;
4669 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
4670 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
4671 		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
4672 		return true;
4673 #endif
4674 	return false;
4675 }
4676 
4677 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
4678 {
4679 	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
4680 }
4681 
4682 static inline void f2fs_io_schedule_timeout(long timeout)
4683 {
4684 	set_current_state(TASK_UNINTERRUPTIBLE);
4685 	io_schedule_timeout(timeout);
4686 }
4687 
4688 static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
4689 				struct folio *folio, enum page_type type)
4690 {
4691 	pgoff_t ofs = folio->index;
4692 
4693 	if (unlikely(f2fs_cp_error(sbi)))
4694 		return;
4695 
4696 	if (ofs == sbi->page_eio_ofs[type]) {
4697 		if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
4698 			set_ckpt_flags(sbi, CP_ERROR_FLAG);
4699 	} else {
4700 		sbi->page_eio_ofs[type] = ofs;
4701 		sbi->page_eio_cnt[type] = 0;
4702 	}
4703 }
4704 
4705 static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
4706 {
4707 	return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
4708 }
4709 
4710 static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
4711 					block_t blkaddr, unsigned int cnt)
4712 {
4713 	bool need_submit = false;
4714 	int i = 0;
4715 
4716 	do {
4717 		struct page *page;
4718 
4719 		page = find_get_page(META_MAPPING(sbi), blkaddr + i);
4720 		if (page) {
4721 			if (folio_test_writeback(page_folio(page)))
4722 				need_submit = true;
4723 			f2fs_put_page(page, 0);
4724 		}
4725 	} while (++i < cnt && !need_submit);
4726 
4727 	if (need_submit)
4728 		f2fs_submit_merged_write_cond(sbi, sbi->meta_inode,
4729 							NULL, 0, DATA);
4730 
4731 	truncate_inode_pages_range(META_MAPPING(sbi),
4732 			F2FS_BLK_TO_BYTES((loff_t)blkaddr),
4733 			F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1)));
4734 }
4735 
4736 static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
4737 								block_t blkaddr)
4738 {
4739 	f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
4740 	f2fs_invalidate_compress_page(sbi, blkaddr);
4741 }
4742 
4743 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
4744 #define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
4745 
4746 #endif /* _LINUX_F2FS_H */
4747