/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif
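
/*
 * Example (illustrative, not part of the original header): a typical
 * f2fs_bug_on() call site.  With CONFIG_F2FS_CHECK_FS the condition BUGs
 * immediately; production builds only warn and flag the image for fsck
 * via SBI_NEED_FSCK:
 *
 *	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
 */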

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif
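
/*
 * Illustrative sketch (not part of the original header) of how the fault
 * fields above are typically consumed, modeled on f2fs's time_to_inject():
 * every inject_rate-th tracked operation of an enabled fault type reports
 * a synthetic failure.  The function name here is hypothetical.
 */
#if 0	/* example only, not compiled */
static inline bool f2fs_should_inject(struct f2fs_fault_info *ffi, int type)
{
	if (!ffi->inject_rate || !IS_FAULT_SET(ffi, type))
		return false;

	/* fail on purpose once every inject_rate tracked operations */
	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#endif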

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define	F2FS_MOUNT_GC_MERGE		0x20000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
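
/*
 * Example (illustrative): the helpers above expand the F2FS_MOUNT_ prefix
 * themselves, so callers pass only the option suffix:
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, INLINE_DATA))
 *		clear_opt(sbi, INLINE_DATA);
 */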

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
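
/*
 * Worked example (illustrative): the signed subtraction makes ver_after()
 * safe across u64 wraparound.  With a == 1 and b == ULLONG_MAX,
 * (long long)(a - b) == 2 > 0, so ver_after(a, b) is true even though
 * a < b numerically.
 */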

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/*
					 * Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
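
/*
 * Example (illustrative): the feature word lives in the raw superblock as
 * __le32, so the helpers byte-swap the mask rather than the stored field:
 *
 *	if (!F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		return -EOPNOTSUPP;
 */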

/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
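
/*
 * Worked example (illustrative): plist_idx() maps a discard length in
 * blocks to a pending-list index, clamping long candidates into the last
 * list: plist_idx(1) == 0, plist_idx(16) == 15, and any length >=
 * MAX_PLIST_NUM falls into list 511.
 */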

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity of discards issued regardless of I/O idleness */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
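
/*
 * Illustrative sketch (not part of the original header): a caller checks
 * for journal space before consuming a slot, as the NAT flush path does;
 * "nid" stands in for a caller-provided node id.
 */
#if 0	/* example only, not compiled */
	if (!__has_cursum_space(journal, 1, NAT_JOURNAL))
		return -ENOSPC;
	i = update_nats_in_cursum(journal, 1);	/* returns the old count */
	nid_in_journal(journal, i) = cpu_to_le32(nid);
#endif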

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) *	\
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
				DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) -	\
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) *	\
				NR_INLINE_DENTRY(inode) +		\
				INLINE_DENTRY_BITMAP_SIZE(inode)))
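
/*
 * Worked example (illustrative, assuming the common defaults defined
 * elsewhere in f2fs: 4KB blocks, 923 block addresses per inode, and an
 * inline xattr area of 50 __le32 slots):
 *
 *	MAX_INLINE_DATA           = 4 * (923 - 50 - 1)            = 3488 bytes
 *	NR_INLINE_DENTRY          = 3488 * 8 / ((11 + 8) * 8 + 1) = 182
 *	INLINE_DENTRY_BITMAP_SIZE = DIV_ROUND_UP(182, 8)          = 23 bytes
 *	INLINE_RESERVED_SIZE      = 3488 - (19 * 182 + 23)        = 7 bytes
 */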

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or if
	 * the filesystem is doing an internal operation where usr_fname is also
	 * NULL.  In all these cases we fall back to treating the name as an
	 * opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}

/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bits key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_MAX,			/* max flag, never be used */
};
struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
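
/*
 * Worked example (illustrative): two extents merge only when they are
 * contiguous in both file offset and block address.  back = {fofs 0,
 * len 4, blk 100} and front = {fofs 4, len 2, blk 104} satisfy
 * __is_extent_mergeable(); the merged extent {fofs 0, len 6, blk 100}
 * then replaces et->largest via __try_update_largest_extent() if 6
 * exceeds the current largest length.
 */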

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of the function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
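
/*
 * Illustrative sketch (not part of the original header): the common
 * calling pattern.  Passing nid == 0 lets the lookup derive the node id
 * from the file offset; f2fs_get_dnode_of_data() and f2fs_put_dnode()
 * are declared later in the full header.
 */
#if 0	/* example only, not compiled */
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return err;
	blkaddr = dn.data_blkaddr;
	f2fs_put_dnode(&dn);
#endif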

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA = 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates in async mode.
 * NODE			Node pages. It operates in async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before CoW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,		/* for dirty dir inode */
	FILE_INODE,		/* for dirty regular/symlink inode */
	DIRTY_META,		/* for all dirtied inode metadata */
	ATOMIC_FILE,		/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

/*
 * this value is set in a page's private data to indicate that the page is
 * atomically written and is in the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == DUMMY_WRITTEN_PAGE)

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages.  It is decremented by 1 each time a page
	 * has been read (or failed to be read).  When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0.  In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion.  This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio.  These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
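
/*
 * Worked example (illustrative, assuming 4KB pages): a cluster spans
 * 1 << log_cluster_size pages, so MIN_COMPRESS_LOG_SIZE (2) gives a
 * 4-page/16KB compression window and MAX_COMPRESS_LOG_SIZE (8) a
 * 256-page/1MB one.  The on-disk payload is prefixed by
 * COMPRESS_HEADER_SIZE = 4 + 4 + 4 * 4 = 24 bytes of struct compress_data.
 */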
1414
1415 struct f2fs_sb_info {
1416 struct super_block *sb; /* pointer to VFS super block */
1417 struct proc_dir_entry *s_proc; /* proc entry */
1418 struct f2fs_super_block *raw_super; /* raw super block pointer */
1419 struct rw_semaphore sb_lock; /* lock for raw super block */
1420 int valid_super_block; /* valid super block no */
1421 unsigned long s_flag; /* flags for sbi */
1422 struct mutex writepages; /* mutex for writepages() */
1423
1424 #ifdef CONFIG_BLK_DEV_ZONED
1425 unsigned int blocks_per_blkz; /* F2FS blocks per zone */
1426 unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */
1427 #endif
1428
1429 /* for node-related operations */
1430 struct f2fs_nm_info *nm_info; /* node manager */
1431 struct inode *node_inode; /* cache node blocks */
1432
1433 /* for segment-related operations */
1434 struct f2fs_sm_info *sm_info; /* segment manager */
1435
1436 /* for bio operations */
1437 struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
1438 /* keep migration IO order for LFS mode */
1439 struct rw_semaphore io_order_lock;
1440 mempool_t *write_io_dummy; /* Dummy pages */
1441
1442 /* for checkpoint */
1443 struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
1444 int cur_cp_pack; /* remain current cp pack */
1445 spinlock_t cp_lock; /* for flag in ckpt */
1446 struct inode *meta_inode; /* cache meta blocks */
1447 struct rw_semaphore cp_global_sem; /* checkpoint procedure lock */
1448 struct rw_semaphore cp_rwsem; /* blocking FS operations */
1449 struct rw_semaphore node_write; /* locking node writes */
1450 struct rw_semaphore node_change; /* locking node change */
1451 wait_queue_head_t cp_wait;
1452 unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
1453 long interval_time[MAX_TIME]; /* to store thresholds */
1454 struct ckpt_req_control cprc_info; /* for checkpoint request control */
1455
1456 struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
1457
1458 spinlock_t fsync_node_lock; /* for node entry lock */
1459 struct list_head fsync_node_list; /* node list head */
1460 unsigned int fsync_seg_id; /* sequence id */
1461 unsigned int fsync_node_num; /* number of node entries */
1462
1463 /* for orphan inode, use 0'th array */
1464 unsigned int max_orphans; /* max orphan inodes */
1465
1466 /* for inode management */
1467 struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
1468 spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
1469 struct mutex flush_lock; /* for flush exclusion */
1470
1471 /* for extent tree cache */
1472 struct radix_tree_root extent_tree_root;/* cache extent cache entries */
1473 struct mutex extent_tree_lock; /* locking extent radix tree */
1474 struct list_head extent_list; /* lru list for shrinker */
1475 spinlock_t extent_lock; /* locking extent lru list */
1476 atomic_t total_ext_tree; /* extent tree count */
1477 struct list_head zombie_list; /* extent zombie tree list */
1478 atomic_t total_zombie_tree; /* extent zombie tree count */
1479 atomic_t total_ext_node; /* extent info count */
1480
1481 /* basic filesystem units */
1482 unsigned int log_sectors_per_block; /* log2 sectors per block */
1483 unsigned int log_blocksize; /* log2 block size */
1484 unsigned int blocksize; /* block size */
1485 unsigned int root_ino_num; /* root inode number*/
1486 unsigned int node_ino_num; /* node inode number*/
1487 unsigned int meta_ino_num; /* meta inode number*/
1488 unsigned int log_blocks_per_seg; /* log2 blocks per segment */
1489 unsigned int blocks_per_seg; /* blocks per segment */
1490 unsigned int segs_per_sec; /* segments per section */
1491 unsigned int secs_per_zone; /* sections per zone */
1492 unsigned int total_sections; /* total section count */
1493 unsigned int total_node_count; /* total node block count */
1494 unsigned int total_valid_node_count; /* valid node block count */
1495 int dir_level; /* directory level */
1496 int readdir_ra; /* readahead inode in readdir */
1497 u64 max_io_bytes; /* max io bytes to merge IOs */
1498
1499 block_t user_block_count; /* # of user blocks */
1500 block_t total_valid_block_count; /* # of valid blocks */
1501 block_t discard_blks; /* discard command candidats */
1502 block_t last_valid_block_count; /* for recovery */
1503 block_t reserved_blocks; /* configurable reserved blocks */
1504 block_t current_reserved_blocks; /* current reserved blocks */
1505
1506 /* Additional tracking for no checkpoint mode */
1507 block_t unusable_block_count; /* # of blocks saved by last cp */
1508
1509 unsigned int nquota_files; /* # of quota sysfiles */
1510 struct rw_semaphore quota_sem; /* blocking cp for flags */
1511
1512 /* # of pages, see count_type */
1513 atomic_t nr_pages[NR_COUNT_TYPE];
1514 /* # of allocated blocks */
1515 struct percpu_counter alloc_valid_block_count;
1516
1517 /* writeback control */
1518 atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */
1519
1520 /* valid inode count */
1521 struct percpu_counter total_valid_inode_count;
1522
1523 struct f2fs_mount_info mount_opt; /* mount options */
1524
1525 /* for cleaning operations */
1526 struct rw_semaphore gc_lock; /*
1527 * semaphore for GC, avoid
1528 * race between GC and GC or CP
1529 */
1530 struct f2fs_gc_kthread *gc_thread; /* GC thread */
1531 struct atgc_management am; /* atgc management */
1532 unsigned int cur_victim_sec; /* current victim section num */
1533 unsigned int gc_mode; /* current GC state */
1534 unsigned int next_victim_seg[2]; /* next segment in victim section */
1535
1536 /* for skip statistic */
1537 unsigned int atomic_files; /* # of opened atomic file */
1538 unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
1539 unsigned long long skipped_gc_rwsem; /* FG_GC only */
1540
1541 /* threshold for gc trials on pinned files */
1542 u64 gc_pin_file_threshold;
1543 struct rw_semaphore pin_sem;
1544
1545 /* maximum # of trials to find a victim segment for SSR and GC */
1546 unsigned int max_victim_search;
1547 /* migration granularity of garbage collection, unit: segment */
1548 unsigned int migration_granularity;
1549
1550 /*
1551 * For stat information.
1552 * In the [2] arrays below, one slot is for LFS mode and the other for SSR mode.
1553 */
1554 #ifdef CONFIG_F2FS_STAT_FS
1555 struct f2fs_stat_info *stat_info; /* FS status information */
1556 atomic_t meta_count[META_MAX]; /* # of meta blocks */
1557 unsigned int segment_count[2]; /* # of allocated segments */
1558 unsigned int block_count[2]; /* # of allocated blocks */
1559 atomic_t inplace_count; /* # of inplace update */
1560 atomic64_t total_hit_ext; /* # of lookup extent cache */
1561 atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */
1562 atomic64_t read_hit_largest; /* # of hit largest extent node */
1563 atomic64_t read_hit_cached; /* # of hit cached extent node */
1564 atomic_t inline_xattr; /* # of inline_xattr inodes */
1565 atomic_t inline_inode; /* # of inline_data inodes */
1566 atomic_t inline_dir; /* # of inline_dentry inodes */
1567 atomic_t compr_inode; /* # of compressed inodes */
1568 atomic64_t compr_blocks; /* # of compressed blocks */
1569 atomic_t vw_cnt; /* # of volatile writes */
1570 atomic_t max_aw_cnt; /* max # of atomic writes */
1571 atomic_t max_vw_cnt; /* max # of volatile writes */
1572 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
1573 unsigned int other_skip_bggc; /* skip background gc for other reasons */
1574 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
1575 #endif
1576 spinlock_t stat_lock; /* lock for stat operations */
1577
1578 /* For app/fs IO statistics */
1579 spinlock_t iostat_lock;
1580 unsigned long long rw_iostat[NR_IO_TYPE];
1581 unsigned long long prev_rw_iostat[NR_IO_TYPE];
1582 bool iostat_enable;
1583 unsigned long iostat_next_period;
1584 unsigned int iostat_period_ms;
1585
1586 /* to attach REQ_META|REQ_FUA flags */
1587 unsigned int data_io_flag;
1588 unsigned int node_io_flag;
1589
1590 /* For sysfs support */
1591 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */
1592 struct completion s_kobj_unregister;
1593
1594 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */
1595 struct completion s_stat_kobj_unregister;
1596
1597 /* For shrinker support */
1598 struct list_head s_list;
1599 int s_ndevs; /* number of devices */
1600 struct f2fs_dev_info *devs; /* for device list */
1601 unsigned int dirty_device; /* for checkpoint data flush */
1602 spinlock_t dev_lock; /* protect dirty_device */
1603 struct mutex umount_mutex;
1604 unsigned int shrinker_run_no;
1605
1606 /* For write statistics */
1607 u64 sectors_written_start;
1608 u64 kbytes_written;
1609
1610 /* Reference to checksum algorithm driver via cryptoapi */
1611 struct crypto_shash *s_chksum_driver;
1612
1613 /* Precomputed FS UUID checksum for seeding other checksums */
1614 __u32 s_chksum_seed;
1615
1616 struct workqueue_struct *post_read_wq; /* post read workqueue */
1617
1618 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
1619 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
1620
1621 #ifdef CONFIG_F2FS_FS_COMPRESSION
1622 struct kmem_cache *page_array_slab; /* page array entry */
1623 unsigned int page_array_slab_size; /* default page array slab size */
1624
1625 /* For runtime compression statistics */
1626 u64 compr_written_block;
1627 u64 compr_saved_block;
1628 u32 compr_new_inode;
1629 #endif
1630 };
1631
1632 struct f2fs_private_dio {
1633 struct inode *inode;
1634 void *orig_private;
1635 bio_end_io_t *orig_end_io;
1636 bool write;
1637 };
1638
1639 #ifdef CONFIG_F2FS_FAULT_INJECTION
1640 #define f2fs_show_injection_info(sbi, type) \
1641 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \
1642 KERN_INFO, sbi->sb->s_id, \
1643 f2fs_fault_name[type], \
1644 __func__, __builtin_return_address(0))
1645 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1646 {
1647 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1648
1649 if (!ffi->inject_rate)
1650 return false;
1651
1652 if (!IS_FAULT_SET(ffi, type))
1653 return false;
1654
1655 atomic_inc(&ffi->inject_ops);
1656 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1657 atomic_set(&ffi->inject_ops, 0);
1658 return true;
1659 }
1660 return false;
1661 }
1662 #else
1663 #define f2fs_show_injection_info(sbi, type) do { } while (0)
1664 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1665 {
1666 return false;
1667 }
1668 #endif
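/*
 * Example (illustrative): a fault-injection site pairs the two helpers
 * above, as f2fs_kmalloc() does further down in this file:
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 */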
1669
1670 /*
1671 * Test if the mounted volume is a multi-device volume.
1672 * - For a single regular disk volume, sbi->s_ndevs is 0.
1673 * - For a single zoned disk volume, sbi->s_ndevs is 1.
1674 * - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1675 */
1676 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1677 {
1678 return sbi->s_ndevs > 1;
1679 }
1680
1681 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1682 {
1683 unsigned long now = jiffies;
1684
1685 sbi->last_time[type] = now;
1686
1687 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1688 if (type == REQ_TIME) {
1689 sbi->last_time[DISCARD_TIME] = now;
1690 sbi->last_time[GC_TIME] = now;
1691 }
1692 }
1693
1694 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1695 {
1696 unsigned long interval = sbi->interval_time[type] * HZ;
1697
1698 return time_after(jiffies, sbi->last_time[type] + interval);
1699 }
1700
1701 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1702 int type)
1703 {
1704 unsigned long interval = sbi->interval_time[type] * HZ;
1705 unsigned int wait_ms = 0;
1706 long delta;
1707
1708 delta = (sbi->last_time[type] + interval) - jiffies;
1709 if (delta > 0)
1710 wait_ms = jiffies_to_msecs(delta);
1711
1712 return wait_ms;
1713 }
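/*
 * Worked example (illustrative): interval_time[] is kept in seconds, so
 * with interval_time[GC_TIME] == 60, f2fs_time_over(sbi, GC_TIME) turns
 * true 60 * HZ jiffies after the last f2fs_update_time(sbi, REQ_TIME),
 * and f2fs_time_to_wait() reports whatever remains of that in ms.
 */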
1714
1715 /*
1716 * Inline functions
1717 */
1718 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1719 const void *address, unsigned int length)
1720 {
1721 struct {
1722 struct shash_desc shash;
1723 char ctx[4];
1724 } desc;
1725 int err;
1726
1727 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1728
1729 desc.shash.tfm = sbi->s_chksum_driver;
1730 *(u32 *)desc.ctx = crc;
1731
1732 err = crypto_shash_update(&desc.shash, address, length);
1733 BUG_ON(err);
1734
1735 return *(u32 *)desc.ctx;
1736 }
1737
1738 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1739 unsigned int length)
1740 {
1741 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1742 }
1743
1744 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1745 void *buf, size_t buf_size)
1746 {
1747 return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1748 }
1749
1750 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1751 const void *address, unsigned int length)
1752 {
1753 return __f2fs_crc32(sbi, crc, address, length);
1754 }
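/*
 * Example (illustrative): checksumming a block-sized buffer and checking
 * it later; f2fs_crc32() seeds the crc with F2FS_SUPER_MAGIC:
 *
 *	u32 crc = f2fs_crc32(sbi, blk, F2FS_BLKSIZE - sizeof(__le32));
 *	...
 *	if (!f2fs_crc_valid(sbi, crc, blk, F2FS_BLKSIZE - sizeof(__le32)))
 *		return -EFSCORRUPTED;
 */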
1755
1756 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1757 {
1758 return container_of(inode, struct f2fs_inode_info, vfs_inode);
1759 }
1760
1761 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1762 {
1763 return sb->s_fs_info;
1764 }
1765
1766 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1767 {
1768 return F2FS_SB(inode->i_sb);
1769 }
1770
1771 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1772 {
1773 return F2FS_I_SB(mapping->host);
1774 }
1775
1776 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1777 {
1778 return F2FS_M_SB(page_file_mapping(page));
1779 }
1780
1781 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1782 {
1783 return (struct f2fs_super_block *)(sbi->raw_super);
1784 }
1785
1786 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1787 {
1788 return (struct f2fs_checkpoint *)(sbi->ckpt);
1789 }
1790
1791 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1792 {
1793 return (struct f2fs_node *)page_address(page);
1794 }
1795
1796 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1797 {
1798 return &((struct f2fs_node *)page_address(page))->i;
1799 }
1800
1801 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
1802 {
1803 return (struct f2fs_nm_info *)(sbi->nm_info);
1804 }
1805
1806 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
1807 {
1808 return (struct f2fs_sm_info *)(sbi->sm_info);
1809 }
1810
1811 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
1812 {
1813 return (struct sit_info *)(SM_I(sbi)->sit_info);
1814 }
1815
1816 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
1817 {
1818 return (struct free_segmap_info *)(SM_I(sbi)->free_info);
1819 }
1820
1821 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
1822 {
1823 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
1824 }
1825
1826 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
1827 {
1828 return sbi->meta_inode->i_mapping;
1829 }
1830
1831 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
1832 {
1833 return sbi->node_inode->i_mapping;
1834 }
1835
1836 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
1837 {
1838 return test_bit(type, &sbi->s_flag);
1839 }
1840
1841 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1842 {
1843 set_bit(type, &sbi->s_flag);
1844 }
1845
1846 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1847 {
1848 clear_bit(type, &sbi->s_flag);
1849 }
1850
1851 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
1852 {
1853 return le64_to_cpu(cp->checkpoint_ver);
1854 }
1855
1856 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
1857 {
1858 if (type < F2FS_MAX_QUOTAS)
1859 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
1860 return 0;
1861 }
1862
1863 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
1864 {
1865 size_t crc_offset = le32_to_cpu(cp->checksum_offset);
1866 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
1867 }
1868
1869 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1870 {
1871 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1872
1873 return ckpt_flags & f;
1874 }
1875
1876 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1877 {
1878 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
1879 }
1880
1881 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1882 {
1883 unsigned int ckpt_flags;
1884
1885 ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1886 ckpt_flags |= f;
1887 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1888 }
1889
1890 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1891 {
1892 unsigned long flags;
1893
1894 spin_lock_irqsave(&sbi->cp_lock, flags);
1895 __set_ckpt_flags(F2FS_CKPT(sbi), f);
1896 spin_unlock_irqrestore(&sbi->cp_lock, flags);
1897 }
1898
1899 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1900 {
1901 unsigned int ckpt_flags;
1902
1903 ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1904 ckpt_flags &= (~f);
1905 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1906 }
1907
1908 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1909 {
1910 unsigned long flags;
1911
1912 spin_lock_irqsave(&sbi->cp_lock, flags);
1913 __clear_ckpt_flags(F2FS_CKPT(sbi), f);
1914 spin_unlock_irqrestore(&sbi->cp_lock, flags);
1915 }
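/*
 * Example (illustrative): callers normally use the locked wrappers, e.g.
 *
 *	set_ckpt_flags(sbi, CP_ERROR_FLAG);
 *
 * The __set_ckpt_flags()/__clear_ckpt_flags() variants are only for
 * contexts already holding sbi->cp_lock, as in disable_nat_bits() below.
 */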
1916
1917 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
1918 {
1919 unsigned long flags;
1920 unsigned char *nat_bits;
1921
1922 /*
1923 * In order to re-enable nat_bits we would need to request fsck.f2fs via
1924 * set_sbi_flag(sbi, SBI_NEED_FSCK), but that is costly, so rely on a
1925 * regular fsck or an unclean shutdown instead.
1926 */
1927
1928 if (lock)
1929 spin_lock_irqsave(&sbi->cp_lock, flags);
1930 __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
1931 nat_bits = NM_I(sbi)->nat_bits;
1932 NM_I(sbi)->nat_bits = NULL;
1933 if (lock)
1934 spin_unlock_irqrestore(&sbi->cp_lock, flags);
1935
1936 kvfree(nat_bits);
1937 }
1938
1939 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
1940 struct cp_control *cpc)
1941 {
1942 bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
1943
1944 return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
1945 }
1946
1947 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
1948 {
1949 down_read(&sbi->cp_rwsem);
1950 }
1951
1952 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
1953 {
1954 return down_read_trylock(&sbi->cp_rwsem);
1955 }
1956
1957 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
1958 {
1959 up_read(&sbi->cp_rwsem);
1960 }
1961
1962 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
1963 {
1964 down_write(&sbi->cp_rwsem);
1965 }
1966
1967 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
1968 {
1969 up_write(&sbi->cp_rwsem);
1970 }
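/*
 * Example (illustrative): f2fs_lock_op()/f2fs_unlock_op() take cp_rwsem
 * shared so a block-allocating operation stays atomic with respect to
 * checkpoint, which takes it exclusively via f2fs_lock_all():
 *
 *	f2fs_lock_op(sbi);
 *	err = f2fs_reserve_new_block(&dn);
 *	f2fs_unlock_op(sbi);
 */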
1971
1972 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
1973 {
1974 int reason = CP_SYNC;
1975
1976 if (test_opt(sbi, FASTBOOT))
1977 reason = CP_FASTBOOT;
1978 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
1979 reason = CP_UMOUNT;
1980 return reason;
1981 }
1982
1983 static inline bool __remain_node_summaries(int reason)
1984 {
1985 return (reason & (CP_UMOUNT | CP_FASTBOOT));
1986 }
1987
1988 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
1989 {
1990 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
1991 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
1992 }
1993
1994 /*
1995 * Check whether the inode has blocks or not
1996 */
1997 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
1998 {
1999 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2000
2001 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
2002 }
2003
2004 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2005 {
2006 return ofs == XATTR_NODE_OFFSET;
2007 }
2008
2009 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2010 struct inode *inode, bool cap)
2011 {
2012 if (!inode)
2013 return true;
2014 if (!test_opt(sbi, RESERVE_ROOT))
2015 return false;
2016 if (IS_NOQUOTA(inode))
2017 return true;
2018 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2019 return true;
2020 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2021 in_group_p(F2FS_OPTION(sbi).s_resgid))
2022 return true;
2023 if (cap && capable(CAP_SYS_RESOURCE))
2024 return true;
2025 return false;
2026 }
2027
2028 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2029 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2030 struct inode *inode, blkcnt_t *count)
2031 {
2032 blkcnt_t diff = 0, release = 0;
2033 block_t avail_user_block_count;
2034 int ret;
2035
2036 ret = dquot_reserve_block(inode, *count);
2037 if (ret)
2038 return ret;
2039
2040 if (time_to_inject(sbi, FAULT_BLOCK)) {
2041 f2fs_show_injection_info(sbi, FAULT_BLOCK);
2042 release = *count;
2043 goto release_quota;
2044 }
2045
2046 /*
2047 * Increase this counter prior to the actual block count change so that
2048 * f2fs_sync_file avoids data races when deciding on a checkpoint.
2049 */
2050 percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2051
2052 spin_lock(&sbi->stat_lock);
2053 sbi->total_valid_block_count += (block_t)(*count);
2054 avail_user_block_count = sbi->user_block_count -
2055 sbi->current_reserved_blocks;
2056
2057 if (!__allow_reserved_blocks(sbi, inode, true))
2058 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2059 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2060 if (avail_user_block_count > sbi->unusable_block_count)
2061 avail_user_block_count -= sbi->unusable_block_count;
2062 else
2063 avail_user_block_count = 0;
2064 }
2065 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
2066 diff = sbi->total_valid_block_count - avail_user_block_count;
2067 if (diff > *count)
2068 diff = *count;
2069 *count -= diff;
2070 release = diff;
2071 sbi->total_valid_block_count -= diff;
2072 if (!*count) {
2073 spin_unlock(&sbi->stat_lock);
2074 goto enospc;
2075 }
2076 }
2077 spin_unlock(&sbi->stat_lock);
2078
2079 if (unlikely(release)) {
2080 percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2081 dquot_release_reservation_block(inode, release);
2082 }
2083 f2fs_i_blocks_write(inode, *count, true, true);
2084 return 0;
2085
2086 enospc:
2087 percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2088 release_quota:
2089 dquot_release_reservation_block(inode, release);
2090 return -ENOSPC;
2091 }
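/*
 * Note on the quota flow above: dquot_reserve_block() only reserves; on
 * success, f2fs_i_blocks_write(inode, *count, true, true) converts the
 * reservation into a claimed allocation, while both failure paths hand
 * the unused reservation back via dquot_release_reservation_block().
 */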
2092
2093 __printf(2, 3)
2094 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
2095
2096 #define f2fs_err(sbi, fmt, ...) \
2097 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
2098 #define f2fs_warn(sbi, fmt, ...) \
2099 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
2100 #define f2fs_notice(sbi, fmt, ...) \
2101 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
2102 #define f2fs_info(sbi, fmt, ...) \
2103 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
2104 #define f2fs_debug(sbi, fmt, ...) \
2105 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
2106
2107 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2108 struct inode *inode,
2109 block_t count)
2110 {
2111 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2112
2113 spin_lock(&sbi->stat_lock);
2114 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2115 sbi->total_valid_block_count -= (block_t)count;
2116 if (sbi->reserved_blocks &&
2117 sbi->current_reserved_blocks < sbi->reserved_blocks)
2118 sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2119 sbi->current_reserved_blocks + count);
2120 spin_unlock(&sbi->stat_lock);
2121 if (unlikely(inode->i_blocks < sectors)) {
2122 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2123 inode->i_ino,
2124 (unsigned long long)inode->i_blocks,
2125 (unsigned long long)sectors);
2126 set_sbi_flag(sbi, SBI_NEED_FSCK);
2127 return;
2128 }
2129 f2fs_i_blocks_write(inode, count, false, true);
2130 }
2131
2132 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2133 {
2134 atomic_inc(&sbi->nr_pages[count_type]);
2135
2136 if (count_type == F2FS_DIRTY_DENTS ||
2137 count_type == F2FS_DIRTY_NODES ||
2138 count_type == F2FS_DIRTY_META ||
2139 count_type == F2FS_DIRTY_QDATA ||
2140 count_type == F2FS_DIRTY_IMETA)
2141 set_sbi_flag(sbi, SBI_IS_DIRTY);
2142 }
2143
2144 static inline void inode_inc_dirty_pages(struct inode *inode)
2145 {
2146 atomic_inc(&F2FS_I(inode)->dirty_pages);
2147 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2148 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2149 if (IS_NOQUOTA(inode))
2150 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2151 }
2152
2153 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2154 {
2155 atomic_dec(&sbi->nr_pages[count_type]);
2156 }
2157
2158 static inline void inode_dec_dirty_pages(struct inode *inode)
2159 {
2160 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2161 !S_ISLNK(inode->i_mode))
2162 return;
2163
2164 atomic_dec(&F2FS_I(inode)->dirty_pages);
2165 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2166 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2167 if (IS_NOQUOTA(inode))
2168 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2169 }
2170
2171 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2172 {
2173 return atomic_read(&sbi->nr_pages[count_type]);
2174 }
2175
2176 static inline int get_dirty_pages(struct inode *inode)
2177 {
2178 return atomic_read(&F2FS_I(inode)->dirty_pages);
2179 }
2180
2181 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2182 {
2183 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
2184 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
2185 sbi->log_blocks_per_seg;
2186
2187 return segs / sbi->segs_per_sec;
2188 }
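/*
 * Worked example (illustrative): with 512 blocks per segment (log2 == 9)
 * and one segment per section, get_pages() == 5000 rounds up to
 * (5000 + 511) >> 9 == 10 segments, i.e. 10 sections.
 */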
2189
2190 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2191 {
2192 return sbi->total_valid_block_count;
2193 }
2194
2195 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2196 {
2197 return sbi->discard_blks;
2198 }
2199
2200 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2201 {
2202 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2203
2204 /* return NAT or SIT bitmap */
2205 if (flag == NAT_BITMAP)
2206 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2207 else if (flag == SIT_BITMAP)
2208 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2209
2210 return 0;
2211 }
2212
2213 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2214 {
2215 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2216 }
2217
2218 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2219 {
2220 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2221 void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2222 int offset;
2223
2224 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2225 offset = (flag == SIT_BITMAP) ?
2226 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2227 /*
2228 * If the large_nat_bitmap feature is enabled, all nat/sit bitmaps
2229 * are checksum-protected, so skip past the checksum field.
2230 */
2231 return tmp_ptr + offset + sizeof(__le32);
2232 }
2233
2234 if (__cp_payload(sbi) > 0) {
2235 if (flag == NAT_BITMAP)
2236 return &ckpt->sit_nat_version_bitmap;
2237 else
2238 return (unsigned char *)ckpt + F2FS_BLKSIZE;
2239 } else {
2240 offset = (flag == NAT_BITMAP) ?
2241 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2242 return tmp_ptr + offset;
2243 }
2244 }
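/*
 * Illustrative layout of the three cases above, assuming the checksum
 * field sits at the front in the large_nat_bitmap case:
 *
 *	large_nat_bitmap:  | crc | NAT bitmap | SIT bitmap |
 *	cp_payload > 0:    NAT bitmap in the first cp block, SIT bitmap
 *	                   from the next block (ckpt + F2FS_BLKSIZE)
 *	otherwise:         | SIT bitmap | NAT bitmap | in the shared area
 */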
2245
2246 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2247 {
2248 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2249
2250 if (sbi->cur_cp_pack == 2)
2251 start_addr += sbi->blocks_per_seg;
2252 return start_addr;
2253 }
2254
2255 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2256 {
2257 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2258
2259 if (sbi->cur_cp_pack == 1)
2260 start_addr += sbi->blocks_per_seg;
2261 return start_addr;
2262 }
2263
2264 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2265 {
2266 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2267 }
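/*
 * Illustrative: the two checkpoint packs ping-pong between the first two
 * segments of the cp area; with cur_cp_pack == 1 the live pack starts at
 * cp_blkaddr and the next one is written at cp_blkaddr + blocks_per_seg,
 * and vice versa.
 */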
2268
2269 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2270 {
2271 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2272 }
2273
2274 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2275 struct inode *inode, bool is_inode)
2276 {
2277 block_t valid_block_count;
2278 unsigned int valid_node_count, user_block_count;
2279 int err;
2280
2281 if (is_inode) {
2282 if (inode) {
2283 err = dquot_alloc_inode(inode);
2284 if (err)
2285 return err;
2286 }
2287 } else {
2288 err = dquot_reserve_block(inode, 1);
2289 if (err)
2290 return err;
2291 }
2292
2293 if (time_to_inject(sbi, FAULT_BLOCK)) {
2294 f2fs_show_injection_info(sbi, FAULT_BLOCK);
2295 goto enospc;
2296 }
2297
2298 spin_lock(&sbi->stat_lock);
2299
2300 valid_block_count = sbi->total_valid_block_count +
2301 sbi->current_reserved_blocks + 1;
2302
2303 if (!__allow_reserved_blocks(sbi, inode, false))
2304 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2305 user_block_count = sbi->user_block_count;
2306 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2307 user_block_count -= sbi->unusable_block_count;
2308
2309 if (unlikely(valid_block_count > user_block_count)) {
2310 spin_unlock(&sbi->stat_lock);
2311 goto enospc;
2312 }
2313
2314 valid_node_count = sbi->total_valid_node_count + 1;
2315 if (unlikely(valid_node_count > sbi->total_node_count)) {
2316 spin_unlock(&sbi->stat_lock);
2317 goto enospc;
2318 }
2319
2320 sbi->total_valid_node_count++;
2321 sbi->total_valid_block_count++;
2322 spin_unlock(&sbi->stat_lock);
2323
2324 if (inode) {
2325 if (is_inode)
2326 f2fs_mark_inode_dirty_sync(inode, true);
2327 else
2328 f2fs_i_blocks_write(inode, 1, true, true);
2329 }
2330
2331 percpu_counter_inc(&sbi->alloc_valid_block_count);
2332 return 0;
2333
2334 enospc:
2335 if (is_inode) {
2336 if (inode)
2337 dquot_free_inode(inode);
2338 } else {
2339 dquot_release_reservation_block(inode, 1);
2340 }
2341 return -ENOSPC;
2342 }
2343
2344 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2345 struct inode *inode, bool is_inode)
2346 {
2347 spin_lock(&sbi->stat_lock);
2348
2349 f2fs_bug_on(sbi, !sbi->total_valid_block_count);
2350 f2fs_bug_on(sbi, !sbi->total_valid_node_count);
2351
2352 sbi->total_valid_node_count--;
2353 sbi->total_valid_block_count--;
2354 if (sbi->reserved_blocks &&
2355 sbi->current_reserved_blocks < sbi->reserved_blocks)
2356 sbi->current_reserved_blocks++;
2357
2358 spin_unlock(&sbi->stat_lock);
2359
2360 if (is_inode) {
2361 dquot_free_inode(inode);
2362 } else {
2363 if (unlikely(inode->i_blocks == 0)) {
2364 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2365 inode->i_ino,
2366 (unsigned long long)inode->i_blocks);
2367 set_sbi_flag(sbi, SBI_NEED_FSCK);
2368 return;
2369 }
2370 f2fs_i_blocks_write(inode, 1, false, true);
2371 }
2372 }
2373
2374 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2375 {
2376 return sbi->total_valid_node_count;
2377 }
2378
2379 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2380 {
2381 percpu_counter_inc(&sbi->total_valid_inode_count);
2382 }
2383
2384 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2385 {
2386 percpu_counter_dec(&sbi->total_valid_inode_count);
2387 }
2388
2389 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2390 {
2391 return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2392 }
2393
2394 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2395 pgoff_t index, bool for_write)
2396 {
2397 struct page *page;
2398
2399 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2400 if (!for_write)
2401 page = find_get_page_flags(mapping, index,
2402 FGP_LOCK | FGP_ACCESSED);
2403 else
2404 page = find_lock_page(mapping, index);
2405 if (page)
2406 return page;
2407
2408 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
2409 f2fs_show_injection_info(F2FS_M_SB(mapping),
2410 FAULT_PAGE_ALLOC);
2411 return NULL;
2412 }
2413 }
2414
2415 if (!for_write)
2416 return grab_cache_page(mapping, index);
2417 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
2418 }
2419
2420 static inline struct page *f2fs_pagecache_get_page(
2421 struct address_space *mapping, pgoff_t index,
2422 int fgp_flags, gfp_t gfp_mask)
2423 {
2424 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
2425 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
2426 return NULL;
2427 }
2428
2429 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2430 }
2431
2432 static inline void f2fs_copy_page(struct page *src, struct page *dst)
2433 {
2434 char *src_kaddr = kmap(src);
2435 char *dst_kaddr = kmap(dst);
2436
2437 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
2438 kunmap(dst);
2439 kunmap(src);
2440 }
2441
2442 static inline void f2fs_put_page(struct page *page, int unlock)
2443 {
2444 if (!page)
2445 return;
2446
2447 if (unlock) {
2448 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2449 unlock_page(page);
2450 }
2451 put_page(page);
2452 }
2453
2454 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2455 {
2456 if (dn->node_page)
2457 f2fs_put_page(dn->node_page, 1);
2458 if (dn->inode_page && dn->node_page != dn->inode_page)
2459 f2fs_put_page(dn->inode_page, 0);
2460 dn->node_page = NULL;
2461 dn->inode_page = NULL;
2462 }
2463
2464 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2465 size_t size)
2466 {
2467 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2468 }
2469
2470 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2471 gfp_t flags)
2472 {
2473 void *entry;
2474
2475 entry = kmem_cache_alloc(cachep, flags);
2476 if (!entry)
2477 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2478 return entry;
2479 }
2480
2481 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2482 {
2483 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2484 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2485 get_pages(sbi, F2FS_WB_CP_DATA) ||
2486 get_pages(sbi, F2FS_DIO_READ) ||
2487 get_pages(sbi, F2FS_DIO_WRITE))
2488 return true;
2489
2490 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2491 atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2492 return true;
2493
2494 if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2495 atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2496 return true;
2497 return false;
2498 }
2499
2500 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2501 {
2502 if (sbi->gc_mode == GC_URGENT_HIGH)
2503 return true;
2504
2505 if (is_inflight_io(sbi, type))
2506 return false;
2507
2508 if (sbi->gc_mode == GC_URGENT_LOW &&
2509 (type == DISCARD_TIME || type == GC_TIME))
2510 return true;
2511
2512 return f2fs_time_over(sbi, type);
2513 }
2514
2515 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2516 unsigned long index, void *item)
2517 {
2518 while (radix_tree_insert(root, index, item))
2519 cond_resched();
2520 }
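/*
 * Note: this spins until radix_tree_insert() succeeds, so it only copes
 * with transient failures such as -ENOMEM; the caller must guarantee the
 * index is not already present, or -EEXIST would loop forever.
 */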
2521
2522 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
2523
2524 static inline bool IS_INODE(struct page *page)
2525 {
2526 struct f2fs_node *p = F2FS_NODE(page);
2527
2528 return RAW_IS_INODE(p);
2529 }
2530
2531 static inline int offset_in_addr(struct f2fs_inode *i)
2532 {
2533 return (i->i_inline & F2FS_EXTRA_ATTR) ?
2534 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2535 }
2536
2537 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2538 {
2539 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2540 }
2541
2542 static inline int f2fs_has_extra_attr(struct inode *inode);
2543 static inline block_t data_blkaddr(struct inode *inode,
2544 struct page *node_page, unsigned int offset)
2545 {
2546 struct f2fs_node *raw_node;
2547 __le32 *addr_array;
2548 int base = 0;
2549 bool is_inode = IS_INODE(node_page);
2550
2551 raw_node = F2FS_NODE(node_page);
2552
2553 if (is_inode) {
2554 if (!inode)
2555 /* from GC path only */
2556 base = offset_in_addr(&raw_node->i);
2557 else if (f2fs_has_extra_attr(inode))
2558 base = get_extra_isize(inode);
2559 }
2560
2561 addr_array = blkaddr_in_node(raw_node);
2562 return le32_to_cpu(addr_array[base + offset]);
2563 }
2564
2565 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2566 {
2567 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2568 }
2569
2570 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2571 {
2572 int mask;
2573
2574 addr += (nr >> 3);
2575 mask = 1 << (7 - (nr & 0x07));
2576 return mask & *addr;
2577 }
2578
2579 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2580 {
2581 int mask;
2582
2583 addr += (nr >> 3);
2584 mask = 1 << (7 - (nr & 0x07));
2585 *addr |= mask;
2586 }
2587
2588 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2589 {
2590 int mask;
2591
2592 addr += (nr >> 3);
2593 mask = 1 << (7 - (nr & 0x07));
2594 *addr &= ~mask;
2595 }
2596
2597 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2598 {
2599 int mask;
2600 int ret;
2601
2602 addr += (nr >> 3);
2603 mask = 1 << (7 - (nr & 0x07));
2604 ret = mask & *addr;
2605 *addr |= mask;
2606 return ret;
2607 }
2608
2609 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2610 {
2611 int mask;
2612 int ret;
2613
2614 addr += (nr >> 3);
2615 mask = 1 << (7 - (nr & 0x07));
2616 ret = mask & *addr;
2617 *addr &= ~mask;
2618 return ret;
2619 }
2620
2621 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2622 {
2623 int mask;
2624
2625 addr += (nr >> 3);
2626 mask = 1 << (7 - (nr & 0x07));
2627 *addr ^= mask;
2628 }
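/*
 * Example (illustrative): these helpers use big-endian-style bit order,
 * most-significant bit first within each byte:
 *
 *	nr == 0  ->  addr[0] & 0x80
 *	nr == 9  ->  addr[1] & 0x40
 */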
2629
2630 /*
2631 * On-disk inode flags (f2fs_inode::i_flags)
2632 */
2633 #define F2FS_COMPR_FL 0x00000004 /* Compress file */
2634 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */
2635 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
2636 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */
2637 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */
2638 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */
2639 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */
2640 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */
2641 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
2642 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
2643 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
2644
2645 /* Flags that should be inherited by new inodes from their parent. */
2646 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2647 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2648 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
2649
2650 /* Flags that are appropriate for regular files (all but dir-specific ones). */
2651 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2652 F2FS_CASEFOLD_FL))
2653
2654 /* Flags that are appropriate for non-directories/regular files. */
2655 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
2656
2657 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
2658 {
2659 if (S_ISDIR(mode))
2660 return flags;
2661 else if (S_ISREG(mode))
2662 return flags & F2FS_REG_FLMASK;
2663 else
2664 return flags & F2FS_OTHER_FLMASK;
2665 }
2666
2667 static inline void __mark_inode_dirty_flag(struct inode *inode,
2668 int flag, bool set)
2669 {
2670 switch (flag) {
2671 case FI_INLINE_XATTR:
2672 case FI_INLINE_DATA:
2673 case FI_INLINE_DENTRY:
2674 case FI_NEW_INODE:
2675 if (set)
2676 return;
2677 fallthrough;
2678 case FI_DATA_EXIST:
2679 case FI_INLINE_DOTS:
2680 case FI_PIN_FILE:
2681 f2fs_mark_inode_dirty_sync(inode, true);
2682 }
2683 }
2684
2685 static inline void set_inode_flag(struct inode *inode, int flag)
2686 {
2687 set_bit(flag, F2FS_I(inode)->flags);
2688 __mark_inode_dirty_flag(inode, flag, true);
2689 }
2690
2691 static inline int is_inode_flag_set(struct inode *inode, int flag)
2692 {
2693 return test_bit(flag, F2FS_I(inode)->flags);
2694 }
2695
2696 static inline void clear_inode_flag(struct inode *inode, int flag)
2697 {
2698 clear_bit(flag, F2FS_I(inode)->flags);
2699 __mark_inode_dirty_flag(inode, flag, false);
2700 }
2701
2702 static inline bool f2fs_verity_in_progress(struct inode *inode)
2703 {
2704 return IS_ENABLED(CONFIG_FS_VERITY) &&
2705 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
2706 }
2707
2708 static inline void set_acl_inode(struct inode *inode, umode_t mode)
2709 {
2710 F2FS_I(inode)->i_acl_mode = mode;
2711 set_inode_flag(inode, FI_ACL_MODE);
2712 f2fs_mark_inode_dirty_sync(inode, false);
2713 }
2714
2715 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
2716 {
2717 if (inc)
2718 inc_nlink(inode);
2719 else
2720 drop_nlink(inode);
2721 f2fs_mark_inode_dirty_sync(inode, true);
2722 }
2723
2724 static inline void f2fs_i_blocks_write(struct inode *inode,
2725 block_t diff, bool add, bool claim)
2726 {
2727 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2728 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2729
2730 /* add && claim must be paired with a preceding dquot_reserve_block() */
2731 if (add) {
2732 if (claim)
2733 dquot_claim_block(inode, diff);
2734 else
2735 dquot_alloc_block_nofail(inode, diff);
2736 } else {
2737 dquot_free_block(inode, diff);
2738 }
2739
2740 f2fs_mark_inode_dirty_sync(inode, true);
2741 if (clean || recover)
2742 set_inode_flag(inode, FI_AUTO_RECOVER);
2743 }
2744
2745 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
2746 {
2747 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2748 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2749
2750 if (i_size_read(inode) == i_size)
2751 return;
2752
2753 i_size_write(inode, i_size);
2754 f2fs_mark_inode_dirty_sync(inode, true);
2755 if (clean || recover)
2756 set_inode_flag(inode, FI_AUTO_RECOVER);
2757 }
2758
2759 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
2760 {
2761 F2FS_I(inode)->i_current_depth = depth;
2762 f2fs_mark_inode_dirty_sync(inode, true);
2763 }
2764
2765 static inline void f2fs_i_gc_failures_write(struct inode *inode,
2766 unsigned int count)
2767 {
2768 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
2769 f2fs_mark_inode_dirty_sync(inode, true);
2770 }
2771
2772 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
2773 {
2774 F2FS_I(inode)->i_xattr_nid = xnid;
2775 f2fs_mark_inode_dirty_sync(inode, true);
2776 }
2777
2778 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
2779 {
2780 F2FS_I(inode)->i_pino = pino;
2781 f2fs_mark_inode_dirty_sync(inode, true);
2782 }
2783
2784 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
2785 {
2786 struct f2fs_inode_info *fi = F2FS_I(inode);
2787
2788 if (ri->i_inline & F2FS_INLINE_XATTR)
2789 set_bit(FI_INLINE_XATTR, fi->flags);
2790 if (ri->i_inline & F2FS_INLINE_DATA)
2791 set_bit(FI_INLINE_DATA, fi->flags);
2792 if (ri->i_inline & F2FS_INLINE_DENTRY)
2793 set_bit(FI_INLINE_DENTRY, fi->flags);
2794 if (ri->i_inline & F2FS_DATA_EXIST)
2795 set_bit(FI_DATA_EXIST, fi->flags);
2796 if (ri->i_inline & F2FS_INLINE_DOTS)
2797 set_bit(FI_INLINE_DOTS, fi->flags);
2798 if (ri->i_inline & F2FS_EXTRA_ATTR)
2799 set_bit(FI_EXTRA_ATTR, fi->flags);
2800 if (ri->i_inline & F2FS_PIN_FILE)
2801 set_bit(FI_PIN_FILE, fi->flags);
2802 }
2803
2804 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
2805 {
2806 ri->i_inline = 0;
2807
2808 if (is_inode_flag_set(inode, FI_INLINE_XATTR))
2809 ri->i_inline |= F2FS_INLINE_XATTR;
2810 if (is_inode_flag_set(inode, FI_INLINE_DATA))
2811 ri->i_inline |= F2FS_INLINE_DATA;
2812 if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
2813 ri->i_inline |= F2FS_INLINE_DENTRY;
2814 if (is_inode_flag_set(inode, FI_DATA_EXIST))
2815 ri->i_inline |= F2FS_DATA_EXIST;
2816 if (is_inode_flag_set(inode, FI_INLINE_DOTS))
2817 ri->i_inline |= F2FS_INLINE_DOTS;
2818 if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
2819 ri->i_inline |= F2FS_EXTRA_ATTR;
2820 if (is_inode_flag_set(inode, FI_PIN_FILE))
2821 ri->i_inline |= F2FS_PIN_FILE;
2822 }
2823
2824 static inline int f2fs_has_extra_attr(struct inode *inode)
2825 {
2826 return is_inode_flag_set(inode, FI_EXTRA_ATTR);
2827 }
2828
2829 static inline int f2fs_has_inline_xattr(struct inode *inode)
2830 {
2831 return is_inode_flag_set(inode, FI_INLINE_XATTR);
2832 }
2833
2834 static inline int f2fs_compressed_file(struct inode *inode)
2835 {
2836 return S_ISREG(inode->i_mode) &&
2837 is_inode_flag_set(inode, FI_COMPRESSED_FILE);
2838 }
2839
2840 static inline bool f2fs_need_compress_data(struct inode *inode)
2841 {
2842 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;
2843
2844 if (!f2fs_compressed_file(inode))
2845 return false;
2846
2847 if (compress_mode == COMPR_MODE_FS)
2848 return true;
2849 else if (compress_mode == COMPR_MODE_USER &&
2850 is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
2851 return true;
2852
2853 return false;
2854 }
2855
2856 static inline unsigned int addrs_per_inode(struct inode *inode)
2857 {
2858 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
2859 get_inline_xattr_addrs(inode);
2860
2861 if (!f2fs_compressed_file(inode))
2862 return addrs;
2863 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
2864 }
2865
2866 static inline unsigned int addrs_per_block(struct inode *inode)
2867 {
2868 if (!f2fs_compressed_file(inode))
2869 return DEF_ADDRS_PER_BLOCK;
2870 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
2871 }
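/*
 * Worked example (illustrative): with DEF_ADDRS_PER_BLOCK == 1018 and a
 * compress cluster of 4 blocks, a compressed file addresses
 * ALIGN_DOWN(1018, 4) == 1016 blocks per direct node block, so clusters
 * never straddle a node block.
 */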
2872
2873 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
2874 {
2875 struct f2fs_inode *ri = F2FS_INODE(page);
2876
2877 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
2878 get_inline_xattr_addrs(inode)]);
2879 }
2880
2881 static inline int inline_xattr_size(struct inode *inode)
2882 {
2883 if (f2fs_has_inline_xattr(inode))
2884 return get_inline_xattr_addrs(inode) * sizeof(__le32);
2885 return 0;
2886 }
2887
2888 static inline int f2fs_has_inline_data(struct inode *inode)
2889 {
2890 return is_inode_flag_set(inode, FI_INLINE_DATA);
2891 }
2892
2893 static inline int f2fs_exist_data(struct inode *inode)
2894 {
2895 return is_inode_flag_set(inode, FI_DATA_EXIST);
2896 }
2897
2898 static inline int f2fs_has_inline_dots(struct inode *inode)
2899 {
2900 return is_inode_flag_set(inode, FI_INLINE_DOTS);
2901 }
2902
2903 static inline int f2fs_is_mmap_file(struct inode *inode)
2904 {
2905 return is_inode_flag_set(inode, FI_MMAP_FILE);
2906 }
2907
2908 static inline bool f2fs_is_pinned_file(struct inode *inode)
2909 {
2910 return is_inode_flag_set(inode, FI_PIN_FILE);
2911 }
2912
2913 static inline bool f2fs_is_atomic_file(struct inode *inode)
2914 {
2915 return is_inode_flag_set(inode, FI_ATOMIC_FILE);
2916 }
2917
2918 static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
2919 {
2920 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
2921 }
2922
2923 static inline bool f2fs_is_volatile_file(struct inode *inode)
2924 {
2925 return is_inode_flag_set(inode, FI_VOLATILE_FILE);
2926 }
2927
2928 static inline bool f2fs_is_first_block_written(struct inode *inode)
2929 {
2930 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
2931 }
2932
2933 static inline bool f2fs_is_drop_cache(struct inode *inode)
2934 {
2935 return is_inode_flag_set(inode, FI_DROP_CACHE);
2936 }
2937
2938 static inline void *inline_data_addr(struct inode *inode, struct page *page)
2939 {
2940 struct f2fs_inode *ri = F2FS_INODE(page);
2941 int extra_size = get_extra_isize(inode);
2942
2943 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
2944 }
2945
2946 static inline int f2fs_has_inline_dentry(struct inode *inode)
2947 {
2948 return is_inode_flag_set(inode, FI_INLINE_DENTRY);
2949 }
2950
2951 static inline int is_file(struct inode *inode, int type)
2952 {
2953 return F2FS_I(inode)->i_advise & type;
2954 }
2955
2956 static inline void set_file(struct inode *inode, int type)
2957 {
2958 F2FS_I(inode)->i_advise |= type;
2959 f2fs_mark_inode_dirty_sync(inode, true);
2960 }
2961
2962 static inline void clear_file(struct inode *inode, int type)
2963 {
2964 F2FS_I(inode)->i_advise &= ~type;
2965 f2fs_mark_inode_dirty_sync(inode, true);
2966 }
2967
2968 static inline bool f2fs_is_time_consistent(struct inode *inode)
2969 {
2970 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
2971 return false;
2972 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
2973 return false;
2974 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
2975 return false;
2976 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
2977 &F2FS_I(inode)->i_crtime))
2978 return false;
2979 return true;
2980 }
2981
2982 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
2983 {
2984 bool ret;
2985
2986 if (dsync) {
2987 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2988
2989 spin_lock(&sbi->inode_lock[DIRTY_META]);
2990 ret = list_empty(&F2FS_I(inode)->gdirty_list);
2991 spin_unlock(&sbi->inode_lock[DIRTY_META]);
2992 return ret;
2993 }
2994 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
2995 file_keep_isize(inode) ||
2996 i_size_read(inode) & ~PAGE_MASK)
2997 return false;
2998
2999 if (!f2fs_is_time_consistent(inode))
3000 return false;
3001
3002 spin_lock(&F2FS_I(inode)->i_size_lock);
3003 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3004 spin_unlock(&F2FS_I(inode)->i_size_lock);
3005
3006 return ret;
3007 }
3008
3009 static inline bool f2fs_readonly(struct super_block *sb)
3010 {
3011 return sb_rdonly(sb);
3012 }
3013
3014 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
3015 {
3016 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
3017 }
3018
3019 static inline bool is_dot_dotdot(const u8 *name, size_t len)
3020 {
3021 if (len == 1 && name[0] == '.')
3022 return true;
3023
3024 if (len == 2 && name[0] == '.' && name[1] == '.')
3025 return true;
3026
3027 return false;
3028 }
3029
3030 static inline bool f2fs_may_extent_tree(struct inode *inode)
3031 {
3032 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3033
3034 if (!test_opt(sbi, EXTENT_CACHE) ||
3035 is_inode_flag_set(inode, FI_NO_EXTENT) ||
3036 is_inode_flag_set(inode, FI_COMPRESSED_FILE))
3037 return false;
3038
3039 /*
3040 * Do not create extents for files recovered during mount,
3041 * since the shrinker is not registered yet.
3042 */
3043 if (list_empty(&sbi->s_list))
3044 return false;
3045
3046 return S_ISREG(inode->i_mode);
3047 }
3048
3049 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
3050 size_t size, gfp_t flags)
3051 {
3052 if (time_to_inject(sbi, FAULT_KMALLOC)) {
3053 f2fs_show_injection_info(sbi, FAULT_KMALLOC);
3054 return NULL;
3055 }
3056
3057 return kmalloc(size, flags);
3058 }
3059
3060 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3061 size_t size, gfp_t flags)
3062 {
3063 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3064 }
3065
3066 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3067 size_t size, gfp_t flags)
3068 {
3069 if (time_to_inject(sbi, FAULT_KVMALLOC)) {
3070 f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
3071 return NULL;
3072 }
3073
3074 return kvmalloc(size, flags);
3075 }
3076
3077 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3078 size_t size, gfp_t flags)
3079 {
3080 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3081 }
3082
3083 static inline int get_extra_isize(struct inode *inode)
3084 {
3085 return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3086 }
3087
3088 static inline int get_inline_xattr_addrs(struct inode *inode)
3089 {
3090 return F2FS_I(inode)->i_inline_xattr_size;
3091 }
3092
3093 #define f2fs_get_inode_mode(i) \
3094 ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3095 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3096
3097 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \
3098 (offsetof(struct f2fs_inode, i_extra_end) - \
3099 offsetof(struct f2fs_inode, i_extra_isize)) \
3100
3101 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr))
3102 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \
3103 ((offsetof(typeof(*(f2fs_inode)), field) + \
3104 sizeof((f2fs_inode)->field)) \
3105 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
3106
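/*
 * Example (illustrative): checking that the on-disk inode was written
 * with a given extra attribute before reading it:
 *
 *	if (F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize), i_projid))
 *		projid = le32_to_cpu(ri->i_projid);
 */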
3107 #define DEFAULT_IOSTAT_PERIOD_MS 3000
3108 #define MIN_IOSTAT_PERIOD_MS 100
3109 /* maximum period of iostat tracing is 1 day */
3110 #define MAX_IOSTAT_PERIOD_MS 8640000
3111
3112 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
3113 {
3114 int i;
3115
3116 spin_lock(&sbi->iostat_lock);
3117 for (i = 0; i < NR_IO_TYPE; i++) {
3118 sbi->rw_iostat[i] = 0;
3119 sbi->prev_rw_iostat[i] = 0;
3120 }
3121 spin_unlock(&sbi->iostat_lock);
3122 }
3123
3124 extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);
3125
3126 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
3127 enum iostat_type type, unsigned long long io_bytes)
3128 {
3129 if (!sbi->iostat_enable)
3130 return;
3131 spin_lock(&sbi->iostat_lock);
3132 sbi->rw_iostat[type] += io_bytes;
3133
3134 if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
3135 sbi->rw_iostat[APP_BUFFERED_IO] =
3136 sbi->rw_iostat[APP_WRITE_IO] -
3137 sbi->rw_iostat[APP_DIRECT_IO];
3138
3139 if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
3140 sbi->rw_iostat[APP_BUFFERED_READ_IO] =
3141 sbi->rw_iostat[APP_READ_IO] -
3142 sbi->rw_iostat[APP_DIRECT_READ_IO];
3143 spin_unlock(&sbi->iostat_lock);
3144
3145 f2fs_record_iostat(sbi);
3146 }
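
/*
 * Worked example for the derived counters above: buffered totals are kept
 * as differences, so after an application has written 8 MiB in total,
 * 3 MiB of it via O_DIRECT, the counters read
 *
 *	rw_iostat[APP_WRITE_IO]    = 8 MiB
 *	rw_iostat[APP_DIRECT_IO]   = 3 MiB
 *	rw_iostat[APP_BUFFERED_IO] = 8 MiB - 3 MiB = 5 MiB
 *
 * and likewise for the read-side triple.
 */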

#define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			 blkaddr, type);
		f2fs_bug_on(sbi, 1);
	}
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}
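
/*
 * Note: NEW_ADDR, NULL_ADDR and COMPRESS_ADDR are reserved markers rather
 * than real block addresses (currently (block_t)-1, 0 and (block_t)-2
 * respectively), so a dnode slot holding one of them must never be
 * submitted for I/O; __is_valid_data_blkaddr() filters them out first.
 */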

static inline void f2fs_set_page_private(struct page *page,
						unsigned long data)
{
	if (PagePrivate(page))
		return;

	attach_page_private(page, (void *)data);
}

static inline void f2fs_clear_page_private(struct page *page)
{
	detach_page_private(page);
}

/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
			struct kstat *stat, u32 request_mask, unsigned int flags);
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
			struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct user_namespace *mnt_userns,
			struct dentry *dentry, struct fileattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);

/*
 * namei.c
 */
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
			bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
int f2fs_init_casefolded_name(const struct inode *dir,
			struct f2fs_filename *fname);
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
			unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
			struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}
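
/*
 * The fscrypt_is_nokey_name() check above rejects "no-key" names, i.e.
 * encoded ciphertext names observed in an encrypted directory whose key
 * was absent; creating an entry through such a name would corrupt the
 * directory, so -ENOKEY is returned before f2fs_do_add_link() is tried.
 */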

/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);

/*
 * node.c
 */
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
			struct node_info *ni);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
			unsigned int seq_id);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);

/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
void f2fs_register_inmem_page(struct inode *inode, struct page *page);
void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
void f2fs_drop_inmem_pages(struct inode *inode);
void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
int f2fs_commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
			struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
			unsigned int start, unsigned int end);
void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
			struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
			block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
			enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
			struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
			block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
			enum page_type type, enum temp_type temp);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);

/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
			block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
			unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
			unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);

/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
			struct inode *inode, struct page *page,
			nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
			struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
			struct bio **bio, sector_t *last_block,
			struct writeback_control *wbc,
			enum iostat_type io_type,
			int compr_blocks, bool allow_balance);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);

/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
	(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
	(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
#define stat_inc_seg_type(sbi, curseg)					\
	((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
	((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
	(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = F2FS_I_SB(inode)->atomic_files;		\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
	(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
	(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
			struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
			struct page *page, struct inode *dir,
			struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
			struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root,
			struct rb_node **parent,
			unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root,
			struct rb_node **parent,
			unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
			struct rb_entry *cached_re, unsigned int ofs,
			struct rb_entry **prev_entry, struct rb_entry **next_entry,
			struct rb_node ***insert_p, struct rb_node **insert_parent,
			bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
			pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_end_read_compressed_page(struct page *page, bool failed);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
			int *submitted,
			struct writeback_control *wbc,
			enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
			unsigned nr_pages, sector_t *last_block_in_bio,
			bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
#define inc_compr_inode_stat(inode)					\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		sbi->compr_new_inode++;					\
	} while (0)
#define add_compr_block_stat(inode, blocks)				\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
		sbi->compr_written_block += blocks;			\
		sbi->compr_saved_block += diff;				\
	} while (0)
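
/*
 * Worked example for add_compr_block_stat(): for a 4-block cluster
 * (i_cluster_size == 4) that compresses down to a single block,
 * blocks == 1, so compr_written_block grows by 1 and compr_saved_block
 * by diff == 4 - 1 == 3.
 */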
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression support is not compiled in */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
{
	WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page)
{
	WARN_ON_ONCE(1);
}
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
#define inc_compr_inode_stat(inode)		do { } while (0)
#endif

static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_compress_flag =
			F2FS_OPTION(sbi).compress_chksum ?
				1 << COMPRESS_CHKSUM : 0;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
			F2FS_OPTION(sbi).compress_level)
		F2FS_I(inode)->i_compress_flag |=
				F2FS_OPTION(sbi).compress_level <<
				COMPRESS_LEVEL_OFFSET;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	inc_compr_inode_stat(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}
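
/*
 * Example (illustrative): with compress_log_size set to 2 (the smallest
 * accepted value), the computation above yields a cluster size of
 * 1 << 2 == 4 blocks, i.e. compression operates on 16 KiB chunks when
 * the block size is 4 KiB.
 */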

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return true;
	if (S_ISREG(inode->i_mode) &&
		(get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
		return false;

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return true;
}

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
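
/*
 * Each F2FS_FEATURE_FUNCS() line above expands to a small predicate; the
 * first one, for instance, generates
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 *
 * giving callers a uniform way to test the on-disk feature flags.
 */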

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
					block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
			bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
			f2fs_is_atomic_file(inode) ||
			f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}
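
/*
 * Worked example: with 4 KiB blocks (i_blkbits == 12) the mask is 0xfff.
 * An iocb positioned at offset 512, or an iov_iter whose buffers are only
 * 512-byte aligned, leaves low bits set in "align", so a non-zero value
 * is returned and the I/O is treated as block-unaligned.
 */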

static inline int allow_outplace_dio(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
				!block_unaligned_IO(inode, iocb, iter));
}

static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (f2fs_post_read_required(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	/*
	 * On zoned block devices, fall back from direct I/O to buffered
	 * I/O so that all I/O can be serialized by the log-structured
	 * write path.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
		return true;

	return false;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
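
/*
 * Worked example: with PAGE_SIZE == 4096 and i_size == 10000, pages
 * 0..2 carry file data (DIV_ROUND_UP(10000, 4096) == 3) and must be
 * verified; any page index >= 3 lies beyond EOF and skips verification.
 */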

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */