// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}
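
/*
 * Example (illustrative only): __reverse_ulong() reads the bitmap bytes
 * in big-endian order, so on a 64-bit machine
 *	str[] = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
 * yields 0x8000000000000000UL, i.e. byte 0 of the on-disk bitmap lands
 * in the most significant byte of the word.
 */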

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
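
/*
 * Example (illustrative only): with this reversed numbering,
 * __reverse_ffs(0x8000000000000000UL) == 0 and __reverse_ffs(1UL) == 63
 * on a 64-bit machine, i.e. bit 0 is the MSB of the word.
 */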

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be a multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}
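
/*
 * Example (illustrative only): on an otherwise empty bitmap, after
 * f2fs_set_bit(0, bitmap) and f2fs_set_bit(9, bitmap),
 * __find_rev_next_bit(bitmap, 64, 1) returns 9 and
 * __find_rev_next_zero_bit(bitmap, 64, 0) returns 1.
 */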

bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (test_opt(sbi, LFS))
		return false;
	if (sbi->gc_mode == GC_URGENT)
		return true;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}
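
/*
 * Example (illustrative numbers): with node_secs = 2, dent_secs = 1,
 * imeta_secs = 0, min_ssr_sections = 4 and reserved_sections() = 8,
 * SSR is needed once free_sections(sbi) <= 2 + 2 * 1 + 0 + 4 + 8 = 16.
 */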

void f2fs_register_inmem_page(struct inode *inode, struct page *page)
{
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	f2fs_set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	get_page(page);
	mutex_lock(&F2FS_I(inode)->inmem_lock);
	list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&F2FS_I(inode)->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}
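
/*
 * A rough sketch of the atomic-write page lifecycle handled below:
 *
 *   f2fs_register_inmem_page()   buffer a dirty page on fi->inmem_pages
 *   f2fs_commit_inmem_pages()    write all buffered pages as one batch
 *   __revoke_inmem_pages()       roll back committed pages on failure
 *   f2fs_drop_inmem_pages()      abandon everything on abort
 */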

static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover,
				bool trylock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		if (trylock) {
			/*
			 * To avoid deadlock between the page lock and
			 * inmem_lock.
			 */
			if (!trylock_page(page))
				continue;
		} else {
			lock_page(page);
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
retry:
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			err = f2fs_get_dnode_of_data(&dn, page->index,
								LOOKUP_NODE);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				err = -EAGAIN;
				goto next;
			}

			err = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (err) {
				f2fs_put_dnode(&dn);
				return err;
			}

			if (cur->old_addr == NEW_ADDR) {
				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			} else
				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful case */
		if (drop || recover) {
			ClearPageUptodate(page);
			clear_cold_data(page);
		}
		f2fs_clear_page_private(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}

void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
{
	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	unsigned int count = sbi->atomic_files;
	unsigned int looped = 0;
next:
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
		return;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
	inode = igrab(&fi->vfs_inode);
	if (inode)
		list_move_tail(&fi->inmem_ilist, head);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	if (inode) {
		if (gc_failure) {
			if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
				goto skip;
		}
		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		f2fs_drop_inmem_pages(inode);
skip:
		iput(inode);
	}
	congestion_wait(BLK_RW_ASYNC, HZ/50);
	cond_resched();
	if (gc_failure) {
		if (++looped >= count)
			return;
	}
	goto next;
}

void f2fs_drop_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);

	while (!list_empty(&fi->inmem_pages)) {
		mutex_lock(&fi->inmem_lock);
		__revoke_inmem_pages(inode, &fi->inmem_pages,
						true, false, true);
		mutex_unlock(&fi->inmem_lock);
	}

	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (!list_empty(&fi->inmem_ilist))
		list_del_init(&fi->inmem_ilist);
	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		sbi->atomic_files--;
	}
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
}

void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	f2fs_clear_page_private(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}

static int __f2fs_commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.io_type = FS_DATA_IO,
	};
	struct list_head revoke_list;
	bool submit_bio = false;
	int err = 0;

	INIT_LIST_HEAD(&revoke_list);

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			f2fs_wait_on_page_writeback(page, DATA, true, true);

			set_page_dirty(page);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				f2fs_remove_dirty_inode(inode);
			}
retry:
			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = f2fs_do_write_data_page(&fio);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				unlock_page(page);
				break;
			}
			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			submit_bio = true;
		}
		unlock_page(page);
		list_move_tail(&cur->list, &revoke_list);
	}

	if (submit_bio)
		f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);

	if (err) {
		/*
		 * Try to revoke all committed pages; this can still fail
		 * due to lack of memory or other reasons. If that happens,
		 * EAGAIN is returned, meaning the transaction has lost its
		 * integrity and the caller should use a journal to recover,
		 * or rewrite and commit the last transaction. For any other
		 * error number, the filesystem itself has already done the
		 * revoking.
		 */
		err = __revoke_inmem_pages(inode, &revoke_list,
						false, true, false);

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages,
						true, false, false);
	} else {
		__revoke_inmem_pages(inode, &revoke_list,
						false, false, false);
	}

	return err;
}

int f2fs_commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	f2fs_balance_fs(sbi, true);

	down_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_lock_op(sbi);
	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __f2fs_commit_inmem_pages(inode);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	up_write(&fi->i_gc_rwsem[WRITE]);

	return err;
}
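
/*
 * Illustrative user-space counterpart (an assumption for context, not
 * part of this file): the usual atomic-write sequence is
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);	- pages buffered via f2fs_register_inmem_page()
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);	- commit path above
 */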

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}

	/* balance_fs_bg() can be left pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	if (!f2fs_is_checkpoint_ready(sbi))
		return;

	/*
	 * We should do GC or end up with a checkpoint if there are too many
	 * dirty dir/node pages and not enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		down_write(&sbi->gc_lock);
		f2fs_gc(sbi, false, false, NULL_SEGNO);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return;

	/* try to shrink the extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		f2fs_build_free_nids(sbi, false, false);

	if (!is_idle(sbi, REQ_TIME) &&
		(!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
		return;

	/* a checkpoint is the only way to shrink partially cached entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
			!f2fs_available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			excess_dirty_nodes(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			mutex_lock(&sbi->flush_lock);

			blk_start_plug(&plug);
			f2fs_sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);

			mutex_unlock(&sbi->flush_lock);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}

static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio;
	int ret;

	bio = f2fs_bio_alloc(sbi, 0, false);
	if (!bio)
		return -ENOMEM;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio_set_dev(bio, bdev);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}
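
/*
 * Note: an empty bio carrying REQ_PREFLUSH, as built above, asks the
 * block layer to flush the device's volatile write cache; no payload
 * is transferred.
 */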

static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	sb_start_intwrite(sbi->sb);

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	sb_end_intwrite(sbi->sb);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		atomic_inc(&fcc->queued_flush);
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
	    f2fs_is_multi_device(sbi)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/* update issue_list before we wake up issue_flush thread */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->queued_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->queued_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->queued_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}
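
/*
 * Design note (a reading of the code above, not authoritative): with
 * FLUSH_MERGE enabled, concurrent callers queue a flush_cmd on a
 * lock-free llist and a single issue_flush_thread submits one PREFLUSH
 * on their behalf, so N waiters can be satisfied by one cache flush.
 */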

int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return err;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->queued_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return err;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kvfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kvfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, true) == 0) {
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
#ifdef CONFIG_F2FS_CHECK_FS
			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
#endif
		}
	}
}

/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks, ckpt_valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);
	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);

	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
				ckpt_valid_blocks == sbi->blocks_per_seg)) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/* This moves currently empty dirty segments to prefree. Must hold seglist_lock */
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (IS_CURSEG(sbi, segno))
			continue;
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t holes[2] = {0, 0};	/* DATA and NODE */
	block_t unusable;
	struct seg_entry *se;
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		se = get_seg_entry(sbi, segno);
		if (IS_NODESEG(se->type))
			holes[NODE] += sbi->blocks_per_seg - se->valid_blocks;
		else
			holes[DATA] += sbi->blocks_per_seg - se->valid_blocks;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
	if (unusable > ovp_holes)
		return unusable - ovp_holes;
	return 0;
}
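
/*
 * Example (illustrative numbers): with 20 overprovision segments, 12
 * reserved segments and log_blocks_per_seg = 9 (512 blocks/segment),
 * ovp_holes = (20 - 12) << 9 = 4096 blocks; only hole space beyond that
 * counts as unusable.
 */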

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	if (unusable > F2FS_OPTION(sbi).unusable_cap)
		return -EAGAIN;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
		dirty_segments(sbi) > ovp_hole_segs)
		return -EAGAIN;
	return 0;
}

/* This is only used by SBI_CP_DISABLED */
static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = 0;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (get_ckpt_valid_blocks(sbi, segno))
			continue;
		mutex_unlock(&dirty_i->seglist_lock);
		return segno;
	}
	mutex_unlock(&dirty_i->seglist_lock);
	return NULL_SEGNO;
}

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->queued = 0;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	spin_lock_init(&dc->lock);
	dc->bio_ref = 0;
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);

	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_sub(dc->queued, &dcc->queued_discard);

	list_del(&dc->list);
	rb_erase_cached(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned long flags;

	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);

	spin_lock_irqsave(&dc->lock, flags);
	if (dc->bio_ref) {
		spin_unlock_irqrestore(&dc->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dc->lock, flags);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		printk_ratelimited(
			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
			KERN_INFO, sbi->sb->s_id,
			dc->lstart, dc->start, dc->len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
	unsigned long flags;

	dc->error = blk_status_to_errno(bio->bi_status);

	spin_lock_irqsave(&dc->lock, flags);
	dc->bio_ref--;
	if (!dc->bio_ref && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);
	}
	spin_unlock_irqrestore(&dc->lock, flags);
	bio_put(bio);
}

static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
	unsigned long *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = max_blocks;
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

static void __init_discard_policy(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->ordered = false;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
	dpolicy->io_aware_gran = MAX_PLIST_NUM;
	dpolicy->timeout = 0;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = true;
		dpolicy->sync = false;
		dpolicy->ordered = true;
		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
			dpolicy->granularity = 1;
			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		}
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->max_requests = UINT_MAX;
		dpolicy->io_aware = false;
		/* we need to issue all to keep CP_TRIMMED_FLAG */
		dpolicy->granularity = 1;
	}
}
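
/*
 * Quick summary of the policies set up above (derived from this code,
 * not a separate spec): DPOLICY_BG is async, io-aware and ordered;
 * DPOLICY_FORCE ignores device idleness; DPOLICY_FSTRIM only disables
 * io-awareness; DPOLICY_UMOUNT issues everything (max_requests =
 * UINT_MAX, granularity = 1) so that CP_TRIMMED_FLAG can be kept.
 */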

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len);
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						struct discard_cmd *dc,
						unsigned int *issued)
{
	struct block_device *bdev = dc->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	int flag = dpolicy->sync ? REQ_SYNC : 0;
	block_t lstart, start, len, total_len;
	int err = 0;

	if (dc->state != D_PREP)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return 0;

	trace_f2fs_issue_discard(bdev, dc->start, dc->len);

	lstart = dc->lstart;
	start = dc->start;
	len = dc->len;
	total_len = len;

	dc->len = 0;

	while (total_len && *issued < dpolicy->max_requests && !err) {
		struct bio *bio = NULL;
		unsigned long flags;
		bool last = true;

		if (len > max_discard_blocks) {
			len = max_discard_blocks;
			last = false;
		}

		(*issued)++;
		if (*issued == dpolicy->max_requests)
			last = true;

		dc->len += len;

		if (time_to_inject(sbi, FAULT_DISCARD)) {
			f2fs_show_injection_info(sbi, FAULT_DISCARD);
			err = -EIO;
			goto submit;
		}
		err = __blkdev_issue_discard(bdev,
					SECTOR_FROM_BLOCK(start),
					SECTOR_FROM_BLOCK(len),
					GFP_NOFS, 0, &bio);
submit:
		if (err) {
			spin_lock_irqsave(&dc->lock, flags);
			if (dc->state == D_PARTIAL)
				dc->state = D_SUBMIT;
			spin_unlock_irqrestore(&dc->lock, flags);

			break;
		}

		f2fs_bug_on(sbi, !bio);
		/*
		 * This must be set before submission, so that the endio
		 * cannot mark the command D_DONE right away.
		 */
		spin_lock_irqsave(&dc->lock, flags);
		if (last)
			dc->state = D_SUBMIT;
		else
			dc->state = D_PARTIAL;
		dc->bio_ref++;
		spin_unlock_irqrestore(&dc->lock, flags);

		atomic_inc(&dcc->queued_discard);
		dc->queued++;
		list_move_tail(&dc->list, wait_list);

		/* sanity check on discard range */
		__check_sit_bitmap(sbi, lstart, lstart + len);

		bio->bi_private = dc;
		bio->bi_end_io = f2fs_submit_discard_endio;
		bio->bi_opf |= flag;
		submit_bio(bio);

		atomic_inc(&dcc->issued_discard);

		f2fs_update_iostat(sbi, FS_DISCARD, 1);

		lstart += len;
		start += len;
		total_len -= len;
		len = total_len;
	}

	if (!err && len)
		__update_discard_tree_range(sbi, bdev, lstart, start, len);
	return err;
}
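
/*
 * Example (illustrative numbers): if max_discard_sectors maps to 512
 * blocks, a 1300-block command is split into bios of 512 + 512 + 276
 * blocks; only the last one moves the command to D_SUBMIT, the earlier
 * ones leave it in D_PARTIAL.
 */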

static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc = NULL;
	bool leftmost = true;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
							lstart, &leftmost);
do_insert:
	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
								p, leftmost);
	if (!dc)
		return NULL;

	return dc;
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->len = blkaddr - dc->lstart;
		dcc->undiscard_blks += dc->len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr,
					NULL, NULL);
		} else {
			dc->lstart++;
			dc->len--;
			dc->start++;
			dcc->undiscard_blks += dc->len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}
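
/*
 * Example (illustrative only): punching blkaddr 105 out of a pending
 * command covering [100, 110) shrinks the command to [100, 105) and
 * inserts a new one for [106, 110); punching the first block simply
 * advances lstart/start and shortens len by one.
 */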

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
	block_t end = lstart + len;

	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, lstart,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->lstart + prev_dc->len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di,
							max_discard_blocks)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di,
							max_discard_blocks)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged) {
			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
							di.len, NULL, NULL);
		}
 next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}
}
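
/*
 * Example (illustrative only): if a D_PREP command already covers
 * [100, 104) on the same bdev, adding [104, 108) back-merges into
 * [100, 108); if [108, 112) also exists in D_PREP, the front merge then
 * folds all three into one command, provided the merged length stays
 * within max_discard_blocks.
 */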

static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	if (!f2fs_bdev_support_discard(bdev))
		return 0;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (f2fs_is_multi_device(sbi)) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
	return 0;
}

static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	unsigned int pos = dcc->next_pos;
	unsigned int issued = 0;
	bool io_interrupted = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, pos,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc) {
		struct rb_node *node;
		int err = 0;

		if (dc->state != D_PREP)
			goto next;

		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
			io_interrupted = true;
			break;
		}

		dcc->next_pos = dc->lstart + dc->len;
		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);

		if (issued >= dpolicy->max_requests)
			break;
next:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	blk_finish_plug(&plug);

	if (!dc)
		dcc->next_pos = 0;

	mutex_unlock(&dcc->cmd_lock);

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, issued = 0;
	bool io_interrupted = false;

	if (dpolicy->timeout != 0)
		f2fs_update_time(sbi, dpolicy->timeout);

	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (dpolicy->timeout != 0 &&
				f2fs_time_over(sbi, dpolicy->timeout))
			break;

		if (i + 1 < dpolicy->granularity)
			break;

		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
			return __issue_discard_cmd_orderly(sbi, dpolicy);

		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		if (unlikely(dcc->rbtree_check))
			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
								&dcc->root));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->timeout != 0 &&
				f2fs_time_over(sbi, dpolicy->timeout))
				break;

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
						!is_idle(sbi, DISCARD_TIME)) {
				io_interrupted = true;
				break;
			}

			__submit_discard_cmd(sbi, dpolicy, dc, &issued);

			if (issued >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (issued >= dpolicy->max_requests || io_interrupted)
			break;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;
	bool dropped = false;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
			dropped = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	return dropped;
}

void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	__drop_discard_cmd(sbi);
}

static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}

static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc, *tmp;
	bool need_wait;
	unsigned int trimmed = 0;

next:
	need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (dc->lstart + dc->len <= start || end <= dc->lstart)
			continue;
		if (dc->len < dpolicy->granularity)
			continue;
		if (dc->state == D_DONE && !dc->ref) {
			wait_for_completion_io(&dc->wait);
			if (!dc->error)
				trimmed += dc->len;
			__remove_discard_cmd(sbi, dc);
		} else {
			dc->ref++;
			need_wait = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	struct discard_policy dp;
	unsigned int discard_blks;

	if (dpolicy)
		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);

	/* wait all */
	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);

	return discard_blks;
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
							NULL, blkaddr);
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/* This comes from f2fs_put_super */
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_policy dpolicy;
	bool dropped;

	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
					dcc->discard_granularity);
	dpolicy.timeout = UMOUNT_DISCARD_TIMEOUT;
	__issue_discard_cmd(sbi, &dpolicy);
	dropped = __drop_discard_cmd(sbi);

	/* just to make sure there are no pending discard commands */
	__wait_all_discard_cmd(sbi, NULL);

	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
	return dropped;
}

static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	struct discard_policy dpolicy;
	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
	int issued;

	set_freezable();

	do {
		__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
					dcc->discard_granularity);

		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));

		if (dcc->discard_wake)
			dcc->discard_wake = 0;

		/* clean up pending candidates before going to sleep */
		if (atomic_read(&dcc->queued_discard))
			__wait_all_discard_cmd(sbi, NULL);

		if (try_to_freeze())
			continue;
		if (f2fs_readonly(sbi->sb))
			continue;
		if (kthread_should_stop())
			return 0;
		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
			wait_ms = dpolicy.max_interval;
			continue;
		}

		if (sbi->gc_mode == GC_URGENT)
			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, &dpolicy);
		if (issued > 0) {
			__wait_all_discard_cmd(sbi, &dpolicy);
			wait_ms = dpolicy.min_interval;
		} else if (issued == -1) {
			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
			if (!wait_ms)
				wait_ms = dpolicy.mid_interval;
		} else {
			wait_ms = dpolicy.max_interval;
		}

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;

	if (f2fs_is_multi_device(sbi)) {
		devi = f2fs_target_device_index(sbi, blkstart);
		if (blkstart < FDEV(devi).start_blk ||
		    blkstart > FDEV(devi).end_blk) {
			f2fs_err(sbi, "Invalid block %x", blkstart);
			return -EIO;
		}
		blkstart -= FDEV(devi).start_blk;
	}

	/* For sequential zones, reset the zone write pointer */
	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);

		if (sector & (bdev_zone_sectors(bdev) - 1) ||
				nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
				 blkstart, blklen);
			return -EIO;
		}
		trace_f2fs_issue_reset_zone(bdev, blkstart);
		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					sector, nr_sects, GFP_NOFS);
	}

	/* For conventional zones, use regular discard if supported */
	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}

static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason & CP_DISCARD);
	struct discard_entry *de = NULL;
	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
		return false;

	if (!force) {
		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->dcc_info->nr_discards <=
				SM_I(sbi)->dcc_info->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		if (check_only)
			return true;

		if (!de) {
			de = f2fs_kmem_cache_alloc(discard_entry_slab,
								GFP_F2FS_ZERO);
			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
			list_add_tail(&de->list, head);
		}

		for (i = start; i < end; i++)
			__set_bit_le(i, (void *)de->discard_map);

		SM_I(sbi)->dcc_info->nr_discards += end - start;
	}
	return false;
}
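
/*
 * Example (illustrative only): in the non-force case the candidate map
 * is (cur_map ^ ckpt_map) & ckpt_map, i.e. blocks valid at the last
 * checkpoint but no longer valid now. With cur_map = 0b0011 and
 * ckpt_map = 0b0110, dmap = 0b0101 & 0b0110 = 0b0100, so only that one
 * block is a discard candidate.
 */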

static void release_discard_addr(struct discard_entry *entry)
{
	list_del(&entry->list);
	kmem_cache_free(discard_entry_slab, entry);
}

void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list)
		release_discard_addr(entry);
}

/*
 * Should call f2fs_clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}
1931 
1932 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
1933 						struct cp_control *cpc)
1934 {
1935 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1936 	struct list_head *head = &dcc->entry_list;
1937 	struct discard_entry *entry, *this;
1938 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1939 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
1940 	unsigned int start = 0, end = -1;
1941 	unsigned int secno, start_segno;
1942 	bool force = (cpc->reason & CP_DISCARD);
1943 	bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
1944 
1945 	mutex_lock(&dirty_i->seglist_lock);
1946 
1947 	while (1) {
1948 		int i;
1949 
1950 		if (need_align && end != -1)
1951 			end--;
1952 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
1953 		if (start >= MAIN_SEGS(sbi))
1954 			break;
1955 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
1956 								start + 1);
1957 
1958 		if (need_align) {
1959 			start = rounddown(start, sbi->segs_per_sec);
1960 			end = roundup(end, sbi->segs_per_sec);
1961 		}
1962 
1963 		for (i = start; i < end; i++) {
1964 			if (test_and_clear_bit(i, prefree_map))
1965 				dirty_i->nr_dirty[PRE]--;
1966 		}
1967 
1968 		if (!f2fs_realtime_discard_enable(sbi))
1969 			continue;
1970 
1971 		if (force && start >= cpc->trim_start &&
1972 					(end - 1) <= cpc->trim_end)
1973 				continue;
1974 
1975 		if (!test_opt(sbi, LFS) || !__is_large_section(sbi)) {
1976 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
1977 				(end - start) << sbi->log_blocks_per_seg);
1978 			continue;
1979 		}
1980 next:
1981 		secno = GET_SEC_FROM_SEG(sbi, start);
1982 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
1983 		if (!IS_CURSEC(sbi, secno) &&
1984 			!get_valid_blocks(sbi, start, true))
1985 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
1986 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
1987 
1988 		start = start_segno + sbi->segs_per_sec;
1989 		if (start < end)
1990 			goto next;
1991 		else
1992 			end = start - 1;
1993 	}
1994 	mutex_unlock(&dirty_i->seglist_lock);
1995 
1996 	/* send small discards */
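	/*
	 * Walk each entry's discard_map as alternating runs of set and
	 * clear bits: a run of set bits [cur_pos, next_pos) is issued as
	 * one discard if long enough, then is_valid flips and the scan
	 * resumes at next_pos until the segment is covered.
	 */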
1997 	list_for_each_entry_safe(entry, this, head, list) {
1998 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
1999 		bool is_valid = test_bit_le(0, entry->discard_map);
2000 
2001 find_next:
2002 		if (is_valid) {
2003 			next_pos = find_next_zero_bit_le(entry->discard_map,
2004 					sbi->blocks_per_seg, cur_pos);
2005 			len = next_pos - cur_pos;
2006 
2007 			if (f2fs_sb_has_blkzoned(sbi) ||
2008 			    (force && len < cpc->trim_minlen))
2009 				goto skip;
2010 
2011 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2012 									len);
2013 			total_len += len;
2014 		} else {
2015 			next_pos = find_next_bit_le(entry->discard_map,
2016 					sbi->blocks_per_seg, cur_pos);
2017 		}
2018 skip:
2019 		cur_pos = next_pos;
2020 		is_valid = !is_valid;
2021 
2022 		if (cur_pos < sbi->blocks_per_seg)
2023 			goto find_next;
2024 
2025 		release_discard_addr(entry);
2026 		dcc->nr_discards -= total_len;
2027 	}
2028 
2029 	wake_up_discard_thread(sbi, false);
2030 }
2031 
2032 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2033 {
2034 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2035 	struct discard_cmd_control *dcc;
2036 	int err = 0, i;
2037 
2038 	if (SM_I(sbi)->dcc_info) {
2039 		dcc = SM_I(sbi)->dcc_info;
2040 		goto init_thread;
2041 	}
2042 
2043 	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2044 	if (!dcc)
2045 		return -ENOMEM;
2046 
2047 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2048 	INIT_LIST_HEAD(&dcc->entry_list);
2049 	for (i = 0; i < MAX_PLIST_NUM; i++)
2050 		INIT_LIST_HEAD(&dcc->pend_list[i]);
2051 	INIT_LIST_HEAD(&dcc->wait_list);
2052 	INIT_LIST_HEAD(&dcc->fstrim_list);
2053 	mutex_init(&dcc->cmd_lock);
2054 	atomic_set(&dcc->issued_discard, 0);
2055 	atomic_set(&dcc->queued_discard, 0);
2056 	atomic_set(&dcc->discard_cmd_cnt, 0);
2057 	dcc->nr_discards = 0;
2058 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2059 	dcc->undiscard_blks = 0;
2060 	dcc->next_pos = 0;
2061 	dcc->root = RB_ROOT_CACHED;
2062 	dcc->rbtree_check = false;
2063 
2064 	init_waitqueue_head(&dcc->discard_wait_queue);
2065 	SM_I(sbi)->dcc_info = dcc;
2066 init_thread:
2067 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2068 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2069 	if (IS_ERR(dcc->f2fs_issue_discard)) {
2070 		err = PTR_ERR(dcc->f2fs_issue_discard);
2071 		kvfree(dcc);
2072 		SM_I(sbi)->dcc_info = NULL;
2073 		return err;
2074 	}
2075 
2076 	return err;
2077 }
2078 
2079 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2080 {
2081 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2082 
2083 	if (!dcc)
2084 		return;
2085 
2086 	f2fs_stop_discard_thread(sbi);
2087 
2088 	/*
2089 	 * Recovery can cache discard commands, so in the error path of
2090 	 * fill_super(), give them a chance to be issued before freeing.
2091 	 */
2092 	if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
2093 		f2fs_issue_discard_timeout(sbi);
2094 
2095 	kvfree(dcc);
2096 	SM_I(sbi)->dcc_info = NULL;
2097 }
2098 
2099 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2100 {
2101 	struct sit_info *sit_i = SIT_I(sbi);
2102 
2103 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2104 		sit_i->dirty_sentries++;
2105 		return false;
2106 	}
2107 
2108 	return true;
2109 }
2110 
2111 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2112 					unsigned int segno, int modified)
2113 {
2114 	struct seg_entry *se = get_seg_entry(sbi, segno);
2115 	se->type = type;
2116 	if (modified)
2117 		__mark_sit_entry_dirty(sbi, segno);
2118 }
2119 
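/*
 * Update the SIT entry covering @blkaddr: @del is +1 when the block
 * becomes valid and -1 when it is invalidated. This adjusts the
 * segment's valid-block count and mtime, maintains the current,
 * checkpoint and discard bitmaps, and marks the SIT entry dirty.
 */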
2120 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2121 {
2122 	struct seg_entry *se;
2123 	unsigned int segno, offset;
2124 	long int new_vblocks;
2125 	bool exist;
2126 #ifdef CONFIG_F2FS_CHECK_FS
2127 	bool mir_exist;
2128 #endif
2129 
2130 	segno = GET_SEGNO(sbi, blkaddr);
2131 
2132 	se = get_seg_entry(sbi, segno);
2133 	new_vblocks = se->valid_blocks + del;
2134 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2135 
2136 	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
2137 				(new_vblocks > sbi->blocks_per_seg)));
2138 
2139 	se->valid_blocks = new_vblocks;
2140 	se->mtime = get_mtime(sbi, false);
2141 	if (se->mtime > SIT_I(sbi)->max_mtime)
2142 		SIT_I(sbi)->max_mtime = se->mtime;
2143 
2144 	/* Update valid block bitmap */
2145 	if (del > 0) {
2146 		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2147 #ifdef CONFIG_F2FS_CHECK_FS
2148 		mir_exist = f2fs_test_and_set_bit(offset,
2149 						se->cur_valid_map_mir);
2150 		if (unlikely(exist != mir_exist)) {
2151 			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2152 				 blkaddr, exist);
2153 			f2fs_bug_on(sbi, 1);
2154 		}
2155 #endif
2156 		if (unlikely(exist)) {
2157 			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2158 				 blkaddr);
2159 			f2fs_bug_on(sbi, 1);
2160 			se->valid_blocks--;
2161 			del = 0;
2162 		}
2163 
2164 		if (!f2fs_test_and_set_bit(offset, se->discard_map))
2165 			sbi->discard_blks--;
2166 
2167 		/*
2168 		 * SSR should never reuse a block which is checkpointed
2169 		 * or was newly invalidated.
2170 		 */
2171 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2172 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2173 				se->ckpt_valid_blocks++;
2174 		}
2175 	} else {
2176 		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2177 #ifdef CONFIG_F2FS_CHECK_FS
2178 		mir_exist = f2fs_test_and_clear_bit(offset,
2179 						se->cur_valid_map_mir);
2180 		if (unlikely(exist != mir_exist)) {
2181 			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2182 				 blkaddr, exist);
2183 			f2fs_bug_on(sbi, 1);
2184 		}
2185 #endif
2186 		if (unlikely(!exist)) {
2187 			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2188 				 blkaddr);
2189 			f2fs_bug_on(sbi, 1);
2190 			se->valid_blocks++;
2191 			del = 0;
2192 		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2193 			/*
2194 			 * If checkpoints are off, we must not reuse data that
2195 			 * was used in the previous checkpoint. If it was used
2196 			 * before, we must track that to know how much space we
2197 			 * really have.
2198 			 */
2199 			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2200 				spin_lock(&sbi->stat_lock);
2201 				sbi->unusable_block_count++;
2202 				spin_unlock(&sbi->stat_lock);
2203 			}
2204 		}
2205 
2206 		if (f2fs_test_and_clear_bit(offset, se->discard_map))
2207 			sbi->discard_blks++;
2208 	}
2209 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2210 		se->ckpt_valid_blocks += del;
2211 
2212 	__mark_sit_entry_dirty(sbi, segno);
2213 
2214 	/* update total number of valid blocks to be written in ckpt area */
2215 	SIT_I(sbi)->written_valid_blocks += del;
2216 
2217 	if (__is_large_section(sbi))
2218 		get_sec_entry(sbi, segno)->valid_blocks += del;
2219 }
2220 
2221 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2222 {
2223 	unsigned int segno = GET_SEGNO(sbi, addr);
2224 	struct sit_info *sit_i = SIT_I(sbi);
2225 
2226 	f2fs_bug_on(sbi, addr == NULL_ADDR);
2227 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2228 		return;
2229 
2230 	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
2231 
2232 	/* add it into sit main buffer */
2233 	down_write(&sit_i->sentry_lock);
2234 
2235 	update_sit_entry(sbi, addr, -1);
2236 
2237 	/* add it into dirty seglist */
2238 	locate_dirty_segment(sbi, segno);
2239 
2240 	up_write(&sit_i->sentry_lock);
2241 }
2242 
2243 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2244 {
2245 	struct sit_info *sit_i = SIT_I(sbi);
2246 	unsigned int segno, offset;
2247 	struct seg_entry *se;
2248 	bool is_cp = false;
2249 
2250 	if (!__is_valid_data_blkaddr(blkaddr))
2251 		return true;
2252 
2253 	down_read(&sit_i->sentry_lock);
2254 
2255 	segno = GET_SEGNO(sbi, blkaddr);
2256 	se = get_seg_entry(sbi, segno);
2257 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2258 
2259 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2260 		is_cp = true;
2261 
2262 	up_read(&sit_i->sentry_lock);
2263 
2264 	return is_cp;
2265 }
2266 
2267 /*
2268  * This function must be called with the curseg_mutex lock held
2269  */
2270 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
2271 					struct f2fs_summary *sum)
2272 {
2273 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2274 	void *addr = curseg->sum_blk;
2275 	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
2276 	memcpy(addr, sum, sizeof(struct f2fs_summary));
2277 }
2278 
2279 /*
2280  * Calculate the number of current summary pages for writing
2281  */
2282 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2283 {
2284 	int valid_sum_count = 0;
2285 	int i, sum_in_page;
2286 
2287 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2288 		if (sbi->ckpt->alloc_type[i] == SSR)
2289 			valid_sum_count += sbi->blocks_per_seg;
2290 		else {
2291 			if (for_ra)
2292 				valid_sum_count += le16_to_cpu(
2293 					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2294 			else
2295 				valid_sum_count += curseg_blkoff(sbi, i);
2296 		}
2297 	}
2298 
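	/*
	 * Rough layout note: the first compacted page also carries the
	 * NAT and SIT journals, every page reserves footer space, and
	 * the rest is packed with fixed-size summary entries, so the
	 * three data logs never need more than three pages in total.
	 */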
2299 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
2300 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2301 	if (valid_sum_count <= sum_in_page)
2302 		return 1;
2303 	else if ((valid_sum_count - sum_in_page) <=
2304 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2305 		return 2;
2306 	return 3;
2307 }
2308 
2309 /*
2310  * Caller should put this summary page
2311  */
2312 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2313 {
2314 	return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
2315 }
2316 
2317 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2318 					void *src, block_t blk_addr)
2319 {
2320 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2321 
2322 	memcpy(page_address(page), src, PAGE_SIZE);
2323 	set_page_dirty(page);
2324 	f2fs_put_page(page, 1);
2325 }
2326 
2327 static void write_sum_page(struct f2fs_sb_info *sbi,
2328 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2329 {
2330 	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2331 }
2332 
2333 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2334 						int type, block_t blk_addr)
2335 {
2336 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2337 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2338 	struct f2fs_summary_block *src = curseg->sum_blk;
2339 	struct f2fs_summary_block *dst;
2340 
2341 	dst = (struct f2fs_summary_block *)page_address(page);
2342 	memset(dst, 0, PAGE_SIZE);
2343 
2344 	mutex_lock(&curseg->curseg_mutex);
2345 
2346 	down_read(&curseg->journal_rwsem);
2347 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2348 	up_read(&curseg->journal_rwsem);
2349 
2350 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2351 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2352 
2353 	mutex_unlock(&curseg->curseg_mutex);
2354 
2355 	set_page_dirty(page);
2356 	f2fs_put_page(page, 1);
2357 }
2358 
2359 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
2360 {
2361 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2362 	unsigned int segno = curseg->segno + 1;
2363 	struct free_segmap_info *free_i = FREE_I(sbi);
2364 
2365 	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2366 		return !test_bit(segno, free_i->free_segmap);
2367 	return 0;
2368 }
2369 
2370 /*
2371  * Find a new segment in the free segment bitmap, in the requested direction.
2372  * This function must succeed; otherwise it is a BUG.
2373  */
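/*
 * Search policy, roughly: first try the next segment inside the current
 * section (unless a whole new section is required), then look for a free
 * section starting from the hint, moving left or right per @dir, and
 * prefer a zone not already occupied by one of the current logs.
 */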
2374 static void get_new_segment(struct f2fs_sb_info *sbi,
2375 			unsigned int *newseg, bool new_sec, int dir)
2376 {
2377 	struct free_segmap_info *free_i = FREE_I(sbi);
2378 	unsigned int segno, secno, zoneno;
2379 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2380 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2381 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2382 	unsigned int left_start = hint;
2383 	bool init = true;
2384 	int go_left = 0;
2385 	int i;
2386 
2387 	spin_lock(&free_i->segmap_lock);
2388 
2389 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2390 		segno = find_next_zero_bit(free_i->free_segmap,
2391 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2392 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2393 			goto got_it;
2394 	}
2395 find_other_zone:
2396 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2397 	if (secno >= MAIN_SECS(sbi)) {
2398 		if (dir == ALLOC_RIGHT) {
2399 			secno = find_next_zero_bit(free_i->free_secmap,
2400 							MAIN_SECS(sbi), 0);
2401 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2402 		} else {
2403 			go_left = 1;
2404 			left_start = hint - 1;
2405 		}
2406 	}
2407 	if (go_left == 0)
2408 		goto skip_left;
2409 
2410 	while (test_bit(left_start, free_i->free_secmap)) {
2411 		if (left_start > 0) {
2412 			left_start--;
2413 			continue;
2414 		}
2415 		left_start = find_next_zero_bit(free_i->free_secmap,
2416 							MAIN_SECS(sbi), 0);
2417 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2418 		break;
2419 	}
2420 	secno = left_start;
2421 skip_left:
2422 	segno = GET_SEG_FROM_SEC(sbi, secno);
2423 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2424 
2425 	/* give up on finding another zone */
2426 	if (!init)
2427 		goto got_it;
2428 	if (sbi->secs_per_zone == 1)
2429 		goto got_it;
2430 	if (zoneno == old_zoneno)
2431 		goto got_it;
2432 	if (dir == ALLOC_LEFT) {
2433 		if (!go_left && zoneno + 1 >= total_zones)
2434 			goto got_it;
2435 		if (go_left && zoneno == 0)
2436 			goto got_it;
2437 	}
2438 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2439 		if (CURSEG_I(sbi, i)->zone == zoneno)
2440 			break;
2441 
2442 	if (i < NR_CURSEG_TYPE) {
2443 		/* zone is in use, try another */
2444 		if (go_left)
2445 			hint = zoneno * sbi->secs_per_zone - 1;
2446 		else if (zoneno + 1 >= total_zones)
2447 			hint = 0;
2448 		else
2449 			hint = (zoneno + 1) * sbi->secs_per_zone;
2450 		init = false;
2451 		goto find_other_zone;
2452 	}
2453 got_it:
2454 	/* set it as dirty segment in free segmap */
2455 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2456 	__set_inuse(sbi, segno);
2457 	*newseg = segno;
2458 	spin_unlock(&free_i->segmap_lock);
2459 }
2460 
2461 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2462 {
2463 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2464 	struct summary_footer *sum_footer;
2465 
2466 	curseg->segno = curseg->next_segno;
2467 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2468 	curseg->next_blkoff = 0;
2469 	curseg->next_segno = NULL_SEGNO;
2470 
2471 	sum_footer = &(curseg->sum_blk->footer);
2472 	memset(sum_footer, 0, sizeof(struct summary_footer));
2473 	if (IS_DATASEG(type))
2474 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2475 	if (IS_NODESEG(type))
2476 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2477 	__set_sit_entry_type(sbi, type, curseg->segno, modified);
2478 }
2479 
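/*
 * Choose the hint from which get_new_segment() starts searching:
 * large sections keep the current log position; with checkpointing
 * disabled, or for NOHEAP hot-data and node logs, the search starts
 * at segment 0; otherwise prefer the last ALLOC_NEXT victim, then
 * segment 0 in reuse mode, and finally the current log position.
 */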
2480 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2481 {
2482 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
2483 	if (__is_large_section(sbi))
2484 		return CURSEG_I(sbi, type)->segno;
2485 
2486 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2487 		return 0;
2488 
2489 	if (test_opt(sbi, NOHEAP) &&
2490 		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
2491 		return 0;
2492 
2493 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2494 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2495 
2496 	/* find segments from 0 to reuse freed segments */
2497 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2498 		return 0;
2499 
2500 	return CURSEG_I(sbi, type)->segno;
2501 }
2502 
2503 /*
2504  * Allocate a current working segment.
2505  * This function always allocates a free segment in the LFS manner.
2506  */
2507 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2508 {
2509 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2510 	unsigned int segno = curseg->segno;
2511 	int dir = ALLOC_LEFT;
2512 
2513 	write_sum_page(sbi, curseg->sum_blk,
2514 				GET_SUM_BLOCK(sbi, segno));
2515 	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
2516 		dir = ALLOC_RIGHT;
2517 
2518 	if (test_opt(sbi, NOHEAP))
2519 		dir = ALLOC_RIGHT;
2520 
2521 	segno = __get_next_segno(sbi, type);
2522 	get_new_segment(sbi, &segno, new_sec, dir);
2523 	curseg->next_segno = segno;
2524 	reset_curseg(sbi, type, 1);
2525 	curseg->alloc_type = LFS;
2526 }
2527 
2528 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
2529 			struct curseg_info *seg, block_t start)
2530 {
2531 	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
2532 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2533 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2534 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2535 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2536 	int i, pos;
2537 
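	/*
	 * A block can be reused by SSR only if it is invalid both now and
	 * in the last checkpoint, so the unusable map is the union of the
	 * two bitmaps. Illustrative example (MSB-first, as with
	 * f2fs_set_bit()): cur_map: 1010, ckpt_map: 1100 => target: 1110,
	 * so the first free offset from 0 is 3.
	 */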
2538 	for (i = 0; i < entries; i++)
2539 		target_map[i] = ckpt_map[i] | cur_map[i];
2540 
2541 	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2542 
2543 	seg->next_blkoff = pos;
2544 }
2545 
2546 /*
2547  * If a segment is written in the LFS manner, the next block offset is simply
2548  * obtained by incrementing the current block offset. However, if a segment is
2549  * written in the SSR manner, the next block offset is found by __next_free_blkoff
2550  */
2551 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2552 				struct curseg_info *seg)
2553 {
2554 	if (seg->alloc_type == SSR)
2555 		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
2556 	else
2557 		seg->next_blkoff++;
2558 }
2559 
2560 /*
2561  * This function always allocates a used segment (from the dirty seglist) in the
2562  * SSR manner, so it must recover the existing summary information of the valid blocks
2563  */
2564 static void change_curseg(struct f2fs_sb_info *sbi, int type)
2565 {
2566 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2567 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2568 	unsigned int new_segno = curseg->next_segno;
2569 	struct f2fs_summary_block *sum_node;
2570 	struct page *sum_page;
2571 
2572 	write_sum_page(sbi, curseg->sum_blk,
2573 				GET_SUM_BLOCK(sbi, curseg->segno));
2574 	__set_test_and_inuse(sbi, new_segno);
2575 
2576 	mutex_lock(&dirty_i->seglist_lock);
2577 	__remove_dirty_segment(sbi, new_segno, PRE);
2578 	__remove_dirty_segment(sbi, new_segno, DIRTY);
2579 	mutex_unlock(&dirty_i->seglist_lock);
2580 
2581 	reset_curseg(sbi, type, 1);
2582 	curseg->alloc_type = SSR;
2583 	__next_free_blkoff(sbi, curseg, 0);
2584 
2585 	sum_page = f2fs_get_sum_page(sbi, new_segno);
2586 	f2fs_bug_on(sbi, IS_ERR(sum_page));
2587 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2588 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2589 	f2fs_put_page(sum_page, 1);
2590 }
2591 
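/*
 * Pick an SSR victim: try a victim of the requested type first, then
 * scan the other temperatures of the same class (data or node), from
 * cold to hot when the requested log is warm or cold, and from hot to
 * cold otherwise. With checkpointing disabled, fall back to any dirty
 * segment that has no valid blocks left.
 */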
2592 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
2593 {
2594 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2595 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2596 	unsigned segno = NULL_SEGNO;
2597 	int i, cnt;
2598 	bool reversed = false;
2599 
2600 	/* f2fs_need_SSR() has already forced us to do this */
2601 	if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
2602 		curseg->next_segno = segno;
2603 		return 1;
2604 	}
2605 
2606 	/* For node segments, let's do SSR more intensively */
2607 	if (IS_NODESEG(type)) {
2608 		if (type >= CURSEG_WARM_NODE) {
2609 			reversed = true;
2610 			i = CURSEG_COLD_NODE;
2611 		} else {
2612 			i = CURSEG_HOT_NODE;
2613 		}
2614 		cnt = NR_CURSEG_NODE_TYPE;
2615 	} else {
2616 		if (type >= CURSEG_WARM_DATA) {
2617 			reversed = true;
2618 			i = CURSEG_COLD_DATA;
2619 		} else {
2620 			i = CURSEG_HOT_DATA;
2621 		}
2622 		cnt = NR_CURSEG_DATA_TYPE;
2623 	}
2624 
2625 	for (; cnt-- > 0; reversed ? i-- : i++) {
2626 		if (i == type)
2627 			continue;
2628 		if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
2629 			curseg->next_segno = segno;
2630 			return 1;
2631 		}
2632 	}
2633 
2634 	/* find valid_blocks=0 in dirty list */
2635 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2636 		segno = get_free_segment(sbi);
2637 		if (segno != NULL_SEGNO) {
2638 			curseg->next_segno = segno;
2639 			return 1;
2640 		}
2641 	}
2642 	return 0;
2643 }
2644 
2645 /*
2646  * Flush out the current segment and replace it with a new segment.
2647  * This function must succeed; otherwise it is a BUG.
2648  */
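/*
 * Default policy, in order: a forced allocation always opens a fresh
 * section; a warm node log takes a new segment when the CRC recovery
 * flag is unset; an LFS log whose following segment is free (with
 * checkpointing enabled) also takes a new segment; otherwise SSR reuse
 * is tried before falling back to a brand-new segment.
 */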
2649 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2650 						int type, bool force)
2651 {
2652 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2653 
2654 	if (force)
2655 		new_curseg(sbi, type, true);
2656 	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2657 					type == CURSEG_WARM_NODE)
2658 		new_curseg(sbi, type, false);
2659 	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type) &&
2660 			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2661 		new_curseg(sbi, type, false);
2662 	else if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
2663 		change_curseg(sbi, type);
2664 	else
2665 		new_curseg(sbi, type, false);
2666 
2667 	stat_inc_seg_type(sbi, curseg);
2668 }
2669 
2670 void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
2671 					unsigned int start, unsigned int end)
2672 {
2673 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2674 	unsigned int segno;
2675 
2676 	down_read(&SM_I(sbi)->curseg_lock);
2677 	mutex_lock(&curseg->curseg_mutex);
2678 	down_write(&SIT_I(sbi)->sentry_lock);
2679 
2680 	segno = CURSEG_I(sbi, type)->segno;
2681 	if (segno < start || segno > end)
2682 		goto unlock;
2683 
2684 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
2685 		change_curseg(sbi, type);
2686 	else
2687 		new_curseg(sbi, type, true);
2688 
2689 	stat_inc_seg_type(sbi, curseg);
2690 
2691 	locate_dirty_segment(sbi, segno);
2692 unlock:
2693 	up_write(&SIT_I(sbi)->sentry_lock);
2694 
2695 	if (segno != curseg->segno)
2696 		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
2697 			    type, segno, curseg->segno);
2698 
2699 	mutex_unlock(&curseg->curseg_mutex);
2700 	up_read(&SM_I(sbi)->curseg_lock);
2701 }
2702 
2703 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type)
2704 {
2705 	struct curseg_info *curseg;
2706 	unsigned int old_segno;
2707 	int i;
2708 
2709 	down_write(&SIT_I(sbi)->sentry_lock);
2710 
2711 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2712 		if (type != NO_CHECK_TYPE && i != type)
2713 			continue;
2714 
2715 		curseg = CURSEG_I(sbi, i);
2716 		if (type == NO_CHECK_TYPE || curseg->next_blkoff ||
2717 				get_valid_blocks(sbi, curseg->segno, false) ||
2718 				get_ckpt_valid_blocks(sbi, curseg->segno)) {
2719 			old_segno = curseg->segno;
2720 			SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
2721 			locate_dirty_segment(sbi, old_segno);
2722 		}
2723 	}
2724 
2725 	up_write(&SIT_I(sbi)->sentry_lock);
2726 }
2727 
2728 static const struct segment_allocation default_salloc_ops = {
2729 	.allocate_segment = allocate_segment_by_default,
2730 };
2731 
2732 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
2733 						struct cp_control *cpc)
2734 {
2735 	__u64 trim_start = cpc->trim_start;
2736 	bool has_candidate = false;
2737 
2738 	down_write(&SIT_I(sbi)->sentry_lock);
2739 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2740 		if (add_discard_addrs(sbi, cpc, true)) {
2741 			has_candidate = true;
2742 			break;
2743 		}
2744 	}
2745 	up_write(&SIT_I(sbi)->sentry_lock);
2746 
2747 	cpc->trim_start = trim_start;
2748 	return has_candidate;
2749 }
2750 
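/*
 * Issue every prepared discard command whose logical range overlaps
 * [start, end], walking the rbtree in address order. After
 * dpolicy->max_requests submissions, cmd_lock is dropped, the issued
 * commands are waited for, and the walk resumes where it left off.
 */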
2751 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
2752 					struct discard_policy *dpolicy,
2753 					unsigned int start, unsigned int end)
2754 {
2755 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2756 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
2757 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
2758 	struct discard_cmd *dc;
2759 	struct blk_plug plug;
2760 	int issued;
2761 	unsigned int trimmed = 0;
2762 
2763 next:
2764 	issued = 0;
2765 
2766 	mutex_lock(&dcc->cmd_lock);
2767 	if (unlikely(dcc->rbtree_check))
2768 		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
2769 								&dcc->root));
2770 
2771 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
2772 					NULL, start,
2773 					(struct rb_entry **)&prev_dc,
2774 					(struct rb_entry **)&next_dc,
2775 					&insert_p, &insert_parent, true, NULL);
2776 	if (!dc)
2777 		dc = next_dc;
2778 
2779 	blk_start_plug(&plug);
2780 
2781 	while (dc && dc->lstart <= end) {
2782 		struct rb_node *node;
2783 		int err = 0;
2784 
2785 		if (dc->len < dpolicy->granularity)
2786 			goto skip;
2787 
2788 		if (dc->state != D_PREP) {
2789 			list_move_tail(&dc->list, &dcc->fstrim_list);
2790 			goto skip;
2791 		}
2792 
2793 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
2794 
2795 		if (issued >= dpolicy->max_requests) {
2796 			start = dc->lstart + dc->len;
2797 
2798 			if (err)
2799 				__remove_discard_cmd(sbi, dc);
2800 
2801 			blk_finish_plug(&plug);
2802 			mutex_unlock(&dcc->cmd_lock);
2803 			trimmed += __wait_all_discard_cmd(sbi, NULL);
2804 			congestion_wait(BLK_RW_ASYNC, HZ/50);
2805 			goto next;
2806 		}
2807 skip:
2808 		node = rb_next(&dc->rb_node);
2809 		if (err)
2810 			__remove_discard_cmd(sbi, dc);
2811 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
2812 
2813 		if (fatal_signal_pending(current))
2814 			break;
2815 	}
2816 
2817 	blk_finish_plug(&plug);
2818 	mutex_unlock(&dcc->cmd_lock);
2819 
2820 	return trimmed;
2821 }
2822 
2823 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2824 {
2825 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
2826 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
2827 	unsigned int start_segno, end_segno;
2828 	block_t start_block, end_block;
2829 	struct cp_control cpc;
2830 	struct discard_policy dpolicy;
2831 	unsigned long long trimmed = 0;
2832 	int err = 0;
2833 	bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
2834 
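	/*
	 * fstrim flow: clamp the byte range to main-area segments
	 * (section-aligned for LFS on large sections), run a CP_DISCARD
	 * checkpoint to collect candidates, and only issue and wait for
	 * them here when runtime discard is disabled.
	 */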
2835 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
2836 		return -EINVAL;
2837 
2838 	if (end < MAIN_BLKADDR(sbi))
2839 		goto out;
2840 
2841 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2842 		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
2843 		return -EFSCORRUPTED;
2844 	}
2845 
2846 	/* start/end segment number in main_area */
2847 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
2848 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
2849 						GET_SEGNO(sbi, end);
2850 	if (need_align) {
2851 		start_segno = rounddown(start_segno, sbi->segs_per_sec);
2852 		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
2853 	}
2854 
2855 	cpc.reason = CP_DISCARD;
2856 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
2857 	cpc.trim_start = start_segno;
2858 	cpc.trim_end = end_segno;
2859 
2860 	if (sbi->discard_blks == 0)
2861 		goto out;
2862 
2863 	down_write(&sbi->gc_lock);
2864 	err = f2fs_write_checkpoint(sbi, &cpc);
2865 	up_write(&sbi->gc_lock);
2866 	if (err)
2867 		goto out;
2868 
2869 	/*
2870 	 * We filed discard candidates, but we don't actually need to wait for
2871 	 * all of them: with the runtime discard option enabled they will be
2872 	 * issued during idle time anyway. Such a configuration relies on
2873 	 * runtime discard rather than on this fstrim pass.
2874 	 */
2875 	if (f2fs_realtime_discard_enable(sbi))
2876 		goto out;
2877 
2878 	start_block = START_BLOCK(sbi, start_segno);
2879 	end_block = START_BLOCK(sbi, end_segno + 1);
2880 
2881 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
2882 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
2883 					start_block, end_block);
2884 
2885 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
2886 					start_block, end_block);
2887 out:
2888 	if (!err)
2889 		range->len = F2FS_BLK_TO_BYTES(trimmed);
2890 	return err;
2891 }
2892 
2893 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
2894 {
2895 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2896 	if (curseg->next_blkoff < sbi->blocks_per_seg)
2897 		return true;
2898 	return false;
2899 }
2900 
2901 int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
2902 {
2903 	switch (hint) {
2904 	case WRITE_LIFE_SHORT:
2905 		return CURSEG_HOT_DATA;
2906 	case WRITE_LIFE_EXTREME:
2907 		return CURSEG_COLD_DATA;
2908 	default:
2909 		return CURSEG_WARM_DATA;
2910 	}
2911 }
2912 
2913 /* This returns write hints for each segment type. These hints will be
2914  * passed down to the block layer. There are mapping tables which depend on
2915  * the mount option 'whint_mode'.
2916  *
2917  * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
2918  *
2919  * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
2920  *
2921  * User                  F2FS                     Block
2922  * ----                  ----                     -----
2923  *                       META                     WRITE_LIFE_NOT_SET
2924  *                       HOT_NODE                 "
2925  *                       WARM_NODE                "
2926  *                       COLD_NODE                "
2927  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
2928  * extension list        "                        "
2929  *
2930  * -- buffered io
2931  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
2932  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
2933  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
2934  * WRITE_LIFE_NONE       "                        "
2935  * WRITE_LIFE_MEDIUM     "                        "
2936  * WRITE_LIFE_LONG       "                        "
2937  *
2938  * -- direct io
2939  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
2940  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
2941  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
2942  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
2943  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
2944  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
2945  *
2946  * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
2947  *
2948  * User                  F2FS                     Block
2949  * ----                  ----                     -----
2950  *                       META                     WRITE_LIFE_MEDIUM;
2951  *                       HOT_NODE                 WRITE_LIFE_NOT_SET
2952  *                       WARM_NODE                "
2953  *                       COLD_NODE                WRITE_LIFE_NONE
2954  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
2955  * extension list        "                        "
2956  *
2957  * -- buffered io
2958  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
2959  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
2960  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_LONG
2961  * WRITE_LIFE_NONE       "                        "
2962  * WRITE_LIFE_MEDIUM     "                        "
2963  * WRITE_LIFE_LONG       "                        "
2964  *
2965  * -- direct io
2966  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
2967  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
2968  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
2969  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
2970  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
2971  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
2972  */
2973 
2974 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
2975 				enum page_type type, enum temp_type temp)
2976 {
2977 	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
2978 		if (type == DATA) {
2979 			if (temp == WARM)
2980 				return WRITE_LIFE_NOT_SET;
2981 			else if (temp == HOT)
2982 				return WRITE_LIFE_SHORT;
2983 			else if (temp == COLD)
2984 				return WRITE_LIFE_EXTREME;
2985 		} else {
2986 			return WRITE_LIFE_NOT_SET;
2987 		}
2988 	} else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
2989 		if (type == DATA) {
2990 			if (temp == WARM)
2991 				return WRITE_LIFE_LONG;
2992 			else if (temp == HOT)
2993 				return WRITE_LIFE_SHORT;
2994 			else if (temp == COLD)
2995 				return WRITE_LIFE_EXTREME;
2996 		} else if (type == NODE) {
2997 			if (temp == WARM || temp == HOT)
2998 				return WRITE_LIFE_NOT_SET;
2999 			else if (temp == COLD)
3000 				return WRITE_LIFE_NONE;
3001 		} else if (type == META) {
3002 			return WRITE_LIFE_MEDIUM;
3003 		}
3004 	}
3005 	return WRITE_LIFE_NOT_SET;
3006 }
3007 
3008 static int __get_segment_type_2(struct f2fs_io_info *fio)
3009 {
3010 	if (fio->type == DATA)
3011 		return CURSEG_HOT_DATA;
3012 	else
3013 		return CURSEG_HOT_NODE;
3014 }
3015 
3016 static int __get_segment_type_4(struct f2fs_io_info *fio)
3017 {
3018 	if (fio->type == DATA) {
3019 		struct inode *inode = fio->page->mapping->host;
3020 
3021 		if (S_ISDIR(inode->i_mode))
3022 			return CURSEG_HOT_DATA;
3023 		else
3024 			return CURSEG_COLD_DATA;
3025 	} else {
3026 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3027 			return CURSEG_WARM_NODE;
3028 		else
3029 			return CURSEG_COLD_NODE;
3030 	}
3031 }
3032 
3033 static int __get_segment_type_6(struct f2fs_io_info *fio)
3034 {
3035 	if (fio->type == DATA) {
3036 		struct inode *inode = fio->page->mapping->host;
3037 
3038 		if (is_cold_data(fio->page) || file_is_cold(inode) ||
3039 				f2fs_compressed_file(inode))
3040 			return CURSEG_COLD_DATA;
3041 		if (file_is_hot(inode) ||
3042 				is_inode_flag_set(inode, FI_HOT_DATA) ||
3043 				f2fs_is_atomic_file(inode) ||
3044 				f2fs_is_volatile_file(inode))
3045 			return CURSEG_HOT_DATA;
3046 		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3047 	} else {
3048 		if (IS_DNODE(fio->page))
3049 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3050 						CURSEG_HOT_NODE;
3051 		return CURSEG_COLD_NODE;
3052 	}
3053 }
3054 
3055 static int __get_segment_type(struct f2fs_io_info *fio)
3056 {
3057 	int type = 0;
3058 
3059 	switch (F2FS_OPTION(fio->sbi).active_logs) {
3060 	case 2:
3061 		type = __get_segment_type_2(fio);
3062 		break;
3063 	case 4:
3064 		type = __get_segment_type_4(fio);
3065 		break;
3066 	case 6:
3067 		type = __get_segment_type_6(fio);
3068 		break;
3069 	default:
3070 		f2fs_bug_on(fio->sbi, true);
3071 	}
3072 
3073 	if (IS_HOT(type))
3074 		fio->temp = HOT;
3075 	else if (IS_WARM(type))
3076 		fio->temp = WARM;
3077 	else
3078 		fio->temp = COLD;
3079 	return type;
3080 }
3081 
3082 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3083 		block_t old_blkaddr, block_t *new_blkaddr,
3084 		struct f2fs_summary *sum, int type,
3085 		struct f2fs_io_info *fio, bool add_list)
3086 {
3087 	struct sit_info *sit_i = SIT_I(sbi);
3088 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3089 	bool put_pin_sem = false;
3090 
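	/*
	 * A CURSEG_COLD_DATA_PINNED request is remapped to the cold log;
	 * its caller (not shown here) is expected to hold sbi->pin_sem
	 * for write. A plain cold-data write, e.g. from GC, therefore
	 * only proceeds on the cold log if it can take pin_sem for read,
	 * and is otherwise redirected to the warm data log.
	 */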
3091 	if (type == CURSEG_COLD_DATA) {
3092 		/* GC during CURSEG_COLD_DATA_PINNED allocation */
3093 		if (down_read_trylock(&sbi->pin_sem)) {
3094 			put_pin_sem = true;
3095 		} else {
3096 			type = CURSEG_WARM_DATA;
3097 			curseg = CURSEG_I(sbi, type);
3098 		}
3099 	} else if (type == CURSEG_COLD_DATA_PINNED) {
3100 		type = CURSEG_COLD_DATA;
3101 	}
3102 
3103 	down_read(&SM_I(sbi)->curseg_lock);
3104 
3105 	mutex_lock(&curseg->curseg_mutex);
3106 	down_write(&sit_i->sentry_lock);
3107 
3108 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3109 
3110 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3111 
3112 	/*
3113 	 * __add_sum_entry must be called with curseg_mutex held,
3114 	 * because this function updates a summary entry in the
3115 	 * current summary block.
3116 	 */
3117 	__add_sum_entry(sbi, type, sum);
3118 
3119 	__refresh_next_blkoff(sbi, curseg);
3120 
3121 	stat_inc_block_count(sbi, curseg);
3122 
3123 	/*
3124 	 * SIT information should be updated before segment allocation,
3125 	 * since SSR needs the latest valid block information.
3126 	 */
3127 	update_sit_entry(sbi, *new_blkaddr, 1);
3128 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3129 		update_sit_entry(sbi, old_blkaddr, -1);
3130 
3131 	if (!__has_curseg_space(sbi, type))
3132 		sit_i->s_ops->allocate_segment(sbi, type, false);
3133 
3134 	/*
3135 	 * The segment dirty status should be updated after segment allocation,
3136 	 * so we only need to update the status once, after the previous
3137 	 * segment has been closed.
3138 	 */
3139 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3140 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3141 
3142 	up_write(&sit_i->sentry_lock);
3143 
3144 	if (page && IS_NODESEG(type)) {
3145 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3146 
3147 		f2fs_inode_chksum_set(sbi, page);
3148 	}
3149 
3150 	if (F2FS_IO_ALIGNED(sbi))
3151 		fio->retry = false;
3152 
3153 	if (add_list) {
3154 		struct f2fs_bio_info *io;
3155 
3156 		INIT_LIST_HEAD(&fio->list);
3157 		fio->in_list = true;
3158 		io = sbi->write_io[fio->type] + fio->temp;
3159 		spin_lock(&io->io_lock);
3160 		list_add_tail(&fio->list, &io->io_list);
3161 		spin_unlock(&io->io_lock);
3162 	}
3163 
3164 	mutex_unlock(&curseg->curseg_mutex);
3165 
3166 	up_read(&SM_I(sbi)->curseg_lock);
3167 
3168 	if (put_pin_sem)
3169 		up_read(&sbi->pin_sem);
3170 }
3171 
3172 static void update_device_state(struct f2fs_io_info *fio)
3173 {
3174 	struct f2fs_sb_info *sbi = fio->sbi;
3175 	unsigned int devidx;
3176 
3177 	if (!f2fs_is_multi_device(sbi))
3178 		return;
3179 
3180 	devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
3181 
3182 	/* update device state for fsync */
3183 	f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
3184 
3185 	/* update device state for checkpoint */
3186 	if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3187 		spin_lock(&sbi->dev_lock);
3188 		f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3189 		spin_unlock(&sbi->dev_lock);
3190 	}
3191 }
3192 
3193 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3194 {
3195 	int type = __get_segment_type(fio);
3196 	bool keep_order = (test_opt(fio->sbi, LFS) && type == CURSEG_COLD_DATA);
3197 
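	/*
	 * In LFS mode, cold data must hit the log in allocation order,
	 * so io_order_lock spans the allocate-then-submit sequence. If
	 * the page could not be submitted at the allocated address
	 * (fio->retry), the new address becomes the old one and the
	 * allocation is retried.
	 */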
3198 	if (keep_order)
3199 		down_read(&fio->sbi->io_order_lock);
3200 reallocate:
3201 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3202 			&fio->new_blkaddr, sum, type, fio, true);
3203 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3204 		invalidate_mapping_pages(META_MAPPING(fio->sbi),
3205 					fio->old_blkaddr, fio->old_blkaddr);
3206 
3207 	/* writeout dirty page into bdev */
3208 	f2fs_submit_page_write(fio);
3209 	if (fio->retry) {
3210 		fio->old_blkaddr = fio->new_blkaddr;
3211 		goto reallocate;
3212 	}
3213 
3214 	update_device_state(fio);
3215 
3216 	if (keep_order)
3217 		up_read(&fio->sbi->io_order_lock);
3218 }
3219 
3220 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3221 					enum iostat_type io_type)
3222 {
3223 	struct f2fs_io_info fio = {
3224 		.sbi = sbi,
3225 		.type = META,
3226 		.temp = HOT,
3227 		.op = REQ_OP_WRITE,
3228 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3229 		.old_blkaddr = page->index,
3230 		.new_blkaddr = page->index,
3231 		.page = page,
3232 		.encrypted_page = NULL,
3233 		.in_list = false,
3234 	};
3235 
3236 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3237 		fio.op_flags &= ~REQ_META;
3238 
3239 	set_page_writeback(page);
3240 	ClearPageError(page);
3241 	f2fs_submit_page_write(&fio);
3242 
3243 	stat_inc_meta_count(sbi, page->index);
3244 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
3245 }
3246 
3247 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3248 {
3249 	struct f2fs_summary sum;
3250 
3251 	set_summary(&sum, nid, 0, 0);
3252 	do_write_page(&sum, fio);
3253 
3254 	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3255 }
3256 
3257 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3258 					struct f2fs_io_info *fio)
3259 {
3260 	struct f2fs_sb_info *sbi = fio->sbi;
3261 	struct f2fs_summary sum;
3262 
3263 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3264 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3265 	do_write_page(&sum, fio);
3266 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3267 
3268 	f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
3269 }
3270 
3271 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3272 {
3273 	int err;
3274 	struct f2fs_sb_info *sbi = fio->sbi;
3275 	unsigned int segno;
3276 
3277 	fio->new_blkaddr = fio->old_blkaddr;
3278 	/* i/o temperature is needed for passing down write hints */
3279 	__get_segment_type(fio);
3280 
3281 	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3282 
3283 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3284 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3285 		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3286 			  __func__, segno);
3287 		return -EFSCORRUPTED;
3288 	}
3289 
3290 	stat_inc_inplace_blocks(fio->sbi);
3291 
3292 	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
3293 		err = f2fs_merge_page_bio(fio);
3294 	else
3295 		err = f2fs_submit_page_bio(fio);
3296 	if (!err) {
3297 		update_device_state(fio);
3298 		f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3299 	}
3300 
3301 	return err;
3302 }
3303 
3304 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3305 						unsigned int segno)
3306 {
3307 	int i;
3308 
3309 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3310 		if (CURSEG_I(sbi, i)->segno == segno)
3311 			break;
3312 	}
3313 	return i;
3314 }
3315 
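/*
 * Rewrite the summary for a block moved from @old_blkaddr to
 * @new_blkaddr (recovery and block-move paths). The matching log is
 * temporarily switched to the segment containing @new_blkaddr so the
 * summary entry lands in the right slot; with @recover_curseg the
 * previous log position is restored afterwards.
 */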
3316 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3317 				block_t old_blkaddr, block_t new_blkaddr,
3318 				bool recover_curseg, bool recover_newaddr)
3319 {
3320 	struct sit_info *sit_i = SIT_I(sbi);
3321 	struct curseg_info *curseg;
3322 	unsigned int segno, old_cursegno;
3323 	struct seg_entry *se;
3324 	int type;
3325 	unsigned short old_blkoff;
3326 
3327 	segno = GET_SEGNO(sbi, new_blkaddr);
3328 	se = get_seg_entry(sbi, segno);
3329 	type = se->type;
3330 
3331 	down_write(&SM_I(sbi)->curseg_lock);
3332 
3333 	if (!recover_curseg) {
3334 		/* for recovery flow */
3335 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3336 			if (old_blkaddr == NULL_ADDR)
3337 				type = CURSEG_COLD_DATA;
3338 			else
3339 				type = CURSEG_WARM_DATA;
3340 		}
3341 	} else {
3342 		if (IS_CURSEG(sbi, segno)) {
3343 			/* se->type is volatile due to SSR allocation */
3344 			type = __f2fs_get_curseg(sbi, segno);
3345 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3346 		} else {
3347 			type = CURSEG_WARM_DATA;
3348 		}
3349 	}
3350 
3351 	f2fs_bug_on(sbi, !IS_DATASEG(type));
3352 	curseg = CURSEG_I(sbi, type);
3353 
3354 	mutex_lock(&curseg->curseg_mutex);
3355 	down_write(&sit_i->sentry_lock);
3356 
3357 	old_cursegno = curseg->segno;
3358 	old_blkoff = curseg->next_blkoff;
3359 
3360 	/* change the current segment */
3361 	if (segno != curseg->segno) {
3362 		curseg->next_segno = segno;
3363 		change_curseg(sbi, type);
3364 	}
3365 
3366 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3367 	__add_sum_entry(sbi, type, sum);
3368 
3369 	if (!recover_curseg || recover_newaddr)
3370 		update_sit_entry(sbi, new_blkaddr, 1);
3371 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3372 		invalidate_mapping_pages(META_MAPPING(sbi),
3373 					old_blkaddr, old_blkaddr);
3374 		update_sit_entry(sbi, old_blkaddr, -1);
3375 	}
3376 
3377 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3378 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3379 
3380 	locate_dirty_segment(sbi, old_cursegno);
3381 
3382 	if (recover_curseg) {
3383 		if (old_cursegno != curseg->segno) {
3384 			curseg->next_segno = old_cursegno;
3385 			change_curseg(sbi, type);
3386 		}
3387 		curseg->next_blkoff = old_blkoff;
3388 	}
3389 
3390 	up_write(&sit_i->sentry_lock);
3391 	mutex_unlock(&curseg->curseg_mutex);
3392 	up_write(&SM_I(sbi)->curseg_lock);
3393 }
3394 
3395 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3396 				block_t old_addr, block_t new_addr,
3397 				unsigned char version, bool recover_curseg,
3398 				bool recover_newaddr)
3399 {
3400 	struct f2fs_summary sum;
3401 
3402 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3403 
3404 	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3405 					recover_curseg, recover_newaddr);
3406 
3407 	f2fs_update_data_blkaddr(dn, new_addr);
3408 }
3409 
3410 void f2fs_wait_on_page_writeback(struct page *page,
3411 				enum page_type type, bool ordered, bool locked)
3412 {
3413 	if (PageWriteback(page)) {
3414 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3415 
3416 		/* submit cached LFS IO */
3417 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3418 		/* submit cached IPU IO */
3419 		f2fs_submit_merged_ipu_write(sbi, NULL, page);
3420 		if (ordered) {
3421 			wait_on_page_writeback(page);
3422 			f2fs_bug_on(sbi, locked && PageWriteback(page));
3423 		} else {
3424 			wait_for_stable_page(page);
3425 		}
3426 	}
3427 }
3428 
3429 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3430 {
3431 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3432 	struct page *cpage;
3433 
3434 	if (!f2fs_post_read_required(inode))
3435 		return;
3436 
3437 	if (!__is_valid_data_blkaddr(blkaddr))
3438 		return;
3439 
3440 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3441 	if (cpage) {
3442 		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3443 		f2fs_put_page(cpage, 1);
3444 	}
3445 }
3446 
3447 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3448 								block_t len)
3449 {
3450 	block_t i;
3451 
3452 	for (i = 0; i < len; i++)
3453 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
3454 }
3455 
3456 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3457 {
3458 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3459 	struct curseg_info *seg_i;
3460 	unsigned char *kaddr;
3461 	struct page *page;
3462 	block_t start;
3463 	int i, j, offset;
3464 
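	/*
	 * Compacted layout: the first page starts with the NAT journal,
	 * then the SIT journal, then the summary entries of the three
	 * data logs packed back to back, spilling into further pages as
	 * needed (an SSR log restores a full segment's worth of entries).
	 */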
3465 	start = start_sum_block(sbi);
3466 
3467 	page = f2fs_get_meta_page(sbi, start++);
3468 	if (IS_ERR(page))
3469 		return PTR_ERR(page);
3470 	kaddr = (unsigned char *)page_address(page);
3471 
3472 	/* Step 1: restore nat cache */
3473 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3474 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3475 
3476 	/* Step 2: restore sit cache */
3477 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3478 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3479 	offset = 2 * SUM_JOURNAL_SIZE;
3480 
3481 	/* Step 3: restore summary entries */
3482 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3483 		unsigned short blk_off;
3484 		unsigned int segno;
3485 
3486 		seg_i = CURSEG_I(sbi, i);
3487 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3488 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3489 		seg_i->next_segno = segno;
3490 		reset_curseg(sbi, i, 0);
3491 		seg_i->alloc_type = ckpt->alloc_type[i];
3492 		seg_i->next_blkoff = blk_off;
3493 
3494 		if (seg_i->alloc_type == SSR)
3495 			blk_off = sbi->blocks_per_seg;
3496 
3497 		for (j = 0; j < blk_off; j++) {
3498 			struct f2fs_summary *s;
3499 			s = (struct f2fs_summary *)(kaddr + offset);
3500 			seg_i->sum_blk->entries[j] = *s;
3501 			offset += SUMMARY_SIZE;
3502 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3503 						SUM_FOOTER_SIZE)
3504 				continue;
3505 
3506 			f2fs_put_page(page, 1);
3507 			page = NULL;
3508 
3509 			page = f2fs_get_meta_page(sbi, start++);
3510 			if (IS_ERR(page))
3511 				return PTR_ERR(page);
3512 			kaddr = (unsigned char *)page_address(page);
3513 			offset = 0;
3514 		}
3515 	}
3516 	f2fs_put_page(page, 1);
3517 	return 0;
3518 }
3519 
3520 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3521 {
3522 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3523 	struct f2fs_summary_block *sum;
3524 	struct curseg_info *curseg;
3525 	struct page *new;
3526 	unsigned short blk_off;
3527 	unsigned int segno = 0;
3528 	block_t blk_addr = 0;
3529 	int err = 0;
3530 
3531 	/* get segment number and block addr */
3532 	if (IS_DATASEG(type)) {
3533 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3534 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3535 							CURSEG_HOT_DATA]);
3536 		if (__exist_node_summaries(sbi))
3537 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
3538 		else
3539 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3540 	} else {
3541 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
3542 							CURSEG_HOT_NODE]);
3543 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3544 							CURSEG_HOT_NODE]);
3545 		if (__exist_node_summaries(sbi))
3546 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3547 							type - CURSEG_HOT_NODE);
3548 		else
3549 			blk_addr = GET_SUM_BLOCK(sbi, segno);
3550 	}
3551 
3552 	new = f2fs_get_meta_page(sbi, blk_addr);
3553 	if (IS_ERR(new))
3554 		return PTR_ERR(new);
3555 	sum = (struct f2fs_summary_block *)page_address(new);
3556 
3557 	if (IS_NODESEG(type)) {
3558 		if (__exist_node_summaries(sbi)) {
3559 			struct f2fs_summary *ns = &sum->entries[0];
3560 			int i;
3561 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3562 				ns->version = 0;
3563 				ns->ofs_in_node = 0;
3564 			}
3565 		} else {
3566 			err = f2fs_restore_node_summary(sbi, segno, sum);
3567 			if (err)
3568 				goto out;
3569 		}
3570 	}
3571 
3572 	/* set the uncompleted segment as curseg */
3573 	curseg = CURSEG_I(sbi, type);
3574 	mutex_lock(&curseg->curseg_mutex);
3575 
3576 	/* update journal info */
3577 	down_write(&curseg->journal_rwsem);
3578 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3579 	up_write(&curseg->journal_rwsem);
3580 
3581 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3582 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3583 	curseg->next_segno = segno;
3584 	reset_curseg(sbi, type, 0);
3585 	curseg->alloc_type = ckpt->alloc_type[type];
3586 	curseg->next_blkoff = blk_off;
3587 	mutex_unlock(&curseg->curseg_mutex);
3588 out:
3589 	f2fs_put_page(new, 1);
3590 	return err;
3591 }
3592 
3593 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3594 {
3595 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3596 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3597 	int type = CURSEG_HOT_DATA;
3598 	int err;
3599 
3600 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3601 		int npages = f2fs_npages_for_summary_flush(sbi, true);
3602 
3603 		if (npages >= 2)
3604 			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
3605 							META_CP, true);
3606 
3607 		/* restore for compacted data summary */
3608 		err = read_compacted_summaries(sbi);
3609 		if (err)
3610 			return err;
3611 		type = CURSEG_HOT_NODE;
3612 	}
3613 
3614 	if (__exist_node_summaries(sbi))
3615 		f2fs_ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
3616 					NR_CURSEG_TYPE - type, META_CP, true);
3617 
3618 	for (; type <= CURSEG_COLD_NODE; type++) {
3619 		err = read_normal_summaries(sbi, type);
3620 		if (err)
3621 			return err;
3622 	}
3623 
3624 	/* sanity check for summary blocks */
3625 	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
3626 			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
3627 		f2fs_err(sbi, "invalid journal entries nats %u sits %u\n",
3628 			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
3629 		return -EINVAL;
3630 	}
3631 
3632 	return 0;
3633 }
3634 
3635 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
3636 {
3637 	struct page *page;
3638 	unsigned char *kaddr;
3639 	struct f2fs_summary *summary;
3640 	struct curseg_info *seg_i;
3641 	int written_size = 0;
3642 	int i, j;
3643 
3644 	page = f2fs_grab_meta_page(sbi, blkaddr++);
3645 	kaddr = (unsigned char *)page_address(page);
3646 	memset(kaddr, 0, PAGE_SIZE);
3647 
3648 	/* Step 1: write nat cache */
3649 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3650 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
3651 	written_size += SUM_JOURNAL_SIZE;
3652 
3653 	/* Step 2: write sit cache */
3654 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3655 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
3656 	written_size += SUM_JOURNAL_SIZE;
3657 
3658 	/* Step 3: write summary entries */
3659 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3660 		unsigned short blkoff;
3661 		seg_i = CURSEG_I(sbi, i);
3662 		if (sbi->ckpt->alloc_type[i] == SSR)
3663 			blkoff = sbi->blocks_per_seg;
3664 		else
3665 			blkoff = curseg_blkoff(sbi, i);
3666 
3667 		for (j = 0; j < blkoff; j++) {
3668 			if (!page) {
3669 				page = f2fs_grab_meta_page(sbi, blkaddr++);
3670 				kaddr = (unsigned char *)page_address(page);
3671 				memset(kaddr, 0, PAGE_SIZE);
3672 				written_size = 0;
3673 			}
3674 			summary = (struct f2fs_summary *)(kaddr + written_size);
3675 			*summary = seg_i->sum_blk->entries[j];
3676 			written_size += SUMMARY_SIZE;
3677 
3678 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
3679 							SUM_FOOTER_SIZE)
3680 				continue;
3681 
3682 			set_page_dirty(page);
3683 			f2fs_put_page(page, 1);
3684 			page = NULL;
3685 		}
3686 	}
3687 	if (page) {
3688 		set_page_dirty(page);
3689 		f2fs_put_page(page, 1);
3690 	}
3691 }
3692 
3693 static void write_normal_summaries(struct f2fs_sb_info *sbi,
3694 					block_t blkaddr, int type)
3695 {
3696 	int i, end;
3697 	if (IS_DATASEG(type))
3698 		end = type + NR_CURSEG_DATA_TYPE;
3699 	else
3700 		end = type + NR_CURSEG_NODE_TYPE;
3701 
3702 	for (i = type; i < end; i++)
3703 		write_current_sum_page(sbi, i, blkaddr + (i - type));
3704 }
3705 
3706 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3707 {
3708 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
3709 		write_compacted_summaries(sbi, start_blk);
3710 	else
3711 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
3712 }
3713 
3714 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3715 {
3716 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
3717 }
3718 
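/*
 * Look up @val (a nid for NAT_JOURNAL, a segno for SIT_JOURNAL) in the
 * in-place journal. Returns the matching entry's index; if none is
 * found and @alloc is set, a new slot is appended when space remains.
 * Returns -1 when the entry is absent and no slot can be allocated.
 * Illustrative call: f2fs_lookup_journal_in_cursum(journal, SIT_JOURNAL,
 * segno, 1);
 */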
3719 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3720 					unsigned int val, int alloc)
3721 {
3722 	int i;
3723 
3724 	if (type == NAT_JOURNAL) {
3725 		for (i = 0; i < nats_in_cursum(journal); i++) {
3726 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
3727 				return i;
3728 		}
3729 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
3730 			return update_nats_in_cursum(journal, 1);
3731 	} else if (type == SIT_JOURNAL) {
3732 		for (i = 0; i < sits_in_cursum(journal); i++)
3733 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
3734 				return i;
3735 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
3736 			return update_sits_in_cursum(journal, 1);
3737 	}
3738 	return -1;
3739 }
3740 
3741 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
3742 					unsigned int segno)
3743 {
3744 	return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
3745 }
3746 
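/*
 * SIT blocks live in two on-disk packs that checkpoints alternate
 * between.  Grab the page at the "next" address for @start's SIT
 * block, fill it from the in-memory segment entries, and flip the pack
 * bit so future lookups read the new copy.
 */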
3747 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
3748 					unsigned int start)
3749 {
3750 	struct sit_info *sit_i = SIT_I(sbi);
3751 	struct page *page;
3752 	pgoff_t src_off, dst_off;
3753 
3754 	src_off = current_sit_addr(sbi, start);
3755 	dst_off = next_sit_addr(sbi, src_off);
3756 
3757 	page = f2fs_grab_meta_page(sbi, dst_off);
3758 	seg_info_to_sit_page(sbi, page, start);
3759 
3760 	set_page_dirty(page);
3761 	set_to_next_sit(sit_i, start);
3762 
3763 	return page;
3764 }
3765 
3766 static struct sit_entry_set *grab_sit_entry_set(void)
3767 {
3768 	struct sit_entry_set *ses =
3769 			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
3770 
3771 	ses->entry_cnt = 0;
3772 	INIT_LIST_HEAD(&ses->set_list);
3773 	return ses;
3774 }
3775 
3776 static void release_sit_entry_set(struct sit_entry_set *ses)
3777 {
3778 	list_del(&ses->set_list);
3779 	kmem_cache_free(sit_entry_set_slab, ses);
3780 }
3781 
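/*
 * Keep the set list sorted by entry_cnt in ascending order, so that
 * f2fs_flush_sit_entries() visits the smallest sets first and can fit
 * as many of them as possible into the SIT journal before falling back
 * to SIT pages.
 */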
3782 static void adjust_sit_entry_set(struct sit_entry_set *ses,
3783 						struct list_head *head)
3784 {
3785 	struct sit_entry_set *next = ses;
3786 
3787 	if (list_is_last(&ses->set_list, head))
3788 		return;
3789 
3790 	list_for_each_entry_continue(next, head, set_list)
3791 		if (ses->entry_cnt <= next->entry_cnt)
3792 			break;
3793 
3794 	list_move_tail(&ses->set_list, &next->set_list);
3795 }
3796 
3797 static void add_sit_entry(unsigned int segno, struct list_head *head)
3798 {
3799 	struct sit_entry_set *ses;
3800 	unsigned int start_segno = START_SEGNO(segno);
3801 
3802 	list_for_each_entry(ses, head, set_list) {
3803 		if (ses->start_segno == start_segno) {
3804 			ses->entry_cnt++;
3805 			adjust_sit_entry_set(ses, head);
3806 			return;
3807 		}
3808 	}
3809 
3810 	ses = grab_sit_entry_set();
3811 
3812 	ses->start_segno = start_segno;
3813 	ses->entry_cnt++;
3814 	list_add(&ses->set_list, head);
3815 }
3816 
3817 static void add_sits_in_set(struct f2fs_sb_info *sbi)
3818 {
3819 	struct f2fs_sm_info *sm_info = SM_I(sbi);
3820 	struct list_head *set_list = &sm_info->sit_entry_set;
3821 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
3822 	unsigned int segno;
3823 
3824 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
3825 		add_sit_entry(segno, set_list);
3826 }
3827 
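/*
 * Push every segno cached in the SIT journal back into the dirty
 * bitmap (and into a sit_entry_set if it was not already accounted),
 * then empty the journal so those entries are flushed to SIT pages.
 */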
3828 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
3829 {
3830 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3831 	struct f2fs_journal *journal = curseg->journal;
3832 	int i;
3833 
3834 	down_write(&curseg->journal_rwsem);
3835 	for (i = 0; i < sits_in_cursum(journal); i++) {
3836 		unsigned int segno;
3837 		bool dirtied;
3838 
3839 		segno = le32_to_cpu(segno_in_journal(journal, i));
3840 		dirtied = __mark_sit_entry_dirty(sbi, segno);
3841 
3842 		if (!dirtied)
3843 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
3844 	}
3845 	update_sits_in_cursum(journal, -i);
3846 	up_write(&curseg->journal_rwsem);
3847 }
3848 
3849 /*
3850  * The checkpoint procedure calls this function to flush SIT entries,
3851  * including the sit_journal, and to move prefree segments to free segments.
3852  */
3853 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3854 {
3855 	struct sit_info *sit_i = SIT_I(sbi);
3856 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
3857 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3858 	struct f2fs_journal *journal = curseg->journal;
3859 	struct sit_entry_set *ses, *tmp;
3860 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
3861 	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
3862 	struct seg_entry *se;
3863 
3864 	down_write(&sit_i->sentry_lock);
3865 
3866 	if (!sit_i->dirty_sentries)
3867 		goto out;
3868 
3869 	/*
3870 	 * temporarily add and account the SIT entries marked in the
3871 	 * dirty bitmap in sit entry sets
3872 	 */
3873 	add_sits_in_set(sbi);
3874 
3875 	/*
3876 	 * if there is not enough space in the journal to store dirty
3877 	 * sit entries, remove all entries from the journal and add and
3878 	 * account them in sit entry sets.
3879 	 */
3880 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
3881 								!to_journal)
3882 		remove_sits_in_journal(sbi);
3883 
3884 	/*
3885 	 * there are two steps to flush sit entries:
3886 	 * #1, flush sit entries to journal in current cold data summary block.
3887 	 * #2, flush sit entries to sit page.
3888 	 */
3889 	list_for_each_entry_safe(ses, tmp, head, set_list) {
3890 		struct page *page = NULL;
3891 		struct f2fs_sit_block *raw_sit = NULL;
3892 		unsigned int start_segno = ses->start_segno;
3893 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
3894 						(unsigned long)MAIN_SEGS(sbi));
3895 		unsigned int segno = start_segno;
3896 
3897 		if (to_journal &&
3898 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
3899 			to_journal = false;
3900 
3901 		if (to_journal) {
3902 			down_write(&curseg->journal_rwsem);
3903 		} else {
3904 			page = get_next_sit_page(sbi, start_segno);
3905 			raw_sit = page_address(page);
3906 		}
3907 
3908 		/* flush dirty sit entries in region of current sit set */
3909 		for_each_set_bit_from(segno, bitmap, end) {
3910 			int offset, sit_offset;
3911 
3912 			se = get_seg_entry(sbi, segno);
3913 #ifdef CONFIG_F2FS_CHECK_FS
3914 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
3915 						SIT_VBLOCK_MAP_SIZE))
3916 				f2fs_bug_on(sbi, 1);
3917 #endif
3918 
3919 			/* add discard candidates */
3920 			if (!(cpc->reason & CP_DISCARD)) {
3921 				cpc->trim_start = segno;
3922 				add_discard_addrs(sbi, cpc, false);
3923 			}
3924 
3925 			if (to_journal) {
3926 				offset = f2fs_lookup_journal_in_cursum(journal,
3927 							SIT_JOURNAL, segno, 1);
3928 				f2fs_bug_on(sbi, offset < 0);
3929 				segno_in_journal(journal, offset) =
3930 							cpu_to_le32(segno);
3931 				seg_info_to_raw_sit(se,
3932 					&sit_in_journal(journal, offset));
3933 				check_block_count(sbi, segno,
3934 					&sit_in_journal(journal, offset));
3935 			} else {
3936 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
3937 				seg_info_to_raw_sit(se,
3938 						&raw_sit->entries[sit_offset]);
3939 				check_block_count(sbi, segno,
3940 						&raw_sit->entries[sit_offset]);
3941 			}
3942 
3943 			__clear_bit(segno, bitmap);
3944 			sit_i->dirty_sentries--;
3945 			ses->entry_cnt--;
3946 		}
3947 
3948 		if (to_journal)
3949 			up_write(&curseg->journal_rwsem);
3950 		else
3951 			f2fs_put_page(page, 1);
3952 
3953 		f2fs_bug_on(sbi, ses->entry_cnt);
3954 		release_sit_entry_set(ses);
3955 	}
3956 
3957 	f2fs_bug_on(sbi, !list_empty(head));
3958 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
3959 out:
3960 	if (cpc->reason & CP_DISCARD) {
3961 		__u64 trim_start = cpc->trim_start;
3962 
3963 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
3964 			add_discard_addrs(sbi, cpc, false);
3965 
3966 		cpc->trim_start = trim_start;
3967 	}
3968 	up_write(&sit_i->sentry_lock);
3969 
3970 	set_prefree_as_free_segments(sbi);
3971 }
3972 
3973 static int build_sit_info(struct f2fs_sb_info *sbi)
3974 {
3975 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3976 	struct sit_info *sit_i;
3977 	unsigned int sit_segs, start;
3978 	char *src_bitmap, *bitmap;
3979 	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
3980 
3981 	/* allocate memory for SIT information */
3982 	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
3983 	if (!sit_i)
3984 		return -ENOMEM;
3985 
3986 	SM_I(sbi)->sit_info = sit_i;
3987 
3988 	sit_i->sentries =
3989 		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
3990 					      MAIN_SEGS(sbi)),
3991 			      GFP_KERNEL);
3992 	if (!sit_i->sentries)
3993 		return -ENOMEM;
3994 
3995 	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3996 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
3997 								GFP_KERNEL);
3998 	if (!sit_i->dirty_sentries_bitmap)
3999 		return -ENOMEM;
4000 
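	/*
	 * A single contiguous allocation backs all the per-segment bitmaps:
	 * cur_valid_map, ckpt_valid_map and discard_map (plus
	 * cur_valid_map_mir under CONFIG_F2FS_CHECK_FS), carved out
	 * SIT_VBLOCK_MAP_SIZE bytes at a time in the loop below.
	 */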
4001 #ifdef CONFIG_F2FS_CHECK_FS
4002 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 4;
4003 #else
4004 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 3;
4005 #endif
4006 	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4007 	if (!sit_i->bitmap)
4008 		return -ENOMEM;
4009 
4010 	bitmap = sit_i->bitmap;
4011 
4012 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4013 		sit_i->sentries[start].cur_valid_map = bitmap;
4014 		bitmap += SIT_VBLOCK_MAP_SIZE;
4015 
4016 		sit_i->sentries[start].ckpt_valid_map = bitmap;
4017 		bitmap += SIT_VBLOCK_MAP_SIZE;
4018 
4019 #ifdef CONFIG_F2FS_CHECK_FS
4020 		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4021 		bitmap += SIT_VBLOCK_MAP_SIZE;
4022 #endif
4023 
4024 		sit_i->sentries[start].discard_map = bitmap;
4025 		bitmap += SIT_VBLOCK_MAP_SIZE;
4026 	}
4027 
4028 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4029 	if (!sit_i->tmp_map)
4030 		return -ENOMEM;
4031 
4032 	if (__is_large_section(sbi)) {
4033 		sit_i->sec_entries =
4034 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4035 						      MAIN_SECS(sbi)),
4036 				      GFP_KERNEL);
4037 		if (!sit_i->sec_entries)
4038 			return -ENOMEM;
4039 	}
4040 
4041 	/* get information related to SIT */
4042 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4043 
4044 	/* setup SIT bitmap from checkpoint pack */
4045 	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4046 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4047 
4048 	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4049 	if (!sit_i->sit_bitmap)
4050 		return -ENOMEM;
4051 
4052 #ifdef CONFIG_F2FS_CHECK_FS
4053 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4054 					sit_bitmap_size, GFP_KERNEL);
4055 	if (!sit_i->sit_bitmap_mir)
4056 		return -ENOMEM;
4057 
4058 	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4059 					main_bitmap_size, GFP_KERNEL);
4060 	if (!sit_i->invalid_segmap)
4061 		return -ENOMEM;
4062 #endif
4063 
4064 	/* init SIT information */
4065 	sit_i->s_ops = &default_salloc_ops;
4066 
4067 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4068 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4069 	sit_i->written_valid_blocks = 0;
4070 	sit_i->bitmap_size = sit_bitmap_size;
4071 	sit_i->dirty_sentries = 0;
4072 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4073 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4074 	sit_i->mounted_time = ktime_get_real_seconds();
4075 	init_rwsem(&sit_i->sentry_lock);
4076 	return 0;
4077 }
4078 
4079 static int build_free_segmap(struct f2fs_sb_info *sbi)
4080 {
4081 	struct free_segmap_info *free_i;
4082 	unsigned int bitmap_size, sec_bitmap_size;
4083 
4084 	/* allocate memory for free segmap information */
4085 	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4086 	if (!free_i)
4087 		return -ENOMEM;
4088 
4089 	SM_I(sbi)->free_info = free_i;
4090 
4091 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4092 	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4093 	if (!free_i->free_segmap)
4094 		return -ENOMEM;
4095 
4096 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4097 	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4098 	if (!free_i->free_secmap)
4099 		return -ENOMEM;
4100 
4101 	/* set all segments as used temporarily; init_free_segmap() frees them */
4102 	memset(free_i->free_segmap, 0xff, bitmap_size);
4103 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4104 
4105 	/* init free segmap information */
4106 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4107 	free_i->free_segments = 0;
4108 	free_i->free_sections = 0;
4109 	spin_lock_init(&free_i->segmap_lock);
4110 	return 0;
4111 }
4112 
4113 static int build_curseg(struct f2fs_sb_info *sbi)
4114 {
4115 	struct curseg_info *array;
4116 	int i;
4117 
4118 	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE, sizeof(*array)),
4119 			     GFP_KERNEL);
4120 	if (!array)
4121 		return -ENOMEM;
4122 
4123 	SM_I(sbi)->curseg_array = array;
4124 
4125 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
4126 		mutex_init(&array[i].curseg_mutex);
4127 		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4128 		if (!array[i].sum_blk)
4129 			return -ENOMEM;
4130 		init_rwsem(&array[i].journal_rwsem);
4131 		array[i].journal = f2fs_kzalloc(sbi,
4132 				sizeof(struct f2fs_journal), GFP_KERNEL);
4133 		if (!array[i].journal)
4134 			return -ENOMEM;
4135 		array[i].segno = NULL_SEGNO;
4136 		array[i].next_blkoff = 0;
4137 	}
4138 	return restore_curseg_summaries(sbi);
4139 }
4140 
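/*
 * Populate the in-memory segment entries from the on-disk SIT area
 * (read ahead in BIO_MAX_PAGES batches), then overlay the newer
 * entries cached in the SIT journal, and finally cross-check the
 * accumulated node block count against valid_node_count().
 */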
4141 static int build_sit_entries(struct f2fs_sb_info *sbi)
4142 {
4143 	struct sit_info *sit_i = SIT_I(sbi);
4144 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4145 	struct f2fs_journal *journal = curseg->journal;
4146 	struct seg_entry *se;
4147 	struct f2fs_sit_entry sit;
4148 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4149 	unsigned int i, start, end;
4150 	unsigned int readed, start_blk = 0;
4151 	int err = 0;
4152 	block_t total_node_blocks = 0;
4153 
4154 	do {
4155 		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
4156 							META_SIT, true);
4157 
4158 		start = start_blk * sit_i->sents_per_block;
4159 		end = (start_blk + readed) * sit_i->sents_per_block;
4160 
4161 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
4162 			struct f2fs_sit_block *sit_blk;
4163 			struct page *page;
4164 
4165 			se = &sit_i->sentries[start];
4166 			page = get_current_sit_page(sbi, start);
4167 			if (IS_ERR(page))
4168 				return PTR_ERR(page);
4169 			sit_blk = (struct f2fs_sit_block *)page_address(page);
4170 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4171 			f2fs_put_page(page, 1);
4172 
4173 			err = check_block_count(sbi, start, &sit);
4174 			if (err)
4175 				return err;
4176 			seg_info_from_raw_sit(se, &sit);
4177 			if (IS_NODESEG(se->type))
4178 				total_node_blocks += se->valid_blocks;
4179 
4180 			/* build discard map only once */
4181 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4182 				memset(se->discard_map, 0xff,
4183 					SIT_VBLOCK_MAP_SIZE);
4184 			} else {
4185 				memcpy(se->discard_map,
4186 					se->cur_valid_map,
4187 					SIT_VBLOCK_MAP_SIZE);
4188 				sbi->discard_blks +=
4189 					sbi->blocks_per_seg -
4190 					se->valid_blocks;
4191 			}
4192 
4193 			if (__is_large_section(sbi))
4194 				get_sec_entry(sbi, start)->valid_blocks +=
4195 							se->valid_blocks;
4196 		}
4197 		start_blk += readed;
4198 	} while (start_blk < sit_blk_cnt);
4199 
4200 	down_read(&curseg->journal_rwsem);
4201 	for (i = 0; i < sits_in_cursum(journal); i++) {
4202 		unsigned int old_valid_blocks;
4203 
4204 		start = le32_to_cpu(segno_in_journal(journal, i));
4205 		if (start >= MAIN_SEGS(sbi)) {
4206 			f2fs_err(sbi, "Wrong journal entry on segno %u",
4207 				 start);
4208 			err = -EFSCORRUPTED;
4209 			break;
4210 		}
4211 
4212 		se = &sit_i->sentries[start];
4213 		sit = sit_in_journal(journal, i);
4214 
4215 		old_valid_blocks = se->valid_blocks;
4216 		if (IS_NODESEG(se->type))
4217 			total_node_blocks -= old_valid_blocks;
4218 
4219 		err = check_block_count(sbi, start, &sit);
4220 		if (err)
4221 			break;
4222 		seg_info_from_raw_sit(se, &sit);
4223 		if (IS_NODESEG(se->type))
4224 			total_node_blocks += se->valid_blocks;
4225 
4226 		if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4227 			memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4228 		} else {
4229 			memcpy(se->discard_map, se->cur_valid_map,
4230 						SIT_VBLOCK_MAP_SIZE);
4231 			sbi->discard_blks += old_valid_blocks;
4232 			sbi->discard_blks -= se->valid_blocks;
4233 		}
4234 
4235 		if (__is_large_section(sbi)) {
4236 			get_sec_entry(sbi, start)->valid_blocks +=
4237 							se->valid_blocks;
4238 			get_sec_entry(sbi, start)->valid_blocks -=
4239 							old_valid_blocks;
4240 		}
4241 	}
4242 	up_read(&curseg->journal_rwsem);
4243 
4244 	if (!err && total_node_blocks != valid_node_count(sbi)) {
4245 		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4246 			 total_node_blocks, valid_node_count(sbi));
4247 		err = -EFSCORRUPTED;
4248 	}
4249 
4250 	return err;
4251 }
4252 
4253 static void init_free_segmap(struct f2fs_sb_info *sbi)
4254 {
4255 	unsigned int start;
4256 	int type;
4257 
4258 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4259 		struct seg_entry *sentry = get_seg_entry(sbi, start);
4260 		if (!sentry->valid_blocks)
4261 			__set_free(sbi, start);
4262 		else
4263 			SIT_I(sbi)->written_valid_blocks +=
4264 						sentry->valid_blocks;
4265 	}
4266 
4267 	/* mark the current segments as in use */
4268 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4269 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4270 		__set_test_and_inuse(sbi, curseg_t->segno);
4271 	}
4272 }
4273 
4274 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4275 {
4276 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4277 	struct free_segmap_info *free_i = FREE_I(sbi);
4278 	unsigned int segno = 0, offset = 0;
4279 	unsigned short valid_blocks;
4280 
4281 	while (1) {
4282 		/* find dirty segment based on free segmap */
4283 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4284 		if (segno >= MAIN_SEGS(sbi))
4285 			break;
4286 		offset = segno + 1;
4287 		valid_blocks = get_valid_blocks(sbi, segno, false);
4288 		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
4289 			continue;
4290 		if (valid_blocks > sbi->blocks_per_seg) {
4291 			f2fs_bug_on(sbi, 1);
4292 			continue;
4293 		}
4294 		mutex_lock(&dirty_i->seglist_lock);
4295 		__locate_dirty_segment(sbi, segno, DIRTY);
4296 		mutex_unlock(&dirty_i->seglist_lock);
4297 	}
4298 }
4299 
4300 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4301 {
4302 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4303 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4304 
4305 	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4306 	if (!dirty_i->victim_secmap)
4307 		return -ENOMEM;
4308 	return 0;
4309 }
4310 
4311 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4312 {
4313 	struct dirty_seglist_info *dirty_i;
4314 	unsigned int bitmap_size, i;
4315 
4316 	/* allocate memory for dirty segments list information */
4317 	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4318 								GFP_KERNEL);
4319 	if (!dirty_i)
4320 		return -ENOMEM;
4321 
4322 	SM_I(sbi)->dirty_info = dirty_i;
4323 	mutex_init(&dirty_i->seglist_lock);
4324 
4325 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4326 
4327 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
4328 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4329 								GFP_KERNEL);
4330 		if (!dirty_i->dirty_segmap[i])
4331 			return -ENOMEM;
4332 	}
4333 
4334 	init_dirty_segmap(sbi);
4335 	return init_victim_secmap(sbi);
4336 }
4337 
4338 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4339 {
4340 	int i;
4341 
4342 	/*
4343 	 * In an LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4344 	 * in an LFS curseg, every blkaddr after .next_blkoff should be unused.
4345 	 */
4346 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4347 		struct curseg_info *curseg = CURSEG_I(sbi, i);
4348 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4349 		unsigned int blkofs = curseg->next_blkoff;
4350 
4351 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
4352 			goto out;
4353 
4354 		if (curseg->alloc_type == SSR)
4355 			continue;
4356 
4357 		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4358 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4359 				continue;
4360 out:
4361 			f2fs_err(sbi,
4362 				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4363 				 i, curseg->segno, curseg->alloc_type,
4364 				 curseg->next_blkoff, blkofs);
4365 			return -EFSCORRUPTED;
4366 		}
4367 	}
4368 	return 0;
4369 }
4370 
4371 #ifdef CONFIG_BLK_DEV_ZONED
4372 
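/*
 * Compare a sequential zone's write pointer against f2fs's view of its
 * valid blocks.  A valid block beyond the write pointer is only
 * reported; an empty zone whose write pointer is not at zone start is
 * reset so it can be reused safely.
 */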
4373 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4374 				    struct f2fs_dev_info *fdev,
4375 				    struct blk_zone *zone)
4376 {
4377 	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4378 	block_t zone_block, wp_block, last_valid_block;
4379 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4380 	int i, s, b, ret;
4381 	struct seg_entry *se;
4382 
4383 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4384 		return 0;
4385 
4386 	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4387 	wp_segno = GET_SEGNO(sbi, wp_block);
4388 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4389 	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4390 	zone_segno = GET_SEGNO(sbi, zone_block);
4391 	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4392 
4393 	if (zone_segno >= MAIN_SEGS(sbi))
4394 		return 0;
4395 
4396 	/*
4397 	 * Skip checking the zones that cursegs point to, since
4398 	 * fix_curseg_write_pointer() checks them.
4399 	 */
4400 	for (i = 0; i < NO_CHECK_TYPE; i++)
4401 		if (zone_secno == GET_SEC_FROM_SEG(sbi,
4402 						   CURSEG_I(sbi, i)->segno))
4403 			return 0;
4404 
4405 	/*
4406 	 * Get last valid block of the zone.
4407 	 */
4408 	last_valid_block = zone_block - 1;
4409 	for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4410 		segno = zone_segno + s;
4411 		se = get_seg_entry(sbi, segno);
4412 		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4413 			if (f2fs_test_bit(b, se->cur_valid_map)) {
4414 				last_valid_block = START_BLOCK(sbi, segno) + b;
4415 				break;
4416 			}
4417 		if (last_valid_block >= zone_block)
4418 			break;
4419 	}
4420 
4421 	/*
4422 	 * If the last valid block is beyond the write pointer, report the
4423 	 * inconsistency. It does not cause a write error because the zone
4424 	 * will not be selected for write operations until it gets
4425 	 * discarded, so just report it.
4426 	 */
4427 	if (last_valid_block >= wp_block) {
4428 		f2fs_notice(sbi, "Valid block beyond write pointer: "
4429 			    "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
4430 			    GET_SEGNO(sbi, last_valid_block),
4431 			    GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4432 			    wp_segno, wp_blkoff);
4433 		return 0;
4434 	}
4435 
4436 	/*
4437 	 * If there is no valid block in the zone and the write pointer is
4438 	 * not at zone start, reset the write pointer.
4439 	 */
4440 	if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
4441 		f2fs_notice(sbi,
4442 			    "Zone without valid block has non-zero write "
4443 			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4444 			    wp_segno, wp_blkoff);
4445 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4446 					zone->len >> log_sectors_per_block);
4447 		if (ret) {
4448 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4449 				 fdev->path, ret);
4450 			return ret;
4451 		}
4452 	}
4453 
4454 	return 0;
4455 }
4456 
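/*
 * Return the zoned device holding @zone_blkaddr, or the lone device in
 * a single-device setup; NULL if the address maps to no zoned device.
 */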
4457 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4458 						  block_t zone_blkaddr)
4459 {
4460 	int i;
4461 
4462 	for (i = 0; i < sbi->s_ndevs; i++) {
4463 		if (!bdev_is_zoned(FDEV(i).bdev))
4464 			continue;
4465 		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4466 				zone_blkaddr <= FDEV(i).end_blk))
4467 			return &FDEV(i);
4468 	}
4469 
4470 	return NULL;
4471 }
4472 
4473 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4474 			      void *data)
{
4475 	memcpy(data, zone, sizeof(struct blk_zone));
4476 	return 0;
4477 }
4478 
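/*
 * Ensure the curseg of @type agrees with its device zone's write
 * pointer.  On a mismatch, a fresh section is allocated for the
 * curseg, the old zone is verified (and possibly reset) by
 * check_zone_write_pointer(), and the newly assigned zone is reset if
 * it is not yet empty.
 */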
4479 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4480 {
4481 	struct curseg_info *cs = CURSEG_I(sbi, type);
4482 	struct f2fs_dev_info *zbd;
4483 	struct blk_zone zone;
4484 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4485 	block_t cs_zone_block, wp_block;
4486 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4487 	sector_t zone_sector;
4488 	int err;
4489 
4490 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4491 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4492 
4493 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4494 	if (!zbd)
4495 		return 0;
4496 
4497 	/* report zone for the sector the curseg points to */
4498 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4499 		<< log_sectors_per_block;
4500 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4501 				  report_one_zone_cb, &zone);
4502 	if (err != 1) {
4503 		f2fs_err(sbi, "Report zone failed: %s (errno=%d)",
4504 			 zbd->path, err);
4505 		return err;
4506 	}
4507 
4508 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4509 		return 0;
4510 
4511 	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4512 	wp_segno = GET_SEGNO(sbi, wp_block);
4513 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4514 	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4515 
4516 	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4517 		wp_sector_off == 0)
4518 		return 0;
4519 
4520 	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
4521 		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
4522 		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
4523 
4524 	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
4525 		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
4526 	allocate_segment_by_default(sbi, type, true);
4527 
4528 	/* check consistency of the zone curseg pointed to */
4529 	/* check consistency of the zone the old curseg pointed to */
4530 		return -EIO;
4531 
4532 	/* check newly assigned zone */
4533 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4534 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4535 
4536 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4537 	if (!zbd)
4538 		return 0;
4539 
4540 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4541 		<< log_sectors_per_block;
4542 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4543 				  report_one_zone_cb, &zone);
4544 	if (err != 1) {
4545 		f2fs_err(sbi, "Report zone failed: %s (errno=%d)",
4546 			 zbd->path, err);
4547 		return err;
4548 	}
4549 
4550 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4551 		return 0;
4552 
4553 	if (zone.wp != zone.start) {
4554 		f2fs_notice(sbi,
4555 			    "New zone for curseg[%d] is not yet discarded. "
4556 			    "Reset the zone: curseg[0x%x,0x%x]",
4557 			    type, cs->segno, cs->next_blkoff);
4558 		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
4559 				zone_sector >> log_sectors_per_block,
4560 				zone.len >> log_sectors_per_block);
4561 		if (err) {
4562 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4563 				 zbd->path, err);
4564 			return err;
4565 		}
4566 	}
4567 
4568 	return 0;
4569 }
4570 
4571 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4572 {
4573 	int i, ret;
4574 
4575 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4576 		ret = fix_curseg_write_pointer(sbi, i);
4577 		if (ret)
4578 			return ret;
4579 	}
4580 
4581 	return 0;
4582 }
4583 
4584 struct check_zone_write_pointer_args {
4585 	struct f2fs_sb_info *sbi;
4586 	struct f2fs_dev_info *fdev;
4587 };
4588 
4589 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
4590 				      void *data)
{
4591 	struct check_zone_write_pointer_args *args;
4592 	args = (struct check_zone_write_pointer_args *)data;
4593 
4594 	return check_zone_write_pointer(args->sbi, args->fdev, zone);
4595 }
4596 
4597 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4598 {
4599 	int i, ret;
4600 	struct check_zone_write_pointer_args args;
4601 
4602 	for (i = 0; i < sbi->s_ndevs; i++) {
4603 		if (!bdev_is_zoned(FDEV(i).bdev))
4604 			continue;
4605 
4606 		args.sbi = sbi;
4607 		args.fdev = &FDEV(i);
4608 		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
4609 					  check_zone_write_pointer_cb, &args);
4610 		if (ret < 0)
4611 			return ret;
4612 	}
4613 
4614 	return 0;
4615 }
4616 #else
4617 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4618 {
4619 	return 0;
4620 }
4621 
4622 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4623 {
4624 	return 0;
4625 }
4626 #endif
4627 
4628 /*
4629  * Update min, max modified time for cost-benefit GC algorithm
4630  */
4631 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
4632 {
4633 	struct sit_info *sit_i = SIT_I(sbi);
4634 	unsigned int segno;
4635 
4636 	down_write(&sit_i->sentry_lock);
4637 
4638 	sit_i->min_mtime = ULLONG_MAX;
4639 
4640 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4641 		unsigned int i;
4642 		unsigned long long mtime = 0;
4643 
4644 		for (i = 0; i < sbi->segs_per_sec; i++)
4645 			mtime += get_seg_entry(sbi, segno + i)->mtime;
4646 
4647 		mtime = div_u64(mtime, sbi->segs_per_sec);
4648 
4649 		if (sit_i->min_mtime > mtime)
4650 			sit_i->min_mtime = mtime;
4651 	}
4652 	sit_i->max_mtime = get_mtime(sbi, false);
4653 	up_write(&sit_i->sentry_lock);
4654 }
4655 
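/*
 * Build the whole segment manager: static sm_info fields first, then
 * the flush/discard controls, SIT info, free segmap, current segments,
 * SIT entries (which re-initialize the free segmap), dirty segmap, a
 * curseg consistency check, and finally the min/max mtimes for GC.
 */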
4656 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
4657 {
4658 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4659 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4660 	struct f2fs_sm_info *sm_info;
4661 	int err;
4662 
4663 	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
4664 	if (!sm_info)
4665 		return -ENOMEM;
4666 
4667 	/* init sm info */
4668 	sbi->sm_info = sm_info;
4669 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
4670 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
4671 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
4672 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
4673 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
4674 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
4675 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
4676 	sm_info->rec_prefree_segments = sm_info->main_segments *
4677 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
4678 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
4679 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
4680 
4681 	if (!test_opt(sbi, LFS))
4682 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
4683 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
4684 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
4685 	sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
4686 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
4687 	sm_info->min_ssr_sections = reserved_sections(sbi);
4688 
4689 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
4690 
4691 	init_rwsem(&sm_info->curseg_lock);
4692 
4693 	if (!f2fs_readonly(sbi->sb)) {
4694 		err = f2fs_create_flush_cmd_control(sbi);
4695 		if (err)
4696 			return err;
4697 	}
4698 
4699 	err = create_discard_cmd_control(sbi);
4700 	if (err)
4701 		return err;
4702 
4703 	err = build_sit_info(sbi);
4704 	if (err)
4705 		return err;
4706 	err = build_free_segmap(sbi);
4707 	if (err)
4708 		return err;
4709 	err = build_curseg(sbi);
4710 	if (err)
4711 		return err;
4712 
4713 	/* reinit free segmap based on SIT */
4714 	err = build_sit_entries(sbi);
4715 	if (err)
4716 		return err;
4717 
4718 	init_free_segmap(sbi);
4719 	err = build_dirty_segmap(sbi);
4720 	if (err)
4721 		return err;
4722 
4723 	err = sanity_check_curseg(sbi);
4724 	if (err)
4725 		return err;
4726 
4727 	init_min_max_mtime(sbi);
4728 	return 0;
4729 }
4730 
4731 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
4732 		enum dirty_type dirty_type)
4733 {
4734 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4735 
4736 	mutex_lock(&dirty_i->seglist_lock);
4737 	kvfree(dirty_i->dirty_segmap[dirty_type]);
4738 	dirty_i->nr_dirty[dirty_type] = 0;
4739 	mutex_unlock(&dirty_i->seglist_lock);
4740 }
4741 
4742 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
4743 {
4744 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4745 	kvfree(dirty_i->victim_secmap);
4746 }
4747 
4748 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
4749 {
4750 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4751 	int i;
4752 
4753 	if (!dirty_i)
4754 		return;
4755 
4756 	/* discard pre-free/dirty segments list */
4757 	for (i = 0; i < NR_DIRTY_TYPE; i++)
4758 		discard_dirty_segmap(sbi, i);
4759 
4760 	destroy_victim_secmap(sbi);
4761 	SM_I(sbi)->dirty_info = NULL;
4762 	kvfree(dirty_i);
4763 }
4764 
4765 static void destroy_curseg(struct f2fs_sb_info *sbi)
4766 {
4767 	struct curseg_info *array = SM_I(sbi)->curseg_array;
4768 	int i;
4769 
4770 	if (!array)
4771 		return;
4772 	SM_I(sbi)->curseg_array = NULL;
4773 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
4774 		kvfree(array[i].sum_blk);
4775 		kvfree(array[i].journal);
4776 	}
4777 	kvfree(array);
4778 }
4779 
4780 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
4781 {
4782 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
4783 	if (!free_i)
4784 		return;
4785 	SM_I(sbi)->free_info = NULL;
4786 	kvfree(free_i->free_segmap);
4787 	kvfree(free_i->free_secmap);
4788 	kvfree(free_i);
4789 }
4790 
4791 static void destroy_sit_info(struct f2fs_sb_info *sbi)
4792 {
4793 	struct sit_info *sit_i = SIT_I(sbi);
4794 
4795 	if (!sit_i)
4796 		return;
4797 
4798 	if (sit_i->sentries)
4799 		kvfree(sit_i->bitmap);
4800 	kvfree(sit_i->tmp_map);
4801 
4802 	kvfree(sit_i->sentries);
4803 	kvfree(sit_i->sec_entries);
4804 	kvfree(sit_i->dirty_sentries_bitmap);
4805 
4806 	SM_I(sbi)->sit_info = NULL;
4807 	kvfree(sit_i->sit_bitmap);
4808 #ifdef CONFIG_F2FS_CHECK_FS
4809 	kvfree(sit_i->sit_bitmap_mir);
4810 	kvfree(sit_i->invalid_segmap);
4811 #endif
4812 	kvfree(sit_i);
4813 }
4814 
4815 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
4816 {
4817 	struct f2fs_sm_info *sm_info = SM_I(sbi);
4818 
4819 	if (!sm_info)
4820 		return;
4821 	f2fs_destroy_flush_cmd_control(sbi, true);
4822 	destroy_discard_cmd_control(sbi);
4823 	destroy_dirty_segmap(sbi);
4824 	destroy_curseg(sbi);
4825 	destroy_free_segmap(sbi);
4826 	destroy_sit_info(sbi);
4827 	sbi->sm_info = NULL;
4828 	kvfree(sm_info);
4829 }
4830 
4831 int __init f2fs_create_segment_manager_caches(void)
4832 {
4833 	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
4834 			sizeof(struct discard_entry));
4835 	if (!discard_entry_slab)
4836 		goto fail;
4837 
4838 	discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
4839 			sizeof(struct discard_cmd));
4840 	if (!discard_cmd_slab)
4841 		goto destroy_discard_entry;
4842 
4843 	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
4844 			sizeof(struct sit_entry_set));
4845 	if (!sit_entry_set_slab)
4846 		goto destroy_discard_cmd;
4847 
4848 	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
4849 			sizeof(struct inmem_pages));
4850 	if (!inmem_entry_slab)
4851 		goto destroy_sit_entry_set;
4852 	return 0;
4853 
4854 destroy_sit_entry_set:
4855 	kmem_cache_destroy(sit_entry_set_slab);
4856 destroy_discard_cmd:
4857 	kmem_cache_destroy(discard_cmd_slab);
4858 destroy_discard_entry:
4859 	kmem_cache_destroy(discard_entry_slab);
4860 fail:
4861 	return -ENOMEM;
4862 }
4863 
4864 void f2fs_destroy_segment_manager_caches(void)
4865 {
4866 	kmem_cache_destroy(sit_entry_set_slab);
4867 	kmem_cache_destroy(discard_cmd_slab);
4868 	kmem_cache_destroy(discard_entry_slab);
4869 	kmem_cache_destroy(inmem_entry_slab);
4870 }
4871