// SPDX-License-Identifier: GPL-2.0
/* linux/fs/bcachefs/recovery_passes.c (revision 4f19a60c) */

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_gc.h"
#include "btree_node_scan.h"
#include "disk_accounting.h"
#include "ec.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "lru.h"
#include "logged_ops.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"

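/*
 * Human-readable names for each recovery pass, generated from the
 * BCH_RECOVERY_PASSES() x-macro and indexed by enum bch_recovery_pass;
 * the array is NULL-terminated.
 */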
const char * const bch2_recovery_passes[] = {
#define x(_fn, ...)	#_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

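/*
 * Itself run as a recovery pass: marks the point at which the filesystem is
 * allowed to transition to read-write, and goes RW early if there are
 * journal keys to replay, fsck was requested, the filesystem was unclean, or
 * extra recovery passes were requested.
 */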
static int bch2_set_may_go_rw(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	/*
	 * After we go RW, the journal keys buffer can't be modified (except
	 * for setting journal_key->overwritten), since it will be accessed by
	 * multiple threads:
	 */
	move_gap(keys, keys->nr);

	set_bit(BCH_FS_may_go_rw, &c->flags);

	if (keys->nr || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes)
		return bch2_fs_read_write_early(c);
	return 0;
}

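/*
 * Each recovery pass is a function plus PASS_* flags describing when it
 * should run (always, only on unclean shutdown, during fsck, online, etc.);
 * the table below is generated from the BCH_RECOVERY_PASSES() x-macro.
 */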
struct recovery_pass_fn {
	int		(*fn)(struct bch_fs *);
	unsigned	when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _id, _when)	{ .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};

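/*
 * The in-memory pass numbering (enum bch_recovery_pass) may be reordered
 * between versions; the stable numbering (enum bch_recovery_pass_stable) is
 * what gets written to the superblock, so the two need mapping in both
 * directions:
 */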
static const u8 passes_to_stable_map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
	BCH_RECOVERY_PASSES()
#undef x
};

static enum bch_recovery_pass_stable bch2_recovery_pass_to_stable(enum bch_recovery_pass pass)
{
	return passes_to_stable_map[pass];
}

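/* Convert a bitmask of passes from in-memory to stable numbering: */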
u64 bch2_recovery_passes_to_stable(u64 v)
{
	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(passes_to_stable_map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(passes_to_stable_map[i]);
	return ret;
}

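/* Convert a bitmask of passes from stable back to in-memory numbering: */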
u64 bch2_recovery_passes_from_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

/*
 * For when we need to rewind recovery passes and run a pass we skipped:
 * if recovery is already at or past @pass, this rewinds curr_recovery_pass
 * and returns -BCH_ERR_restart_recovery so the caller restarts the pass
 * loop; otherwise @pass is simply scheduled to run when we reach it.
 */
int bch2_run_explicit_recovery_pass(struct bch_fs *c,
				    enum bch_recovery_pass pass)
{
	if (c->opts.recovery_passes & BIT_ULL(pass))
		return 0;

	bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
		 bch2_recovery_passes[pass], pass,
		 bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);

	c->opts.recovery_passes |= BIT_ULL(pass);

	if (c->curr_recovery_pass >= pass) {
		c->curr_recovery_pass = pass;
		/* only passes before @pass remain complete: */
		c->recovery_passes_complete &= (1ULL << pass) - 1;
		return -BCH_ERR_restart_recovery;
	} else {
		return 0;
	}
}

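/*
 * As above, but also records the pass in the superblock's
 * recovery_passes_required bitmap (in stable numbering), so it will still be
 * run on the next mount if we crash before it completes:
 */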
int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c,
					       enum bch_recovery_pass pass)
{
	enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	if (!test_bit_le64(s, ext->recovery_passes_required)) {
		__set_bit_le64(s, ext->recovery_passes_required);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	return bch2_run_explicit_recovery_pass(c, pass);
}

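/*
 * Clear a pass from the superblock's recovery_passes_required bitmap once it
 * has completed without errors:
 */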
static void bch2_clear_recovery_pass_required(struct bch_fs *c,
					      enum bch_recovery_pass pass)
{
	enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	if (test_bit_le64(s, ext->recovery_passes_required)) {
		__clear_bit_le64(s, ext->recovery_passes_required);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);
}

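/* Returns a bitmask (in-memory numbering) of the passes that fsck runs: */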
u64 bch2_fsck_recovery_passes(void)
{
	u64 ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
		if (recovery_pass_fns[i].when & PASS_FSCK)
			ret |= BIT_ULL(i);
	return ret;
}

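/*
 * A pass runs if it was explicitly requested and not excluded, or if its
 * PASS_* flags match the current situation (fsck requested, unclean
 * shutdown, or always):
 */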
static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;

	if (c->opts.recovery_passes_exclude & BIT_ULL(pass))
		return false;
	if (c->opts.recovery_passes & BIT_ULL(pass))
		return true;
	if ((p->when & PASS_FSCK) && c->opts.fsck)
		return true;
	if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
		return true;
	if (p->when & PASS_ALWAYS)
		return true;
	return false;
}

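/* Run a single pass, logging its name and completion unless PASS_SILENT: */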
static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;
	int ret;

	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
			   bch2_recovery_passes[pass]);
	ret = p->fn(c);
	if (ret)
		return ret;
	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_CONT " done\n");

	return 0;
}

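/*
 * Run the PASS_ONLINE subset of the recovery passes against an already
 * mounted filesystem, resuming from curr_recovery_pass if a pass asks for
 * recovery to be rewound:
 */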
int bch2_run_online_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	down_read(&c->state_lock);

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
		struct recovery_pass_fn *p = recovery_pass_fns + i;

		if (!(p->when & PASS_ONLINE))
			continue;

		ret = bch2_run_recovery_pass(c, i);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
			i = c->curr_recovery_pass;
			continue;
		}
		if (ret)
			break;
	}

	up_read(&c->state_lock);

	return ret;
}

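/*
 * The main recovery pass loop: runs each pass in order (stopping after
 * opts.recovery_pass_last if set), rewinds when a pass returns
 * -BCH_ERR_restart_recovery, and clears each pass from the superblock's
 * required bitmap as it completes, so long as no errors have been hit:
 */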
int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
		if (c->opts.recovery_pass_last &&
		    c->curr_recovery_pass > c->opts.recovery_pass_last)
			break;

		if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
			unsigned pass = c->curr_recovery_pass;

			ret =   bch2_run_recovery_pass(c, c->curr_recovery_pass) ?:
				bch2_journal_flush(&c->journal);
			if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
			    (ret && c->curr_recovery_pass < pass))
				continue;
			if (ret)
				break;

			c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
		}

		c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);

		if (!test_bit(BCH_FS_error, &c->flags))
			bch2_clear_recovery_pass_required(c, c->curr_recovery_pass);

		c->curr_recovery_pass++;
	}

	return ret;
}
253