1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "alloc_background.h"
5 #include "backpointers.h"
6 #include "btree_gc.h"
7 #include "btree_node_scan.h"
8 #include "ec.h"
9 #include "fsck.h"
10 #include "inode.h"
11 #include "journal.h"
12 #include "lru.h"
13 #include "logged_ops.h"
14 #include "rebalance.h"
15 #include "recovery.h"
16 #include "recovery_passes.h"
17 #include "snapshot.h"
18 #include "subvolume.h"
19 #include "super.h"
20 #include "super-io.h"
21
/* Human-readable pass names, indexed by enum bch_recovery_pass; NULL-terminated */
const char * const bch2_recovery_passes[] = {
#define x(_fn, ...)	#_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};
28
/*
 * Mark the filesystem as allowed to transition to read-write, and actually go
 * RW early if anything will need to write: replayed journal keys, fsck, an
 * unclean shutdown, or explicitly requested recovery passes.
 */
static int bch2_set_may_go_rw(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	/*
	 * After we go RW, the journal keys buffer can't be modified (except for
	 * setting journal_key->overwritten: it will be accessed by multiple
	 * threads)
	 */
	move_gap(keys, keys->nr);

	set_bit(BCH_FS_may_go_rw, &c->flags);

	if (keys->nr || c->opts.fsck || !c->sb.clean || c->recovery_passes_explicit)
		return bch2_fs_read_write_early(c);
	return 0;
}
46
/* One recovery pass: its implementation, and PASS_* flags saying when it runs */
struct recovery_pass_fn {
	int (*fn)(struct bch_fs *);
	unsigned when;
};
51
/* Table of all recovery passes, indexed by enum bch_recovery_pass */
static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};
57
/* Maps in-memory pass numbers to stable (on-disk) pass numbers */
static const u8 passes_to_stable_map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
	BCH_RECOVERY_PASSES()
#undef x
};
63
/* Translate a single in-memory pass number to its stable (on-disk) number */
static enum bch_recovery_pass_stable bch2_recovery_pass_to_stable(enum bch_recovery_pass pass)
{
	return passes_to_stable_map[pass];
}
68
/*
 * Translate a bitmask of in-memory pass numbers to a bitmask of stable
 * (on-disk) pass numbers.
 */
u64 bch2_recovery_passes_to_stable(u64 v)
{
	u64 out = 0;
	unsigned bit = 0;

	while (bit < ARRAY_SIZE(passes_to_stable_map)) {
		if (v & BIT_ULL(bit))
			out |= BIT_ULL(passes_to_stable_map[bit]);
		bit++;
	}

	return out;
}
77
/*
 * Translate a bitmask of stable (on-disk) pass numbers back to a bitmask of
 * in-memory pass numbers — the inverse of bch2_recovery_passes_to_stable().
 */
u64 bch2_recovery_passes_from_stable(u64 v)
{
	/* Inverse of passes_to_stable_map */
	static const u8 stable_to_pass[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};
	u64 out = 0;
	unsigned bit = 0;

	while (bit < ARRAY_SIZE(stable_to_pass)) {
		if (v & BIT_ULL(bit))
			out |= BIT_ULL(stable_to_pass[bit]);
		bit++;
	}

	return out;
}
92
93 /*
94 * For when we need to rewind recovery passes and run a pass we skipped:
95 */
bch2_run_explicit_recovery_pass(struct bch_fs * c,enum bch_recovery_pass pass)96 int bch2_run_explicit_recovery_pass(struct bch_fs *c,
97 enum bch_recovery_pass pass)
98 {
99 if (c->recovery_passes_explicit & BIT_ULL(pass))
100 return 0;
101
102 bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
103 bch2_recovery_passes[pass], pass,
104 bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
105
106 c->recovery_passes_explicit |= BIT_ULL(pass);
107
108 if (c->curr_recovery_pass >= pass) {
109 c->curr_recovery_pass = pass;
110 c->recovery_passes_complete &= (1ULL << pass) >> 1;
111 return -BCH_ERR_restart_recovery;
112 } else {
113 return 0;
114 }
115 }
116
bch2_run_explicit_recovery_pass_persistent(struct bch_fs * c,enum bch_recovery_pass pass)117 int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c,
118 enum bch_recovery_pass pass)
119 {
120 enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);
121
122 mutex_lock(&c->sb_lock);
123 struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
124
125 if (!test_bit_le64(s, ext->recovery_passes_required)) {
126 __set_bit_le64(s, ext->recovery_passes_required);
127 bch2_write_super(c);
128 }
129 mutex_unlock(&c->sb_lock);
130
131 return bch2_run_explicit_recovery_pass(c, pass);
132 }
133
bch2_clear_recovery_pass_required(struct bch_fs * c,enum bch_recovery_pass pass)134 static void bch2_clear_recovery_pass_required(struct bch_fs *c,
135 enum bch_recovery_pass pass)
136 {
137 enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);
138
139 mutex_lock(&c->sb_lock);
140 struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
141
142 if (test_bit_le64(s, ext->recovery_passes_required)) {
143 __clear_bit_le64(s, ext->recovery_passes_required);
144 bch2_write_super(c);
145 }
146 mutex_unlock(&c->sb_lock);
147 }
148
bch2_fsck_recovery_passes(void)149 u64 bch2_fsck_recovery_passes(void)
150 {
151 u64 ret = 0;
152
153 for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
154 if (recovery_pass_fns[i].when & PASS_FSCK)
155 ret |= BIT_ULL(i);
156 return ret;
157 }
158
should_run_recovery_pass(struct bch_fs * c,enum bch_recovery_pass pass)159 static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
160 {
161 struct recovery_pass_fn *p = recovery_pass_fns + pass;
162
163 if (c->recovery_passes_explicit & BIT_ULL(pass))
164 return true;
165 if ((p->when & PASS_FSCK) && c->opts.fsck)
166 return true;
167 if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
168 return true;
169 if (p->when & PASS_ALWAYS)
170 return true;
171 return false;
172 }
173
bch2_run_recovery_pass(struct bch_fs * c,enum bch_recovery_pass pass)174 static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
175 {
176 struct recovery_pass_fn *p = recovery_pass_fns + pass;
177 int ret;
178
179 if (!(p->when & PASS_SILENT))
180 bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
181 bch2_recovery_passes[pass]);
182 ret = p->fn(c);
183 if (ret)
184 return ret;
185 if (!(p->when & PASS_SILENT))
186 bch2_print(c, KERN_CONT " done\n");
187
188 return 0;
189 }
190
/*
 * Run all recovery passes flagged PASS_ONLINE against an already-mounted
 * filesystem, resuming from an earlier pass if one of them requests a restart.
 */
int bch2_run_online_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
		struct recovery_pass_fn *p = recovery_pass_fns + i;

		if (!(p->when & PASS_ONLINE))
			continue;

		ret = bch2_run_recovery_pass(c, i);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
			/*
			 * A pass rewound recovery; resume from the pass it
			 * rewound to.
			 *
			 * NOTE(review): `continue` still executes the loop's
			 * i++, so the next pass run is curr_recovery_pass + 1 —
			 * confirm the rewound-to pass isn't meant to be re-run
			 * here (possible off-by-one).
			 */
			i = c->curr_recovery_pass;
			continue;
		}
		if (ret)
			break;
	}

	return ret;
}
212
/*
 * Main recovery loop: run each pass that should_run_recovery_pass() selects,
 * in order, honoring opts.recovery_pass_last and restarting from an earlier
 * pass whenever one rewinds recovery.
 */
int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
		/* The user may cap how far recovery runs: */
		if (c->opts.recovery_pass_last &&
		    c->curr_recovery_pass > c->opts.recovery_pass_last)
			break;

		if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
			unsigned pass = c->curr_recovery_pass;

			ret = bch2_run_recovery_pass(c, c->curr_recovery_pass) ?:
				bch2_journal_flush(&c->journal);
			/*
			 * If the pass rewound recovery (explicit restart, or
			 * any error after curr_recovery_pass moved backwards),
			 * loop again from the new position instead of bailing:
			 */
			if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
			    (ret && c->curr_recovery_pass < pass))
				continue;
			if (ret)
				break;

			c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
		}

		c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);

		/*
		 * Only clear the on-disk "pass required" bit if no errors have
		 * been seen — otherwise it must run again on the next mount:
		 */
		if (!test_bit(BCH_FS_error, &c->flags))
			bch2_clear_recovery_pass_required(c, c->curr_recovery_pass);

		c->curr_recovery_pass++;
	}

	return ret;
}
246