/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_H
#define _BCACHEFS_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There are also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work, but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things
 * to it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1 MB - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
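 * As an illustrative sketch (hypothetical names - the real checks live in the
 * pointer validation code), pointer validity boils down to a generation
 * comparison:
 *
 *	bool ptr_is_stale(const struct bucket *b, u8 ptr_gen)
 *	{
 *		return ptr_gen != b->gen;
 *	}
 *
 * and invalidating every pointer into a bucket is just b->gen++, persisted
 * lazily in batches as described above.
 *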
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
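 * For example (purely illustrative - the real search lives in the btree
 * iterator code), because keys store the ending offset, the extent covering a
 * given offset is found by searching for the first key with a strictly greater
 * offset; that key's extent is [k.offset - k.size, k.offset), and it covers the
 * searched-for offset iff k.offset - k.size <= offset.
 *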
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree; insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, and for background writeback, and for
 * the moving garbage collector.
 *
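 * Conceptually (a sketch with hypothetical helper names, not the real update
 * path), BTREE_REPLACE behaves like a compare-and-swap on the index:
 *
 *	if (index_still_contains(btree, old_key))
 *		index_insert(btree, new_key);
 *	else
 *		drop the insert (e.g. a foreground write raced with writeback)
 *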
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1 MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
 *
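 * A lookup within a node is then, roughly (illustrative only - the real code
 * also merges iteration across sets and filters out stale keys):
 *
 *	for each sorted set s in the node, newest to oldest:
 *		binary search s for the search key
 *	return the best match found across all sets
 *
 * which stays cheap because older sets are lazily compacted into one big set.
 *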
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of amount of metadata writes,
 * and it puts more strain on the various btree resorting/compacting code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
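 * In other words, the write path looks roughly like this (a sketch with
 * hypothetical helper names - the real flow spans the journal and btree update
 * code):
 *
 *	journal_add_keys(keys);		appended to the current journal entry
 *	complete_write();		safe: replay will reinsert these keys
 *	btree_node_buffer_keys(keys);	flushed once ~4k of keys accumulates
 *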
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#undef pr_fmt
#ifdef __KERNEL__
#define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__
#else
#define pr_fmt(fmt) "%s() " fmt "\n", __func__
#endif

#include <linux/backing-dev-defs.h>
#include <linux/bug.h>
#include <linux/bio.h>
#include <linux/closure.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mutex.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/seqlock.h>
#include <linux/shrinker.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/zstd.h>

#include "bcachefs_format.h"
#include "errcode.h"
#include "fifo.h"
#include "nocow_locking_types.h"
#include "opts.h"
#include "recovery_passes_types.h"
#include "sb-errors_types.h"
#include "seqmutex.h"
#include "time_stats.h"
#include "util.h"

#ifdef CONFIG_BCACHEFS_DEBUG
#define BCH_WRITE_REF_DEBUG
#endif

#ifndef dynamic_fault
#define dynamic_fault(...) 0
#endif

#define race_fault(...) dynamic_fault("bcachefs:race")

#define count_event(_c, _name) this_cpu_inc((_c)->counters[BCH_COUNTER_##_name])

#define trace_and_count(_c, _name, ...) \
do { \
	count_event(_c, _name); \
	trace_##_name(__VA_ARGS__); \
} while (0)
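
/*
 * Illustrative usage (schematic - assumes a matching BCH_COUNTER_<event> entry
 * and trace_<event>() tracepoint exist for the event being counted):
 *
 *	trace_and_count(c, btree_node_split, <tracepoint args>);
 *
 * bumps the per-cpu persistent counter and emits the tracepoint in one go.
 */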

#define bch2_fs_init_fault(name) \
	dynamic_fault("bcachefs:bch_fs_init:" name)
#define bch2_meta_read_fault(name) \
	dynamic_fault("bcachefs:meta:read:" name)
#define bch2_meta_write_fault(name) \
	dynamic_fault("bcachefs:meta:write:" name)

#ifdef __KERNEL__
#define BCACHEFS_LOG_PREFIX
#endif

#ifdef BCACHEFS_LOG_PREFIX

#define bch2_log_msg(_c, fmt) "bcachefs (%s): " fmt, ((_c)->name)
#define bch2_fmt_dev(_ca, fmt) "bcachefs (%s): " fmt "\n", ((_ca)->name)
#define bch2_fmt_dev_offset(_ca, _offset, fmt) "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
#define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
	"bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)

#else

#define bch2_log_msg(_c, fmt) fmt
#define bch2_fmt_dev(_ca, fmt) "%s: " fmt "\n", ((_ca)->name)
#define bch2_fmt_dev_offset(_ca, _offset, fmt) "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
#define bch2_fmt_inum(_c, _inum, fmt) "inum %llu: " fmt "\n", (_inum)
#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
	"inum %llu offset %llu: " fmt "\n", (_inum), (_offset)

#endif

#define bch2_fmt(_c, fmt) bch2_log_msg(_c, fmt "\n")

__printf(2, 3)
void bch2_print_opts(struct bch_opts *, const char *, ...);

__printf(2, 3)
void __bch2_print(struct bch_fs *c, const char *fmt, ...);

#define maybe_dev_to_fs(_c) _Generic((_c), \
	struct bch_dev *: ((struct bch_dev *) (_c))->fs, \
	struct bch_fs *: (_c))

#define bch2_print(_c, ...) __bch2_print(maybe_dev_to_fs(_c), __VA_ARGS__)

#define bch2_print_ratelimited(_c, ...) \
do { \
	static DEFINE_RATELIMIT_STATE(_rs, \
				      DEFAULT_RATELIMIT_INTERVAL, \
				      DEFAULT_RATELIMIT_BURST); \
	\
	if (__ratelimit(&_rs)) \
		bch2_print(_c, __VA_ARGS__); \
} while (0)

#define bch_info(c, fmt, ...) \
	bch2_print(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_notice(c, fmt, ...) \
	bch2_print(c, KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn(c, fmt, ...) \
	bch2_print(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn_ratelimited(c, fmt, ...) \
	bch2_print_ratelimited(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)

#define bch_err(c, fmt, ...) \
	bch2_print(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err_dev(ca, fmt, ...) \
	bch2_print(c, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
#define bch_err_dev_offset(ca, _offset, fmt, ...) \
	bch2_print(c, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
#define bch_err_inum(c, _inum, fmt, ...) \
	bch2_print(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
#define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
	bch2_print(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)

#define bch_err_ratelimited(c, fmt, ...) \
	bch2_print_ratelimited(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err_dev_ratelimited(ca, fmt, ...) \
	bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
#define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
	bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
#define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
	bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
#define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
	bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)

static inline bool should_print_err(int err)
{
	return err && !bch2_err_matches(err, BCH_ERR_transaction_restart);
}

#define bch_err_fn(_c, _ret) \
do { \
	if (should_print_err(_ret)) \
		bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
} while (0)
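
/*
 * Typical usage (a sketch): log an error tagged with the calling function's
 * name, while staying silent for transaction restarts, which are expected and
 * not real errors:
 *
 *	int ret = some_operation(trans);	(hypothetical callee)
 *	bch_err_fn(c, ret);
 *	return ret;
 */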

#define bch_err_fn_ratelimited(_c, _ret) \
do { \
	if (should_print_err(_ret)) \
		bch_err_ratelimited(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
} while (0)

#define bch_err_msg(_c, _ret, _msg, ...) \
do { \
	if (should_print_err(_ret)) \
		bch_err(_c, "%s(): error " _msg " %s", __func__, \
			##__VA_ARGS__, bch2_err_str(_ret)); \
} while (0)

#define bch_verbose(c, fmt, ...) \
do { \
	if ((c)->opts.verbose) \
		bch_info(c, fmt, ##__VA_ARGS__); \
} while (0)

#define pr_verbose_init(opts, fmt, ...) \
do { \
	if (opt_get(opts, verbose)) \
		pr_info(fmt, ##__VA_ARGS__); \
} while (0)

/* Parameters that are useful for debugging, but should always be compiled in: */
#define BCH_DEBUG_PARAMS_ALWAYS() \
	BCH_DEBUG_PARAM(key_merging_disabled, \
		"Disables merging of extents") \
	BCH_DEBUG_PARAM(btree_node_merging_disabled, \
		"Disables merging of btree nodes") \
	BCH_DEBUG_PARAM(btree_gc_always_rewrite, \
		"Causes mark and sweep to compact and rewrite every " \
		"btree node it traverses") \
	BCH_DEBUG_PARAM(btree_gc_rewrite_disabled, \
		"Disables rewriting of btree nodes during mark and sweep") \
	BCH_DEBUG_PARAM(btree_shrinker_disabled, \
		"Disables the shrinker callback for the btree node cache") \
	BCH_DEBUG_PARAM(verify_btree_ondisk, \
		"Reread btree nodes at various points to verify the " \
		"mergesort in the read path against modifications " \
		"done in memory") \
	BCH_DEBUG_PARAM(verify_all_btree_replicas, \
		"When reading btree nodes, read all replicas and " \
		"compare them") \
	BCH_DEBUG_PARAM(backpointers_no_use_write_buffer, \
		"Don't use the write buffer for backpointers, enabling " \
		"extra runtime checks")

/* Parameters that should only be compiled in debug mode: */
#define BCH_DEBUG_PARAMS_DEBUG() \
	BCH_DEBUG_PARAM(expensive_debug_checks, \
		"Enables various runtime debugging checks that " \
		"significantly affect performance") \
	BCH_DEBUG_PARAM(debug_check_iterators, \
		"Enables extra verification for btree iterators") \
	BCH_DEBUG_PARAM(debug_check_btree_accounting, \
		"Verify btree accounting for keys within a node") \
	BCH_DEBUG_PARAM(journal_seq_verify, \
		"Store the journal sequence number in the version " \
		"number of every btree key, and verify that btree " \
		"update ordering is preserved during recovery") \
	BCH_DEBUG_PARAM(inject_invalid_keys, \
		"Store the journal sequence number in the version " \
		"number of every btree key, and verify that btree " \
		"update ordering is preserved during recovery") \
	BCH_DEBUG_PARAM(test_alloc_startup, \
		"Force allocator startup to use the slowpath where it " \
		"can't find enough free buckets without invalidating " \
		"cached data") \
	BCH_DEBUG_PARAM(force_reconstruct_read, \
		"Force reads to use the reconstruct path, when reading " \
		"from erasure coded extents") \
	BCH_DEBUG_PARAM(test_restart_gc, \
		"Test restarting mark and sweep gc when bucket gens change")

#define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()

#ifdef CONFIG_BCACHEFS_DEBUG
#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL()
#else
#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
#endif

#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

#ifndef CONFIG_BCACHEFS_DEBUG
#define BCH_DEBUG_PARAM(name, description) static const __maybe_unused bool bch2_##name;
BCH_DEBUG_PARAMS_DEBUG()
#undef BCH_DEBUG_PARAM
#endif

#define BCH_TIME_STATS() \
	x(btree_node_mem_alloc) \
	x(btree_node_split) \
	x(btree_node_compact) \
	x(btree_node_merge) \
	x(btree_node_sort) \
	x(btree_node_read) \
	x(btree_node_read_done) \
	x(btree_interior_update_foreground) \
	x(btree_interior_update_total) \
	x(btree_gc) \
	x(data_write) \
	x(data_read) \
	x(data_promote) \
	x(journal_flush_write) \
	x(journal_noflush_write) \
	x(journal_flush_seq) \
	x(blocked_journal_low_on_space) \
	x(blocked_journal_low_on_pin) \
	x(blocked_journal_max_in_flight) \
	x(blocked_allocate) \
	x(blocked_allocate_open_bucket) \
	x(blocked_write_buffer_full) \
	x(nocow_lock_contended)

enum bch_time_stats {
#define x(name) BCH_TIME_##name,
	BCH_TIME_STATS()
#undef x
	BCH_TIME_STAT_NR
};
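
/*
 * The BCH_TIME_STATS() x-macro above expands, with x(name) defined as
 * BCH_TIME_##name, to BCH_TIME_btree_node_mem_alloc, BCH_TIME_btree_node_split,
 * ..., ending with BCH_TIME_STAT_NR as the count. The same list can be
 * re-expanded elsewhere - for example (a sketch, not necessarily how the tree
 * does it) to generate a table of names:
 *
 *	static const char * const time_stat_names[] = {
 *	#define x(name)	#name,
 *		BCH_TIME_STATS()
 *	#undef x
 *		NULL
 *	};
 */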

#include "alloc_types.h"
#include "btree_gc_types.h"
#include "btree_types.h"
#include "btree_node_scan_types.h"
#include "btree_write_buffer_types.h"
#include "buckets_types.h"
#include "buckets_waiting_for_journal_types.h"
#include "clock_types.h"
#include "disk_groups_types.h"
#include "ec_types.h"
#include "journal_types.h"
#include "keylist_types.h"
#include "quota_types.h"
#include "rebalance_types.h"
#include "replicas_types.h"
#include "sb-members_types.h"
#include "subvolume_types.h"
#include "super_types.h"
#include "thread_with_file_types.h"

/* Number of nodes btree coalesce will try to coalesce at once */
#define GC_MERGE_NODES 4U

/* Maximum number of nodes we might need to allocate atomically: */
#define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))

/* Size of the freelist we allocate btree nodes from: */
#define BTREE_NODE_RESERVE (BTREE_RESERVE_MAX * 4)

#define BTREE_NODE_OPEN_BUCKET_RESERVE (BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)

struct btree;

struct io_count {
	u64 sectors[2][BCH_DATA_NR];
};

struct bch_dev {
	struct kobject kobj;
#ifdef CONFIG_BCACHEFS_DEBUG
	atomic_long_t ref;
	bool dying;
	unsigned long last_put;
#else
	struct percpu_ref ref;
#endif
	struct completion ref_completion;
	struct percpu_ref io_ref;
	struct completion io_ref_completion;

	struct bch_fs *fs;

	u8 dev_idx;
	/*
	 * Cached version of this device's member info from superblock
	 * Committed by bch2_write_super() -> bch_fs_mi_update()
	 */
	struct bch_member_cpu mi;
	atomic64_t errors[BCH_MEMBER_ERROR_NR];

	__uuid_t uuid;
	char name[BDEVNAME_SIZE];

	struct bch_sb_handle disk_sb;
	struct bch_sb *sb_read_scratch;
	int sb_write_error;
	dev_t dev;
	atomic_t flush_seq;

	struct bch_devs_mask self;

	/*
	 * Buckets:
	 * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
	 * gc_lock, for device resize - holding any is sufficient for access:
	 * Or rcu_read_lock(), but only for dev_ptr_stale():
	 */
	struct bucket_array __rcu *buckets_gc;
	struct bucket_gens __rcu *bucket_gens;
	u8 *oldest_gen;
	unsigned long *buckets_nouse;
	struct rw_semaphore bucket_lock;

	struct bch_dev_usage *usage_base;
	struct bch_dev_usage __percpu *usage[JOURNAL_BUF_NR];
	struct bch_dev_usage __percpu *usage_gc;

	/* Allocator: */
	u64 new_fs_bucket_idx;
	u64 alloc_cursor[3];

	unsigned nr_open_buckets;
	unsigned nr_btree_reserve;

	size_t inc_gen_needs_gc;
	size_t inc_gen_really_needs_gc;
	size_t buckets_waiting_on_journal;

	atomic64_t rebalance_work;

	struct journal_device journal;
	u64 prev_journal_sector;

	struct work_struct io_error_work;

	/* The rest of this all shows up in sysfs */
	atomic64_t cur_latency[2];
	struct bch2_time_stats_quantiles io_latency[2];

#define CONGESTED_MAX 1024
	atomic_t congested;
	u64 congested_last;

	struct io_count __percpu *io_done;
};

/*
 * initial_gc_unfixed
 * error
 * topology error
 */

#define BCH_FS_FLAGS() \
	x(new_fs) \
	x(started) \
	x(may_go_rw) \
	x(rw) \
	x(was_rw) \
	x(stopping) \
	x(emergency_ro) \
	x(going_ro) \
	x(write_disable_complete) \
	x(clean_shutdown) \
	x(fsck_running) \
	x(initial_gc_unfixed) \
	x(need_delete_dead_snapshots) \
	x(error) \
	x(topology_error) \
	x(errors_fixed) \
	x(errors_not_fixed) \
	x(no_invalid_checks)

enum bch_fs_flags {
#define x(n) BCH_FS_##n,
	BCH_FS_FLAGS()
#undef x
};

struct btree_debug {
	unsigned id;
};

#define BCH_TRANSACTIONS_NR 128

struct btree_transaction_stats {
	struct bch2_time_stats duration;
	struct bch2_time_stats lock_hold_times;
	struct mutex lock;
	unsigned nr_max_paths;
	unsigned journal_entries_size;
	unsigned max_mem;
	char *max_paths_text;
};

struct bch_fs_pcpu {
	u64 sectors_available;
};

struct journal_seq_blacklist_table {
	size_t nr;
	struct journal_seq_blacklist_table_entry {
		u64 start;
		u64 end;
		bool dirty;
	} entries[];
};

struct journal_keys {
	/* must match layout in darray_types.h */
	size_t nr, size;
	struct journal_key {
		u64 journal_seq;
		u32 journal_offset;
		enum btree_id btree_id:8;
		unsigned level:8;
		bool allocated;
		bool overwritten;
		struct bkey_i *k;
	} *data;
	/*
	 * Gap buffer: instead of all the empty space in the array being at the
	 * end of the buffer - from @nr to @size - the empty space is at @gap.
	 * This means that sequential insertions are O(n) instead of O(n^2).
	 */
	size_t gap;
	atomic_t ref;
	bool initial_ref_held;
};
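
/*
 * Sketch of the gap buffer idea above (hypothetical helper, not the actual
 * journal key code): the run of unused slots sits at @gap, so inserting at the
 * gap is a single store, and moving the gap only memmoves the keys between the
 * old and new positions:
 *
 *	void journal_keys_move_gap(struct journal_keys *keys, size_t pos)
 *	{
 *		size_t gap_size = keys->size - keys->nr;
 *
 *		if (pos < keys->gap)
 *			memmove(keys->data + pos + gap_size,
 *				keys->data + pos,
 *				(keys->gap - pos) * sizeof(keys->data[0]));
 *		else
 *			memmove(keys->data + keys->gap,
 *				keys->data + keys->gap + gap_size,
 *				(pos - keys->gap) * sizeof(keys->data[0]));
 *		keys->gap = pos;
 *	}
 */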

struct btree_trans_buf {
	struct btree_trans *trans;
};

#define REPLICAS_DELTA_LIST_MAX (1U << 16)

#define BCACHEFS_ROOT_SUBVOL_INUM \
	((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO })

#define BCH_WRITE_REFS() \
	x(trans) \
	x(write) \
	x(promote) \
	x(node_rewrite) \
	x(stripe_create) \
	x(stripe_delete) \
	x(reflink) \
	x(fallocate) \
	x(fsync) \
	x(dio_write) \
	x(discard) \
	x(discard_fast) \
	x(invalidate) \
	x(delete_dead_snapshots) \
	x(gc_gens) \
	x(snapshot_delete_pagecache) \
	x(sysfs) \
	x(btree_write_buffer)

enum bch_write_ref {
#define x(n) BCH_WRITE_REF_##n,
	BCH_WRITE_REFS()
#undef x
	BCH_WRITE_REF_NR,
};

struct bch_fs {
	struct closure cl;

	struct list_head list;
	struct kobject kobj;
	struct kobject counters_kobj;
	struct kobject internal;
	struct kobject opts_dir;
	struct kobject time_stats;
	unsigned long flags;

	int minor;
	struct device *chardev;
	struct super_block *vfs_sb;
	dev_t dev;
	char name[40];
	struct stdio_redirect *stdio;
	struct task_struct *stdio_filter;

	/* ro/rw, add/remove/resize devices: */
	struct rw_semaphore state_lock;

	/* Counts outstanding writes, for clean transition to read-only */
#ifdef BCH_WRITE_REF_DEBUG
	atomic_long_t writes[BCH_WRITE_REF_NR];
#else
	struct percpu_ref writes;
#endif
	/*
	 * Analogous to c->writes, for asynchronous ops that don't necessarily
	 * need fs to be read-write
	 */
	refcount_t ro_ref;
	wait_queue_head_t ro_ref_wait;

	struct work_struct read_only_work;

	struct bch_dev __rcu *devs[BCH_SB_MEMBERS_MAX];

	struct bch_replicas_cpu replicas;
	struct bch_replicas_cpu replicas_gc;
	struct mutex replicas_gc_lock;
	mempool_t replicas_delta_pool;

	struct journal_entry_res btree_root_journal_res;
	struct journal_entry_res replicas_journal_res;
	struct journal_entry_res clock_journal_res;
	struct journal_entry_res dev_usage_journal_res;

	struct bch_disk_groups_cpu __rcu *disk_groups;

	struct bch_opts opts;

	/* Updated by bch2_sb_update():*/
	struct {
		__uuid_t uuid;
		__uuid_t user_uuid;

		u16 version;
		u16 version_min;
		u16 version_upgrade_complete;

		u8 nr_devices;
		u8 clean;

		u8 encryption_type;

		u64 time_base_lo;
		u32 time_base_hi;
		unsigned time_units_per_sec;
		unsigned nsec_per_time_unit;
		u64 features;
		u64 compat;
		unsigned long errors_silent[BITS_TO_LONGS(BCH_SB_ERR_MAX)];
		u64 btrees_lost_data;
	} sb;


	struct bch_sb_handle disk_sb;

	unsigned short block_bits; /* ilog2(block_size) */

	u16 btree_foreground_merge_threshold;

	struct closure sb_write;
	struct mutex sb_lock;

	/* snapshot.c: */
	struct snapshot_table __rcu *snapshots;
	struct mutex snapshot_table_lock;
	struct rw_semaphore snapshot_create_lock;

	struct work_struct snapshot_delete_work;
	struct work_struct snapshot_wait_for_pagecache_and_delete_work;
	snapshot_id_list snapshots_unlinked;
	struct mutex snapshots_unlinked_lock;

	/* BTREE CACHE */
	struct bio_set btree_bio;
	struct workqueue_struct *btree_read_complete_wq;
	struct workqueue_struct *btree_write_submit_wq;

	struct btree_root btree_roots_known[BTREE_ID_NR];
	DARRAY(struct btree_root) btree_roots_extra;
	struct mutex btree_root_lock;

	struct btree_cache btree_cache;

	/*
	 * Cache of allocated btree nodes - if we allocate a btree node and
	 * don't use it, if we free it that space can't be reused until going
	 * _all_ the way through the allocator (which exposes us to a livelock
	 * when allocating btree reserves fail halfway through) - instead, we
	 * can stick them here:
	 */
	struct btree_alloc btree_reserve_cache[BTREE_NODE_RESERVE * 2];
	unsigned btree_reserve_cache_nr;
	struct mutex btree_reserve_cache_lock;

	mempool_t btree_interior_update_pool;
	struct list_head btree_interior_update_list;
	struct list_head btree_interior_updates_unwritten;
	struct mutex btree_interior_update_lock;
	struct closure_waitlist btree_interior_update_wait;

	struct workqueue_struct *btree_interior_update_worker;
	struct work_struct btree_interior_update_work;

	struct workqueue_struct *btree_node_rewrite_worker;

	struct list_head pending_node_rewrites;
	struct mutex pending_node_rewrites_lock;

	/* btree_io.c: */
	spinlock_t btree_write_error_lock;
	struct btree_write_stats {
		atomic64_t nr;
		atomic64_t bytes;
	} btree_write_stats[BTREE_WRITE_TYPE_NR];

	/* btree_iter.c: */
	struct seqmutex btree_trans_lock;
	struct list_head btree_trans_list;
	mempool_t btree_trans_pool;
	mempool_t btree_trans_mem_pool;
	struct btree_trans_buf __percpu *btree_trans_bufs;

	struct srcu_struct btree_trans_barrier;
	bool btree_trans_barrier_initialized;

	struct btree_key_cache btree_key_cache;
	unsigned btree_key_cache_btrees;

	struct btree_write_buffer btree_write_buffer;

	struct workqueue_struct *btree_update_wq;
	struct workqueue_struct *btree_io_complete_wq;
	/* copygc needs its own workqueue for index updates.. */
	struct workqueue_struct *copygc_wq;
	/*
	 * Use a dedicated wq for write ref holder tasks. Required to avoid
	 * dependency problems with other wq tasks that can block on ref
	 * draining, such as read-only transition.
	 */
	struct workqueue_struct *write_ref_wq;

	/* ALLOCATION */
	struct bch_devs_mask rw_devs[BCH_DATA_NR];

	u64 capacity; /* sectors */

	/*
	 * When capacity _decreases_ (due to a disk being removed), we
	 * increment capacity_gen - this invalidates outstanding reservations
	 * and forces them to be revalidated
	 */
	u32 capacity_gen;
	unsigned bucket_size_max;

	atomic64_t sectors_available;
	struct mutex sectors_available_lock;

	struct bch_fs_pcpu __percpu *pcpu;

	struct percpu_rw_semaphore mark_lock;

	seqcount_t usage_lock;
	struct bch_fs_usage *usage_base;
	struct bch_fs_usage __percpu *usage[JOURNAL_BUF_NR];
	struct bch_fs_usage __percpu *usage_gc;
	u64 __percpu *online_reserved;

	/* single element mempool: */
	struct mutex usage_scratch_lock;
	struct bch_fs_usage_online *usage_scratch;

	struct io_clock io_clock[2];

	/* JOURNAL SEQ BLACKLIST */
	struct journal_seq_blacklist_table *journal_seq_blacklist_table;

	/* ALLOCATOR */
	spinlock_t freelist_lock;
	struct closure_waitlist freelist_wait;

	open_bucket_idx_t open_buckets_freelist;
	open_bucket_idx_t open_buckets_nr_free;
	struct closure_waitlist open_buckets_wait;
	struct open_bucket open_buckets[OPEN_BUCKETS_COUNT];
	open_bucket_idx_t open_buckets_hash[OPEN_BUCKETS_COUNT];

	open_bucket_idx_t open_buckets_partial[OPEN_BUCKETS_COUNT];
	open_bucket_idx_t open_buckets_partial_nr;

	struct write_point btree_write_point;
	struct write_point rebalance_write_point;

	struct write_point write_points[WRITE_POINT_MAX];
	struct hlist_head write_points_hash[WRITE_POINT_HASH_NR];
	struct mutex write_points_hash_lock;
	unsigned write_points_nr;

	struct buckets_waiting_for_journal buckets_waiting_for_journal;
	struct work_struct invalidate_work;
	struct work_struct discard_work;
	struct mutex discard_buckets_in_flight_lock;
	DARRAY(struct bpos) discard_buckets_in_flight;
	struct work_struct discard_fast_work;

	/* GARBAGE COLLECTION */
	struct work_struct gc_gens_work;
	unsigned long gc_count;

	enum btree_id gc_gens_btree;
	struct bpos gc_gens_pos;

	/*
	 * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
	 * has been marked by GC.
	 *
	 * gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
	 *
	 * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
	 * can read without a lock.
	 */
	seqcount_t gc_pos_lock;
	struct gc_pos gc_pos;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress.
	 */
	struct rw_semaphore gc_lock;
	struct mutex gc_gens_lock;

	/* IO PATH */
	struct semaphore io_in_flight;
	struct bio_set bio_read;
	struct bio_set bio_read_split;
	struct bio_set bio_write;
	struct bio_set replica_set;
	struct mutex bio_bounce_pages_lock;
	mempool_t bio_bounce_pages;
	struct bucket_nocow_lock_table nocow_locks;
	struct rhashtable promote_table;

	mempool_t compression_bounce[2];
	mempool_t compress_workspace[BCH_COMPRESSION_TYPE_NR];
	mempool_t decompress_workspace;
	size_t zstd_workspace_size;

	struct crypto_shash *sha256;
	struct crypto_sync_skcipher *chacha20;
	struct crypto_shash *poly1305;

	atomic64_t key_version;

	mempool_t large_bkey_pool;

	/* MOVE.C */
	struct list_head moving_context_list;
	struct mutex moving_context_lock;

	/* REBALANCE */
	struct bch_fs_rebalance rebalance;

	/* COPYGC */
	struct task_struct *copygc_thread;
	struct write_point copygc_write_point;
	s64 copygc_wait_at;
	s64 copygc_wait;
	bool copygc_running;
	wait_queue_head_t copygc_running_wq;

	/* STRIPES: */
	GENRADIX(struct stripe) stripes;
	GENRADIX(struct gc_stripe) gc_stripes;

	struct hlist_head ec_stripes_new[32];
	spinlock_t ec_stripes_new_lock;

	ec_stripes_heap ec_stripes_heap;
	struct mutex ec_stripes_heap_lock;

	/* ERASURE CODING */
	struct list_head ec_stripe_head_list;
	struct mutex ec_stripe_head_lock;

	struct list_head ec_stripe_new_list;
	struct mutex ec_stripe_new_lock;
	wait_queue_head_t ec_stripe_new_wait;

	struct work_struct ec_stripe_create_work;
	u64 ec_stripe_hint;

	struct work_struct ec_stripe_delete_work;

	struct bio_set ec_bioset;

	/* REFLINK */
	reflink_gc_table reflink_gc_table;
	size_t reflink_gc_nr;

	/* fs.c */
	struct list_head vfs_inodes_list;
	struct mutex vfs_inodes_lock;

	/* VFS IO PATH - fs-io.c */
	struct bio_set writepage_bioset;
	struct bio_set dio_write_bioset;
	struct bio_set dio_read_bioset;
	struct bio_set nocow_flush_bioset;

	/* QUOTAS */
	struct bch_memquota_type quotas[QTYP_NR];

	/* RECOVERY */
	u64 journal_replay_seq_start;
	u64 journal_replay_seq_end;
	/*
	 * Two different uses:
	 * "Has this fsck pass?" - i.e. should this type of error be an
	 * emergency read-only
	 * And, in certain situations fsck will rewind to an earlier pass: used
	 * for signaling to the toplevel code which pass we want to run now.
	 */
	enum bch_recovery_pass curr_recovery_pass;
	/* bitmap of explicitly enabled recovery passes: */
	u64 recovery_passes_explicit;
	/* bitmask of recovery passes that we actually ran */
	u64 recovery_passes_complete;
	/* never rewinds version of curr_recovery_pass */
	enum bch_recovery_pass recovery_pass_done;
	struct semaphore online_fsck_mutex;

	/* DEBUG JUNK */
	struct dentry *fs_debug_dir;
	struct dentry *btree_debug_dir;
	struct btree_debug btree_debug[BTREE_ID_NR];
	struct btree *verify_data;
	struct btree_node *verify_ondisk;
	struct mutex verify_lock;

	u64 *unused_inode_hints;
	unsigned inode_shard_bits;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - have to dynamically allocate them
	 */
	mempool_t fill_iter;

	mempool_t btree_bounce_pool;

	struct journal journal;
	GENRADIX(struct journal_replay *) journal_entries;
	u64 journal_entries_base_seq;
	struct journal_keys journal_keys;
	struct list_head journal_iters;

	struct find_btree_nodes found_btree_nodes;

	u64 last_bucket_seq_cleanup;

	u64 counters_on_mount[BCH_COUNTER_NR];
	u64 __percpu *counters;

	unsigned copy_gc_enabled:1;
	bool promote_whole_extents;

	struct bch2_time_stats times[BCH_TIME_STAT_NR];

	struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR];

	/* ERRORS */
	struct list_head fsck_error_msgs;
	struct mutex fsck_error_msgs_lock;
	bool fsck_alloc_msgs_err;

	bch_sb_errors_cpu fsck_error_counts;
	struct mutex fsck_error_counts_lock;
};

extern struct wait_queue_head bch2_read_only_wait;

static inline void bch2_write_ref_get(struct bch_fs *c, enum bch_write_ref ref)
{
#ifdef BCH_WRITE_REF_DEBUG
	atomic_long_inc(&c->writes[ref]);
#else
	percpu_ref_get(&c->writes);
#endif
}

static inline bool __bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
{
#ifdef BCH_WRITE_REF_DEBUG
	return !test_bit(BCH_FS_going_ro, &c->flags) &&
		atomic_long_inc_not_zero(&c->writes[ref]);
#else
	return percpu_ref_tryget(&c->writes);
#endif
}

static inline bool bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
{
#ifdef BCH_WRITE_REF_DEBUG
	return !test_bit(BCH_FS_going_ro, &c->flags) &&
		atomic_long_inc_not_zero(&c->writes[ref]);
#else
	return percpu_ref_tryget_live(&c->writes);
#endif
}

static inline void bch2_write_ref_put(struct bch_fs *c, enum bch_write_ref ref)
{
#ifdef BCH_WRITE_REF_DEBUG
	long v = atomic_long_dec_return(&c->writes[ref]);

	BUG_ON(v < 0);
	if (v)
		return;
	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
		if (atomic_long_read(&c->writes[i]))
			return;

	set_bit(BCH_FS_write_disable_complete, &c->flags);
	wake_up(&bch2_read_only_wait);
#else
	percpu_ref_put(&c->writes);
#endif
}
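
/*
 * Typical usage (a sketch): take a named write ref around work that requires
 * the filesystem to stay read-write, and drop it when done so that a read-only
 * transition can complete:
 *
 *	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
 *		return;
 *	... do work that submits writes ...
 *	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 */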

static inline bool bch2_ro_ref_tryget(struct bch_fs *c)
{
	if (test_bit(BCH_FS_stopping, &c->flags))
		return false;

	return refcount_inc_not_zero(&c->ro_ref);
}

static inline void bch2_ro_ref_put(struct bch_fs *c)
{
	if (refcount_dec_and_test(&c->ro_ref))
		wake_up(&c->ro_ref_wait);
}

static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
{
#ifndef NO_BCACHEFS_FS
	if (c->vfs_sb)
		c->vfs_sb->s_bdi->ra_pages = ra_pages;
#endif
}

static inline unsigned bucket_bytes(const struct bch_dev *ca)
{
	return ca->mi.bucket_size << 9;
}

static inline unsigned block_bytes(const struct bch_fs *c)
{
	return c->opts.block_size;
}

static inline unsigned block_sectors(const struct bch_fs *c)
{
	return c->opts.block_size >> 9;
}

static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
{
	return c->btree_key_cache_btrees & (1U << btree);
}

static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
{
	struct timespec64 t;
	s32 rem;

	time += c->sb.time_base_lo;

	t.tv_sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
	t.tv_nsec = rem * c->sb.nsec_per_time_unit;
	return t;
}

static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
{
	return (ts.tv_sec * c->sb.time_units_per_sec +
		(int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
}
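
/*
 * Illustrative note (assuming, hypothetically, time_units_per_sec equals
 * NSEC_PER_SEC and nsec_per_time_unit equals 1): on-disk times are then just
 * nanoseconds relative to time_base_lo, and the two helpers above are inverses
 * of each other up to time-unit truncation:
 *
 *	s64 t = timespec_to_bch2_time(c, ts);
 *	struct timespec64 ts2 = bch2_time_to_timespec(c, t);	ts2 == ts
 */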

static inline s64 bch2_current_time(const struct bch_fs *c)
{
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec_to_bch2_time(c, now);
}

static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
{
	struct stdio_redirect *stdio = c->stdio;

	if (c->stdio_filter && c->stdio_filter != current)
		stdio = NULL;
	return stdio;
}

static inline unsigned metadata_replicas_required(struct bch_fs *c)
{
	return min(c->opts.metadata_replicas,
		   c->opts.metadata_replicas_required);
}

static inline unsigned data_replicas_required(struct bch_fs *c)
{
	return min(c->opts.data_replicas,
		   c->opts.data_replicas_required);
}

#define BKEY_PADDED_ONSTACK(key, pad) \
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }

#endif /* _BCACHEFS_H */