/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BSET_H
#define _BCACHEFS_BSET_H

#include <linux/kernel.h>
#include <linux/types.h>

#include "bcachefs.h"
#include "bkey.h"
#include "bkey_methods.h"
#include "btree_types.h"
#include "util.h" /* for time_stats */
#include "vstructs.h"

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bkey_invalid() and
 * bkey_deleted().
 *
 * The one exception to the rule that bkey_invalid() filters out invalid keys is
 * that it also filters out keys of size 0 - these are keys that have been
 * completely overwritten. It'd be safe to delete these in memory while leaving
 * them on disk - it's just unnecessary work - so we filter them out when
 * resorting instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node, those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from
 * the front or the back of a bkey - this is mainly used for fixing overlapping
 * extents, by removing the overlapping sectors from the older key.
 *
 * BSETS:
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
 * along with a header. A btree node is made up of a number of these, written
 * at different times.
 *
 * There could be many of them on disk, but we never allow there to be more
 * than 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE ITERATOR:
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting either from the first key after a
 * given search key (if one is passed) or from the start of the btree node.
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyway, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert keys
 * into the last (unwritten) set, most of the keys within a given btree node are
 * usually in sets that are mostly constant. We use two different types of
 * lookup tables to take advantage of this.
 *
 * Both lookup tables have in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
 * is used for the rest.
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect.
 *
 * It's quite useful performance-wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in advance
 * when traversing this tree.
 *
 * Nodes in the auxiliary search tree must contain both a key to compare against
 * (we don't want to fetch the key from the set, that would defeat the purpose),
 * and a pointer to the key. We use a few tricks to compress both of these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
 * a function (to_inorder()) that takes the index of a node in a binary tree and
 * returns what its index would be in an inorder traversal, so we only have to
 * store the low bits of the offset.
 *
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing the
 * search tree at every iteration we know that both our search key and the key
 * we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true even
 * at the root of the tree.)
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ above bit 50, we don't need to check anything higher than bit
 * 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough bits
 * to partition the key range we're currently checking. Consider key n - the
 * key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
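 *
 * For example (a sketch with made-up values): if p = 0b01011000 and
 * n = 0b01100000, the highest bit that differs between them is bit 5 - so,
 * within the range bounded by our previous comparisons, bits 5 and up of n
 * are enough to decide which side of n a search key falls on.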
 *
 * Note that this could be bit 0 - we might sometimes need all 80 bits to do
 * the comparison. But we'd really like our nodes in the auxiliary search tree
 * to be of fixed size.
 *
 * The solution is to make them fixed size, and when we're constructing a node
 * check whether p and n differed in the bits we needed them to. If they
 * didn't, we flag that node, and when doing lookups we fall back to comparing
 * against the real key. As long as this doesn't happen too often (and it seems
 * to reliably happen a bit less than 1% of the time), we win - even on
 * failures, that key is then more likely to be in cache than if we were doing
 * binary searches all the way, since we're touching so much less memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big enough
 * to address all the bits in the original key, but the number of bits in the
 * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
 * We need one node per 128 bytes in the btree node, which means the auxiliary
 * search trees take up 3% as much memory as the btree itself.
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a much
 * simpler lookup table - it's just a flat array, so index i in the lookup table
 * corresponds to the i-th range of BSET_CACHELINE bytes in the set. Indexing
 * within each byte range works the same as with the auxiliary search trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the pointer
 * to it, and only when it would overflow do we go to the trouble of finding
 * the first key in that range of bytes again.
 */
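
/*
 * An illustrative sketch of the to_inorder() idea described above, for the
 * special case of a full tree with (1U << depth) - 1 nodes, 1-indexed in
 * array (Eytzinger) order. This is hypothetical, for exposition only - the
 * in-tree helpers live in eytzinger.h and handle arbitrary sizes:
 */
static inline unsigned example_to_inorder(unsigned j, unsigned depth)
{
        unsigned level = 0;

        /* level of node j, with the root at level 0: */
        while (j >> (level + 1))
                level++;

        /*
         * j's rank within its level determines its 1-indexed inorder
         * position, spaced out by the stride of that level:
         */
        return ((j - (1U << level)) * 2 + 1) << (depth - 1 - level);
}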

enum bset_aux_tree_type {
        BSET_NO_AUX_TREE,
        BSET_RO_AUX_TREE,
        BSET_RW_AUX_TREE,
};

#define BSET_TREE_NR_TYPES 3

#define BSET_NO_AUX_TREE_VAL (U16_MAX)
#define BSET_RW_AUX_TREE_VAL (U16_MAX - 1)

static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree *t)
{
        switch (t->extra) {
        case BSET_NO_AUX_TREE_VAL:
                EBUG_ON(t->size);
                return BSET_NO_AUX_TREE;
        case BSET_RW_AUX_TREE_VAL:
                EBUG_ON(!t->size);
                return BSET_RW_AUX_TREE;
        default:
                EBUG_ON(!t->size);
                return BSET_RO_AUX_TREE;
        }
}

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bkey_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE 256

static inline size_t btree_keys_cachelines(const struct btree *b)
{
        return (1U << b->byte_order) / BSET_CACHELINE;
}

static inline size_t btree_aux_data_bytes(const struct btree *b)
{
        return btree_keys_cachelines(b) * 8;
}

static inline size_t btree_aux_data_u64s(const struct btree *b)
{
        return btree_aux_data_bytes(b) / sizeof(u64);
}
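
/*
 * Worked example, as a sketch: with a hypothetical 64k btree node
 * (b->byte_order == 16), btree_keys_cachelines() is 65536 / 256 == 256, so
 * the aux data gets 256 * 8 == 2048 bytes - 8 bytes (one u64) of lookup
 * table space per BSET_CACHELINE bytes of keys.
 */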

#define for_each_bset(_b, _t)                                           \
        for (struct bset_tree *_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)

#define for_each_bset_c(_b, _t)                                         \
        for (const struct bset_tree *_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)

#define bset_tree_for_each_key(_b, _t, _k)                              \
        for (_k = btree_bkey_first(_b, _t);                             \
             _k != btree_bkey_last(_b, _t);                             \
             _k = bkey_p_next(_k))
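
/*
 * Example usage of the macros above - an illustrative sketch (the helper
 * name is made up), counting every key in every bset of a node:
 */
static inline unsigned example_count_keys(struct btree *b)
{
        struct bkey_packed *k;
        unsigned nr = 0;

        for_each_bset(b, t)
                bset_tree_for_each_key(b, t, k)
                        nr++;

        return nr;
}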

static inline bool bset_has_ro_aux_tree(const struct bset_tree *t)
{
        return bset_aux_tree_type(t) == BSET_RO_AUX_TREE;
}

static inline bool bset_has_rw_aux_tree(struct bset_tree *t)
{
        return bset_aux_tree_type(t) == BSET_RW_AUX_TREE;
}

static inline void bch2_bset_set_no_aux_tree(struct btree *b,
                                             struct bset_tree *t)
{
        BUG_ON(t < b->set);

        for (; t < b->set + ARRAY_SIZE(b->set); t++) {
                t->size = 0;
                t->extra = BSET_NO_AUX_TREE_VAL;
                t->aux_data_offset = U16_MAX;
        }
}

static inline void btree_node_set_format(struct btree *b,
                                         struct bkey_format f)
{
        int len;

        b->format = f;
        b->nr_key_bits = bkey_format_key_bits(&f);

        len = bch2_compile_bkey_format(&b->format, b->aux_data);
        BUG_ON(len < 0 || len > U8_MAX);

        b->unpack_fn_len = len;

        bch2_bset_set_no_aux_tree(b, b->set);
}

static inline struct bset *bset_next_set(struct btree *b,
                                         unsigned block_bytes)
{
        struct bset *i = btree_bset_last(b);

        EBUG_ON(!is_power_of_2(block_bytes));

        return ((void *) i) + round_up(vstruct_bytes(i), block_bytes);
}

void bch2_btree_keys_init(struct btree *);

void bch2_bset_init_first(struct btree *, struct bset *);
void bch2_bset_init_next(struct btree *, struct btree_node_entry *);
void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);

void bch2_bset_insert(struct btree *, struct bkey_packed *, struct bkey_i *,
                      unsigned);
void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);

/* Bkey utility code */

/*
 * Compare a key in a btree node (packed or unpacked) against a search
 * position; the position may be supplied both pre-packed in the node's
 * format (r_packed) and unpacked (r):
 */
static inline int bkey_cmp_p_or_unp(const struct btree *b,
                                    const struct bkey_packed *l,
                                    const struct bkey_packed *r_packed,
                                    const struct bpos *r)
{
        EBUG_ON(r_packed && !bkey_packed(r_packed));

        if (unlikely(!bkey_packed(l)))
                return bpos_cmp(packed_to_bkey_c(l)->p, *r);

        if (likely(r_packed))
                return __bch2_bkey_cmp_packed_format_checked(l, r_packed, b);

        return __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
}

static inline struct bset_tree *
bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k)
{
        unsigned offset = __btree_node_key_to_offset(b, k);

        for_each_bset(b, t)
                if (offset <= t->end_offset) {
                        EBUG_ON(offset < btree_bkey_first_offset(t));
                        return t;
                }

        BUG();
}

struct bset_tree *bch2_bkey_to_bset(struct btree *, struct bkey_packed *);

struct bkey_packed *bch2_bkey_prev_filter(struct btree *, struct bset_tree *,
                                          struct bkey_packed *, unsigned);
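
/*
 * The last argument to bch2_bkey_prev_filter() is the minimum key type to
 * return: 0 returns every key, including deleted keys, while 1 skips them
 * (KEY_TYPE_deleted is type 0):
 */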

static inline struct bkey_packed *
bch2_bkey_prev_all(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
{
        return bch2_bkey_prev_filter(b, t, k, 0);
}

static inline struct bkey_packed *
bch2_bkey_prev(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
{
        return bch2_bkey_prev_filter(b, t, k, 1);
}

/* Btree key iteration */

void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *,
                               const struct bkey_packed *,
                               const struct bkey_packed *);
void bch2_btree_node_iter_init(struct btree_node_iter *, struct btree *,
                               struct bpos *);
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *,
                                          struct btree *);
struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *,
                                                  struct btree *,
                                                  struct bset_tree *);

void bch2_btree_node_iter_sort(struct btree_node_iter *, struct btree *);
void bch2_btree_node_iter_set_drop(struct btree_node_iter *,
                                   struct btree_node_iter_set *);
void bch2_btree_node_iter_advance(struct btree_node_iter *, struct btree *);

#define btree_node_iter_for_each(_iter, _set)                           \
        for (_set = (_iter)->data;                                      \
             _set < (_iter)->data + ARRAY_SIZE((_iter)->data) &&        \
             (_set)->k != (_set)->end;                                  \
             _set++)
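
/*
 * Note that btree_node_iter_for_each() walks the iterator's per-bset
 * positions, not keys in sorted order - use bch2_btree_node_iter_peek() and
 * bch2_btree_node_iter_advance() for sorted iteration.
 */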

static inline bool __btree_node_iter_set_end(struct btree_node_iter *iter,
                                             unsigned i)
{
        return iter->data[i].k == iter->data[i].end;
}

static inline bool bch2_btree_node_iter_end(struct btree_node_iter *iter)
{
        return __btree_node_iter_set_end(iter, 0);
}

/*
 * When keys compare equal, deleted keys compare first:
 *
 * XXX: only need to compare pointers for keys that are both within a
 * btree_node_iterator - we need to break ties for prev() to work correctly
 */
static inline int bkey_iter_cmp(const struct btree *b,
                                const struct bkey_packed *l,
                                const struct bkey_packed *r)
{
        return bch2_bkey_cmp_packed(b, l, r)
                ?: (int) bkey_deleted(r) - (int) bkey_deleted(l)
                ?: cmp_int(l, r);
}

static inline int btree_node_iter_cmp(const struct btree *b,
                                      struct btree_node_iter_set l,
                                      struct btree_node_iter_set r)
{
        return bkey_iter_cmp(b,
                             __btree_node_offset_to_key(b, l.k),
                             __btree_node_offset_to_key(b, r.k));
}

/* These assume r (the search key) is not a deleted key: */
static inline int bkey_iter_pos_cmp(const struct btree *b,
                                    const struct bkey_packed *l,
                                    const struct bpos *r)
{
        return bkey_cmp_left_packed(b, l, r)
                ?: -((int) bkey_deleted(l));
}

static inline int bkey_iter_cmp_p_or_unp(const struct btree *b,
                                         const struct bkey_packed *l,
                                         const struct bkey_packed *r_packed,
                                         const struct bpos *r)
{
        return bkey_cmp_p_or_unp(b, l, r_packed, r)
                ?: -((int) bkey_deleted(l));
}

static inline struct bkey_packed *
__bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
                                struct btree *b)
{
        return __btree_node_offset_to_key(b, iter->data->k);
}

static inline struct bkey_packed *
bch2_btree_node_iter_peek_all(struct btree_node_iter *iter, struct btree *b)
{
        return !bch2_btree_node_iter_end(iter)
                ? __btree_node_offset_to_key(b, iter->data->k)
                : NULL;
}

static inline struct bkey_packed *
bch2_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
{
        struct bkey_packed *k;

        while ((k = bch2_btree_node_iter_peek_all(iter, b)) &&
               bkey_deleted(k))
                bch2_btree_node_iter_advance(iter, b);

        return k;
}

static inline struct bkey_packed *
bch2_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
{
        struct bkey_packed *ret = bch2_btree_node_iter_peek_all(iter, b);

        if (ret)
                bch2_btree_node_iter_advance(iter, b);

        return ret;
}

struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *,
                                                  struct btree *);
struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *,
                                              struct btree *);

struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
                                                 struct btree *,
                                                 struct bkey *);

#define for_each_btree_node_key(b, k, iter)                             \
        for (bch2_btree_node_iter_init_from_start((iter), (b));         \
             (k = bch2_btree_node_iter_peek((iter), (b)));              \
             bch2_btree_node_iter_advance(iter, b))

#define for_each_btree_node_key_unpack(b, k, iter, unpacked)            \
        for (bch2_btree_node_iter_init_from_start((iter), (b));         \
             (k = bch2_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
             bch2_btree_node_iter_advance(iter, b))
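
/*
 * Example usage, as a sketch (the helper name is made up, and we assume the
 * node is appropriately locked): walking every live key in a node, unpacked:
 */
static inline void example_walk_node(struct btree *b)
{
        struct btree_node_iter iter;
        struct bkey unpacked;
        struct bkey_s_c k;

        for_each_btree_node_key_unpack(b, k, &iter, &unpacked)
                pr_debug("key at inode %llu offset %llu",
                         k.k->p.inode, k.k->p.offset);
}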

/* Accounting: */

struct btree_nr_keys bch2_btree_node_count_keys(struct btree *);

static inline void btree_keys_account_key(struct btree_nr_keys *n,
                                          unsigned bset,
                                          struct bkey_packed *k,
                                          int sign)
{
        n->live_u64s += k->u64s * sign;
        n->bset_u64s[bset] += k->u64s * sign;

        if (bkey_packed(k))
                n->packed_keys += sign;
        else
                n->unpacked_keys += sign;
}

static inline void btree_keys_account_val_delta(struct btree *b,
                                                struct bkey_packed *k,
                                                int delta)
{
        struct bset_tree *t = bch2_bkey_to_bset(b, k);

        b->nr.live_u64s += delta;
        b->nr.bset_u64s[t - b->set] += delta;
}

#define btree_keys_account_key_add(_nr, _bset_idx, _k)          \
        btree_keys_account_key(_nr, _bset_idx, _k, 1)
#define btree_keys_account_key_drop(_nr, _bset_idx, _k)         \
        btree_keys_account_key(_nr, _bset_idx, _k, -1)

#define btree_account_key_add(_b, _k)                           \
        btree_keys_account_key(&(_b)->nr,                       \
                bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, 1)
#define btree_account_key_drop(_b, _k)                          \
        btree_keys_account_key(&(_b)->nr,                       \
                bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, -1)
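
/*
 * A sketch of intended usage: code that adds a key to a node pairs the
 * insert with btree_account_key_add(_b, _k), and code that removes one pairs
 * it with btree_account_key_drop(_b, _k), keeping b->nr consistent with the
 * contents of the bsets.
 */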

struct bset_stats {
        struct {
                size_t nr, bytes;
        } sets[BSET_TREE_NR_TYPES];

        size_t floats;
        size_t failed;
};

void bch2_btree_keys_stats(const struct btree *, struct bset_stats *);
void bch2_bfloat_to_text(struct printbuf *, struct btree *,
                         struct bkey_packed *);

/* Debug stuff */

void bch2_dump_bset(struct bch_fs *, struct btree *, struct bset *, unsigned);
void bch2_dump_btree_node(struct bch_fs *, struct btree *);
void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *);

#ifdef CONFIG_BCACHEFS_DEBUG

void __bch2_verify_btree_nr_keys(struct btree *);
void bch2_btree_node_iter_verify(struct btree_node_iter *, struct btree *);
void bch2_verify_insert_pos(struct btree *, struct bkey_packed *,
                            struct bkey_packed *, unsigned);

#else

static inline void __bch2_verify_btree_nr_keys(struct btree *b) {}
static inline void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
                                               struct btree *b) {}
static inline void bch2_verify_insert_pos(struct btree *b,
                                          struct bkey_packed *where,
                                          struct bkey_packed *insert,
                                          unsigned clobber_u64s) {}
#endif

static inline void bch2_verify_btree_nr_keys(struct btree *b)
{
        if (bch2_debug_check_btree_accounting)
                __bch2_verify_btree_nr_keys(b);
}

#endif /* _BCACHEFS_BSET_H */