// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "btree_cache.h"
#include "bset.h"
#include "eytzinger.h"
#include "trace.h"
#include "util.h"

#include <linux/unaligned.h>
#include <linux/console.h>
#include <linux/random.h>
#include <linux/prefetch.h>

static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
						  struct btree *);

static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
{
	unsigned n = ARRAY_SIZE(iter->data);

	while (n && __btree_node_iter_set_end(iter, n - 1))
		--n;

	return n;
}

struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
	return bch2_bkey_to_bset_inlined(b, k);
}

/*
 * There are never duplicate live keys in the btree - but including keys that
 * have been flagged as deleted (and will be cleaned up later) we _will_ see
 * duplicates.
 *
 * Thus the sort order is: usual key comparison first, but for keys that compare
 * equal the deleted key(s) come first, and the (at most one) live version comes
 * last.
 *
 * The main reason for this is insertion: to handle overwrites, we first iterate
 * over keys that compare equal to our insert key, and then insert immediately
 * prior to the first key greater than the key we're inserting - our insert
 * position will be after all keys that compare equal to our insert key, which
 * by the time we actually do the insert will all be deleted.
 */

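/*
 * Illustrative only, not drawn from a real node: with the ordering described
 * above, a position that has been overwritten twice might appear in a bset as
 *
 *	A (deleted), A' (deleted), A'' (live), B, ...
 *
 * and a new insert at A's position lands after A'', immediately before the
 * first strictly greater key B.
 */
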
void bch2_dump_bset(struct bch_fs *c, struct btree *b,
		    struct bset *i, unsigned set)
{
	struct bkey_packed *_k, *_n;
	struct bkey uk, n;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;

	if (!i->u64s)
		return;

	for (_k = i->start;
	     _k < vstruct_last(i);
	     _k = _n) {
		_n = bkey_p_next(_k);

		if (!_k->u64s) {
			printk(KERN_ERR "block %u key %5zu - u64s 0? aieee!\n", set,
			       _k->_data - i->_data);
			break;
		}

		k = bkey_disassemble(b, _k, &uk);

		printbuf_reset(&buf);
		if (c)
			bch2_bkey_val_to_text(&buf, c, k);
		else
			bch2_bkey_to_text(&buf, k.k);
		printk(KERN_ERR "block %u key %5zu: %s\n", set,
		       _k->_data - i->_data, buf.buf);

		if (_n == vstruct_last(i))
			continue;

		n = bkey_unpack_key(b, _n);

		if (bpos_lt(n.p, k.k->p)) {
			printk(KERN_ERR "Key skipped backwards\n");
			continue;
		}

		if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
			printk(KERN_ERR "Duplicate keys\n");
	}

	printbuf_exit(&buf);
}

void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{
	console_lock();
	for_each_bset(b, t)
		bch2_dump_bset(c, b, bset(b, t), t - b->set);
	console_unlock();
}

void bch2_dump_btree_node_iter(struct btree *b,
			       struct btree_node_iter *iter)
{
	struct btree_node_iter_set *set;
	struct printbuf buf = PRINTBUF;

	printk(KERN_ERR "btree node iter with %u/%u sets:\n",
	       __btree_node_iter_used(iter), b->nsets);

	btree_node_iter_for_each(iter, set) {
		struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
		struct bset_tree *t = bch2_bkey_to_bset(b, k);
		struct bkey uk = bkey_unpack_key(b, k);

		printbuf_reset(&buf);
		bch2_bkey_to_text(&buf, &uk);
		printk(KERN_ERR "set %zu key %u: %s\n",
		       t - b->set, set->k, buf.buf);
	}

	printbuf_exit(&buf);
}

struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
{
	struct bkey_packed *k;
	struct btree_nr_keys nr = {};

	for_each_bset(b, t)
		bset_tree_for_each_key(b, t, k)
			if (!bkey_deleted(k))
				btree_keys_account_key_add(&nr, t - b->set, k);
	return nr;
}

#ifdef CONFIG_BCACHEFS_DEBUG

void __bch2_verify_btree_nr_keys(struct btree *b)
{
	struct btree_nr_keys nr = bch2_btree_node_count_keys(b);

	BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}

static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
					    struct btree *b)
{
	struct btree_node_iter iter = *_iter;
	const struct bkey_packed *k, *n;

	k = bch2_btree_node_iter_peek_all(&iter, b);
	__bch2_btree_node_iter_advance(&iter, b);
	n = bch2_btree_node_iter_peek_all(&iter, b);

	bkey_unpack_key(b, k);

	if (n &&
	    bkey_iter_cmp(b, k, n) > 0) {
		struct btree_node_iter_set *set;
		struct bkey ku = bkey_unpack_key(b, k);
		struct bkey nu = bkey_unpack_key(b, n);
		struct printbuf buf1 = PRINTBUF;
		struct printbuf buf2 = PRINTBUF;

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &ku);
		bch2_bkey_to_text(&buf2, &nu);
		printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
		       buf1.buf, buf2.buf);
		printk(KERN_ERR "iter was:");

		btree_node_iter_for_each(_iter, set) {
			struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
			struct bset_tree *t = bch2_bkey_to_bset(b, k2);
			printk(" [%zi %zi]", t - b->set,
			       k2->_data - bset(b, t)->_data);
		}
		panic("\n");
	}
}

void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
				 struct btree *b)
{
	struct btree_node_iter_set *set, *s2;
	struct bkey_packed *k, *p;

	if (bch2_btree_node_iter_end(iter))
		return;

	/* Verify no duplicates: */
	btree_node_iter_for_each(iter, set) {
		BUG_ON(set->k > set->end);
		btree_node_iter_for_each(iter, s2)
			BUG_ON(set != s2 && set->end == s2->end);
	}

	/* Verify that set->end is correct: */
	btree_node_iter_for_each(iter, set) {
		for_each_bset(b, t)
			if (set->end == t->end_offset) {
				BUG_ON(set->k < btree_bkey_first_offset(t) ||
				       set->k >= t->end_offset);
				goto found;
			}
		BUG();
found:
		do {} while (0);
	}

	/* Verify iterator is sorted: */
	btree_node_iter_for_each(iter, set)
		BUG_ON(set != iter->data &&
		       btree_node_iter_cmp(b, set[-1], set[0]) > 0);

	k = bch2_btree_node_iter_peek_all(iter, b);

	for_each_bset(b, t) {
		if (iter->data[0].end == t->end_offset)
			continue;

		p = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));

		BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
	}
}

void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
			    struct bkey_packed *insert, unsigned clobber_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
	struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
#if 0
	BUG_ON(prev &&
	       bkey_iter_cmp(b, prev, insert) > 0);
#else
	if (prev &&
	    bkey_iter_cmp(b, prev, insert) > 0) {
		struct bkey k1 = bkey_unpack_key(b, prev);
		struct bkey k2 = bkey_unpack_key(b, insert);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("prev > insert:\n"
		      "prev key %s\n"
		      "insert key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
#if 0
	BUG_ON(next != btree_bkey_last(b, t) &&
	       bkey_iter_cmp(b, insert, next) > 0);
#else
	if (next != btree_bkey_last(b, t) &&
	    bkey_iter_cmp(b, insert, next) > 0) {
		struct bkey k1 = bkey_unpack_key(b, insert);
		struct bkey k2 = bkey_unpack_key(b, next);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("insert > next:\n"
		      "insert key %s\n"
		      "next key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
}

#else

static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
						   struct btree *b) {}

#endif

/* Auxiliary search trees */

#define BFLOAT_FAILED_UNPACKED	U8_MAX
#define BFLOAT_FAILED		U8_MAX

struct bkey_float {
	u8		exponent;
	u8		key_offset;
	u16		mantissa;
};
#define BKEY_MANTISSA_BITS	16

struct ro_aux_tree {
	u8			nothing[0];
	struct bkey_float	f[];
};

struct rw_aux_tree {
	u16		offset;
	struct bpos	k;
};

static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
{
	BUG_ON(t->aux_data_offset == U16_MAX);

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return t->aux_data_offset;
	case BSET_RO_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(t->size * sizeof(struct bkey_float), 8);
	case BSET_RW_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
	default:
		BUG();
	}
}

static unsigned bset_aux_tree_buf_start(const struct btree *b,
					const struct bset_tree *t)
{
	return t == b->set
		? DIV_ROUND_UP(b->unpack_fn_len, 8)
		: bset_aux_tree_buf_end(t - 1);
}

static void *__aux_tree_base(const struct btree *b,
			     const struct bset_tree *t)
{
	return b->aux_data + t->aux_data_offset * 8;
}

static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
					    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t);
}

static struct bkey_float *bkey_float(const struct btree *b,
				     const struct bset_tree *t,
				     unsigned idx)
{
	return ro_aux_tree_base(b, t)->f + idx;
}

static void bset_aux_tree_verify(struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	for_each_bset(b, t) {
		if (t->aux_data_offset == U16_MAX)
			continue;

		BUG_ON(t != b->set &&
		       t[-1].aux_data_offset == U16_MAX);

		BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
		BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
		BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
	}
#endif
}

void bch2_btree_keys_init(struct btree *b)
{
	unsigned i;

	b->nsets = 0;
	memset(&b->nr, 0, sizeof(b->nr));

	for (i = 0; i < MAX_BSETS; i++)
		b->set[i].data_offset = U16_MAX;

	bch2_bset_set_no_aux_tree(b, b->set);
}

/* Binary tree stuff for auxiliary search trees */

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline,
 * and then bkey_float->key_offset gives us the offset within that cacheline,
 * in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

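/*
 * A concrete sketch of the arithmetic above (BSET_CACHELINE is defined in
 * bset.h; 256 in current sources): cacheline c of a bset starts at
 *
 *	round_down(btree_bkey_first(b, t), L1_CACHE_BYTES) + c * BSET_CACHELINE
 *
 * so a (cacheline, key_offset) pair stored in the aux tree maps back to a key
 * pointer as that base address plus key_offset * 8 - which is exactly what
 * cacheline_to_bkey() below computes.
 */
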
static inline void *bset_cacheline(const struct btree *b,
				   const struct bset_tree *t,
				   unsigned cacheline)
{
	return (void *) round_down((unsigned long) btree_bkey_first(b, t),
				   L1_CACHE_BYTES) +
		cacheline * BSET_CACHELINE;
}

static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned cacheline,
					     unsigned offset)
{
	return bset_cacheline(b, t, cacheline) + offset * 8;
}

static unsigned bkey_to_cacheline(const struct btree *b,
				  const struct bset_tree *t,
				  const struct bkey_packed *k)
{
	return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
}

static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
					  const struct bset_tree *t,
					  unsigned cacheline,
					  const struct bkey_packed *k)
{
	return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
}

static unsigned bkey_to_cacheline_offset(const struct btree *b,
					 const struct bset_tree *t,
					 unsigned cacheline,
					 const struct bkey_packed *k)
{
	size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);

	EBUG_ON(m > U8_MAX);
	return m;
}

static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
					       const struct bset_tree *t,
					       unsigned j)
{
	return cacheline_to_bkey(b, t,
			__eytzinger1_to_inorder(j, t->size - 1, t->extra),
			bkey_float(b, t, j)->key_offset);
}

static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
				       const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);

	return __aux_tree_base(b, t);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
					  struct bset_tree *t,
					  unsigned j)
{
	return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
}

static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
			    unsigned j, struct bkey_packed *k)
{
	EBUG_ON(k >= btree_bkey_last(b, t));

	rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
		.offset	= __btree_node_key_to_offset(b, k),
		.k	= bkey_unpack_pos(b, k),
	};
}

static void bch2_bset_verify_rw_aux_tree(struct btree *b,
					 struct bset_tree *t)
{
	struct bkey_packed *k = btree_bkey_first(b, t);
	unsigned j = 0;

	if (!bch2_expensive_debug_checks)
		return;

	BUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	BUG_ON(t->size < 1);
	BUG_ON(rw_aux_to_bkey(b, t, j) != k);

	goto start;
	while (1) {
		if (rw_aux_to_bkey(b, t, j) == k) {
			BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
					bkey_unpack_pos(b, k)));
start:
			if (++j == t->size)
				break;

			BUG_ON(rw_aux_tree(b, t)[j].offset <=
			       rw_aux_tree(b, t)[j - 1].offset);
		}

		k = bkey_p_next(k);
		BUG_ON(k >= btree_bkey_last(b, t));
	}
}

/* returns idx of first entry >= offset: */
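/*
 * For example, with hypothetical offsets: if the entries hold offsets
 * [2, 7, 12], searching for offset 9 returns idx 2 (the first entry with
 * offset >= 9), and searching for offset 7 returns idx 1.
 */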
static unsigned rw_aux_tree_bsearch(struct btree *b,
				    struct bset_tree *t,
				    unsigned offset)
{
	unsigned bset_offs = offset - btree_bkey_first_offset(t);
	unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
	unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;

	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
	EBUG_ON(!t->size);
	EBUG_ON(idx > t->size);

	while (idx < t->size &&
	       rw_aux_tree(b, t)[idx].offset < offset)
		idx++;

	while (idx &&
	       rw_aux_tree(b, t)[idx - 1].offset >= offset)
		idx--;

	EBUG_ON(idx < t->size &&
		rw_aux_tree(b, t)[idx].offset < offset);
	EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
	EBUG_ON(idx + 1 < t->size &&
		rw_aux_tree(b, t)[idx].offset ==
		rw_aux_tree(b, t)[idx + 1].offset);

	return idx;
}

static inline unsigned bkey_mantissa(const struct bkey_packed *k,
				     const struct bkey_float *f)
{
	u64 v;

	EBUG_ON(!bkey_packed(k));

	v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));

	/*
	 * In little endian, we're shifting off low bits (and then the bits we
	 * want are at the low end), in big endian we're shifting off high bits
	 * (and then the bits we want are at the high end, so we shift them
	 * back down):
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	v >>= f->exponent & 7;
#else
	v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
#endif
	return (u16) v;
}

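/*
 * Worked example for bkey_mantissa() above, little endian, with a made-up
 * exponent of 13: we load a u64 at byte offset 13 >> 3 == 1 into the packed
 * key, shift right by 13 & 7 == 5, and truncate to 16 bits, leaving bits
 * 13..28 of the key as the mantissa.
 */
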
static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
					unsigned j,
					struct bkey_packed *min_key,
					struct bkey_packed *max_key)
{
	struct bkey_float *f = bkey_float(b, t, j);
	struct bkey_packed *m = tree_to_bkey(b, t, j);
	struct bkey_packed *l = is_power_of_2(j)
		? min_key
		: tree_to_bkey(b, t, j >> ffs(j));
	struct bkey_packed *r = is_power_of_2(j + 1)
		? max_key
		: tree_to_bkey(b, t, j >> (ffz(j) + 1));
	unsigned mantissa;
	int shift, exponent, high_bit;

	/*
	 * for failed bfloats, the lookup code falls back to comparing against
	 * the original key.
	 */

	if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
	    !b->nr_key_bits) {
		f->exponent = BFLOAT_FAILED_UNPACKED;
		return;
	}

	/*
	 * The greatest differing bit of l and r is the first bit we must
	 * include in the bfloat mantissa we're creating in order to do
	 * comparisons - that bit always becomes the high bit of
	 * bfloat->mantissa, and thus the exponent we're calculating here is
	 * the position of what will become the low bit in bfloat->mantissa:
	 *
	 * Note that this may be negative - we may be running off the low end
	 * of the key: we handle this later:
	 */
	high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
		       min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
	exponent = high_bit - (BKEY_MANTISSA_BITS - 1);

	/*
	 * Then we calculate the actual shift value, from the start of the key
	 * (k->_data), to get the key bits starting at exponent:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;

	EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
#else
	shift = high_bit_offset +
		b->nr_key_bits -
		exponent -
		BKEY_MANTISSA_BITS;

	EBUG_ON(shift < KEY_PACKED_BITS_START);
#endif
	EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);

	f->exponent = shift;
	mantissa = bkey_mantissa(m, f);

	/*
	 * If we've got garbage bits, set them to all 1s - it's legal for the
	 * bfloat to compare larger than the original key, but not smaller:
	 */
	if (exponent < 0)
		mantissa |= ~(~0U << -exponent);

	f->mantissa = mantissa;
}

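/*
 * Numeric sketch of the exponent math above, with assumed values: if the
 * greatest differing bit of l and r is bit 20, then with
 * BKEY_MANTISSA_BITS == 16 we get exponent = 20 - 15 = 5, so the mantissa
 * holds key bits 5..20 with bit 20 as its high bit. With only 4 key bits,
 * high_bit would be 3 and exponent -12, and the 12 garbage low bits of the
 * mantissa get filled with 1s.
 */
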
/* bytes remaining - only valid for last bset: */
static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
{
	bset_aux_tree_verify(b);

	return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
}

static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) / sizeof(struct bkey_float);
}

static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
}

static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *k;

	t->size = 1;
	t->extra = BSET_RW_AUX_TREE_VAL;
	rw_aux_tree(b, t)[0].offset =
		__btree_node_key_to_offset(b, btree_bkey_first(b, t));

	bset_tree_for_each_key(b, t, k) {
		if (t->size == bset_rw_tree_capacity(b, t))
			break;

		if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
		    L1_CACHE_BYTES)
			rw_aux_tree_set(b, t, t->size++, k);
	}
}

static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *k = btree_bkey_first(b, t);
	struct bkey_i min_key, max_key;
	unsigned cacheline = 1;

	t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
		      bset_ro_tree_capacity(b, t));
retry:
	if (t->size < 2) {
		t->size = 0;
		t->extra = BSET_NO_AUX_TREE_VAL;
		return;
	}

	t->extra = eytzinger1_extra(t->size - 1);

	/* First we figure out where the first key in each cacheline is */
	eytzinger1_for_each(j, t->size - 1) {
		while (bkey_to_cacheline(b, t, k) < cacheline)
			k = bkey_p_next(k);

		if (k >= btree_bkey_last(b, t)) {
			/* XXX: this path sucks */
			t->size--;
			goto retry;
		}

		bkey_float(b, t, j)->key_offset =
			bkey_to_cacheline_offset(b, t, cacheline++, k);

		EBUG_ON(tree_to_bkey(b, t, j) != k);
	}

	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
		bkey_init(&min_key.k);
		min_key.k.p = b->data->min_key;
	}

	if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
		bkey_init(&max_key.k);
		max_key.k.p = b->data->max_key;
	}

	/* Then we build the tree */
	eytzinger1_for_each(j, t->size - 1)
		make_bfloat(b, t, j,
			    bkey_to_packed(&min_key),
			    bkey_to_packed(&max_key));
}

static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	struct bset_tree *i;

	for (i = b->set; i != t; i++)
		BUG_ON(bset_has_rw_aux_tree(i));

	bch2_bset_set_no_aux_tree(b, t);

	/* round up to next cacheline: */
	t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
				      SMP_CACHE_BYTES / sizeof(u64));

	bset_aux_tree_verify(b);
}

void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
			      bool writeable)
{
	if (writeable
	    ? bset_has_rw_aux_tree(t)
	    : bset_has_ro_aux_tree(t))
		return;

	bset_alloc_tree(b, t);

	if (!__bset_tree_capacity(b, t))
		return;

	if (writeable)
		__build_rw_aux_tree(b, t);
	else
		__build_ro_aux_tree(b, t);

	bset_aux_tree_verify(b);
}

void bch2_bset_init_first(struct btree *b, struct bset *i)
{
	struct bset_tree *t;

	BUG_ON(b->nsets);

	memset(i, 0, sizeof(*i));
	get_random_bytes(&i->seq, sizeof(i->seq));
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}

void bch2_bset_init_next(struct btree *b, struct btree_node_entry *bne)
{
	struct bset *i = &bne->keys;
	struct bset_tree *t;

	BUG_ON(bset_byte_offset(b, bne) >= btree_buf_bytes(b));
	BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
	BUG_ON(b->nsets >= MAX_BSETS);

	memset(i, 0, sizeof(*i));
	i->seq = btree_bset_first(b)->seq;
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}

/*
 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
 * immediate predecessor:
 */
static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
				       struct bkey_packed *k)
{
	struct bkey_packed *p;
	unsigned offset;
	int j;

	EBUG_ON(k < btree_bkey_first(b, t) ||
		k > btree_bkey_last(b, t));

	if (k == btree_bkey_first(b, t))
		return NULL;

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		p = btree_bkey_first(b, t);
		break;
	case BSET_RO_AUX_TREE:
		j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));

		do {
			p = j ? tree_to_bkey(b, t,
					__inorder_to_eytzinger1(j--,
							t->size - 1, t->extra))
			      : btree_bkey_first(b, t);
		} while (p >= k);
		break;
	case BSET_RW_AUX_TREE:
		offset = __btree_node_key_to_offset(b, k);
		j = rw_aux_tree_bsearch(b, t, offset);
		p = j ? rw_aux_to_bkey(b, t, j - 1)
		      : btree_bkey_first(b, t);
		break;
	}

	return p;
}

struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
					  struct bset_tree *t,
					  struct bkey_packed *k,
					  unsigned min_key_type)
{
	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;

	while ((p = __bkey_prev(b, t, k)) && !ret) {
		for (i = p; i != k; i = bkey_p_next(i))
			if (i->type >= min_key_type)
				ret = i;

		k = p;
	}

	if (bch2_expensive_debug_checks) {
		BUG_ON(ret >= orig_k);

		for (i = ret
			? bkey_p_next(ret)
			: btree_bkey_first(b, t);
		     i != orig_k;
		     i = bkey_p_next(i))
			BUG_ON(i->type >= min_key_type);
	}

	return ret;
}

/* Insert */

static void rw_aux_tree_insert_entry(struct btree *b,
				     struct bset_tree *t,
				     unsigned idx)
{
	EBUG_ON(!idx || idx > t->size);
	struct bkey_packed *start = rw_aux_to_bkey(b, t, idx - 1);
	struct bkey_packed *end = idx < t->size
		? rw_aux_to_bkey(b, t, idx)
		: btree_bkey_last(b, t);

	if (t->size < bset_rw_tree_capacity(b, t) &&
	    (void *) end - (void *) start > L1_CACHE_BYTES) {
		struct bkey_packed *k = start;

		while (1) {
			k = bkey_p_next(k);
			if (k == end)
				break;

			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
				memmove(&rw_aux_tree(b, t)[idx + 1],
					&rw_aux_tree(b, t)[idx],
					(void *) &rw_aux_tree(b, t)[t->size] -
					(void *) &rw_aux_tree(b, t)[idx]);
				t->size++;
				rw_aux_tree_set(b, t, idx, k);
				break;
			}
		}
	}
}

static void bch2_bset_fix_lookup_table(struct btree *b,
				       struct bset_tree *t,
				       struct bkey_packed *_where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	int shift = new_u64s - clobber_u64s;
	unsigned idx, j, where = __btree_node_key_to_offset(b, _where);

	EBUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	if (where > rw_aux_tree(b, t)[t->size - 1].offset) {
		rw_aux_tree_insert_entry(b, t, t->size);
		goto verify;
	}

	/* returns first entry >= where */
	idx = rw_aux_tree_bsearch(b, t, where);

	if (rw_aux_tree(b, t)[idx].offset == where) {
		if (!idx) { /* never delete first entry */
			idx++;
		} else if (where < t->end_offset) {
			rw_aux_tree_set(b, t, idx++, _where);
		} else {
			EBUG_ON(where != t->end_offset);
			rw_aux_tree_insert_entry(b, t, --t->size);
			goto verify;
		}
	}

	EBUG_ON(idx < t->size && rw_aux_tree(b, t)[idx].offset <= where);
	if (idx < t->size &&
	    rw_aux_tree(b, t)[idx].offset + shift ==
	    rw_aux_tree(b, t)[idx - 1].offset) {
		memmove(&rw_aux_tree(b, t)[idx],
			&rw_aux_tree(b, t)[idx + 1],
			(void *) &rw_aux_tree(b, t)[t->size] -
			(void *) &rw_aux_tree(b, t)[idx + 1]);
		t->size -= 1;
	}

	for (j = idx; j < t->size; j++)
		rw_aux_tree(b, t)[j].offset += shift;

	EBUG_ON(idx < t->size &&
		rw_aux_tree(b, t)[idx].offset ==
		rw_aux_tree(b, t)[idx - 1].offset);

	rw_aux_tree_insert_entry(b, t, idx);

verify:
	bch2_bset_verify_rw_aux_tree(b, t);
	bset_aux_tree_verify(b);
}

void bch2_bset_insert(struct btree *b,
		      struct bkey_packed *where,
		      struct bkey_i *insert,
		      unsigned clobber_u64s)
{
	struct bkey_format *f = &b->format;
	struct bset_tree *t = bset_tree_last(b);
	struct bkey_packed packed, *src = bkey_to_packed(insert);

	bch2_bset_verify_rw_aux_tree(b, t);
	bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);

	if (bch2_bkey_pack_key(&packed, &insert->k, f))
		src = &packed;

	if (!bkey_deleted(&insert->k))
		btree_keys_account_key_add(&b->nr, t - b->set, src);

	if (src->u64s != clobber_u64s) {
		u64 *src_p = (u64 *) where->_data + clobber_u64s;
		u64 *dst_p = (u64 *) where->_data + src->u64s;

		EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
			(int) clobber_u64s - src->u64s);

		memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
		le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
		set_btree_bset_end(b, t);
	}

	memcpy_u64s_small(where, src,
			  bkeyp_key_u64s(f, src));
	memcpy_u64s(bkeyp_val(f, where), &insert->v,
		    bkeyp_val_u64s(f, src));

	if (src->u64s != clobber_u64s)
		bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);

	bch2_verify_btree_nr_keys(b);
}

void bch2_bset_delete(struct btree *b,
		      struct bkey_packed *where,
		      unsigned clobber_u64s)
{
	struct bset_tree *t = bset_tree_last(b);
	u64 *src_p = (u64 *) where->_data + clobber_u64s;
	u64 *dst_p = where->_data;

	bch2_bset_verify_rw_aux_tree(b, t);

	EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);

	memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
	le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
	set_btree_bset_end(b, t);

	bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
}

/* Lookup */

__flatten
static struct bkey_packed *bset_search_write_set(const struct btree *b,
				struct bset_tree *t,
				struct bpos *search)
{
	unsigned l = 0, r = t->size;

	while (l + 1 != r) {
		unsigned m = (l + r) >> 1;

		if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
			l = m;
		else
			r = m;
	}

	return rw_aux_to_bkey(b, t, l);
}

static inline void prefetch_four_cachelines(void *p)
{
#ifdef CONFIG_X86_64
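	/*
	 * The base register is biased by +127 so that all four displacements
	 * (-127 + 64 * n) fit in a signed 8 bit immediate, keeping the
	 * prefetcht0 encodings short.
	 */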
	asm("prefetcht0 (-127 + 64 * 0)(%0);"
	    "prefetcht0 (-127 + 64 * 1)(%0);"
	    "prefetcht0 (-127 + 64 * 2)(%0);"
	    "prefetcht0 (-127 + 64 * 3)(%0);"
	    :
	    : "r" (p + 127));
#else
	prefetch(p + L1_CACHE_BYTES * 0);
	prefetch(p + L1_CACHE_BYTES * 1);
	prefetch(p + L1_CACHE_BYTES * 2);
	prefetch(p + L1_CACHE_BYTES * 3);
#endif
}

static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
					      const struct bkey_float *f)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;

	return f->exponent > key_bits_start;
#else
	unsigned key_bits_end = high_bit_offset + b->nr_key_bits;

	return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
#endif
}

__flatten
static struct bkey_packed *bset_search_tree(const struct btree *b,
				const struct bset_tree *t,
				const struct bpos *search,
				const struct bkey_packed *packed_search)
{
	struct ro_aux_tree *base = ro_aux_tree_base(b, t);
	struct bkey_float *f;
	struct bkey_packed *k;
	unsigned inorder, n = 1, l, r;
	int cmp;

	do {
		if (likely(n << 4 < t->size))
			prefetch(&base->f[n << 4]);

		f = &base->f[n];
		if (unlikely(f->exponent >= BFLOAT_FAILED))
			goto slowpath;

		l = f->mantissa;
		r = bkey_mantissa(packed_search, f);

		if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f))
			goto slowpath;

		n = n * 2 + (l < r);
		continue;
slowpath:
		k = tree_to_bkey(b, t, n);
		cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
		if (!cmp)
			return k;

		n = n * 2 + (cmp < 0);
	} while (n < t->size);

	inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (likely(!(n & 1))) {
		--inorder;
		if (unlikely(!inorder))
			return btree_bkey_first(b, t);

		f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
	}

	return cacheline_to_bkey(b, t, inorder, f->key_offset);
}

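/*
 * Descent sketch for bset_search_tree() above, with a hypothetical t->size
 * of 16: starting at n = 1, each iteration compares 16 bit mantissas and
 * steps to child 2n or 2n + 1, so a path might run 1 -> 3 -> 6 -> 13 -> 27;
 * once n >= t->size the loop stops, the low bits of n record the comparison
 * results, and n >> 1 is the last node visited.
 */
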
static __always_inline __flatten
struct bkey_packed *__bch2_bset_search(struct btree *b,
				struct bset_tree *t,
				struct bpos *search,
				const struct bkey_packed *lossy_packed_search)
{

	/*
	 * First, we search for a cacheline, then we do a linear search within
	 * that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 * * The set is too small to have a search tree, so we just do a linear
	 *   search over the whole set.
	 * * The set is the one we're currently inserting into; keeping a full
	 *   auxiliary search tree up to date would be too expensive, so we
	 *   use a much simpler lookup table to do a binary search -
	 *   bset_search_write_set().
	 * * Or we use the auxiliary search tree we constructed earlier -
	 *   bset_search_tree()
	 */

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return btree_bkey_first(b, t);
	case BSET_RW_AUX_TREE:
		return bset_search_write_set(b, t, search);
	case BSET_RO_AUX_TREE:
		return bset_search_tree(b, t, search, lossy_packed_search);
	default:
		BUG();
	}
}

static __always_inline __flatten
struct bkey_packed *bch2_bset_search_linear(struct btree *b,
				struct bset_tree *t,
				struct bpos *search,
				struct bkey_packed *packed_search,
				const struct bkey_packed *lossy_packed_search,
				struct bkey_packed *m)
{
	if (lossy_packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_cmp_p_or_unp(b, m,
					lossy_packed_search, search) < 0)
			m = bkey_p_next(m);

	if (!packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_pos_cmp(b, m, search) < 0)
			m = bkey_p_next(m);

	if (bch2_expensive_debug_checks) {
		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);

		BUG_ON(prev &&
		       bkey_iter_cmp_p_or_unp(b, prev,
					packed_search, search) >= 0);
	}

	return m;
}

/* Btree node iterator */

static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
			      struct btree *b,
			      const struct bkey_packed *k,
			      const struct bkey_packed *end)
{
	if (k != end) {
		struct btree_node_iter_set *pos;

		btree_node_iter_for_each(iter, pos)
			;

		BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
		*pos = (struct btree_node_iter_set) {
			__btree_node_key_to_offset(b, k),
			__btree_node_key_to_offset(b, end)
		};
	}
}

void bch2_btree_node_iter_push(struct btree_node_iter *iter,
			       struct btree *b,
			       const struct bkey_packed *k,
			       const struct bkey_packed *end)
{
	__bch2_btree_node_iter_push(iter, b, k, end);
	bch2_btree_node_iter_sort(iter, b);
}

noinline __flatten __cold
static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
					     struct btree *b, struct bpos *search)
{
	struct bkey_packed *k;

	trace_bkey_pack_pos_fail(search);

	bch2_btree_node_iter_init_from_start(iter, b);

	while ((k = bch2_btree_node_iter_peek(iter, b)) &&
	       bkey_iter_pos_cmp(b, k, search) < 0)
		bch2_btree_node_iter_advance(iter, b);
}

/**
 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
 * given position
 *
 * @iter:	iterator to initialize
 * @b:		btree node to search
 * @search:	search key
 *
 * Main entry point to the lookup code for individual btree nodes:
 *
 * NOTE:
 *
 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
 * keys. This doesn't matter for most code, but it does matter for lookups.
 *
 * Consider a run of adjacent keys containing a string of equal keys:
 *	i j k k k k l m
 *
 * If you search for k, the lookup code isn't guaranteed to return you any
 * specific k. The lookup code is conceptually doing a binary search and
 * iterating backwards is very expensive, so if the pivot happens to land at
 * the last k that's what you'll get.
 *
 * This works out ok, but it's something to be aware of:
 *
 * - For non extents, we guarantee that the live key comes last - see
 *   btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
 *   see will only be deleted keys you don't care about.
 *
 * - For extents, deleted keys sort last (see the comment at the top of this
 *   file). But when you're searching for extents, you actually want the first
 *   key strictly greater than your search key - an extent that compares equal
 *   to the search key is going to have 0 sectors after the search key.
 *
 *   But this does mean that we can't just search for
 *   bpos_successor(start_of_range) to get the first extent that overlaps with
 *   the range we want - if we're unlucky and there's an extent that ends
 *   exactly where we searched, then there could be a deleted key at the same
 *   position and we'd get that when we search instead of the preceding extent
 *   we needed.
 *
 *   So we've got to search for start_of_range, then after the lookup iterate
 *   past any extents that compare equal to the position we searched for.
 */
__flatten
void bch2_btree_node_iter_init(struct btree_node_iter *iter,
			       struct btree *b, struct bpos *search)
{
	struct bkey_packed p, *packed_search = NULL;
	struct btree_node_iter_set *pos = iter->data;
	struct bkey_packed *k[MAX_BSETS];
	unsigned i;

	EBUG_ON(bpos_lt(*search, b->data->min_key));
	EBUG_ON(bpos_gt(*search, b->data->max_key));
	bset_aux_tree_verify(b);

	memset(iter, 0, sizeof(*iter));

	switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
	case BKEY_PACK_POS_EXACT:
		packed_search = &p;
		break;
	case BKEY_PACK_POS_SMALLER:
		packed_search = NULL;
		break;
	case BKEY_PACK_POS_FAIL:
		btree_node_iter_init_pack_failed(iter, b, search);
		return;
	}

	for (i = 0; i < b->nsets; i++) {
		k[i] = __bch2_bset_search(b, b->set + i, search, &p);
		prefetch_four_cachelines(k[i]);
	}

	for (i = 0; i < b->nsets; i++) {
		struct bset_tree *t = b->set + i;
		struct bkey_packed *end = btree_bkey_last(b, t);

		k[i] = bch2_bset_search_linear(b, t, search,
					       packed_search, &p, k[i]);
		if (k[i] != end)
			*pos++ = (struct btree_node_iter_set) {
				__btree_node_key_to_offset(b, k[i]),
				__btree_node_key_to_offset(b, end)
			};
	}

	bch2_btree_node_iter_sort(iter, b);
}

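/*
 * A minimal usage sketch of the iterator API - not code from this file -
 * walking the (non deleted) keys in a node from the start:
 *
 *	struct btree_node_iter iter;
 *	struct bkey_packed *k;
 *
 *	bch2_btree_node_iter_init_from_start(&iter, b);
 *	while ((k = bch2_btree_node_iter_peek(&iter, b))) {
 *		(use k here)
 *		bch2_btree_node_iter_advance(&iter, b);
 *	}
 */
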
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
					  struct btree *b)
{
	memset(iter, 0, sizeof(*iter));

	for_each_bset(b, t)
		__bch2_btree_node_iter_push(iter, b,
					    btree_bkey_first(b, t),
					    btree_bkey_last(b, t));
	bch2_btree_node_iter_sort(iter, b);
}

struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
						  struct btree *b,
						  struct bset_tree *t)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset)
			return __btree_node_offset_to_key(b, set->k);

	return btree_bkey_last(b, t);
}

static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
					    struct btree *b,
					    unsigned first)
{
	bool ret;

	if ((ret = (btree_node_iter_cmp(b,
					iter->data[first],
					iter->data[first + 1]) > 0)))
		swap(iter->data[first], iter->data[first + 1]);
	return ret;
}

void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
			       struct btree *b)
{
	/* unrolled bubble sort: */

	if (!__btree_node_iter_set_end(iter, 2)) {
		btree_node_iter_sort_two(iter, b, 0);
		btree_node_iter_sort_two(iter, b, 1);
	}

	if (!__btree_node_iter_set_end(iter, 1))
		btree_node_iter_sort_two(iter, b, 0);
}

void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
				   struct btree_node_iter_set *set)
{
	struct btree_node_iter_set *last =
		iter->data + ARRAY_SIZE(iter->data) - 1;

	memmove(&set[0], &set[1], (void *) last - (void *) set);
	*last = (struct btree_node_iter_set) { 0, 0 };
}

static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
						  struct btree *b)
{
	iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;

	EBUG_ON(iter->data->k > iter->data->end);

	if (unlikely(__btree_node_iter_set_end(iter, 0))) {
		/* avoid an expensive memmove call: */
		iter->data[0] = iter->data[1];
		iter->data[1] = iter->data[2];
		iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
		return;
	}

	if (__btree_node_iter_set_end(iter, 1))
		return;

	if (!btree_node_iter_sort_two(iter, b, 0))
		return;

	if (__btree_node_iter_set_end(iter, 2))
		return;

	btree_node_iter_sort_two(iter, b, 1);
}

void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
				  struct btree *b)
{
	if (bch2_expensive_debug_checks) {
		bch2_btree_node_iter_verify(iter, b);
		bch2_btree_node_iter_next_check(iter, b);
	}

	__bch2_btree_node_iter_advance(iter, b);
}

/*
 * Expensive:
 */
struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
						  struct btree *b)
{
	struct bkey_packed *k, *prev = NULL;
	struct btree_node_iter_set *set;
	unsigned end = 0;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);

	for_each_bset(b, t) {
		k = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));
		if (k &&
		    (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
			prev = k;
			end = t->end_offset;
		}
	}

	if (!prev)
		return NULL;

	/*
	 * We're manually memmoving instead of just calling sort() to ensure the
	 * prev we picked ends up in slot 0 - sort won't necessarily put it
	 * there because of duplicate deleted keys:
	 */
	btree_node_iter_for_each(iter, set)
		if (set->end == end)
			goto found;

	BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
found:
	BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));

	memmove(&iter->data[1],
		&iter->data[0],
		(void *) set - (void *) &iter->data[0]);

	iter->data[0].k = __btree_node_key_to_offset(b, prev);
	iter->data[0].end = end;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);
	return prev;
}

struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
					      struct btree *b)
{
	struct bkey_packed *prev;

	do {
		prev = bch2_btree_node_iter_prev_all(iter, b);
	} while (prev && bkey_deleted(prev));

	return prev;
}

struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
						 struct btree *b,
						 struct bkey *u)
{
	struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);

	return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
}

/* Mergesort */

void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
{
	for_each_bset_c(b, t) {
		enum bset_aux_tree_type type = bset_aux_tree_type(t);
		size_t j;

		stats->sets[type].nr++;
		stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
			sizeof(u64);

		if (bset_has_ro_aux_tree(t)) {
			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				stats->failed +=
					bkey_float(b, t, j)->exponent ==
					BFLOAT_FAILED;
		}
	}
}

void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
			 struct bkey_packed *k)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, k);
	struct bkey uk;
	unsigned j, inorder;

	if (!bset_has_ro_aux_tree(t))
		return;

	inorder = bkey_to_cacheline(b, t, k);
	if (!inorder || inorder >= t->size)
		return;

	j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
	if (k != tree_to_bkey(b, t, j))
		return;

	switch (bkey_float(b, t, j)->exponent) {
	case BFLOAT_FAILED:
		uk = bkey_unpack_key(b, k);
		prt_printf(out,
			   "	failed unpacked at depth %u\n"
			   "\t",
			   ilog2(j));
		bch2_bpos_to_text(out, uk.p);
		prt_printf(out, "\n");
		break;
	}
}