
Searched refs:btree (Results 1 – 25 of 107) sorted by relevance


/linux/fs/hpfs/
anode.c
30 btree = &anode->btree; in hpfs_bplus_lookup()
74 btree = &fnode->btree; in hpfs_add_sector_to_btree()
77 btree = &anode->btree; in hpfs_add_sector_to_btree()
94 btree = &anode->btree; in hpfs_add_sector_to_btree()
154 btree = &anode->btree; in hpfs_add_sector_to_btree()
171 btree = &anode->btree; in hpfs_add_sector_to_btree()
174 btree = &fnode->btree; in hpfs_add_sector_to_btree()
232 btree = &anode->btree; in hpfs_add_sector_to_btree()
238 btree = &fnode->btree; in hpfs_add_sector_to_btree()
410 btree = &fnode->btree; in hpfs_truncate_btree()
[all …]
map.c
180 if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != in hpfs_map_fnode()
181 (bp_internal(&fnode->btree) ? 12 : 8)) { in hpfs_map_fnode()
187 if (le16_to_cpu(fnode->btree.first_free) != in hpfs_map_fnode()
188 8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) { in hpfs_map_fnode()
235 if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != in hpfs_map_anode()
236 (bp_internal(&anode->btree) ? 60 : 40)) { in hpfs_map_anode()
240 if (le16_to_cpu(anode->btree.first_free) != in hpfs_map_anode()
241 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) { in hpfs_map_anode()
/linux/fs/bcachefs/
bset.h
241 static inline void btree_node_set_format(struct btree *b, in btree_node_set_format()
267 void bch2_btree_keys_init(struct btree *);
335 struct btree *);
337 struct btree *,
368 static inline int bkey_iter_cmp(const struct btree *b, in bkey_iter_cmp()
406 struct btree *b) in __bch2_btree_node_iter_peek_all()
443 struct btree *);
445 struct btree *);
448 struct btree *,
522 void __bch2_verify_btree_nr_keys(struct btree *);
[all …]
btree_update_interior.h
73 struct btree *b;
86 struct btree *b[BTREE_UPDATE_NODES_MAX];
101 struct btree *new_nodes[BTREE_UPDATE_NODES_MAX];
104 struct btree *old_nodes[BTREE_UPDATE_NODES_MAX];
127 struct btree *,
143 struct btree *b; in bch2_foreground_maybe_merge_sibling()
169 struct btree *, unsigned);
172 struct btree *, struct bkey_i *,
183 struct btree *b) in btree_update_reserve_required()
204 static inline void *btree_data_end(struct btree *b) in btree_data_end()
[all …]
btree_io.h
14 struct btree;
39 struct btree *b;
58 void bch2_btree_node_io_unlock(struct btree *);
59 void bch2_btree_node_io_lock(struct btree *);
60 void __bch2_btree_node_wait_on_read(struct btree *);
61 void __bch2_btree_node_wait_on_write(struct btree *);
62 void bch2_btree_node_wait_on_read(struct btree *);
63 void bch2_btree_node_wait_on_write(struct btree *);
122 void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
126 void bch2_btree_build_aux_trees(struct btree *);
[all …]
btree_cache.h
15 void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
16 int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
17 int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
26 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
27 struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
29 struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
61 static inline struct btree *btree_node_mem_ptr(const struct bkey_i *k) in btree_node_mem_ptr()
69 static inline bool btree_node_hashed(struct btree *b) in btree_node_hashed()
80 static inline size_t btree_buf_bytes(const struct btree *b) in btree_buf_bytes()
85 static inline size_t btree_buf_max_u64s(const struct btree *b) in btree_buf_max_u64s()
[all …]
btree_gc.h
39 static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level, in gc_pos_btree() argument
44 .btree = btree, in gc_pos_btree()
54 static inline struct gc_pos gc_pos_btree_node(struct btree *b) in gc_pos_btree_node()
59 static inline int gc_btree_order(enum btree_id btree) in gc_btree_order() argument
61 if (btree == BTREE_ID_stripes) in gc_btree_order()
63 return btree; in gc_btree_order()
69 cmp_int(gc_btree_order(l.btree), in gc_pos_cmp()
70 gc_btree_order(r.btree)) ?: in gc_pos_cmp()
bset.c
22 struct btree *);
157 struct btree *b) in bch2_btree_node_iter_next_check()
194 struct btree *b) in bch2_btree_node_iter_verify()
1236 struct btree *b, in __bch2_btree_node_iter_push()
1371 struct btree *b) in bch2_btree_node_iter_init_from_start()
1383 struct btree *b, in bch2_btree_node_iter_bset_pos()
1396 struct btree *b, in btree_node_iter_sort_two()
1433 struct btree *b) in __bch2_btree_node_iter_advance()
1460 struct btree *b) in bch2_btree_node_iter_advance()
1474 struct btree *b) in bch2_btree_node_iter_prev_all()
[all …]
btree_types.h
72 struct btree { struct
320 struct btree *b;
406 ? container_of(b, struct btree, c)->key.k.p in btree_node_pos()
595 static inline bool btree_node_ ## flag(struct btree *b) \
638 static inline struct bset *bset(const struct btree *b, in bset()
657 static inline struct bset *btree_bset_first(struct btree *b) in btree_bset_first()
662 static inline struct bset *btree_bset_last(struct btree *b) in btree_bset_last()
674 __btree_node_offset_to_key(const struct btree *b, u16 k) in __btree_node_offset_to_key()
773 static inline bool btree_id_is_extents(enum btree_id btree) in btree_id_is_extents() argument
812 struct btree *b;
[all …]
btree_cache.c
128 struct btree *b; in __btree_node_mem_alloc()
144 struct btree *b; in __bch2_btree_node_mem_alloc()
211 struct btree *b; in bch2_btree_node_update_key_early()
483 struct btree *b; in bch2_fs_btree_cache_exit()
635 struct btree *b; in btree_node_cannibalize()
782 struct btree *b; in bch2_btree_node_fill()
919 struct btree *b; in __bch2_btree_node_get()
1038 struct btree *b; in bch2_btree_node_get()
1114 struct btree *b; in bch2_btree_node_get_noiter()
1216 struct btree *b; in bch2_btree_node_evict()
[all …]
bbpos.h
11 return cmp_int(l.btree, r.btree) ?: bpos_cmp(l.pos, r.pos); in bbpos_cmp()
21 if (pos.btree != BTREE_ID_NR) { in bbpos_successor()
22 pos.btree++; in bbpos_successor()
32 prt_str(out, bch2_btree_id_str(pos.btree)); in bch2_bbpos_to_text()
bkey.h
54 struct btree;
57 unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
66 const struct btree *);
74 int bch2_bkey_cmp_packed(const struct btree *,
79 int __bch2_bkey_cmp_left_packed(const struct btree *,
84 int bkey_cmp_left_packed(const struct btree *b, in bkey_cmp_left_packed()
375 const struct btree *);
378 const struct btree *b) in bkey_pack_pos()
391 __bkey_unpack_key_format_checked(const struct btree *b, in __bkey_unpack_key_format_checked()
411 bkey_unpack_key_format_checked(const struct btree *b, in bkey_unpack_key_format_checked()
[all …]
debug.h
8 struct btree;
11 void __bch2_btree_verify(struct bch_fs *, struct btree *);
13 const struct btree *);
15 static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b) in bch2_btree_verify()
btree_update.h
9 struct btree;
12 struct btree_path *, struct btree *);
14 struct btree *, struct btree_node_iter *,
19 void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
71 enum btree_id btree, struct bpos pos) in bch2_btree_delete_at_buffered() argument
73 return bch2_btree_bit_mod_buffered(trans, btree, pos, false); in bch2_btree_delete_at_buffered()
87 enum btree_id btree, in bch2_insert_snapshot_whiteouts() argument
91 if (!btree_type_has_snapshots(btree) || in bch2_insert_snapshot_whiteouts()
95 return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos); in bch2_insert_snapshot_whiteouts()
130 enum btree_id btree, in bch2_trans_update_buffered() argument
[all …]
bkey_sort.h
6 struct btree *b;
15 static inline void sort_iter_init(struct sort_iter *iter, struct btree *b, unsigned size) in sort_iter_init()
27 static inline void sort_iter_stack_init(struct sort_iter_stack *iter, struct btree *b) in sort_iter_stack_init()
47 bch2_sort_repack(struct bset *, struct btree *,
bbpos_types.h
6 enum btree_id btree; member
10 static inline struct bbpos BBPOS(enum btree_id btree, struct bpos pos) in BBPOS() argument
12 return (struct bbpos) { btree, pos }; in BBPOS()
btree_write_buffer.h
38 enum btree_id btree, struct bkey_i *k) in bch2_journal_key_to_wb() argument
43 return bch2_journal_key_to_wb_slowpath(c, dst, btree, k); in bch2_journal_key_to_wb()
47 wb_k->btree = btree; in bch2_journal_key_to_wb()
btree_update_interior.c
310 struct btree *b; in __bch2_btree_node_alloc()
383 struct btree *b; in bch2_btree_node_alloc()
529 struct btree *b; in bch2_btree_reserve_get()
670 struct btree *b; in btree_update_nodes_written()
1352 struct btree *b, in bch2_insert_fixup_btree_ptr()
1414 struct btree *b, in bch2_btree_insert_keys_interior()
1600 struct btree *n[2]; in btree_split()
2208 struct btree *b; in async_btree_node_rewrite_trans()
2326 struct btree *b, struct btree *new_hash, in __bch2_btree_node_update_key()
2333 struct btree *parent; in __bch2_btree_node_update_key()
[all …]
backpointers.c
682 enum btree_id btree, unsigned level, in check_extent_to_backpointers() argument
723 struct btree *b; in check_btree_root_to_backpointers()
751 .btree = bp.btree_id, in bp_to_bbpos()
786 for (enum btree_id btree = start.btree; in bch2_get_btree_in_memory_pos() local
787 btree < BTREE_ID_NR && !ret; in bch2_get_btree_in_memory_pos()
788 btree++) { in bch2_get_btree_in_memory_pos()
791 struct btree *b; in bch2_get_btree_in_memory_pos()
793 if (!((1U << btree) & btree_leaf_mask) && in bch2_get_btree_in_memory_pos()
797 __for_each_btree_node(trans, iter, btree, in bch2_get_btree_in_memory_pos()
798 btree == start.btree ? start.pos : POS_MIN, in bch2_get_btree_in_memory_pos()
[all …]
/linux/fs/nilfs2/
btree.c
456 btree->b_inode->i_ino, in nilfs_btree_bad_node()
1230 if (!nilfs_bmap_dirty(btree)) in nilfs_btree_commit_insert()
1231 nilfs_bmap_set_dirty(btree); in nilfs_btree_commit_insert()
1580 if (!nilfs_bmap_dirty(btree)) in nilfs_btree_commit_delete()
1581 nilfs_bmap_set_dirty(btree); in nilfs_btree_commit_delete()
1602 dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; in nilfs_btree_delete()
1811 btree->b_ops->bop_clear(btree); in nilfs_btree_commit_convert_and_insert()
1817 dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; in nilfs_btree_commit_convert_and_insert()
1818 __nilfs_btree_init(btree); in nilfs_btree_commit_convert_and_insert()
2105 btree->b_inode->i_ino, in nilfs_btree_propagate()
[all …]
/linux/drivers/md/bcache/
btree.h
117 struct btree { struct
127 struct btree *parent; argument
152 static inline bool btree_node_ ## flag(struct btree *b) \ argument
180 static inline struct bset *btree_bset_first(struct btree *b) in btree_bset_first()
185 static inline struct bset *btree_bset_last(struct btree *b) in btree_bset_last()
256 static inline void rw_unlock(bool w, struct btree *b) in rw_unlock()
263 void bch_btree_node_read_done(struct btree *b);
267 void bch_btree_set_root(struct btree *b);
270 struct btree *parent);
273 struct btree *parent);
[all …]
btree.c
329 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_endio()
590 struct btree *b = kzalloc(sizeof(struct btree), gfp); in mca_bucket_alloc()
750 struct btree *b; in bch_btree_cache_free()
857 struct btree *b; in mca_find()
889 struct btree *b; in mca_cannibalize()
927 struct btree *b; in mca_alloc()
1012 struct btree *b; in bch_btree_node_get()
1066 struct btree *b; in btree_node_prefetch()
1126 struct btree *b; in __bch_btree_node_alloc()
1352 struct btree *b;
[all …]
extents.c
128 struct btree *b = container_of(keys, struct btree, keys); in bch_bkey_dump()
168 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_invalid()
173 static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) in btree_ptr_bad_expensive()
207 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_bad()
232 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_insert_fixup()
328 struct cache_set *c = container_of(b, struct btree, keys)->c; in bch_extent_insert_fixup()
502 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_invalid()
507 static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, in bch_extent_bad_expensive()
539 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_bad()
585 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_merge()
/linux/fs/xfs/libxfs/
xfs_da_btree.c
763 btree = icnodehdr.btree; in xfs_da3_root_split()
802 btree = nodehdr.btree; in xfs_da3_root_split()
1084 btree = nodehdr.btree; in xfs_da3_node_add()
1494 btree = nodehdr.btree; in xfs_da3_fixhashpath()
1533 btree = nodehdr.btree; in xfs_da3_node_remove()
1537 memmove(&btree[index], &btree[index + 1], tmp); in xfs_da3_node_remove()
1739 btree = nodehdr.btree; in xfs_da3_node_lookup_int()
2198 btree = nodehdr.btree; in xfs_da3_path_shift()
2484 btree = deadhdr.btree; in xfs_da3_swap_lastblock()
2570 btree = par_hdr.btree; in xfs_da3_swap_lastblock()
[all …]
/linux/Documentation/admin-guide/device-mapper/
persistent-data.rst
14 - Another btree-based caching target posted to dm-devel
72 dm-btree.[hc]
73 dm-btree-remove.c
74 dm-btree-spine.c
75 dm-btree-internal.h
77 Currently there is only one data structure, a hierarchical btree.
81 The btree is 'hierarchical' in that you can define it to be composed
83 thin-provisioning target uses a btree with two levels of nesting.
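
For context on the persistent-data.rst snippets just above: the "two levels of nesting" used by the thin-provisioning target means an outer btree keyed by thin device id whose values are the roots of per-device btrees keyed by virtual block. The sketch below is only a userspace illustration of that two-level lookup; the type and function names (inner_entry, outer_entry, thin_lookup) are hypothetical, and sorted arrays searched with bsearch() stand in for the on-disk btree nodes, so none of this is the dm-btree API.

/*
 * Illustrative sketch only, not the dm-btree API: the outer level maps a
 * thin device id to the root of an inner mapping, and the inner level maps
 * a virtual block to a data block.  All names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct inner_entry {            /* inner level: virtual block -> data block */
	uint64_t vblock;        /* key; must stay the first member (see cmp_key) */
	uint64_t data_block;
};

struct inner_tree {             /* stands in for the root of a per-device btree */
	const struct inner_entry *entries;
	size_t nr;
};

struct outer_entry {            /* outer level: thin device id -> inner root */
	uint64_t dev_id;        /* key; first member */
	const struct inner_tree *root;
};

/* both levels key on a leading u64, so one comparator serves for bsearch() */
static int cmp_key(const void *key, const void *elem)
{
	uint64_t k = *(const uint64_t *)key;
	uint64_t e = *(const uint64_t *)elem;

	return k < e ? -1 : k > e;
}

/* two-level lookup: device id in the outer tree, then vblock in its inner tree */
static int thin_lookup(const struct outer_entry *outer, size_t nr_outer,
		       uint64_t dev_id, uint64_t vblock, uint64_t *data_block)
{
	const struct outer_entry *o;
	const struct inner_entry *e;

	o = bsearch(&dev_id, outer, nr_outer, sizeof(*outer), cmp_key);
	if (!o)
		return -1;

	e = bsearch(&vblock, o->root->entries, o->root->nr,
		    sizeof(*o->root->entries), cmp_key);
	if (!e)
		return -1;

	*data_block = e->data_block;
	return 0;
}

int main(void)
{
	static const struct inner_entry dev0_map[] = {
		{ .vblock = 0, .data_block = 100 },
		{ .vblock = 4, .data_block = 104 },
		{ .vblock = 9, .data_block = 200 },
	};
	static const struct inner_tree dev0 = { dev0_map, 3 };
	static const struct outer_entry outer[] = { { .dev_id = 0, .root = &dev0 } };
	uint64_t db;

	if (!thin_lookup(outer, 1, 0, 4, &db))
		printf("dev 0, vblock 4 -> data block %llu\n", (unsigned long long)db);
	return 0;
}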
