Searched refs:btree (Results 1 – 25 of 47) sorted by relevance

/Linux-v5.4/fs/hpfs/
anode.c
15 struct bplus_header *btree, unsigned sec, in hpfs_bplus_lookup() argument
24 if (bp_internal(btree)) { in hpfs_bplus_lookup()
25 for (i = 0; i < btree->n_used_nodes; i++) in hpfs_bplus_lookup()
26 if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { in hpfs_bplus_lookup()
27 a = le32_to_cpu(btree->u.internal[i].down); in hpfs_bplus_lookup()
30 btree = &anode->btree; in hpfs_bplus_lookup()
37 for (i = 0; i < btree->n_used_nodes; i++) in hpfs_bplus_lookup()
38 if (le32_to_cpu(btree->u.external[i].file_secno) <= sec && in hpfs_bplus_lookup()
39 … le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) { in hpfs_bplus_lookup()
40 …a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_sec… in hpfs_bplus_lookup()
[all …]
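Note: the hpfs_bplus_lookup() fragments above scan internal entries by file_secno and descend, then map a logical sector through an external extent entry. A minimal standalone sketch of that external-node extent mapping follows; the struct and function names are hypothetical and the on-disk little-endian layout is deliberately omitted.

/*
 * Illustrative sketch only: an in-memory analogue of the extent lookup
 * hpfs_bplus_lookup() performs on external B+tree nodes. Not HPFS's
 * on-disk format.
 */
#include <stdio.h>

struct extent {
    unsigned file_secno;   /* first logical sector covered by the run */
    unsigned disk_secno;   /* corresponding physical sector */
    unsigned length;       /* number of sectors in the run */
};

/* Return the physical sector backing logical sector @sec, or -1 if unmapped. */
static long extent_lookup(const struct extent *ext, int n, unsigned sec)
{
    for (int i = 0; i < n; i++)
        if (ext[i].file_secno <= sec &&
            ext[i].file_secno + ext[i].length > sec)
            return ext[i].disk_secno + (sec - ext[i].file_secno);
    return -1;
}

int main(void)
{
    struct extent map[] = {
        { .file_secno = 0, .disk_secno = 100, .length = 8 },
        { .file_secno = 8, .disk_secno = 300, .length = 4 },
    };

    printf("%ld\n", extent_lookup(map, 2, 10)); /* prints 302 */
    return 0;
}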
map.c
180 if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != in hpfs_map_fnode()
181 (bp_internal(&fnode->btree) ? 12 : 8)) { in hpfs_map_fnode()
187 if (le16_to_cpu(fnode->btree.first_free) != in hpfs_map_fnode()
188 8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) { in hpfs_map_fnode()
235 if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != in hpfs_map_anode()
236 (bp_internal(&anode->btree) ? 60 : 40)) { in hpfs_map_anode()
240 if (le16_to_cpu(anode->btree.first_free) != in hpfs_map_anode()
241 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) { in hpfs_map_anode()
alloc.c
466 f->btree.n_free_nodes = 8; in hpfs_alloc_fnode()
467 f->btree.first_free = cpu_to_le16(8); in hpfs_alloc_fnode()
483 a->btree.n_free_nodes = 40; in hpfs_alloc_anode()
484 a->btree.n_used_nodes = 0; in hpfs_alloc_anode()
485 a->btree.first_free = cpu_to_le16(8); in hpfs_alloc_anode()
/Linux-v5.4/fs/nilfs2/
btree.c
58 static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree, in nilfs_btree_get_new_block() argument
61 struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; in nilfs_btree_get_new_block()
111 static int nilfs_btree_node_size(const struct nilfs_bmap *btree) in nilfs_btree_node_size() argument
113 return i_blocksize(btree->b_inode); in nilfs_btree_node_size()
116 static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree) in nilfs_btree_nchildren_per_block() argument
118 return btree->b_nchildren_per_block; in nilfs_btree_nchildren_per_block()
409 nilfs_btree_get_root(const struct nilfs_bmap *btree) in nilfs_btree_get_root() argument
411 return (struct nilfs_btree_node *)btree->b_u.u_data; in nilfs_btree_get_root()
426 static int nilfs_btree_height(const struct nilfs_bmap *btree) in nilfs_btree_height() argument
428 return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1; in nilfs_btree_height()
[all …]
Makefile
4 btnode.o bmap.o btree.o direct.o dat.o recovery.o \
/Linux-v5.4/drivers/md/bcache/
btree.h
117 struct btree { struct
129 struct btree *parent; argument
151 static inline bool btree_node_ ## flag(struct btree *b) \ argument
154 static inline void set_btree_node_ ## flag(struct btree *b) \
169 static inline struct btree_write *btree_current_write(struct btree *b) in btree_current_write()
174 static inline struct btree_write *btree_prev_write(struct btree *b) in btree_prev_write()
179 static inline struct bset *btree_bset_first(struct btree *b) in btree_bset_first()
184 static inline struct bset *btree_bset_last(struct btree *b) in btree_bset_last()
189 static inline unsigned int bset_block_offset(struct btree *b, struct bset *i) in bset_block_offset()
228 static inline void rw_lock(bool w, struct btree *b, int level) in rw_lock()
[all …]
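Note: the btree.h excerpt above shows per-flag accessor functions being generated by token pasting (btree_node_##flag() / set_btree_node_##flag()). A self-contained approximation of that pattern follows; it uses plain bit operations in place of the kernel's bit helpers, and the struct and flag names here are only illustrative.

/*
 * Sketch of the accessor-generating macro pattern: one macro expansion
 * per flag produces a test helper and a set helper.
 */
#include <stdbool.h>
#include <stdio.h>

struct btree_stub {
    unsigned long flags;
};

#define BTREE_FLAG(flag)                                              \
static inline bool btree_node_ ## flag(struct btree_stub *b)         \
{ return b->flags & (1UL << BTREE_NODE_ ## flag); }                  \
static inline void set_btree_node_ ## flag(struct btree_stub *b)     \
{ b->flags |= 1UL << BTREE_NODE_ ## flag; }

enum btree_flags { BTREE_NODE_io_error, BTREE_NODE_dirty };

BTREE_FLAG(io_error)
BTREE_FLAG(dirty)

int main(void)
{
    struct btree_stub b = { 0 };

    set_btree_node_dirty(&b);
    printf("dirty=%d io_error=%d\n",
           btree_node_dirty(&b), btree_node_io_error(&b));
    return 0;
}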
btree.c
122 #define btree(fn, key, b, op, ...) \ macro
126 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
146 struct btree *_b = (c)->root; \
163 static inline struct bset *write_block(struct btree *b) in write_block()
168 static void bch_btree_init_next(struct btree *b) in bch_btree_init_next()
195 static uint64_t btree_csum_set(struct btree *b, struct bset *i) in btree_csum_set()
204 void bch_btree_node_read_done(struct btree *b) in bch_btree_node_read_done()
299 static void bch_btree_node_read(struct btree *b) in bch_btree_node_read()
337 static void btree_complete_write(struct btree *b, struct btree_write *w) in btree_complete_write()
354 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_unlock()
[all …]
extents.c
128 struct btree *b = container_of(keys, struct btree, keys); in bch_bkey_dump()
168 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_invalid()
173 static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) in btree_ptr_bad_expensive()
207 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_bad()
232 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_insert_fixup()
328 struct cache_set *c = container_of(b, struct btree, keys)->c; in bch_extent_insert_fixup()
502 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_invalid()
507 static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, in bch_extent_bad_expensive()
539 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_bad()
585 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_merge()
debug.h
11 void bch_btree_verify(struct btree *b);
20 static inline void bch_btree_verify(struct btree *b) {} in bch_btree_verify()
Makefile
5 bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
bcache.h
222 struct btree;
276 int (*cache_miss)(struct btree *b, struct search *s,
659 struct btree *root;
662 struct btree *verify_data;
Kconfig
8 a btree for indexing and the layout is optimized for SSDs.
debug.c
31 void bch_btree_verify(struct btree *b) in bch_btree_verify()
33 struct btree *v = b->c->verify_data; in bch_btree_verify()
/Linux-v5.4/fs/xfs/libxfs/
xfs_da_btree.c
546 struct xfs_da_node_entry *btree; in xfs_da3_root_split() local
581 btree = dp->d_ops->node_tree_p(oldroot); in xfs_da3_root_split()
582 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot); in xfs_da3_root_split()
641 btree = dp->d_ops->node_tree_p(node); in xfs_da3_root_split()
642 btree[0].hashval = cpu_to_be32(blk1->hashval); in xfs_da3_root_split()
643 btree[0].before = cpu_to_be32(blk1->blkno); in xfs_da3_root_split()
644 btree[1].hashval = cpu_to_be32(blk2->hashval); in xfs_da3_root_split()
645 btree[1].before = cpu_to_be32(blk2->blkno); in xfs_da3_root_split()
661 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2)); in xfs_da3_root_split()
917 struct xfs_da_node_entry *btree; in xfs_da3_node_add() local
[all …]
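Note: in the xfs_da3_root_split() excerpt above, once the old root has been split into two sibling blocks, the new root simply records each child's highest hash value and block number. A simplified, hypothetical sketch of that new-root setup follows; the types, names, and byte order are not XFS's on-disk layout.

/* Fill a two-entry root after a split: one entry per child block. */
#include <stdint.h>
#include <stdio.h>

struct node_entry { uint32_t hashval; uint32_t before; };
struct split_blk  { uint32_t hashval; uint32_t blkno; };

static void init_new_root(struct node_entry btree[2],
                          const struct split_blk *blk1,
                          const struct split_blk *blk2)
{
    btree[0].hashval = blk1->hashval;
    btree[0].before  = blk1->blkno;
    btree[1].hashval = blk2->hashval;
    btree[1].before  = blk2->blkno;
}

int main(void)
{
    struct split_blk left  = { .hashval = 0x1000, .blkno = 7 };
    struct split_blk right = { .hashval = 0xffff, .blkno = 8 };
    struct node_entry root[2];

    init_new_root(root, &left, &right);
    printf("root: [%#x -> %u] [%#x -> %u]\n",
           root[0].hashval, root[0].before,
           root[1].hashval, root[1].before);
    return 0;
}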
/Linux-v5.4/Documentation/admin-guide/device-mapper/
persistent-data.rst
14 - Another btree-based caching target posted to dm-devel
72 dm-btree.[hc]
73 dm-btree-remove.c
74 dm-btree-spine.c
75 dm-btree-internal.h
77 Currently there is only one data structure, a hierarchical btree.
81 The btree is 'hierarchical' in that you can define it to be composed
83 thin-provisioning target uses a btree with two levels of nesting.
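Note: as a rough illustration of the nesting persistent-data.rst describes (this is not the dm-btree API; flat arrays stand in for the on-disk trees, and all names are illustrative), a two-level lookup keyed by (device id, virtual block), like the thin-provisioning target's mapping, can be sketched as:

#include <stdint.h>
#include <stdio.h>

struct entry { uint64_t key; uint64_t value; };

/* Linear scan standing in for a btree lookup at one level. */
static const struct entry *map_lookup(const struct entry *e, int n, uint64_t key)
{
    for (int i = 0; i < n; i++)
        if (e[i].key == key)
            return &e[i];
    return NULL;
}

/* Inner level: per-device map of virtual block -> data block. */
static const struct entry dev_maps[][2] = {
    { { 0, 17 }, { 1, 42 } },   /* device 0 */
    { { 0, 99 }, { 1, 100 } },  /* device 1 */
};

/* Outer level: device id -> root (here, an index) of its inner map. */
static const struct entry dev_roots[] = { { 0, 0 }, { 1, 1 } };

int main(void)
{
    /* Two-level lookup: first by device id, then by virtual block. */
    const struct entry *root = map_lookup(dev_roots, 2, 0);
    const struct entry *m = map_lookup(dev_maps[root->value], 2, 1);

    printf("dev 0, block 1 -> data block %llu\n",
           (unsigned long long)m->value);
    return 0;
}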
/Linux-v5.4/drivers/md/persistent-data/
Makefile
11 dm-btree.o \
12 dm-btree-remove.o \
13 dm-btree-spine.o
/Linux-v5.4/include/trace/events/
bcache.h
64 TP_PROTO(struct btree *b),
258 TP_PROTO(struct btree *b),
263 TP_PROTO(struct btree *b),
282 TP_PROTO(struct btree *b),
292 TP_PROTO(struct btree *b),
332 TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
365 TP_PROTO(struct btree *b, unsigned keys),
382 TP_PROTO(struct btree *b, unsigned keys),
387 TP_PROTO(struct btree *b, unsigned keys),
392 TP_PROTO(struct btree *b),
/Linux-v5.4/fs/xfs/
xfs_attr_inactive.c
204 struct xfs_da_node_entry *btree; in xfs_attr3_node_inactive() local
222 btree = dp->d_ops->node_tree_p(node); in xfs_attr3_node_inactive()
223 child_fsb = be32_to_cpu(btree[0].before); in xfs_attr3_node_inactive()
287 btree = dp->d_ops->node_tree_p(node); in xfs_attr3_node_inactive()
288 child_fsb = be32_to_cpu(btree[i + 1].before); in xfs_attr3_node_inactive()
xfs_attr_list.c
203 struct xfs_da_node_entry *btree; in xfs_attr_node_list_lookup() local
246 btree = dp->d_ops->node_tree_p(node); in xfs_attr_node_list_lookup()
247 for (i = 0; i < nodehdr.count; btree++, i++) { in xfs_attr_node_list_lookup()
248 if (cursor->hashval <= be32_to_cpu(btree->hashval)) { in xfs_attr_node_list_lookup()
249 cursor->blkno = be32_to_cpu(btree->before); in xfs_attr_node_list_lookup()
251 btree); in xfs_attr_node_list_lookup()
/Linux-v5.4/fs/befs/
ChangeLog
27 * Did the string comparison really right this time (btree.c) [WD]
30 a pointer value. (btree.c) [WD]
38 keys within btree nodes, rather than the linear search we were using
39 before. (btree.c) [Sergey S. Kostyliov <rathamahata@php4.ru>]
56 (btree.c) [WD]
105 * Removed notion of btree handle from btree.c. It was unnecessary, as the
128 (btree.c) [WD]
133 seekleaf() in btree.c [WD]
148 (datastream.c, btree.c super.c inode.c) [WD]
253 * Fix bug with reading an empty directory. (btree.c and dir.c)
[all …]
Makefile
8 befs-objs := datastream.o btree.o super.o inode.o debug.o io.o linuxvfs.o
/Linux-v5.4/Documentation/admin-guide/
bcache.rst
15 in erase block sized buckets, and it uses a hybrid btree/log to track cached
381 the way cache coherency is handled for cache misses. If a btree node is full,
386 cause the btree node to be split, and you need almost no write traffic for
387 this to not show up enough to be noticeable (especially since bcache's btree
513 Average data per key in the btree.
522 Amount of memory currently used by the btree cache
556 Percentage of the root btree node in use. If this gets too high the node
564 Depth of the btree (A single node btree has depth 0).
575 duration: garbage collection, btree read, btree node sorts and btree splits.
581 Total nodes in the btree.
[all …]
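Note: the bcache.rst excerpt above describes per-cache-set btree statistics (depth, root node usage, btree cache size, average data per key) exposed through sysfs. A minimal sketch for dumping such attributes follows; it takes the attribute paths on the command line, since exact names and locations under /sys/fs/bcache/<cset-uuid>/ are as documented in bcache.rst for the kernel in use, and the UUID component is a placeholder.

/* Print each sysfs attribute passed as an argument, e.g.
 *   ./dump_stats /sys/fs/bcache/<cset-uuid>/tree_depth
 */
#include <stdio.h>

int main(int argc, char **argv)
{
    char buf[128];

    for (int i = 1; i < argc; i++) {
        FILE *f = fopen(argv[i], "r");

        if (!f) {
            perror(argv[i]);
            continue;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("%s: %s", argv[i], buf);
        fclose(f);
    }
    return 0;
}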
/Linux-v5.4/fs/hfs/
Makefile
8 hfs-objs := bitmap.o bfind.o bnode.o brec.o btree.o \
/Linux-v5.4/fs/hfsplus/
Makefile
8 hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
/Linux-v5.4/Documentation/ABI/testing/
sysfs-block-bcache
134 For a cache, height of the btree excluding leaf nodes (i.e. a
141 Number of btree buckets/nodes that are currently cached in
156 For a cache, sum of all btree writes in human readable units.
