/Linux-v6.1/tools/testing/radix-tree/

  tag_check.c
       8  #include <linux/radix-tree.h>
      14  __simple_checks(struct radix_tree_root *tree, unsigned long index, int tag)   in __simple_checks() argument
      19  item_check_absent(tree, index);   in __simple_checks()
      20  assert(item_tag_get(tree, index, tag) == 0);   in __simple_checks()
      22  item_insert(tree, index);   in __simple_checks()
      23  assert(item_tag_get(tree, index, tag) == 0);   in __simple_checks()
      24  item_tag_set(tree, index, tag);   in __simple_checks()
      25  ret = item_tag_get(tree, index, tag);   in __simple_checks()
      27  ret = tag_tagged_items(tree, first, ~0UL, 10, tag, !tag);   in __simple_checks()
      29  ret = item_tag_get(tree, index, !tag);   in __simple_checks()
          [all …]

  main.c
      10  #include <linux/radix-tree.h>
      18  RADIX_TREE(tree, GFP_KERNEL);   in __gang_check()
      23  item_insert(&tree, middle + idx);   in __gang_check()
      25  item_check_absent(&tree, middle - down - 1);   in __gang_check()
      27  item_check_present(&tree, middle + idx);   in __gang_check()
      28  item_check_absent(&tree, middle + up);   in __gang_check()
      31  item_gang_check_present(&tree, middle - down, up + down,   in __gang_check()
      33  item_full_scan(&tree, middle - down, down + up, chunk);   in __gang_check()
      35  item_kill_tree(&tree);   in __gang_check()
      81  RADIX_TREE(tree, GFP_KERNEL);   in add_and_check()
          [all …]

/Linux-v6.1/fs/hfs/

  btree.c
      18  /* Get a reference to a B*Tree and do some initial checks */
      21  struct hfs_btree *tree;   in hfs_btree_open() local
      27  tree = kzalloc(sizeof(*tree), GFP_KERNEL);   in hfs_btree_open()
      28  if (!tree)   in hfs_btree_open()
      31  mutex_init(&tree->tree_lock);   in hfs_btree_open()
      32  spin_lock_init(&tree->hash_lock);   in hfs_btree_open()
      34  tree->sb = sb;   in hfs_btree_open()
      35  tree->cnid = id;   in hfs_btree_open()
      36  tree->keycmp = keycmp;   in hfs_btree_open()
      38  tree->inode = iget_locked(sb, id);   in hfs_btree_open()
          [all …]

  brec.c
      16  static int hfs_btree_inc_height(struct hfs_btree *tree);
      24  dataoff = node->tree->node_size - (rec + 2) * 2;   in hfs_brec_lenoff()
      39  !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) {   in hfs_brec_keylen()
      40  if (node->tree->attributes & HFS_TREE_BIGKEYS)   in hfs_brec_keylen()
      41  retval = node->tree->max_key_len + 2;   in hfs_brec_keylen()
      43  retval = node->tree->max_key_len + 1;   in hfs_brec_keylen()
      45  recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2);   in hfs_brec_keylen()
      48  if (node->tree->attributes & HFS_TREE_BIGKEYS) {   in hfs_brec_keylen()
      50  if (retval > node->tree->max_key_len + 2) {   in hfs_brec_keylen()
      56  if (retval > node->tree->max_key_len + 1) {   in hfs_brec_keylen()
          [all …]

  bnode.c
      30  if (pagenum >= node->tree->pages_per_bnode)   in hfs_bnode_read()
      60  struct hfs_btree *tree;   in hfs_bnode_read_key() local
      63  tree = node->tree;   in hfs_bnode_read_key()
      65  tree->attributes & HFS_TREE_VARIDXKEYS)   in hfs_bnode_read_key()
      68  key_len = tree->max_key_len + 1;   in hfs_bnode_read_key()
     154  off = node->tree->node_size - 2;   in hfs_bnode_dump()
     161  if (node->tree->attributes & HFS_TREE_VARIDXKEYS)   in hfs_bnode_dump()
     164  tmp = node->tree->max_key_len + 1;   in hfs_bnode_dump()
     181  struct hfs_btree *tree;   in hfs_bnode_unlink() local
     185  tree = node->tree;   in hfs_bnode_unlink()
          [all …]

  bfind.c
      15  int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)   in hfs_find_init() argument
      19  fd->tree = tree;   in hfs_find_init()
      21  ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);   in hfs_find_init()
      25  fd->key = ptr + tree->max_key_len + 2;   in hfs_find_init()
      27  tree->cnid, __builtin_return_address(0));   in hfs_find_init()
      28  switch (tree->cnid) {   in hfs_find_init()
      30  mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);   in hfs_find_init()
      33  mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);   in hfs_find_init()
      36  mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);   in hfs_find_init()
      49  fd->tree->cnid, __builtin_return_address(0));   in hfs_find_exit()
          [all …]

/Linux-v6.1/fs/hfsplus/

  btree.c
      42  * Catalog B-tree Header
      47  * Attributes B-tree Header
     132  /* Get a reference to a B*Tree and do some initial checks */
     135  struct hfs_btree *tree;   in hfs_btree_open() local
     142  tree = kzalloc(sizeof(*tree), GFP_KERNEL);   in hfs_btree_open()
     143  if (!tree)   in hfs_btree_open()
     146  mutex_init(&tree->tree_lock);   in hfs_btree_open()
     147  spin_lock_init(&tree->hash_lock);   in hfs_btree_open()
     148  tree->sb = sb;   in hfs_btree_open()
     149  tree->cnid = id;   in hfs_btree_open()
          [all …]

  brec.c
      25  dataoff = node->tree->node_size - (rec + 2) * 2;   in hfs_brec_lenoff()
      40  !(node->tree->attributes & HFS_TREE_VARIDXKEYS) &&   in hfs_brec_keylen()
      41  (node->tree->cnid != HFSPLUS_ATTR_CNID)) {   in hfs_brec_keylen()
      42  retval = node->tree->max_key_len + 2;   in hfs_brec_keylen()
      45  node->tree->node_size - (rec + 1) * 2);   in hfs_brec_keylen()
      48  if (recoff > node->tree->node_size - 2) {   in hfs_brec_keylen()
      54  if (retval > node->tree->max_key_len + 2) {   in hfs_brec_keylen()
      65  struct hfs_btree *tree;   in hfs_brec_insert() local
      72  tree = fd->tree;   in hfs_brec_insert()
      74  if (!tree->root)   in hfs_brec_insert()
          [all …]

  bnode.c
      59  struct hfs_btree *tree;   in hfs_bnode_read_key() local
      62  tree = node->tree;   in hfs_bnode_read_key()
      64  tree->attributes & HFS_TREE_VARIDXKEYS ||   in hfs_bnode_read_key()
      65  node->tree->cnid == HFSPLUS_ATTR_CNID)   in hfs_bnode_read_key()
      68  key_len = tree->max_key_len + 2;   in hfs_bnode_read_key()
     303  off = node->tree->node_size - 2;   in hfs_bnode_dump()
     310  if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||   in hfs_bnode_dump()
     311  node->tree->cnid == HFSPLUS_ATTR_CNID)   in hfs_bnode_dump()
     314  tmp = node->tree->max_key_len + 2;   in hfs_bnode_dump()
     330  struct hfs_btree *tree;   in hfs_bnode_unlink() local
          [all …]

  bfind.c
      15  int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)   in hfs_find_init() argument
      19  fd->tree = tree;   in hfs_find_init()
      21  ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);   in hfs_find_init()
      25  fd->key = ptr + tree->max_key_len + 2;   in hfs_find_init()
      27  tree->cnid, __builtin_return_address(0));   in hfs_find_init()
      28  switch (tree->cnid) {   in hfs_find_init()
      30  mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);   in hfs_find_init()
      33  mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);   in hfs_find_init()
      36  mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);   in hfs_find_init()
      49  fd->tree->cnid, __builtin_return_address(0));   in hfs_find_exit()
          [all …]

/Linux-v6.1/fs/btrfs/

  extent-io-tree.c
       6  #include "extent-io-tree.h"
      45  pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",   in btrfs_extent_state_leak_debug_check()
      54  #define btrfs_debug_check_extent_io_range(tree, start, end) \   argument
      55  __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
      57  struct extent_io_tree *tree,   in __btrfs_debug_check_extent_io_range() argument
      60  struct inode *inode = tree->private_data;   in __btrfs_debug_check_extent_io_range()
      83  * the tree lock and get the inode lock when setting delalloc. These two things
      96  struct extent_io_tree *tree, unsigned int owner,   in extent_io_tree_init() argument
      99  tree->fs_info = fs_info;   in extent_io_tree_init()
     100  tree->state = RB_ROOT;   in extent_io_tree_init()
          [all …]

  extent-io-tree.h
      51  * Redefined bits above which are used only in the device allocation tree,
      80  /* Who owns this io tree, should be one of IO_TREE_* */
     102  struct extent_io_tree *tree, unsigned int owner,
     104  void extent_io_tree_release(struct extent_io_tree *tree);
     106  int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
     109  int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
     114  u64 count_range_bits(struct extent_io_tree *tree,
     119  int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
     121  int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
     123  int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
          [all …]

  extent_map.c
      31  * extent_map_tree_init - initialize extent map tree
      32  * @tree: tree to initialize
      34  * Initialize the extent tree @tree. Should be called for each new inode
      37  void extent_map_tree_init(struct extent_map_tree *tree)   in extent_map_tree_init() argument
      39  tree->map = RB_ROOT_CACHED;   in extent_map_tree_init()
      40  INIT_LIST_HEAD(&tree->modified_extents);   in extent_map_tree_init()
      41  rwlock_init(&tree->lock);   in extent_map_tree_init()
     140  * search through the tree for an extent_map with a given offset. If
     239  static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)   in try_merge_map() argument
     245  * We can't modify an extent map that is in the tree and that is being   in try_merge_map()
          [all …]

  ordered-data.c
      31  * in the tree
      58  * look for a given offset in the tree, and if it can't be found return the
     124  static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,   in tree_search() argument
     127  struct rb_root *root = &tree->tree;   in tree_search()
     132  if (tree->last) {   in tree_search()
     133  entry = rb_entry(tree->last, struct btrfs_ordered_extent,   in tree_search()
     136  return tree->last;   in tree_search()
     142  tree->last = ret;   in tree_search()
     147  * Add an ordered extent to the per-inode tree.
     160  * tree is given a single reference on the ordered extent that was inserted.
          [all …]

/Linux-v6.1/kernel/

  audit_tree.c
      61  * the same tree.
      68  * tree.chunks anchors chunk.owners[].list			hash_lock
      69  * tree.rules anchors rule.rlist				audit_filter_mutex
      70  * chunk.trees anchors tree.same_root				hash_lock
      74  * tree is refcounted; one reference for "some rules on rules_list refer to
      95  struct audit_tree *tree;   in alloc_tree() local
      97  tree = kmalloc(struct_size(tree, pathname, strlen(s) + 1), GFP_KERNEL);   in alloc_tree()
      98  if (tree) {   in alloc_tree()
      99  refcount_set(&tree->count, 1);   in alloc_tree()
     100  tree->goner = 0;   in alloc_tree()
          [all …]

/Linux-v6.1/Documentation/core-api/

  maple_tree.rst
       5  Maple Tree
      13  The Maple Tree is a B-Tree data type which is optimized for storing
      14  non-overlapping ranges, including ranges of size 1. The tree was designed to
      17  entry in a cache-efficient manner. The tree can also be put into an RCU-safe
      22  The Maple Tree maintains a small memory footprint and was designed to use
      24  use the normal API. An :ref:`maple-tree-advanced-api` exists for more complex
      25  scenarios. The most important usage of the Maple Tree is the tracking of the
      28  The Maple Tree can store values between ``0`` and ``ULONG_MAX``. The Maple
      29  Tree reserves values with the bottom two bits set to '10' which are below 4096
      34  :ref:`maple-tree-advanced-api`, but are blocked by the normal API.
          [all …]

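  As a quick illustration of the "normal API" that the maple_tree.rst hits describe, here is a
  hedged sketch of storing a range and reading it back from kernel code. The tree name, the stored
  pointer and the chosen index range are made up for the example; only DEFINE_MTREE(),
  mtree_store_range(), mtree_load(), mtree_erase() and mtree_destroy() are taken from the
  documented API.

    #include <linux/maple_tree.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    /* Statically initialized tree using the normal (internally locked) API. */
    static DEFINE_MTREE(example_mt);
    static int example_value = 42;

    static int maple_tree_example(void)
    {
            void *entry;
            int ret;

            /* Store one entry covering the index range [10, 19]. */
            ret = mtree_store_range(&example_mt, 10, 19, &example_value, GFP_KERNEL);
            if (ret)
                    return ret;

            /* Any index inside the stored range returns the same entry. */
            entry = mtree_load(&example_mt, 15);

            /* Erase by any index within the range, then free the tree. */
            mtree_erase(&example_mt, 12);
            mtree_destroy(&example_mt);

            return entry == &example_value ? 0 : -EINVAL;
    }

  The example stores the address of an ordinary kernel object because, as noted above, values with
  the bottom two bits set to '10' below 4096 are reserved by the tree itself.
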
/Linux-v6.1/include/linux/

  maple_tree.h
       5  * Maple Tree - An RCU-safe adaptive tree for storing ranges
      18  * Allocated nodes are mutable until they have been inserted into the tree,
      20  * from the tree and an RCU grace period has passed.
      26  * Nodes in the tree point to their parent unless bit 0 is set.
      48  * is a pointer to the tree itself. No more bits are available in this pointer
      52  * parent pointer is 256B aligned like all other tree nodes. When storing a 32
      59  * type is done by examining the immutable tree flag for the MAPLE_ALLOC_RANGE
      99  * In regular B-Tree terms, pivots are called keys. The term pivot is used to
     100  * indicate that the tree is specifying ranges, Pivots may appear in the
     102  * specific position of a B-tree. Pivot values are inclusive of the slot with
          [all …]

  rbtree.h
      43  /* Find logical next and previous nodes in a tree */
      97  * rb_erase() may rebalance the tree, causing us to miss some nodes.
     157  * rb_add_cached() - insert @node into the leftmost cached tree @tree
     159  * @tree: leftmost cached tree to insert @node into
     165  rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,   in rb_add_cached() argument
     168  struct rb_node **link = &tree->rb_root.rb_node;   in rb_add_cached()
     183  rb_insert_color_cached(node, tree, leftmost);   in rb_add_cached()
     189  * rb_add() - insert @node into @tree
     191  * @tree: tree to insert @node into
     195  rb_add(struct rb_node *node, struct rb_root *tree,   in rb_add() argument
          [all …]

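  The rb_add() and rb_add_cached() helpers shown above replace the traditional open-coded tree
  descent with a comparison callback. A minimal sketch of a caller follows; struct example_node,
  its key field and example_less() are hypothetical names, only the rb_add() signature comes from
  rbtree.h.

    #include <linux/rbtree.h>
    #include <linux/types.h>

    /* Hypothetical payload embedding an rb_node, ordered by 'key'. */
    struct example_node {
            struct rb_node node;
            unsigned long key;
    };

    /* Callback required by rb_add(): return true if @a sorts before @b. */
    static bool example_less(struct rb_node *a, const struct rb_node *b)
    {
            const struct example_node *nb = rb_entry(b, struct example_node, node);
            struct example_node *na = rb_entry(a, struct example_node, node);

            return na->key < nb->key;
    }

    static void example_insert(struct rb_root *root, struct example_node *new)
    {
            /* Walks the tree using example_less() and rebalances after linking. */
            rb_add(&new->node, root, example_less);
    }
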
/Linux-v6.1/tools/include/linux/

  rbtree.h
      52  /* Find logical next and previous nodes in a tree */
      95  * rb_erase() may rebalance the tree, causing us to miss some nodes.
     172  * rb_add_cached() - insert @node into the leftmost cached tree @tree
     174  * @tree: leftmost cached tree to insert @node into
     178  rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,   in rb_add_cached() argument
     181  struct rb_node **link = &tree->rb_root.rb_node;   in rb_add_cached()
     196  rb_insert_color_cached(node, tree, leftmost);   in rb_add_cached()
     200  * rb_add() - insert @node into @tree
     202  * @tree: tree to insert @node into
     206  rb_add(struct rb_node *node, struct rb_root *tree,   in rb_add() argument
          [all …]

/Linux-v6.1/Documentation/devicetree/

  of_unittest.rst
      13  is attached to the live tree dynamically, independent of the machine's
      23  from the unflattened device tree data structure. This interface is used by
      51  The Device Tree Source file (drivers/of/unittest-data/testcases.dts) contains
      53  drivers/of/unittest.c. Currently, following Device Tree Source Include files
      83  Un-flattened device tree structure:
      85  Un-flattened device tree consists of connected device_node(s) in form of a tree
      88  // following struct members are used to construct the tree
      97  Figure 1, describes a generic structure of machine's un-flattened device tree
      99  ``*parent``, that is used to traverse the tree in the reverse direction. So, at
     126  Figure 1: Generic structure of un-flattened device tree
          [all …]

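  The of_unittest.rst excerpt describes how the unflattened tree is stitched together from
  struct device_node pointers (``*parent`` for reverse traversal, plus the child/sibling links).
  The sketch below only illustrates that linkage with a hypothetical walker; real code would
  normally use the for_each_child_of_node() iterator, which also handles node reference counting.

    #include <linux/of.h>
    #include <linux/printk.h>

    /*
     * Hypothetical helper: visit every direct child of @parent by following
     * the linkage members the documentation describes:
     *   parent  - points back toward the root (reverse traversal)
     *   child   - first child of this node
     *   sibling - next node sharing the same parent
     */
    static void example_walk_children(struct device_node *parent)
    {
            struct device_node *np;

            for (np = parent->child; np; np = np->sibling)
                    pr_info("child %pOFn of %pOFn\n", np, np->parent);
    }
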
/Linux-v6.1/sound/hda/

  hdac_sysfs.c
      79  * Widget tree sysfs
      81  * This is a tree showing the attributes of each widget. It appears like
     322  struct hdac_widget_tree *tree = codec->widgets;   in widget_tree_free() local
     325  if (!tree)   in widget_tree_free()
     327  free_widget_node(tree->afg, &widget_afg_group);   in widget_tree_free()
     328  if (tree->nodes) {   in widget_tree_free()
     329  for (p = tree->nodes; *p; p++)   in widget_tree_free()
     331  kfree(tree->nodes);   in widget_tree_free()
     333  kobject_put(tree->root);   in widget_tree_free()
     334  kfree(tree);   in widget_tree_free()
          [all …]

/Linux-v6.1/scripts/dtc/libfdt/

  fdt_overlay.c
       3  * libfdt - Flat Device Tree manipulation
      16  * @fdto: pointer to the device tree overlay blob
      45  * @fdt: Base device tree blob
      46  * @fdto: Device tree overlay blob
      51  * device tree of a fragment, no matter how the actual targeting is
      55  * the targeted node offset in the base device tree
     104  * @fdt: Base device tree blob
     105  * @node: Device tree overlay blob
     143  * @fdto: Device tree overlay blob
     150  * phandles to not conflict with the overlays of the base device tree.
          [all …]

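  The fdt_overlay.c kernel-doc fragments above all revolve around merging an overlay blob (@fdto)
  into a base blob (@fdt). For context, this is a hedged userspace sketch of the usual libfdt call
  sequence; apply_overlay(), merged_size and the error handling are illustrative, only
  fdt_open_into() and fdt_overlay_apply() are real libfdt entry points.

    #include <libfdt.h>
    #include <stdlib.h>

    /* Sketch: copy @fdt into a larger buffer and merge @fdto into it. */
    static void *apply_overlay(const void *fdt, void *fdto, int merged_size)
    {
            void *merged = malloc(merged_size);
            int err;

            if (!merged)
                    return NULL;

            /* The base tree needs spare space for the nodes the overlay adds. */
            err = fdt_open_into(fdt, merged, merged_size);
            if (!err)
                    /* Note: fdt_overlay_apply() may clobber @fdto even on failure. */
                    err = fdt_overlay_apply(merged, fdto);

            if (err) {
                    free(merged);
                    return NULL;
            }
            return merged;
    }
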
/Linux-v6.1/lib/zlib_deflate/

  deftree.c
      13  * Each code tree is stored in a compressed form which is itself
      84  /* The static literal tree. Since the bit lengths are imposed, there is no
      86  * The codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init
      91  /* The static distance tree. (Actually a trivial tree since all codes use
     111  const ct_data *static_tree;  /* static tree or NULL */
     114  int elems;                   /* max number of elements in the tree */
     133  static void pqdownheap (deflate_state *s, ct_data *tree, int k);
     135  static void gen_codes (ct_data *tree, int max_code, ush *bl_count);
     137  static void scan_tree (deflate_state *s, ct_data *tree, int max_code);
     138  static void send_tree (deflate_state *s, ct_data *tree, int max_code);
          [all …]

/Linux-v6.1/lib/

  test_maple_tree.c
       3  * test_maple_tree.c: Test the maple tree API
       7  * Any tests that only require the interface of the tree.
     527  MT_BUG_ON(mas.tree, entry == NULL);   in check_find()
     977  /* Create tree of 1-100 */   in check_ranges()
     985  /* Create tree of 1-200 */   in check_ranges()
     998  /* Create tree of 1-400 */   in check_ranges()
    1026  /* Overwrite multiple levels at the end of the tree (slot 7) */   in check_ranges()
    1090  * 8. Overwrite the whole tree   in check_ranges()
    1091  * 9. Try to overwrite the zero entry of an alloc tree.   in check_ranges()
    1178  /* Cause a 3 child split all the way up the tree. */   in check_ranges()
          [all …]

/Linux-v6.1/net/sched/

  ematch.c
     162  static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,   in tcf_em_get_match() argument
     165  return &tree->matches[index];   in tcf_em_get_match()
     288  * tcf_em_tree_validate - validate ematch config TLV and build ematch tree
     291  * @nla: ematch tree configuration TLV
     292  * @tree: destination ematch tree variable to store the resulting
     293  * ematch tree.
     296  * ematch tree in @tree. The resulting tree must later be copied into
     298  * provide the ematch tree variable of the private classifier data directly,
     304  struct tcf_ematch_tree *tree)   in tcf_em_tree_validate() argument
     312  memset(tree, 0, sizeof(*tree));   in tcf_em_tree_validate()
          [all …]

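  The ematch.c hits include the kernel-doc for tcf_em_tree_validate(), which says a classifier
  parses the ematch TLV into a struct tcf_ematch_tree kept in its private data and later matches
  packets against it. The sketch below shows that life cycle; struct example_filter and the three
  wrapper functions are invented for illustration, while tcf_em_tree_validate(),
  tcf_em_tree_match() and tcf_em_tree_destroy() are the real helpers declared in net/pkt_cls.h.

    #include <net/pkt_cls.h>

    /* Hypothetical classifier private data holding the built ematch tree. */
    struct example_filter {
            struct tcf_ematch_tree ematches;
    };

    static int example_set_ematches(struct tcf_proto *tp, struct nlattr *nla,
                                    struct example_filter *filter)
    {
            /* Parses the TLV and builds the tree in filter->ematches. */
            return tcf_em_tree_validate(tp, nla, &filter->ematches);
    }

    static int example_classify(struct sk_buff *skb, struct example_filter *filter)
    {
            /* Non-zero means the packet matched the configured expression. */
            return tcf_em_tree_match(skb, &filter->ematches, NULL);
    }

    static void example_destroy(struct example_filter *filter)
    {
            tcf_em_tree_destroy(&filter->ematches);
    }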