/Linux-v6.6/net/ceph/ |
D | debugfs.c |
      69    for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {  in osdmap_show()
      92    for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) {  in osdmap_show()
      103   for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) {  in osdmap_show()
      110   for (n = rb_first(&map->pg_upmap); n; n = rb_next(n)) {  in osdmap_show()
      121   for (n = rb_first(&map->pg_upmap_items); n; n = rb_next(n)) {  in osdmap_show()
      160   for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {  in monc_show()
      239   for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {  in dump_requests()
      265   for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {  in dump_linger_requests()
      326   for (n = rb_first(&osd->o_backoffs_by_id); n; n = rb_next(n)) {  in dump_backoffs()
      352   for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {  in osdc_show()
      [all …]
|
D | osd_client.c |
      1201  for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {  in DEFINE_RB_FUNCS()
      1204  for (p = rb_first(&osd->o_requests); p; ) {  in DEFINE_RB_FUNCS()
      1214  for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {  in DEFINE_RB_FUNCS()
      1372  for (n = rb_first(&osd->o_requests); n; ) {  in close_osd()
      1382  for (n = rb_first(&osd->o_linger_requests); n; ) {  in close_osd()
      1423  for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {  in reopen_osd()
      1519  for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {  in have_pool_full()
      1992  rb_entry(rb_first(&osd->o_backoff_mappings),  in DEFINE_RB_FUNCS()
      1997  rb_entry(rb_first(&spg->backoffs),  in DEFINE_RB_FUNCS()
      3464  for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {  in handle_timeout()
      [all …]
|
D | osdmap.c |
      223   rb_entry(rb_first(root), struct crush_name_node, cn_node);  in clear_crush_names()
      269   rb_entry(rb_first(&c->choose_args),  in clear_choose_args()
      762   for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {  in ceph_pg_poolid_by_name()
      1153  rb_entry(rb_first(&map->pg_temp),  in ceph_osdmap_destroy()
      1160  rb_entry(rb_first(&map->primary_temp),  in ceph_osdmap_destroy()
      1167  rb_entry(rb_first(&map->pg_upmap),  in ceph_osdmap_destroy()
      1174  rb_entry(rb_first(&map->pg_upmap_items),  in ceph_osdmap_destroy()
      1181  rb_entry(rb_first(&map->pg_pools),  in ceph_osdmap_destroy()
      2973  struct rb_node *n1 = rb_first(locs1);  in ceph_compare_crush_locs()
      2974  struct rb_node *n2 = rb_first(locs2);  in ceph_compare_crush_locs()
      [all …]
|
/Linux-v6.6/arch/powerpc/kernel/ |
D | eeh_cache.c |
      103   n = rb_first(&cache->rb_root);  in eeh_addr_cache_print()
      218   n = rb_first(&pci_io_addr_cache_root.rb_root);  in __eeh_addr_cache_rmv_dev()
      270   for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {  in eeh_addr_cache_show()
|
/Linux-v6.6/tools/perf/util/ |
D | rb_resort.h |
      96    for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
      128   for (__nd = rb_first(&__name->entries); \
|
D | callchain.c |
      426   n = rb_first(&node->rb_root_in);  in __sort_chain_flat()
      457   n = rb_first(&node->rb_root_in);  in __sort_chain_graph_abs()
      488   n = rb_first(&node->rb_root_in);  in __sort_chain_graph_rel()
      553   n = rb_first(&new->rb_root_in);  in create_child()
      1038  n = rb_first(&src->rb_root_in);  in merge_chain_branch()
      1284  n = rb_first(&node->rb_root_in);  in callchain_node_branch_counts_cumul()
      1489  n = rb_first(&node->rb_root_in);  in free_callchain_node()
      1514  n = rb_first(&node->rb_root_in);  in decay_callchain_node()
      1743  struct rb_node *rb_node = rb_first(root);  in count_callchain_hits()
|
/Linux-v6.6/scripts/gdb/linux/ |
D | rbtree.py |
      13    def rb_first(root):  function
      110   result = rb_first(root)
|
/Linux-v6.6/Documentation/translations/zh_CN/core-api/ |
D | rbtree.rst |
      165   struct rb_node *rb_first(struct rb_root *tree);
      170   To start iterating, call rb_first() or rb_last() with a pointer to the tree root; it will return a pointer to …
      181   for (node = rb_first(&mytree); node; node = rb_next(node))
      189   … the O(logN) call to rb_first(), simply fetching the pointer instead and avoiding a potentially expensive tree iteration.
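      The rbtree.rst lines indexed above describe the in-order iteration API that nearly every hit in this listing uses. As a minimal sketch of that idiom (the mytype/mytree names are illustrative, following the documentation's own example): rb_first() returns the leftmost node, rb_next() advances in sorted order, and rb_entry() recovers the containing structure.

          #include <linux/rbtree.h>

          /* Illustrative container type, as in the rbtree.rst example. */
          struct mytype {
                  struct rb_node node;
                  char *keystring;
          };

          static struct rb_root mytree = RB_ROOT;

          /* In-order walk: rb_first() finds the leftmost node in O(log N),
           * rb_next() then visits the remaining nodes in sorted order. */
          static void print_all_keys(void)
          {
                  struct rb_node *node;

                  for (node = rb_first(&mytree); node; node = rb_next(node))
                          printk("key=%s\n",
                                 rb_entry(node, struct mytype, node)->keystring);
          }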
|
/Linux-v6.6/lib/ |
D | rbtree_test.c |
      197   for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {  in check()
      223   for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {  in check_augmented()
      295   for (node = rb_first(&root.rb_root); node; node = rb_next(node))  in rbtree_test_init()
      309   node = rb_first(&root.rb_root);  in rbtree_test_init()
|
D | rbtree.c |
      466   struct rb_node *rb_first(const struct rb_root *root)  in rb_first() function
      477   EXPORT_SYMBOL(rb_first);
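      The lib/rbtree.c entry above points at the definition of rb_first() itself (exported at line 477). As a sketch of what that function does, not a verbatim copy of the in-tree code: it descends left links from the root to reach the smallest node.

          /* Sketch: return the leftmost (first in sort order) node of the
           * tree, or NULL if the tree is empty. */
          struct rb_node *rb_first(const struct rb_root *root)
          {
                  struct rb_node *n = root->rb_node;

                  if (!n)
                          return NULL;
                  while (n->rb_left)
                          n = n->rb_left;
                  return n;
          }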
|
/Linux-v6.6/drivers/base/regmap/ |
D | regcache-rbtree.c |
      148   for (node = rb_first(&rbtree_ctx->root); node != NULL;  in rbtree_show()
      225   next = rb_first(&rbtree_ctx->root);  in regcache_rbtree_exit()
      478   for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {  in regcache_rbtree_sync()
      520   for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {  in regcache_rbtree_drop()
|
/Linux-v6.6/tools/perf/ui/gtk/ |
D | hists.c |
      101   bool has_single_node = (rb_first(root) == rb_last(root));  in perf_gtk__add_callchain_flat()
      103   for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in perf_gtk__add_callchain_flat()
      165   for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in perf_gtk__add_callchain_folded()
      224   bool has_single_node = (rb_first(root) == rb_last(root));  in perf_gtk__add_callchain_graph()
      226   for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in perf_gtk__add_callchain_graph()
|
/Linux-v6.6/fs/ocfs2/ |
D | reservations.c |
      83    node = rb_first(&resmap->m_reservations);  in ocfs2_dump_resv()
      138   node = rb_first(&resmap->m_reservations);  in ocfs2_check_resmap()
      363   node = rb_first(&resmap->m_reservations);  in ocfs2_find_resv_lhs()
      522   next = rb_first(root);  in __ocfs2_resv_find_window()
|
/Linux-v6.6/kernel/trace/ |
D | trace_stat.c |
      188   node = rb_first(&session->stat_root);  in stat_seq_start()
      203   return rb_first(&session->stat_root);  in stat_seq_next()
|
/Linux-v6.6/fs/proc/ |
D | nommu.c | 86 for (p = rb_first(&nommu_region_tree); p; p = rb_next(p)) in nommu_region_list_start()
|
/Linux-v6.6/drivers/android/ |
D | binder_alloc.c |
      340   for (n = rb_first(&alloc->allocated_buffers); n != NULL;  in debug_low_async_space_locked()
      444   for (n = rb_first(&alloc->allocated_buffers); n != NULL;  in binder_alloc_new_buf_locked()
      453   for (n = rb_first(&alloc->free_buffers); n != NULL;  in binder_alloc_new_buf_locked()
      816   while ((n = rb_first(&alloc->allocated_buffers))) {  in binder_alloc_deferred_release()
      896   for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))  in binder_alloc_print_allocated()
      949   for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))  in binder_alloc_get_allocated_count()
|
/Linux-v6.6/fs/btrfs/ |
D | ref-verify.c |
      242   while ((n = rb_first(&be->roots))) {  in free_block_entry()
      248   while((n = rb_first(&be->refs))) {  in free_block_entry()
      636   for (n = rb_first(&be->refs); n; n = rb_next(n)) {  in dump_block_entry()
      644   for (n = rb_first(&be->roots); n; n = rb_next(n)) {  in dump_block_entry()
      902   while ((n = rb_first(&fs_info->block_tree))) {  in btrfs_free_ref_cache()
|
/Linux-v6.6/net/netfilter/ |
D | nf_conncount.c |
      468   for (node = rb_first(root); node != NULL; node = rb_next(node)) {  in tree_gc_worker()
      483   node = rb_first(root);  in tree_gc_worker()
      575   while ((node = rb_first(r)) != NULL) {  in destroy_tree()
|
D | nft_set_rbtree.c |
      347   first = rb_first(&priv->root);  in __nft_rbtree_insert()
      594   for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {  in nft_rbtree_walk()
      640   for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {  in nft_rbtree_gc()
|
/Linux-v6.6/tools/perf/ui/stdio/ |
D | hist.c |
      125   node = rb_first(root);  in __callchain__fprintf_graph()
      230   node = rb_first(root);  in callchain__fprintf_graph()
      311   struct rb_node *rb_node = rb_first(tree);  in callchain__fprintf_flat()
      362   struct rb_node *rb_node = rb_first(tree);  in callchain__fprintf_folded()
|
/Linux-v6.6/drivers/infiniband/hw/mlx4/ |
D | cm.c |
      549   while (rb_first(sl_id_map)) {  in mlx4_ib_cm_paravirt_clean()
      551   rb_entry(rb_first(sl_id_map),  in mlx4_ib_cm_paravirt_clean()
      560   nd = rb_first(sl_id_map);  in mlx4_ib_cm_paravirt_clean()
|
/Linux-v6.6/tools/perf/ui/browsers/ |
D | map.c | 120 for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) { in map__browse()
|
D | hists.c |
      181   for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {  in callchain_node__count_rows_rb_tree()
      264   for (nd = rb_first(chain); nd; nd = rb_next(nd)) {  in callchain__count_rows()
      330   struct rb_node *nd = rb_first(&node->rb_root);  in callchain_node__init_have_children_rb_tree()
      332   for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {  in callchain_node__init_have_children_rb_tree()
      369   struct rb_node *nd = rb_first(root);  in callchain__init_have_children()
      372   for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in callchain__init_have_children()
      515   for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {  in callchain_node__set_folding_rb_tree()
      556   for (nd = rb_first(chain); nd; nd = rb_next(nd)) {  in callchain__set_folding()
      920   node = rb_first(root);  in hist_browser__show_callchain_flat()
      1024  node = rb_first(root);  in hist_browser__show_callchain_folded()
      [all …]
|
/Linux-v6.6/tools/perf/tests/ |
D | hists_cumulate.c |
      216   for (node = rb_first(root), i = 0;  in do_test()
      238   cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);  in do_test()
|
/Linux-v6.6/drivers/vfio/ |
D | vfio_iommu_type1.c |
      265   for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {  in vfio_dma_populate_bitmap()
      277   for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_iommu_populate_bitmap_full()
      288   for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_dma_bitmap_alloc_all()
      313   for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_dma_bitmap_free_all()
      1217  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_iova_dirty_bitmap()
      1693  n = rb_first(&iommu->dma_list);  in vfio_iommu_replay()
      1770  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_iommu_replay()
      2355  while ((node = rb_first(&iommu->dma_list)))  in vfio_iommu_unmap_unpin_all()
      2363  n = rb_first(&iommu->dma_list);  in vfio_iommu_unmap_unpin_reaccount()
      2370  p = rb_first(&dma->pfn_list);  in vfio_iommu_unmap_unpin_reaccount()
|