/Linux-v5.10/net/ceph/ |
D | debugfs.c |
    69 for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) { in osdmap_show()
    92 for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) { in osdmap_show()
    103 for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) { in osdmap_show()
    110 for (n = rb_first(&map->pg_upmap); n; n = rb_next(n)) { in osdmap_show()
    121 for (n = rb_first(&map->pg_upmap_items); n; n = rb_next(n)) { in osdmap_show()
    160 for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) { in monc_show()
    239 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { in dump_requests()
    265 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) { in dump_linger_requests()
    326 for (n = rb_first(&osd->o_backoffs_by_id); n; n = rb_next(n)) { in dump_backoffs()
    352 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { in osdc_show()
    [all …]
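Nearly all of the debugfs.c hits above use the same read-only idiom: start at the leftmost node with rb_first() and advance with rb_next() until it returns NULL. A minimal sketch of that walk, assuming a made-up struct entry as the payload type (none of the ceph types above are used here):

#include <linux/kernel.h>
#include <linux/rbtree.h>

/* Hypothetical payload type; real users embed struct rb_node in their own struct. */
struct entry {
	struct rb_node node;
	unsigned long key;
};

/* In-order, read-only walk: rb_first() returns the leftmost (smallest) node,
 * rb_next() its in-order successor, and NULL once the tree is exhausted. */
static void dump_tree(struct rb_root *root)
{
	struct rb_node *n;

	for (n = rb_first(root); n; n = rb_next(n)) {
		struct entry *e = rb_entry(n, struct entry, node);

		pr_info("key=%lu\n", e->key);
	}
}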
|
D | osd_client.c |
    1159 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { in DEFINE_RB_FUNCS()
    1162 for (p = rb_first(&osd->o_requests); p; ) { in DEFINE_RB_FUNCS()
    1172 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { in DEFINE_RB_FUNCS()
    1317 for (n = rb_first(&osd->o_requests); n; ) { in close_osd()
    1327 for (n = rb_first(&osd->o_linger_requests); n; ) { in close_osd()
    1368 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { in reopen_osd()
    1460 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { in have_pool_full()
    1933 rb_entry(rb_first(&osd->o_backoff_mappings), in DEFINE_RB_FUNCS()
    1938 rb_entry(rb_first(&spg->backoffs), in DEFINE_RB_FUNCS()
    3349 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { in handle_timeout()
    [all …]
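Several of the osd_client.c loops above leave the third clause of the for-statement empty; the loop body advances with rb_next() before the current entry can be unlinked from the tree. A rough sketch of that pattern, with a hypothetical struct item standing in for the real request type:

#include <linux/rbtree.h>
#include <linux/slab.h>

/* Hypothetical element type, not from osd_client.c. */
struct item {
	struct rb_node node;
};

/* Erase entries while walking: take the successor *first*, since the
 * current node can no longer be passed to rb_next() once it is unlinked. */
static void remove_all(struct rb_root *root)
{
	struct rb_node *n;

	for (n = rb_first(root); n; ) {
		struct item *it = rb_entry(n, struct item, node);

		n = rb_next(n);			/* advance before unlinking */
		rb_erase(&it->node, root);
		kfree(it);
	}
}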
|
/Linux-v5.10/fs/proc/ |
D | task_nommu.c |
    29 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { in task_mem()
    90 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { in task_vsize()
    108 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { in task_statm()
    220 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) in m_start()
|
D | nommu.c | 87 for (p = rb_first(&nommu_region_tree); p; p = rb_next(p)) in nommu_region_list_start()
|
/Linux-v5.10/arch/powerpc/kernel/ |
D | eeh_cache.c |
    103 n = rb_first(&cache->rb_root); in eeh_addr_cache_print()
    218 n = rb_first(&pci_io_addr_cache_root.rb_root); in __eeh_addr_cache_rmv_dev()
    270 for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) { in eeh_addr_cache_show()
|
/Linux-v5.10/tools/perf/util/ |
D | rb_resort.h |
    96 for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
    128 for (__nd = rb_first(&__name->entries); \
|
D | callchain.c |
    419 n = rb_first(&node->rb_root_in); in __sort_chain_flat()
    450 n = rb_first(&node->rb_root_in); in __sort_chain_graph_abs()
    481 n = rb_first(&node->rb_root_in); in __sort_chain_graph_rel()
    546 n = rb_first(&new->rb_root_in); in create_child()
    1020 n = rb_first(&src->rb_root_in); in merge_chain_branch()
    1261 n = rb_first(&node->rb_root_in); in callchain_node_branch_counts_cumul()
    1472 n = rb_first(&node->rb_root_in); in free_callchain_node()
    1497 n = rb_first(&node->rb_root_in); in decay_callchain_node()
    1674 struct rb_node *rb_node = rb_first(root); in count_callchain_hits()
|
D | env.c |
    137 next = rb_first(root); in perf_env__purge_bpf()
    151 next = rb_first(root); in perf_env__purge_bpf()
|
/Linux-v5.10/scripts/gdb/linux/ |
D | rbtree.py |
    13 def rb_first(root): function
    110 result = rb_first(root)
|
/Linux-v5.10/lib/ |
D | rbtree_test.c |
    197 for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) { in check()
    223 for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) { in check_augmented()
    295 for (node = rb_first(&root.rb_root); node; node = rb_next(node)) in rbtree_test_init()
    309 node = rb_first(&root.rb_root); in rbtree_test_init()
|
/Linux-v5.10/drivers/base/regmap/ |
D | regcache-rbtree.c |
    148 for (node = rb_first(&rbtree_ctx->root); node != NULL; in rbtree_show()
    225 next = rb_first(&rbtree_ctx->root); in regcache_rbtree_exit()
    476 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { in regcache_rbtree_sync()
    516 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { in regcache_rbtree_drop()
|
/Linux-v5.10/tools/perf/ui/gtk/ |
D | hists.c |
    101 bool has_single_node = (rb_first(root) == rb_last(root)); in perf_gtk__add_callchain_flat()
    103 for (nd = rb_first(root); nd; nd = rb_next(nd)) { in perf_gtk__add_callchain_flat()
    165 for (nd = rb_first(root); nd; nd = rb_next(nd)) { in perf_gtk__add_callchain_folded()
    224 bool has_single_node = (rb_first(root) == rb_last(root)); in perf_gtk__add_callchain_graph()
    226 for (nd = rb_first(root); nd; nd = rb_next(nd)) { in perf_gtk__add_callchain_graph()
|
/Linux-v5.10/fs/ocfs2/ |
D | reservations.c |
    85 node = rb_first(&resmap->m_reservations); in ocfs2_dump_resv()
    140 node = rb_first(&resmap->m_reservations); in ocfs2_check_resmap()
    367 node = rb_first(&resmap->m_reservations); in ocfs2_find_resv_lhs()
    526 next = rb_first(root); in __ocfs2_resv_find_window()
|
/Linux-v5.10/drivers/android/ |
D | binder_alloc.c |
    355 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in debug_low_async_space_locked()
    452 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in binder_alloc_new_buf_locked()
    461 for (n = rb_first(&alloc->free_buffers); n != NULL; in binder_alloc_new_buf_locked()
    799 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
    875 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_print_allocated()
    928 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
|
/Linux-v5.10/kernel/trace/ |
D | trace_stat.c |
    188 node = rb_first(&session->stat_root); in stat_seq_start()
    203 return rb_first(&session->stat_root); in stat_seq_next()
|
/Linux-v5.10/net/netfilter/ |
D | nf_conncount.c |
    457 for (node = rb_first(root); node != NULL; node = rb_next(node)) { in tree_gc_worker()
    472 node = rb_first(root); in tree_gc_worker()
    564 while ((node = rb_first(r)) != NULL) { in destroy_tree()
|
/Linux-v5.10/fs/btrfs/ |
D | ref-verify.c |
    239 while ((n = rb_first(&be->roots))) { in free_block_entry()
    245 while((n = rb_first(&be->refs))) { in free_block_entry()
    645 for (n = rb_first(&be->refs); n; n = rb_next(n)) { in dump_block_entry()
    653 for (n = rb_first(&be->roots); n; n = rb_next(n)) { in dump_block_entry()
    915 while ((n = rb_first(&fs_info->block_tree))) { in btrfs_free_ref_cache()
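free_block_entry() and btrfs_free_ref_cache() above, like binder_alloc_deferred_release() and destroy_tree() earlier in this list, use the other common teardown form: keep popping the leftmost node until rb_first() returns NULL. A small sketch under the same illustrative assumptions as before (struct item is hypothetical):

#include <linux/rbtree.h>
#include <linux/slab.h>

struct item {
	struct rb_node node;
};

/* Teardown by repeatedly popping the leftmost node until the tree is empty. */
static void free_tree(struct rb_root *root)
{
	struct rb_node *n;

	while ((n = rb_first(root))) {
		struct item *it = rb_entry(n, struct item, node);

		rb_erase(n, root);
		kfree(it);
	}
}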
|
/Linux-v5.10/tools/perf/ui/stdio/ |
D | hist.c |
    125 node = rb_first(root); in __callchain__fprintf_graph()
    230 node = rb_first(root); in callchain__fprintf_graph()
    311 struct rb_node *rb_node = rb_first(tree); in callchain__fprintf_flat()
    362 struct rb_node *rb_node = rb_first(tree); in callchain__fprintf_folded()
|
/Linux-v5.10/tools/include/linux/ |
D | rbtree.h | 55 extern struct rb_node *rb_first(const struct rb_root *);
|
/Linux-v5.10/tools/perf/ui/browsers/ |
D | map.c | 120 for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) { in map__browse()
|
D | hists.c |
    181 for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) { in callchain_node__count_rows_rb_tree()
    264 for (nd = rb_first(chain); nd; nd = rb_next(nd)) { in callchain__count_rows()
    330 struct rb_node *nd = rb_first(&node->rb_root); in callchain_node__init_have_children_rb_tree()
    332 for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) { in callchain_node__init_have_children_rb_tree()
    369 struct rb_node *nd = rb_first(root); in callchain__init_have_children()
    372 for (nd = rb_first(root); nd; nd = rb_next(nd)) { in callchain__init_have_children()
    520 for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) { in callchain_node__set_folding_rb_tree()
    561 for (nd = rb_first(chain); nd; nd = rb_next(nd)) { in callchain__set_folding()
    929 node = rb_first(root); in hist_browser__show_callchain_flat()
    1033 node = rb_first(root); in hist_browser__show_callchain_folded()
    [all …]
|
/Linux-v5.10/drivers/infiniband/hw/mlx4/ |
D | cm.c |
    548 while (rb_first(sl_id_map)) { in mlx4_ib_cm_paravirt_clean()
    550 rb_entry(rb_first(sl_id_map), in mlx4_ib_cm_paravirt_clean()
    559 nd = rb_first(sl_id_map); in mlx4_ib_cm_paravirt_clean()
|
/Linux-v5.10/include/linux/ |
D | rbtree.h | 56 extern struct rb_node *rb_first(const struct rb_root *);
|
/Linux-v5.10/drivers/vfio/ |
D | vfio_iommu_type1.c |
    232 for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) { in vfio_dma_populate_bitmap()
    243 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { in vfio_dma_bitmap_alloc_all()
    268 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { in vfio_dma_bitmap_free_all()
    1038 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { in vfio_iova_dirty_bitmap()
    1438 n = rb_first(&iommu->dma_list); in vfio_iommu_replay()
    1512 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { in vfio_iommu_replay()
    2214 while ((node = rb_first(&iommu->dma_list))) in vfio_iommu_unmap_unpin_all()
    2222 n = rb_first(&iommu->dma_list); in vfio_iommu_unmap_unpin_reaccount()
    2229 p = rb_first(&dma->pfn_list); in vfio_iommu_unmap_unpin_reaccount()
    2245 n = rb_first(&iommu->dma_list); in vfio_sanity_check_pfn_list()
|
/Linux-v5.10/tools/perf/tests/ |
D | hists_cumulate.c |
    203 for (node = rb_first(root), i = 0; in do_test()
    225 cnode = rb_entry(rb_first(root), struct callchain_node, rb_node); in do_test()
|