| /Linux-v5.4/tools/perf/util/ |
| D | rblist.c |
|     112  struct rb_node *pos, *next = rb_first_cached(&rblist->entries); in rblist__exit()
|     133  for (node = rb_first_cached(&rblist->entries); node; in rblist__entry()
|
| D | hist.c |
|     226   struct rb_node *next = rb_first_cached(&hists->entries); in hists__output_recalc_col_len()
|     321   struct rb_node *node = rb_first_cached(&he->hroot_out); in hists__decay_entry()
|     362   struct rb_node *next = rb_first_cached(&hists->entries); in hists__decay_entries()
|     378   struct rb_node *next = rb_first_cached(&hists->entries); in hists__delete_entries()
|     391   struct rb_node *next = rb_first_cached(&hists->entries); in hists__get_entry()
|     1601  next = rb_first_cached(root); in hists__collapse_resort()
|     1680  node = rb_first_cached(&hists->entries); in hierarchy_recalc_total_periods()
|     1742  node = rb_first_cached(root_in); in hists__hierarchy_output_resort()
|     1865  next = rb_first_cached(root); in output_resort()
|     1944  node = rb_first_cached(&he->hroot_out); in __rb_hierarchy_next()
|     [all …]
|
| D | strlist.h | 60 struct rb_node *rn = rb_first_cached(&slist->rblist.entries); in strlist__first()
|
| D | intlist.h | 48 struct rb_node *rn = rb_first_cached(&ilist->rblist.entries); in intlist__first()
|
| D | symbol_fprintf.c | 69 for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) { in dso__fprintf_symbols_by_name()
|
| D | build-id.c |
|     372  for (nd = rb_first_cached(&session->machines.guests); nd; in perf_session__write_buildid_table()
|     406  for (nd = rb_first_cached(&session->machines.guests); nd; in dsos__hit_all()
|     860  for (nd = rb_first_cached(&session->machines.guests); nd; in perf_session__cache_build_ids()
|     878  for (nd = rb_first_cached(&session->machines.guests); nd; in perf_session__read_build_ids()
|
| D | machine.c |
|     196   nd = rb_first_cached(&threads->entries); in machine__delete_threads()
|     299   for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { in machines__set_comm_exec()
|     371   for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { in machines__process_guests()
|     384   for (node = rb_first_cached(&machines->guests); node; in machines__set_id_hdr_size()
|     833   for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { in machines__fprintf_dsos()
|     853   for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { in machines__fprintf_dsos_buildid()
|     893   for (nd = rb_first_cached(&threads->entries); nd; in machine__fprintf()
|     1203  struct rb_node *next = rb_first_cached(&machines->guests); in machines__destroy_kernel_maps()
|     2575  for (nd = rb_first_cached(&threads->entries); nd; in machine__for_each_thread()
|     2603  for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { in machines__for_each_thread()
|
| D | symbol.c |
|     193  nd = rb_first_cached(symbols); in symbols__fixup_duplicate()
|     221  struct rb_node *nd, *prevnd = rb_first_cached(symbols); in symbols__fixup_end()
|     307  struct rb_node *next = rb_first_cached(symbols); in symbols__delete()
|     381  struct rb_node *n = rb_first_cached(symbols); in symbols__first()
|     437  for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) { in symbols__sort_by_name()
|     712  struct rb_node *next = rb_first_cached(root); in map_groups__split_kallsyms_for_kcore()
|     766  struct rb_node *next = rb_first_cached(root); in map_groups__split_kallsyms()
|
| D | srcline.c |
|     652  struct rb_node *next = rb_first_cached(tree); in srcline__tree_delete()
|     736  struct rb_node *next = rb_first_cached(tree); in inlines__tree_delete()
|
| D | symbol.h | 70 for (nd = rb_first_cached(symbols); \
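The trailing backslash on line 70 shows that this use of rb_first_cached() sits inside a multi-line iteration macro in symbol.h. A minimal sketch of that kind of wrapper is below; the macro name, the sym_example type, and its fields are hypothetical and may not match the real header.

#include <linux/rbtree.h>

/* Hypothetical symbol-like type; the real struct in tools/perf/util/symbol.h
 * differs, this only shows where the rb_node is embedded. */
struct sym_example {
	struct rb_node	rb_node;
	u64		start;
};

/* Hypothetical for-each wrapper: start at the cached leftmost node and walk
 * the tree in order, resolving each rb_node back to its containing entry. */
#define sym_example__for_each(symbols, pos, nd)				   \
	for (nd = rb_first_cached(symbols);				   \
	     nd && (pos = rb_entry(nd, struct sym_example, rb_node), 1);   \
	     nd = rb_next(nd))

A caller declares both a struct rb_node *nd cursor and a struct sym_example *pos and uses the macro like an ordinary for loop over the tree.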
|
| /Linux-v5.4/tools/perf/tests/ |
| D | hists_output.c |
|     107  node = rb_first_cached(root_out); in del_hist_entries()
|     166  node = rb_first_cached(root); in test1()
|     266  node = rb_first_cached(root); in test2()
|     320  node = rb_first_cached(root); in test3()
|     398  node = rb_first_cached(root); in test4()
|     501  node = rb_first_cached(root); in test5()
|
| D | hists_common.c |
|     175  node = rb_first_cached(root); in print_hists_in()
|     202  node = rb_first_cached(root); in print_hists_out()
|
| D | hists_link.c |
|     155  node = rb_first_cached(root); in __validate_match()
|     207  node = rb_first_cached(root); in __validate_link()
|
| /Linux-v5.4/include/linux/ |
| D | timerqueue.h | 36 struct rb_node *leftmost = rb_first_cached(&head->rb_root); in timerqueue_getnext()
|
| D | rbtree.h | 133 #define rb_first_cached(root) (root)->rb_leftmost macro
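rb_first_cached() just reads the rb_leftmost pointer that struct rb_root_cached keeps next to the ordinary root, so fetching the minimum element is O(1); the pointer is kept current by the _cached insert and erase helpers. A minimal sketch of that pattern against the v5.4 <linux/rbtree.h> API follows; struct item, its key field, and the function names are illustrative, not taken from any file in this listing.

#include <linux/rbtree.h>

/* Illustrative node type, not from any file listed here. */
struct item {
	struct rb_node	node;
	u64		key;
};

static struct rb_root_cached items = RB_ROOT_CACHED;

/* Insert in key order, telling rb_insert_color_cached() whether the new node
 * ended up leftmost so it can keep rb_leftmost up to date. */
static void item_insert(struct item *new)
{
	struct rb_node **link = &items.rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct item *cur = rb_entry(*link, struct item, node);

		parent = *link;
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color_cached(&new->node, &items, leftmost);
}

/* O(1) peek at the smallest key: the same move made by callers in this
 * listing such as timerqueue_getnext() and rt_mutex_top_waiter(). */
static struct item *item_peek_min(void)
{
	struct rb_node *leftmost = rb_first_cached(&items);

	return leftmost ? rb_entry(leftmost, struct item, node) : NULL;
}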
|
| /Linux-v5.4/kernel/locking/ |
| D | rtmutex_common.h | 55 struct rb_node *leftmost = rb_first_cached(&lock->waiters); in rt_mutex_top_waiter()
|
| /Linux-v5.4/tools/include/linux/ |
| D | rbtree.h | 127 #define rb_first_cached(root) (root)->rb_leftmost macro
|
| /Linux-v5.4/drivers/gpu/drm/i915/ |
| D | i915_scheduler.c |
|     51  GEM_BUG_ON(rb_first_cached(&execlists->queue) != in assert_priolists()
|     55  for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { in assert_priolists()
|
| /Linux-v5.4/drivers/gpu/drm/i915/gt/ |
| D | intel_lrc.c |
|     308   rb = rb_first_cached(&execlists->queue); in queue_prio()
|     1049  for (rb = rb_first_cached(&execlists->virtual); rb; ) { in execlists_dequeue()
|     1057  rb = rb_first_cached(&execlists->virtual); in execlists_dequeue()
|     1177  rb = rb_first_cached(&execlists->virtual); in execlists_dequeue()
|     1257  rb = rb_first_cached(&execlists->virtual); in execlists_dequeue()
|     1266  while ((rb = rb_first_cached(&execlists->queue))) { in execlists_dequeue()
|     2589  while ((rb = rb_first_cached(&execlists->queue))) { in execlists_cancel_requests()
|     2603  while ((rb = rb_first_cached(&execlists->virtual))) { in execlists_cancel_requests()
|     3578  first = rb_first_cached(&sibling->execlists.virtual) == in virtual_submission_tasklet()
|     3939  for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { in intel_execlists_show_requests()
|     [all …]
|
| /Linux-v5.4/net/sched/ |
| D | sch_etf.c |
|     113  p = rb_first_cached(&q->head); in etf_peek_timesortedlist()
|     422  struct rb_node *p = rb_first_cached(&q->head); in timesortedlist_clear()
|
| /Linux-v5.4/fs/ |
| D | eventpoll.c |
|     834   for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { in ep_free()
|     850   while ((rbp = rb_first_cached(&ep->rbr)) != NULL) { in ep_free()
|     953   for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { in ep_show_fdinfo()
|     1078  for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { in ep_find_tfd()
|     1969  for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { in ep_loop_check_proc()
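ep_free() above shows the two traversal idioms that recur throughout this listing: a read-only walk that starts at rb_first_cached() and advances with rb_next(), and a destructive drain that repeatedly takes the re-cached leftmost node and erases it until the tree is empty. A self-contained sketch of both follows, using a hypothetical struct item rather than eventpoll's struct epitem.

#include <linux/rbtree.h>
#include <linux/slab.h>

struct item {
	struct rb_node	node;
	u64		key;
};

/* Read-only walk: nothing is modified, so the cached leftmost stays valid
 * and rb_next() steps through the tree in order. */
static u64 items_sum(struct rb_root_cached *root)
{
	struct rb_node *nd;
	u64 sum = 0;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd))
		sum += rb_entry(nd, struct item, node)->key;

	return sum;
}

/* Destructive drain: keep taking the current leftmost node, unlink it with
 * rb_erase_cached() (which updates rb_leftmost), and free it. */
static void items_clear(struct rb_root_cached *root)
{
	struct rb_node *nd;

	while ((nd = rb_first_cached(root)) != NULL) {
		struct item *it = rb_entry(nd, struct item, node);

		rb_erase_cached(nd, root);
		kfree(it);
	}
}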
|
| /Linux-v5.4/tools/perf/ui/gtk/ |
| D | hists.c |
|     358  for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) { in perf_gtk__show_hists()
|     420  for (node = rb_first_cached(root); node; node = rb_next(node)) { in perf_gtk__add_hierarchy_entries()
|
| /Linux-v5.4/drivers/infiniband/hw/hfi1/ |
| D | mmu_rb.c | 148 while ((node = rb_first_cached(&handler->root))) { in hfi1_mmu_rb_unregister()
|
| /Linux-v5.4/tools/perf/ |
| D | builtin-diff.c |
|     521  next = rb_first_cached(root); in hists__baseline_only()
|     675  struct rb_node *next = rb_first_cached(root); in get_block_pair()
|     707  struct rb_node *next = rb_first_cached(root); in block_hists_match()
|     740  next = rb_first_cached(root); in hists__precompute()
|
| /Linux-v5.4/tools/perf/ui/browsers/ |
| D | hists.c |
|     64    for (nd = rb_first_cached(&hists->entries); in hist_browser__get_folding()
|     282   node = rb_first_cached(&he->hroot_out); in hierarchy_count_rows()
|     523   for (nd = rb_first_cached(&he->hroot_out); nd; nd = rb_next(nd)) { in hierarchy_set_folding()
|     581   nd = rb_first_cached(&browser->hists->entries); in __hist_browser__set_folding()
|     1759  browser->top = rb_first_cached(&hb->hists->entries); in ui_browser__hists_init_top()
|     2765  struct rb_node *nd = rb_first_cached(&hb->hists->entries); in hist_browser__update_nr_entries()
|     2785  struct rb_node *nd = rb_first_cached(&hb->hists->entries); in hist_browser__update_percent_limit()
|