Lines Matching refs:ret_stack
68 if (!current->ret_stack) in ftrace_push_return_trace()
87 current->ret_stack[index].ret = ret; in ftrace_push_return_trace()
88 current->ret_stack[index].func = func; in ftrace_push_return_trace()
89 current->ret_stack[index].calltime = calltime; in ftrace_push_return_trace()
91 current->ret_stack[index].fp = frame_pointer; in ftrace_push_return_trace()
94 current->ret_stack[index].retp = retp; in ftrace_push_return_trace()
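
The stores above (kernel lines 68-94) fill one slot of the per-task shadow return stack when a traced function is entered: the original return address, the traced function, an entry timestamp, the frame pointer, and the location of the return address on the real stack. Below is a minimal userspace sketch of that push; the field names follow the listing, but struct task, curr_ret_stack, RET_STACK_DEPTH and the error handling are stand-ins for illustration, not the kernel's definitions.

    /* Userspace sketch of the push path; types and limits are stand-ins,
     * not kernel code. */
    #include <stdio.h>

    #define RET_STACK_DEPTH 50                        /* stand-in for FTRACE_RETFUNC_DEPTH */

    struct ftrace_ret_stack {
            unsigned long ret, func, fp, retp;        /* saved addresses and frame pointer */
            unsigned long long calltime;              /* timestamp taken at entry */
    };

    struct task {
            struct ftrace_ret_stack *ret_stack;       /* shadow stack, NULL when unallocated */
            int curr_ret_stack;                       /* top-of-stack index, -1 when empty */
    };

    /* Record one function entry; refuse if the shadow stack is missing or full. */
    int push_return_trace(struct task *t, unsigned long ret, unsigned long func,
                          unsigned long long calltime, unsigned long frame_pointer,
                          unsigned long retp)
    {
            int index;

            if (!t->ret_stack)
                    return -1;
            if (t->curr_ret_stack + 1 >= RET_STACK_DEPTH)
                    return -1;

            index = ++t->curr_ret_stack;
            t->ret_stack[index].ret = ret;
            t->ret_stack[index].func = func;
            t->ret_stack[index].calltime = calltime;
            t->ret_stack[index].fp = frame_pointer;
            t->ret_stack[index].retp = retp;
            return 0;
    }

    int main(void)
    {
            struct ftrace_ret_stack stack[RET_STACK_DEPTH];
            struct task t = { .ret_stack = stack, .curr_ret_stack = -1 };

            push_return_trace(&t, 0x1000, 0x2000, 42, 0x7fff0000, 0x7fff0008);
            printf("top entry: func=%#lx ret=%#lx\n",
                   t.ret_stack[t.curr_ret_stack].func,
                   t.ret_stack[t.curr_ret_stack].ret);
            return 0;
    }
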
177 if (unlikely(current->ret_stack[index].fp != frame_pointer)) { in ftrace_pop_return_trace()
181 current->ret_stack[index].fp, in ftrace_pop_return_trace()
183 (void *)current->ret_stack[index].func, in ftrace_pop_return_trace()
184 current->ret_stack[index].ret); in ftrace_pop_return_trace()
190 *ret = current->ret_stack[index].ret; in ftrace_pop_return_trace()
191 trace->func = current->ret_stack[index].func; in ftrace_pop_return_trace()
192 trace->calltime = current->ret_stack[index].calltime; in ftrace_pop_return_trace()
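
The pop side (kernel lines 177-192) mirrors the push: before handing back the saved return address, it compares the frame pointer recorded at entry with the one seen at exit, and on a mismatch it warns and stops graph tracing, since returning through a stale entry would crash the task. A self-contained sketch of that check follows; the types repeat the stand-ins from the push sketch, and ret_trace is a stand-in for the kernel's trace record, not its actual type.

    /* Userspace sketch of the pop-time frame-pointer check. */
    #include <stdio.h>

    struct ftrace_ret_stack {
            unsigned long ret, func, fp, retp;
            unsigned long long calltime;
    };

    struct task {
            struct ftrace_ret_stack *ret_stack;
            int curr_ret_stack;
    };

    struct ret_trace {                                /* stand-in for the kernel's trace record */
            unsigned long func;
            unsigned long long calltime;
    };

    /* Pop the top entry: verify the recorded frame pointer, then return the
     * original return address and fill in the trace record. */
    int pop_return_trace(struct task *t, struct ret_trace *trace,
                         unsigned long *ret, unsigned long frame_pointer)
    {
            int index = t->curr_ret_stack;

            if (!t->ret_stack || index < 0)
                    return -1;

            if (t->ret_stack[index].fp != frame_pointer) {
                    /* The kernel WARNs here and shuts graph tracing down; the
                     * sketch just reports the corruption and refuses to pop. */
                    fprintf(stderr, "fp mismatch: saved=%#lx now=%#lx func=%#lx ret=%#lx\n",
                            t->ret_stack[index].fp, frame_pointer,
                            t->ret_stack[index].func, t->ret_stack[index].ret);
                    return -1;
            }

            *ret = t->ret_stack[index].ret;
            trace->func = t->ret_stack[index].func;
            trace->calltime = t->ret_stack[index].calltime;
            t->curr_ret_stack--;
            return 0;
    }
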
275 return &task->ret_stack[idx]; in ftrace_graph_get_ret_stack()
309 if (task->ret_stack[i].retp == retp) in ftrace_graph_ret_addr()
310 return task->ret_stack[i].ret; in ftrace_graph_ret_addr()
325 if (!task->ret_stack || task_idx < *idx) in ftrace_graph_ret_addr()
331 return task->ret_stack[task_idx].ret; in ftrace_graph_ret_addr()
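
Kernel lines 309-331 serve stack unwinders: when an unwinder hits the graph tracer's trampoline instead of a real return address, ftrace_graph_ret_addr() walks the shadow stack to recover the original address, either by matching the stored retp against the stack slot being unwound or, in the fallback configuration, by index. Below is a sketch of the retp-matching variant only; the trampoline address is passed in explicitly as an assumption rather than taken from the kernel symbol, and the types are the same stand-ins as above.

    /* Userspace sketch of return-address fixup for an unwinder. */
    struct ftrace_ret_stack {
            unsigned long ret, func, fp, retp;
            unsigned long long calltime;
    };

    struct task {
            struct ftrace_ret_stack *ret_stack;
            int curr_ret_stack;
    };

    /* If 'addr' is the trampoline, find the shadow-stack entry whose saved
     * return-address location matches 'retp' and give back the real address. */
    unsigned long graph_ret_addr(const struct task *t, unsigned long addr,
                                 unsigned long return_to_handler_addr,
                                 unsigned long retp)
    {
            int i;

            if (addr != return_to_handler_addr || !t->ret_stack)
                    return addr;                      /* nothing to translate */

            for (i = t->curr_ret_stack; i >= 0; i--)
                    if (t->ret_stack[i].retp == retp)
                            return t->ret_stack[i].ret;

            return addr;                              /* no match: leave it alone */
    }
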
397 if (t->ret_stack == NULL) { in alloc_retstack_tasklist()
404 t->ret_stack = ret_stack_list[start++]; in alloc_retstack_tasklist()
445 next->ret_stack[index].calltime += timestamp; in ftrace_graph_probe_sched_switch()
491 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) in graph_init_task() argument
498 t->ret_stack = ret_stack; in graph_init_task()
513 if (t->ret_stack) in ftrace_graph_init_idle_task()
514 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); in ftrace_graph_init_idle_task()
517 struct ftrace_ret_stack *ret_stack; in ftrace_graph_init_idle_task() local
519 ret_stack = per_cpu(idle_ret_stack, cpu); in ftrace_graph_init_idle_task()
520 if (!ret_stack) { in ftrace_graph_init_idle_task()
521 ret_stack = in ftrace_graph_init_idle_task()
525 if (!ret_stack) in ftrace_graph_init_idle_task()
527 per_cpu(idle_ret_stack, cpu) = ret_stack; in ftrace_graph_init_idle_task()
529 graph_init_task(t, ret_stack); in ftrace_graph_init_idle_task()
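
The idle-task path above (kernel lines 513-529) does not allocate a fresh buffer every time: each CPU's idle task gets one ret_stack that is allocated lazily on first use, stored in per_cpu(idle_ret_stack, cpu), and reused when graph tracing is started again later. A small userspace sketch of that lazy per-CPU cache, with a plain fixed-size array standing in for the kernel's per-CPU storage:

    /* Userspace sketch of the lazy per-CPU cache for idle shadow stacks. */
    #include <stdlib.h>

    #define NR_SKETCH_CPUS  8                         /* illustration only */
    #define RET_STACK_DEPTH 50                        /* stand-in for FTRACE_RETFUNC_DEPTH */

    struct ftrace_ret_stack {
            unsigned long ret, func, fp, retp;
            unsigned long long calltime;
    };

    static struct ftrace_ret_stack *idle_ret_stack[NR_SKETCH_CPUS];

    /* Allocate the idle shadow stack for 'cpu' on first use, reuse it after. */
    struct ftrace_ret_stack *get_idle_ret_stack(int cpu)
    {
            if (!idle_ret_stack[cpu])
                    idle_ret_stack[cpu] = calloc(RET_STACK_DEPTH,
                                                 sizeof(struct ftrace_ret_stack));
            return idle_ret_stack[cpu];               /* NULL if the allocation failed */
    }
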
537 t->ret_stack = NULL; in ftrace_graph_init_task()
542 struct ftrace_ret_stack *ret_stack; in ftrace_graph_init_task() local
544 ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH, in ftrace_graph_init_task()
547 if (!ret_stack) in ftrace_graph_init_task()
549 graph_init_task(t, ret_stack); in ftrace_graph_init_task()
555 struct ftrace_ret_stack *ret_stack = t->ret_stack; in ftrace_graph_exit_task() local
557 t->ret_stack = NULL; in ftrace_graph_exit_task()
561 kfree(ret_stack); in ftrace_graph_exit_task()
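
Kernel lines 537-561 show the lifecycle for ordinary tasks: ftrace_graph_init_task() clears the pointer first, allocates FTRACE_RETFUNC_DEPTH entries with kmalloc_array(), and only then publishes the buffer, while ftrace_graph_exit_task() clears the pointer before freeing, so code that tests t->ret_stack sees NULL rather than a dangling buffer. A userspace sketch of that ordering, with calloc()/free() standing in for the kernel allocators and the same stand-in task type as above:

    /* Userspace sketch of the init/exit ordering for a task's shadow stack. */
    #include <stdlib.h>

    #define RET_STACK_DEPTH 50                        /* stand-in for FTRACE_RETFUNC_DEPTH */

    struct ftrace_ret_stack {
            unsigned long ret, func, fp, retp;
            unsigned long long calltime;
    };

    struct task {
            struct ftrace_ret_stack *ret_stack;
            int curr_ret_stack;
    };

    int graph_init_task(struct task *t)
    {
            struct ftrace_ret_stack *ret_stack;

            t->ret_stack = NULL;                      /* tracing stays off until the buffer exists */
            t->curr_ret_stack = -1;

            ret_stack = calloc(RET_STACK_DEPTH, sizeof(*ret_stack));
            if (!ret_stack)
                    return -1;

            t->ret_stack = ret_stack;                 /* publish only once fully set up */
            return 0;
    }

    void graph_exit_task(struct task *t)
    {
            struct ftrace_ret_stack *ret_stack = t->ret_stack;

            t->ret_stack = NULL;   /* unpublish first (the kernel puts a compiler barrier here) */
            free(ret_stack);       /* then release the memory */
    }
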
579 if (!idle_task(cpu)->ret_stack) in start_graph_tracing()