| /Linux-v5.4/tools/perf/util/ | 
| D | thread.c | 22 int thread__init_map_groups(struct thread *thread, struct machine *machine)  in thread__init_map_groups()  argument
 24 	pid_t pid = thread->pid_;  in thread__init_map_groups()
 26 	if (pid == thread->tid || pid == -1) {  in thread__init_map_groups()
 27 		thread->mg = map_groups__new(machine);  in thread__init_map_groups()
 29 		struct thread *leader = __machine__findnew_thread(machine, pid, pid);  in thread__init_map_groups()
 31 			thread->mg = map_groups__get(leader->mg);  in thread__init_map_groups()
 36 	return thread->mg ? 0 : -1;  in thread__init_map_groups()
 39 struct thread *thread__new(pid_t pid, pid_t tid)  in thread__new()
 43 	struct thread *thread = zalloc(sizeof(*thread));  in thread__new()  local
 45 	if (thread != NULL) {  in thread__new()
 [all …]
 
 | 
| D | thread.h | 23 struct thread {  struct
 55 struct thread *thread__new(pid_t pid, pid_t tid);  argument
 56 int thread__init_map_groups(struct thread *thread, struct machine *machine);
 57 void thread__delete(struct thread *thread);
 59 struct thread *thread__get(struct thread *thread);
 60 void thread__put(struct thread *thread);
 62 static inline void __thread__zput(struct thread **thread)  in __thread__zput()  argument
 64 	thread__put(*thread);  in __thread__zput()
 65 	*thread = NULL;  in __thread__zput()
 68 #define thread__zput(thread) __thread__zput(&thread)  argument
 [all …]
 
 | 
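The thread.h hit above shows perf's put-and-clear helper in full: __thread__zput() drops the reference and nulls the caller's pointer, and thread__zput() wraps it by taking the pointer's address. Below is a self-contained sketch of that same "zput" pattern using a toy refcounted type; the obj names and the refcount handling are illustrative stand-ins, not perf's actual implementation.

```c
/*
 * Illustration of the put-and-clear ("zput") pattern visible in thread.h
 * above.  The refcounted type and its get/put helpers are simplified
 * stand-ins for perf's struct thread.
 */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcnt;
};

static struct obj *obj__get(struct obj *o)
{
	if (o)
		o->refcnt++;
	return o;
}

static void obj__put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

/* Drop the reference and clear the caller's pointer in one step. */
static inline void __obj__zput(struct obj **o)
{
	obj__put(*o);
	*o = NULL;
}

#define obj__zput(o) __obj__zput(&(o))

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcnt = 1;
	obj__get(o);	/* refcnt == 2 */
	obj__put(o);	/* refcnt == 1 */
	obj__zput(o);	/* refcnt hits 0, object freed, o set to NULL */
	printf("o is %s\n", o ? "set" : "NULL");
	return 0;
}
```

The point of the macro wrapper is that callers cannot forget to clear their stale pointer after the final put.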
| D | thread-stack.c | 105 static inline bool thread_stack__per_cpu(struct thread *thread)  in thread_stack__per_cpu()  argument
 107 	return !(thread->tid || thread->pid_);  in thread_stack__per_cpu()
 128 static int thread_stack__init(struct thread_stack *ts, struct thread *thread,  in thread_stack__init()  argument
 137 	if (thread->mg && thread->mg->machine) {  in thread_stack__init()
 138 		struct machine *machine = thread->mg->machine;  in thread_stack__init()
 152 static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,  in thread_stack__new()  argument
 155 	struct thread_stack *ts = thread->ts, *new_ts;  in thread_stack__new()
 159 	if (thread_stack__per_cpu(thread) && cpu > 0)  in thread_stack__new()
 169 		zfree(&thread->ts);  in thread_stack__new()
 170 		thread->ts = new_ts;  in thread_stack__new()
 [all …]
 
 | 
| D | thread-stack.h | 14 struct thread;
 55 	struct thread *thread;  member
 83 int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
 85 void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
 86 void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
 88 int thread_stack__flush(struct thread *thread);
 89 void thread_stack__free(struct thread *thread);
 90 size_t thread_stack__depth(struct thread *thread, int cpu);
 96 int thread_stack__process(struct thread *thread, struct comm *comm,
 
 | 
| D | db-export.c | 62 int db_export__thread(struct db_export *dbe, struct thread *thread,  in db_export__thread()  argument
 63 		      struct machine *machine, struct thread *main_thread)  in db_export__thread()
 67 	if (thread->db_id)  in db_export__thread()
 70 	thread->db_id = ++dbe->thread_last_db_id;  in db_export__thread()
 76 		return dbe->export_thread(dbe, thread, main_thread_db_id,  in db_export__thread()
 83 			     struct thread *thread)  in __db_export__comm()  argument
 88 		return dbe->export_comm(dbe, comm, thread);  in __db_export__comm()
 94 		    struct thread *thread)  in db_export__comm()  argument
 99 	return __db_export__comm(dbe, comm, thread);  in db_export__comm()
 109 			 struct thread *main_thread)  in db_export__exec_comm()
 [all …]
 
 | 
| /Linux-v5.4/arch/mips/include/asm/ | 
| D | asmmacro-32.h | 16 	.macro	fpu_save_single thread tmp=t0
 20 	s.d	$f0,  THREAD_FPR0(\thread)
 21 	s.d	$f2,  THREAD_FPR2(\thread)
 22 	s.d	$f4,  THREAD_FPR4(\thread)
 23 	s.d	$f6,  THREAD_FPR6(\thread)
 24 	s.d	$f8,  THREAD_FPR8(\thread)
 25 	s.d	$f10, THREAD_FPR10(\thread)
 26 	s.d	$f12, THREAD_FPR12(\thread)
 27 	s.d	$f14, THREAD_FPR14(\thread)
 28 	s.d	$f16, THREAD_FPR16(\thread)
 [all …]
 
 | 
| D | asmmacro-64.h | 17 	.macro	cpu_save_nonscratch thread
 18 	LONG_S	s0, THREAD_REG16(\thread)
 19 	LONG_S	s1, THREAD_REG17(\thread)
 20 	LONG_S	s2, THREAD_REG18(\thread)
 21 	LONG_S	s3, THREAD_REG19(\thread)
 22 	LONG_S	s4, THREAD_REG20(\thread)
 23 	LONG_S	s5, THREAD_REG21(\thread)
 24 	LONG_S	s6, THREAD_REG22(\thread)
 25 	LONG_S	s7, THREAD_REG23(\thread)
 26 	LONG_S	sp, THREAD_REG29(\thread)
 [all …]
 
 | 
| D | asmmacro.h | 84 	.macro	fpu_save_16even thread tmp=t0
 88 	sdc1	$f0,  THREAD_FPR0(\thread)
 89 	sdc1	$f2,  THREAD_FPR2(\thread)
 90 	sdc1	$f4,  THREAD_FPR4(\thread)
 91 	sdc1	$f6,  THREAD_FPR6(\thread)
 92 	sdc1	$f8,  THREAD_FPR8(\thread)
 93 	sdc1	$f10, THREAD_FPR10(\thread)
 94 	sdc1	$f12, THREAD_FPR12(\thread)
 95 	sdc1	$f14, THREAD_FPR14(\thread)
 96 	sdc1	$f16, THREAD_FPR16(\thread)
 [all …]
 
 | 
| D | dsp.h | 41 	tsk->thread.dsp.dspr[0] = mfhi1();				\
 42 	tsk->thread.dsp.dspr[1] = mflo1();				\
 43 	tsk->thread.dsp.dspr[2] = mfhi2();				\
 44 	tsk->thread.dsp.dspr[3] = mflo2();				\
 45 	tsk->thread.dsp.dspr[4] = mfhi3();				\
 46 	tsk->thread.dsp.dspr[5] = mflo3();				\
 47 	tsk->thread.dsp.dspcontrol = rddsp(DSP_MASK);			\
 58 	mthi1(tsk->thread.dsp.dspr[0]);					\
 59 	mtlo1(tsk->thread.dsp.dspr[1]);					\
 60 	mthi2(tsk->thread.dsp.dspr[2]);					\
 [all …]
 
 | 
| /Linux-v5.4/arch/riscv/kernel/ | 
| D | asm-offsets.c | 16 	OFFSET(TASK_THREAD_RA, task_struct, thread.ra);  in asm_offsets()
 17 	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);  in asm_offsets()
 18 	OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);  in asm_offsets()
 19 	OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);  in asm_offsets()
 20 	OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);  in asm_offsets()
 21 	OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);  in asm_offsets()
 22 	OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);  in asm_offsets()
 23 	OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);  in asm_offsets()
 24 	OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);  in asm_offsets()
 25 	OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);  in asm_offsets()
 [all …]
 
 | 
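The asm-offsets.c hits here (and in the parisc and mips entries further down) all rely on the same idea: compute structure-member offsets with offsetof() and emit them as named constants that assembly code can use. A reduced, runnable illustration of that idea follows; the struct layout is a simplified stand-in for task_struct/thread_struct, and printing with printf replaces the kernel's OFFSET()/DEFINE() build-time machinery.

```c
/*
 * Reduced illustration of what asm-offsets.c computes: member offsets of a
 * task's thread state, exported as constants for assembly.  The structs are
 * toy stand-ins, not the real kernel layouts.
 */
#include <stdio.h>
#include <stddef.h>

struct thread_struct {
	unsigned long ra;
	unsigned long sp;
	unsigned long s[12];
};

struct task_struct {
	long state;
	struct thread_struct thread;
};

int main(void)
{
	printf("#define TASK_THREAD_RA %zu\n",
	       offsetof(struct task_struct, thread.ra));
	printf("#define TASK_THREAD_SP %zu\n",
	       offsetof(struct task_struct, thread.sp));
	printf("#define TASK_THREAD_S0 %zu\n",
	       offsetof(struct task_struct, thread.s[0]));
	return 0;
}
```

The generated constants are what lets context-switch and FPU save/restore assembly (such as the asmmacro-*.h macros above) address fields like THREAD_REG16(\thread) without knowing the C structure definition.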
| /Linux-v5.4/tools/perf/tests/ | 
| D | dwarf-unwind.c | 51 int test_dwarf_unwind__thread(struct thread *thread);
 53 int test_dwarf_unwind__krava_3(struct thread *thread);
 54 int test_dwarf_unwind__krava_2(struct thread *thread);
 55 int test_dwarf_unwind__krava_1(struct thread *thread);
 97 noinline int test_dwarf_unwind__thread(struct thread *thread)  in test_dwarf_unwind__thread()  argument
 105 	if (test__arch_unwind_sample(&sample, thread)) {  in test_dwarf_unwind__thread()
 110 	err = unwind__get_entries(unwind_entry, &cnt, thread,  in test_dwarf_unwind__thread()
 131 	struct thread *thread = *(struct thread **)p1;  in test_dwarf_unwind__compare()  local
 137 		global_unwind_retval = test_dwarf_unwind__thread(thread);  in test_dwarf_unwind__compare()
 140 			global_unwind_retval = test_dwarf_unwind__thread(thread);  in test_dwarf_unwind__compare()
 [all …]
 
 | 
| /Linux-v5.4/drivers/mailbox/ | 
| D | mtk-cmdq-mailbox.c | 66 	struct cmdq_thread	*thread;  member
 76 	struct cmdq_thread	*thread;  member
 81 static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)  in cmdq_thread_suspend()  argument
 85 	writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);  in cmdq_thread_suspend()
 88 	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))  in cmdq_thread_suspend()
 91 	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,  in cmdq_thread_suspend()
 94 			(u32)(thread->base - cmdq->base));  in cmdq_thread_suspend()
 101 static void cmdq_thread_resume(struct cmdq_thread *thread)  in cmdq_thread_resume()  argument
 103 	writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);  in cmdq_thread_resume()
 117 static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)  in cmdq_thread_reset()  argument
 [all …]
 
 | 
| /Linux-v5.4/tools/perf/scripts/python/ | 
| D | stat-cpi.py | 10 def get_key(time, event, cpu, thread):  argument
 11     return "%d-%s-%d-%d" % (time, event, cpu, thread)
 13 def store_key(time, cpu, thread):  argument
 20     if (thread not in threads):
 21         threads.append(thread)
 23 def store(time, event, cpu, thread, val, ena, run):  argument
 27     store_key(time, cpu, thread)
 28     key = get_key(time, event, cpu, thread)
 31 def get(time, event, cpu, thread):  argument
 32     key = get_key(time, event, cpu, thread)
 [all …]
 
 | 
| /Linux-v5.4/arch/parisc/kernel/ | 
| D | asm-offsets.c | 53 	DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));  in main()
 54 	DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));  in main()
 55 	DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));  in main()
 56 	DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));  in main()
 57 	DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));  in main()
 58 	DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));  in main()
 59 	DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));  in main()
 60 	DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));  in main()
 61 	DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));  in main()
 62 	DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));  in main()
 [all …]
 
 | 
| /Linux-v5.4/Documentation/vm/ | 
| D | mmu_notifier.rst | 41  CPU-thread-0  {try to write to addrA}
 42  CPU-thread-1  {try to write to addrB}
 43  CPU-thread-2  {}
 44  CPU-thread-3  {}
 45  DEV-thread-0  {read addrA and populate device TLB}
 46  DEV-thread-2  {read addrB and populate device TLB}
 48  CPU-thread-0  {COW_step0: {mmu_notifier_invalidate_range_start(addrA)}}
 49  CPU-thread-1  {COW_step0: {mmu_notifier_invalidate_range_start(addrB)}}
 50  CPU-thread-2  {}
 51  CPU-thread-3  {}
 [all …]
 
 | 
| /Linux-v5.4/arch/powerpc/kernel/ | 
| D | process.c | 96 	if (tsk == current && tsk->thread.regs &&  in check_if_tm_restore_required()
 97 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&  in check_if_tm_restore_required()
 99 		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;  in check_if_tm_restore_required()
 164 	msr = tsk->thread.regs->msr;  in __giveup_fpu()
 170 	tsk->thread.regs->msr = msr;  in __giveup_fpu()
 189 	if (tsk->thread.regs) {  in flush_fp_to_thread()
 199 		if (tsk->thread.regs->msr & MSR_FP) {  in flush_fp_to_thread()
 223 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {  in enable_kernel_fp()
 233 		     MSR_TM_ACTIVE(current->thread.regs->msr))  in enable_kernel_fp()
 242 	if (tsk->thread.load_fp) {  in restore_fp()
 [all …]
 
 | 
| D | ptrace.c | 145 		tm_save_sprs(&(tsk->thread));  in flush_tmregs_to_thread()
 209 	return task->thread.regs->msr | task->thread.fpexc_mode;  in get_user_msr()
 214 	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;  in set_user_msr()
 215 	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;  in set_user_msr()
 222 	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;  in get_user_ckpt_msr()
 227 	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;  in set_user_ckpt_msr()
 228 	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;  in set_user_ckpt_msr()
 234 	task->thread.ckpt_regs.trap = trap & 0xfff0;  in set_user_ckpt_trap()
 242 	*data = task->thread.dscr;  in get_user_dscr()
 248 	task->thread.dscr = dscr;  in set_user_dscr()
 [all …]
 
 | 
| /Linux-v5.4/arch/sh/kernel/cpu/sh4/ | 
| D | fpu.c | 84 		      :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),  in save_fpu()
 134 		      :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)  in restore_fpu()
 230 		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))  in ieee_fpe_handler()
 232 			denormal_to_double(&tsk->thread.xstate->hardfpu,  in ieee_fpe_handler()
 248 		hx = tsk->thread.xstate->hardfpu.fp_regs[n];  in ieee_fpe_handler()
 249 		hy = tsk->thread.xstate->hardfpu.fp_regs[m];  in ieee_fpe_handler()
 250 		fpscr = tsk->thread.xstate->hardfpu.fpscr;  in ieee_fpe_handler()
 260 			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];  in ieee_fpe_handler()
 262 			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];  in ieee_fpe_handler()
 264 			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;  in ieee_fpe_handler()
 [all …]
 
 | 
| /Linux-v5.4/arch/s390/kernel/ | 
| D | process.c | 78 	dst->thread.fpu.regs = dst->thread.fpu.fprs;  in arch_dup_task_struct()
 92 	p->thread.ksp = (unsigned long) frame;  in copy_thread_tls()
 94 	save_access_regs(&p->thread.acrs[0]);  in copy_thread_tls()
 96 	p->thread.mm_segment = get_fs();  in copy_thread_tls()
 98 	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));  in copy_thread_tls()
 99 	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));  in copy_thread_tls()
 101 	p->thread.per_flags = 0;  in copy_thread_tls()
 103 	p->thread.user_timer = 0;  in copy_thread_tls()
 104 	p->thread.guest_timer = 0;  in copy_thread_tls()
 105 	p->thread.system_timer = 0;  in copy_thread_tls()
 [all …]
 
 | 
| D | guarded_storage.c | 17 	kfree(tsk->thread.gs_cb);  in guarded_storage_release()
 18 	kfree(tsk->thread.gs_bc_cb);  in guarded_storage_release()
 25 	if (!current->thread.gs_cb) {  in gs_enable()
 33 		current->thread.gs_cb = gs_cb;  in gs_enable()
 41 	if (current->thread.gs_cb) {  in gs_disable()
 43 		kfree(current->thread.gs_cb);  in gs_disable()
 44 		current->thread.gs_cb = NULL;  in gs_disable()
 55 	gs_cb = current->thread.gs_bc_cb;  in gs_set_bc_cb()
 60 		current->thread.gs_bc_cb = gs_cb;  in gs_set_bc_cb()
 71 	gs_cb = current->thread.gs_bc_cb;  in gs_clear_bc_cb()
 [all …]
 
 | 
| /Linux-v5.4/arch/um/kernel/ | 
| D | process.c | 88 	to->thread.prev_sched = from;  in __switch_to()
 91 	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);  in __switch_to()
 94 	return current->thread.prev_sched;  in __switch_to()
 99 	struct pt_regs *regs = &current->thread.regs;  in interrupt_end()
 123 	if (current->thread.prev_sched != NULL)  in new_thread_handler()
 124 		schedule_tail(current->thread.prev_sched);  in new_thread_handler()
 125 	current->thread.prev_sched = NULL;  in new_thread_handler()
 127 	fn = current->thread.request.u.thread.proc;  in new_thread_handler()
 128 	arg = current->thread.request.u.thread.arg;  in new_thread_handler()
 134 	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);  in new_thread_handler()
 [all …]
 
 | 
| /Linux-v5.4/tools/perf/lib/ | 
| D | evsel.c | 47 		int cpu, thread;  in perf_evsel__alloc_fd()  local
 49 			for (thread = 0; thread < nthreads; thread++) {  in perf_evsel__alloc_fd()
 50 				FD(evsel, cpu, thread) = -1;  in perf_evsel__alloc_fd()
 69 	int cpu, thread, err = 0;  in perf_evsel__open()  local
 100 		for (thread = 0; thread < threads->nr; thread++) {  in perf_evsel__open()
 104 						 threads->map[thread].pid,  in perf_evsel__open()
 110 			FD(evsel, cpu, thread) = fd;  in perf_evsel__open()
 119 	int cpu, thread;  in perf_evsel__close_fd()  local
 122 		for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {  in perf_evsel__close_fd()
 123 			close(FD(evsel, cpu, thread));  in perf_evsel__close_fd()
 [all …]
 
 | 
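The evsel.c hits show a per-event table of file descriptors indexed by cpu and thread, initialized to -1 in perf_evsel__alloc_fd() and closed in perf_evsel__close_fd(); perf keeps the table in an xyarray behind the FD() accessor (the snippet's xyarray__max_y() call hints at this). The flat 2D array below is only an illustrative stand-in showing the same indexing and initialization, not perf's real helpers.

```c
/*
 * Sketch of a cpu-by-thread fd table in the spirit of the FD(evsel, cpu,
 * thread) accesses above.  The fd_table type and FD() macro here are toy
 * stand-ins for perf's evsel/xyarray code.
 */
#include <stdlib.h>

struct fd_table {
	int ncpus;
	int nthreads;
	int *fd;		/* ncpus * nthreads entries */
};

#define FD(t, cpu, thread) ((t)->fd[(cpu) * (t)->nthreads + (thread)])

static struct fd_table *fd_table__new(int ncpus, int nthreads)
{
	struct fd_table *t = malloc(sizeof(*t));
	int cpu, thread;

	if (!t)
		return NULL;
	t->ncpus = ncpus;
	t->nthreads = nthreads;
	t->fd = malloc(sizeof(int) * ncpus * nthreads);
	if (!t->fd) {
		free(t);
		return NULL;
	}
	/* Mark every slot as "not opened yet", mirroring the -1 fill above. */
	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; thread++)
			FD(t, cpu, thread) = -1;
	return t;
}

int main(void)
{
	struct fd_table *t = fd_table__new(4, 8);

	if (!t)
		return 1;
	/* ... open one fd per (cpu, thread) pair, use them, then tear down. */
	free(t->fd);
	free(t);
	return 0;
}
```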
| /Linux-v5.4/arch/mips/kernel/ | 
| D | asm-offsets.c | 113 	OFFSET(THREAD_REG16, task_struct, thread.reg16);  in output_thread_defines()
 114 	OFFSET(THREAD_REG17, task_struct, thread.reg17);  in output_thread_defines()
 115 	OFFSET(THREAD_REG18, task_struct, thread.reg18);  in output_thread_defines()
 116 	OFFSET(THREAD_REG19, task_struct, thread.reg19);  in output_thread_defines()
 117 	OFFSET(THREAD_REG20, task_struct, thread.reg20);  in output_thread_defines()
 118 	OFFSET(THREAD_REG21, task_struct, thread.reg21);  in output_thread_defines()
 119 	OFFSET(THREAD_REG22, task_struct, thread.reg22);  in output_thread_defines()
 120 	OFFSET(THREAD_REG23, task_struct, thread.reg23);  in output_thread_defines()
 121 	OFFSET(THREAD_REG29, task_struct, thread.reg29);  in output_thread_defines()
 122 	OFFSET(THREAD_REG30, task_struct, thread.reg30);  in output_thread_defines()
 [all …]
 
 | 
| /Linux-v5.4/drivers/acpi/acpica/ | 
| D | exmutex.c | 21 		   struct acpi_thread_state *thread);
 37 	struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;  in acpi_ex_unlink_mutex()  local
 39 	if (!thread) {  in acpi_ex_unlink_mutex()
 61 		thread->acquired_mutex_list = obj_desc->mutex.next;  in acpi_ex_unlink_mutex()
 80 		   struct acpi_thread_state *thread)  in acpi_ex_link_mutex()  argument
 84 	list_head = thread->acquired_mutex_list;  in acpi_ex_link_mutex()
 99 	thread->acquired_mutex_list = obj_desc;  in acpi_ex_link_mutex()
 205 	if (!walk_state->thread) {  in acpi_ex_acquire_mutex()
 216 	if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {  in acpi_ex_acquire_mutex()
 221 			    walk_state->thread->current_sync_level));  in acpi_ex_acquire_mutex()
 [all …]
 
 | 
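The exmutex.c hits show ACPICA keeping a per-thread list of acquired mutexes (thread->acquired_mutex_list, with each mutex object carrying a next link) that acpi_ex_link_mutex() and acpi_ex_unlink_mutex() maintain. The sketch below demonstrates the link/unlink idea with a simplified, singly linked toy list; the types are stand-ins and ACPICA's actual object layout and list handling differ in detail.

```c
/*
 * Simplified, singly linked version of the per-thread "acquired mutex"
 * list suggested by the exmutex.c snippet.  toy_mutex/toy_thread_state are
 * illustrative stand-ins, not ACPICA's types.
 */
#include <stdio.h>

struct toy_mutex {
	const char *name;
	struct toy_mutex *next;
};

struct toy_thread_state {
	struct toy_mutex *acquired_mutex_list;
};

/* Push the newly acquired mutex onto the head of the thread's list. */
static void link_mutex(struct toy_thread_state *thread, struct toy_mutex *m)
{
	m->next = thread->acquired_mutex_list;
	thread->acquired_mutex_list = m;
}

/* Unlink an arbitrary entry when the mutex is released. */
static void unlink_mutex(struct toy_thread_state *thread, struct toy_mutex *m)
{
	struct toy_mutex **p = &thread->acquired_mutex_list;

	while (*p) {
		if (*p == m) {
			*p = m->next;
			m->next = NULL;
			return;
		}
		p = &(*p)->next;
	}
}

int main(void)
{
	struct toy_thread_state ts = { 0 };
	struct toy_mutex a = { "A", NULL }, b = { "B", NULL };

	link_mutex(&ts, &a);
	link_mutex(&ts, &b);
	unlink_mutex(&ts, &a);
	printf("head: %s\n", ts.acquired_mutex_list->name); /* prints "B" */
	return 0;
}
```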
| /Linux-v5.4/arch/sparc/kernel/ | 
| D | sigutil_32.c | 21 		fpsave(&current->thread.float_regs[0], &current->thread.fsr,  in save_fpu_state()
 22 		       &current->thread.fpqueue[0], &current->thread.fpqdepth);  in save_fpu_state()
 29 		fpsave(&current->thread.float_regs[0], &current->thread.fsr,  in save_fpu_state()
 30 		       &current->thread.fpqueue[0], &current->thread.fpqdepth);  in save_fpu_state()
 36 			      &current->thread.float_regs[0],  in save_fpu_state()
 38 	err |= __put_user(current->thread.fsr, &fpu->si_fsr);  in save_fpu_state()
 39 	err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);  in save_fpu_state()
 40 	if (current->thread.fpqdepth != 0)  in save_fpu_state()
 42 				      &current->thread.fpqueue[0],  in save_fpu_state()
 71 	err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],  in restore_fpu_state()
 [all …]
 
 |