Lines Matching full:thread

88 * If we are saving the current thread's registers, and the in check_if_tm_restore_required()
89 * thread is in a transactional state, set the TIF_RESTORE_TM in check_if_tm_restore_required()
93 if (tsk == current && tsk->thread.regs && in check_if_tm_restore_required()
94 MSR_TM_ACTIVE(tsk->thread.regs->msr) && in check_if_tm_restore_required()
96 regs_set_return_msr(&tsk->thread.ckpt_regs, in check_if_tm_restore_required()
97 tsk->thread.regs->msr); in check_if_tm_restore_required()
158 msr = tsk->thread.regs->msr; in __giveup_fpu()
162 regs_set_return_msr(tsk->thread.regs, msr); in __giveup_fpu()
181 if (tsk->thread.regs) { in flush_fp_to_thread()
191 if (tsk->thread.regs->msr & MSR_FP) { in flush_fp_to_thread()
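
The flush_fp_to_thread() hits above are what make a task's live FP registers visible to another context: the state is pushed out to thread.fp_state before anyone else reads it. One common consumer is ptrace, whose powerpc FP regset handler calls flush_fp_to_thread() on the target. A minimal tracer-side sketch, assuming an already-stopped tracee `pid` and the NT_PRFPREG regset (error handling omitted):

    #include <elf.h>            /* NT_PRFPREG */
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    void dump_fpscr(pid_t pid)
    {
            unsigned long fpr[33];  /* 32 FPRs + FPSCR on 64-bit powerpc */
            struct iovec iov = { .iov_base = fpr, .iov_len = sizeof(fpr) };

            /* The kernel's FP regset ->get path flushes the tracee's live
             * FP state to thread.fp_state before copying it out to us. */
            if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) == 0)
                    printf("fpscr = %#lx\n", fpr[32]);
    }
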
215 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { in enable_kernel_fp()
218 * If a thread has already been reclaimed then the in enable_kernel_fp()
225 MSR_TM_ACTIVE(current->thread.regs->msr)) in enable_kernel_fp()
241 msr = tsk->thread.regs->msr; in __giveup_altivec()
245 regs_set_return_msr(tsk->thread.regs, msr); in __giveup_altivec()
266 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) { in enable_kernel_altivec()
269 * If a thread has already been reclaimed then the in enable_kernel_altivec()
276 MSR_TM_ACTIVE(current->thread.regs->msr)) in enable_kernel_altivec()
289 if (tsk->thread.regs) { in flush_altivec_to_thread()
291 if (tsk->thread.regs->msr & MSR_VEC) { in flush_altivec_to_thread()
304 unsigned long msr = tsk->thread.regs->msr; in __giveup_vsx()
336 if (current->thread.regs && in enable_kernel_vsx()
337 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) { in enable_kernel_vsx()
340 * If a thread has already been reclaimed then the in enable_kernel_vsx()
347 MSR_TM_ACTIVE(current->thread.regs->msr)) in enable_kernel_vsx()
356 if (tsk->thread.regs) { in flush_vsx_to_thread()
358 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) { in flush_vsx_to_thread()
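
enable_kernel_fp(), enable_kernel_altivec() and enable_kernel_vsx() above let kernel code borrow the FP/VMX/VSX units after evicting any live user state; callers bracket the use with preemption disabled, in the style of arch/powerpc/lib/vmx-helper.c. A minimal kernel-side sketch (the surrounding helper is hypothetical; assumes CONFIG_ALTIVEC):

    #include <linux/preempt.h>
    #include <asm/switch_to.h>      /* enable_kernel_altivec() and friends */

    /* Hypothetical VMX-assisted routine. Preemption must stay off while
     * the kernel owns the vector unit, otherwise a context switch could
     * mix kernel-side VMX state into the user thread_struct. */
    static void vmx_assisted_example(void)
    {
            preempt_disable();
            enable_kernel_altivec();        /* evict user VMX state, claim the unit */

            /* ... VMX instructions would run here ... */

            disable_kernel_altivec();       /* clear MSR_VEC again */
            preempt_enable();
    }
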
385 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) { in enable_kernel_spe()
394 if (tsk->thread.regs) { in flush_spe_to_thread()
396 if (tsk->thread.regs->msr & MSR_SPE) { in flush_spe_to_thread()
398 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); in flush_spe_to_thread()
427 if (!tsk->thread.regs) in giveup_all()
432 usermsr = tsk->thread.regs->msr; in giveup_all()
456 if (current->thread.load_fp) { in should_restore_fp()
457 current->thread.load_fp++; in should_restore_fp()
465 load_fp_state(&current->thread.fp_state); in do_restore_fp()
475 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) { in should_restore_altivec()
476 current->thread.load_vec++; in should_restore_altivec()
484 load_vr_state(&current->thread.vr_state); in do_restore_altivec()
485 current->thread.used_vr = 1; in do_restore_altivec()
501 current->thread.used_vsr = 1; in do_restore_vsx()
527 * are live for the user thread). in restore_math()
549 fpexc_mode = current->thread.fpexc_mode; in restore_math()
569 if (!tsk->thread.regs) in save_all()
572 usermsr = tsk->thread.regs->msr; in save_all()
595 if (tsk->thread.regs) { in flush_all_to_thread()
599 if (tsk->thread.regs->msr & MSR_SPE) in flush_all_to_thread()
600 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); in flush_all_to_thread()
613 current->thread.trap_nr = TRAP_HWBKPT; in do_send_trap()
640 current->thread.hw_brk[0] = null_brk; in do_break_handler()
641 current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED; in do_break_handler()
649 info = &current->thread.hw_brk[i]; in do_break_handler()
655 current->thread.hw_brk[i] = null_brk; in do_break_handler()
656 current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED; in do_break_handler()
663 current->thread.trap_nr = TRAP_HWBKPT; in DEFINE_INTERRUPT_HANDLER()
692 static void set_debug_reg_defaults(struct thread_struct *thread) in set_debug_reg_defaults() argument
694 thread->debug.iac1 = thread->debug.iac2 = 0; in set_debug_reg_defaults()
696 thread->debug.iac3 = thread->debug.iac4 = 0; in set_debug_reg_defaults()
698 thread->debug.dac1 = thread->debug.dac2 = 0; in set_debug_reg_defaults()
700 thread->debug.dvc1 = thread->debug.dvc2 = 0; in set_debug_reg_defaults()
702 thread->debug.dbcr0 = 0; in set_debug_reg_defaults()
707 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | in set_debug_reg_defaults()
713 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; in set_debug_reg_defaults()
715 thread->debug.dbcr1 = 0; in set_debug_reg_defaults()
747 * Unless neither the old nor the new thread is making use of the
749 * stored in the new thread.
753 if ((current->thread.debug.dbcr0 & DBCR0_IDM) in switch_booke_debug_regs()
767 static void set_debug_reg_defaults(struct thread_struct *thread) in set_debug_reg_defaults() argument
773 thread->hw_brk[i] = null_brk; in set_debug_reg_defaults()
775 set_breakpoint(i, &thread->hw_brk[i]); in set_debug_reg_defaults()
798 &new->thread.hw_brk[i]))) in switch_hw_breakpoint()
801 __set_breakpoint(i, &new->thread.hw_brk[i]); in switch_hw_breakpoint()
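
The hw_brk[] slots swapped by switch_hw_breakpoint() above are the per-thread view of the hardware watchpoints that a debugger arms over ptrace. A rough tracer-side sketch of arming one, assuming the powerpc-specific PPC_PTRACE_SETHWDEBUG request and a stopped tracee (constants and struct layout from the uapi asm/ptrace.h; untested, error handling omitted):

    #include <string.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>         /* struct ppc_hw_breakpoint */

    long set_write_watchpoint(pid_t pid, unsigned long addr)
    {
            struct ppc_hw_breakpoint bp;

            memset(&bp, 0, sizeof(bp));
            bp.version        = PPC_DEBUG_CURRENT_VERSION;
            bp.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE;
            bp.addr_mode      = PPC_BREAKPOINT_MODE_EXACT;
            bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
            bp.addr           = addr;

            /* Depending on kernel configuration this ends up either in the
             * tracee's thread.hw_brk[] (installed at context switch by
             * switch_hw_breakpoint()) or in a perf-managed breakpoint slot. */
            return ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
    }
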
898 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM); in tm_enabled()
910 * we need to exit this thread which calls __switch_to() which in tm_reclaim_thread()
921 giveup_all(container_of(thr, struct task_struct, thread)); in tm_reclaim_thread()
948 tm_reclaim_thread(&current->thread, cause); in tm_reclaim_current()
957 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the in tm_reclaim_task()
963 struct thread_struct *thr = &tsk->thread; in tm_reclaim_task()
986 * This context-switches a thread's TM info SPRs. We do it here to in tm_reclaim_task()
993 extern void __tm_recheckpoint(struct thread_struct *thread);
995 void tm_recheckpoint(struct thread_struct *thread) in tm_recheckpoint() argument
999 if (!(thread->regs->msr & MSR_TM)) in tm_recheckpoint()
1012 tm_restore_sprs(thread); in tm_recheckpoint()
1014 __tm_recheckpoint(thread); in tm_recheckpoint()
1024 /* Recheckpoint the registers of the thread we're about to switch to. in tm_recheckpoint_new_task()
1035 if (!MSR_TM_ACTIVE(new->thread.regs->msr)) { in tm_recheckpoint_new_task()
1036 tm_restore_sprs(&new->thread); in tm_recheckpoint_new_task()
1041 new->pid, new->thread.regs->msr); in tm_recheckpoint_new_task()
1043 tm_recheckpoint(&new->thread); in tm_recheckpoint_new_task()
1050 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX); in tm_recheckpoint_new_task()
1065 prev->thread.load_tm++; in __switch_to_tm()
1067 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0) in __switch_to_tm()
1068 prev->thread.regs->msr &= ~MSR_TM; in __switch_to_tm()
1103 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; in restore_tm_state()
1108 current->thread.load_fp = 1; in restore_tm_state()
1111 current->thread.load_vec = 1; in restore_tm_state()
1161 if (!current->thread.regs) in kvmppc_save_user_regs()
1164 usermsr = current->thread.regs->msr; in kvmppc_save_user_regs()
1174 current->thread.tm_tfhar = mfspr(SPRN_TFHAR); in kvmppc_save_user_regs()
1175 current->thread.tm_tfiar = mfspr(SPRN_TFIAR); in kvmppc_save_user_regs()
1176 current->thread.tm_texasr = mfspr(SPRN_TEXASR); in kvmppc_save_user_regs()
1177 current->thread.regs->msr &= ~MSR_TM; in kvmppc_save_user_regs()
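
The tm_reclaim()/tm_recheckpoint() machinery above is what lets the kernel context-switch a thread that is mid-transaction: reclaim aborts the transaction and saves the checkpointed register state, and recheckpoint re-arms it so the thread cleanly rolls back to its tbegin instead of resuming with corrupt state. A minimal userspace view of what is being preserved, assuming GCC's PowerPC HTM builtins (build with -mhtm) and TM-capable hardware with kernel TM support (POWER8/POWER9):

    /* build: gcc -mhtm tm-demo.c */
    #include <stdio.h>

    int main(void)
    {
            static int counter;

            if (__builtin_tbegin(0)) {
                    /* Transactional state. If the scheduler preempts us
                     * here the transaction fails, but tm_reclaim_task() /
                     * tm_recheckpoint_new_task() keep the checkpointed
                     * registers intact, so we roll back to tbegin and
                     * take the else branch cleanly. */
                    counter++;
                    __builtin_tend(0);
                    puts("committed");
            } else {
                    puts("aborted (or TM unavailable)");
            }
            return 0;
    }
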
1185 save_sprs(&current->thread); in kvmppc_save_current_sprs()
1244 new_thread = &new->thread; in __switch_to()
1245 old_thread = &current->thread; in __switch_to()
1273 switch_booke_debug_regs(&new->thread.debug); in __switch_to()
1288 save_sprs(&prev->thread); in __switch_to()
1347 if (current->thread.regs) in __switch_to()
1348 restore_math(current->thread.regs); in __switch_to()
1578 set_debug_reg_defaults(&current->thread); in flush_thread()
1590 * If we exec out of a kernel thread then thread.regs will not be in arch_setup_new_exec()
1593 if (!current->thread.regs) { in arch_setup_new_exec()
1595 current->thread.regs = regs - 1; in arch_setup_new_exec()
1599 current->thread.regs->amr = default_amr; in arch_setup_new_exec()
1600 current->thread.regs->iamr = default_iamr; in arch_setup_new_exec()
1606 * Assign a TIDR (thread ID) for task @t and set it in the thread
1614 * 1. The correct thread is running, the wrong thread is not
1615 * In this situation, the correct thread is woken and proceeds to pass its
1619 * In this situation, neither thread will be woken. When scheduled, the waiting
1621 * by a condition check, which will pass for the correct thread and fail
1622 * for the wrong thread, or they will execute the condition check immediately.
1624 * 3. The wrong thread is running, the correct thread is not
1625 * The wrong thread will be woken, but will fail its condition check and
1626 * re-execute wait. The correct thread, when scheduled, will execute either
1628 * when called the first time after the thread is scheduled, followed by its
1632 * Both threads will be woken. The wrong thread will fail its condition check
1633 * and execute another wait, while the correct thread will pass its condition
1636 * @t: the task to set the thread ID for
1646 if (t->thread.tidr) in set_thread_tidr()
1649 t->thread.tidr = (u16)task_pid_nr(t); in set_thread_tidr()
1650 mtspr(SPRN_TIDR, t->thread.tidr); in set_thread_tidr()
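
The scenario list above boils down to the standard wait/wake discipline: a woken thread must re-run its condition check and go back to waiting if the check fails, so waking the wrong thread is harmless. The same pattern in portable userspace form, using POSIX condition variables (illustrative only; not the TIDR wait mechanism itself):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool ready;      /* the "condition check" from the comment above */

    void *waiter(void *arg)
    {
            pthread_mutex_lock(&lock);
            /* Re-checking in a loop is what makes every scenario above
             * safe: a spuriously or wrongly woken thread fails the check
             * and simply waits again. */
            while (!ready)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    void waker(void)
    {
            pthread_mutex_lock(&lock);
            ready = true;
            pthread_cond_broadcast(&cond);  /* may wake the "wrong" thread too */
            pthread_mutex_unlock(&lock);
    }
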
1660 * copy the current task into the new thread.
1700 p->thread.ksp_vsid = sp_vsid; in setup_ksp_vsid()
1705 * Copy a thread. in copy_thread()
1709 * Copy architecture-specific thread state
1733 /* kernel thread */ in copy_thread()
1744 p->thread.regs = NULL; /* no user register state */ in copy_thread()
1748 /* user thread */ in copy_thread()
1753 p->thread.regs = childregs; in copy_thread()
1784 p->thread.ksp = sp; in copy_thread()
1787 p->thread.ptrace_bps[i] = NULL; in copy_thread()
1791 p->thread.fp_save_area = NULL; in copy_thread()
1794 p->thread.vr_save_area = NULL; in copy_thread()
1797 p->thread.kuap = KUAP_NONE; in copy_thread()
1800 p->thread.pid = MMU_NO_CONTEXT; in copy_thread()
1807 p->thread.dscr_inherit = current->thread.dscr_inherit; in copy_thread()
1808 p->thread.dscr = mfspr(SPRN_DSCR); in copy_thread()
1813 p->thread.tidr = 0; in copy_thread()
1832 * Set up a thread for executing a new program
1914 current->thread.used_vsr = 0; in start_thread()
1916 current->thread.load_slb = 0; in start_thread()
1917 current->thread.load_fp = 0; in start_thread()
1919 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state)); in start_thread()
1920 current->thread.fp_save_area = NULL; in start_thread()
1923 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state)); in start_thread()
1924 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ in start_thread()
1925 current->thread.vr_save_area = NULL; in start_thread()
1926 current->thread.vrsave = 0; in start_thread()
1927 current->thread.used_vr = 0; in start_thread()
1928 current->thread.load_vec = 0; in start_thread()
1931 memset(current->thread.evr, 0, sizeof(current->thread.evr)); in start_thread()
1932 current->thread.acc = 0; in start_thread()
1933 current->thread.spefscr = 0; in start_thread()
1934 current->thread.used_spe = 0; in start_thread()
1937 current->thread.tm_tfhar = 0; in start_thread()
1938 current->thread.tm_texasr = 0; in start_thread()
1939 current->thread.tm_tfiar = 0; in start_thread()
1940 current->thread.load_tm = 0; in start_thread()
1950 struct pt_regs *regs = tsk->thread.regs; in set_fpexc_mode()
1971 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); in set_fpexc_mode()
1972 tsk->thread.fpexc_mode = val & in set_fpexc_mode()
1988 tsk->thread.fpexc_mode = __pack_fe01(val); in set_fpexc_mode()
1991 | tsk->thread.fpexc_mode); in set_fpexc_mode()
2000 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) { in get_fpexc_mode()
2015 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); in get_fpexc_mode()
2016 val = tsk->thread.fpexc_mode; in get_fpexc_mode()
2021 val = __unpack_fe01(tsk->thread.fpexc_mode); in get_fpexc_mode()
2028 struct pt_regs *regs = tsk->thread.regs; in set_endian()
2049 struct pt_regs *regs = tsk->thread.regs; in get_endian()
2072 tsk->thread.align_ctl = val; in set_unalign_ctl()
2078 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); in get_unalign_ctl()
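
set_fpexc_mode()/get_fpexc_mode(), set_endian()/get_endian() and set_unalign_ctl()/get_unalign_ctl() above are the powerpc backends for the corresponding prctl(2) operations (PR_SET_FPEXC, PR_{SET,GET}_ENDIAN, PR_{SET,GET}_UNALIGN). A minimal userspace sketch exercising them (powerpc-specific prctls; error handling abbreviated):

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            unsigned int endian;

            /* Serviced by set_fpexc_mode() above. */
            if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) != 0)
                    perror("PR_SET_FPEXC");

            /* Serviced by get_endian(); the kernel put_user()s the mode
             * through the pointer in the second argument. */
            if (prctl(PR_GET_ENDIAN, &endian) == 0)
                    printf("endian mode: %u\n", endian);

            /* Serviced by set_unalign_ctl(). */
            if (prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT) != 0)
                    perror("PR_SET_UNALIGN");

            return 0;
    }
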
2151 sp = p->thread.ksp; in ___get_wchan()
2206 sp = tsk->thread.ksp; in show_stack()