Lines Matching +full:a +full:- +full:za +full:- +full:z  (all hits below are from arch/arm64/kernel/process.c; each line is prefixed with its line number in that file)
1 // SPDX-License-Identifier: GPL-2.0-only
6 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
82 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
105 * Power-off simply requires that the secondary CPUs stop performing any
120 * provide a HW restart implementation, to ensure that all CPUs reset at once.
122 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
123 * executing pre-reset code, and using RAM that the primary CPU's code wishes
124 * to use. Implementing such co-ordination would be essentially impossible.
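The comments above explain why the primary CPU must be able to reset the machine without handshaking with the secondaries. For platform drivers this usually means hooking the restart path via the reboot notifier chain; a minimal sketch, assuming a hypothetical SoC whose reset controller stops all CPUs with a single write (the driver and handler names are illustrative, not from this file):

        #include <linux/init.h>
        #include <linux/notifier.h>
        #include <linux/reboot.h>

        /* hypothetical SoC driver: one register write resets every CPU at once */
        static int soc_restart(struct notifier_block *nb, unsigned long action,
                               void *data)
        {
                /* assert the SoC-level reset line here; no cross-CPU handshake needed */
                return NOTIFY_DONE;
        }

        static struct notifier_block soc_restart_nb = {
                .notifier_call = soc_restart,
                .priority = 128,        /* default restart-handler priority */
        };

        static int __init soc_restart_init(void)
        {
                return register_restart_handler(&soc_restart_nb);
        }
        device_initcall(soc_restart_init);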
143 * Whoops - the architecture was unable to reboot. in machine_restart()
145 printk("Reboot failed -- System halted\n"); in machine_restart()
151 bstr(NONE, "--"),
153 bstr( C, "-c"),
154 bstr( J , "j-")
160 u64 pstate = regs->pstate; in print_pstate()
166 pstate & PSR_AA32_Z_BIT ? 'Z' : 'z', in print_pstate()
172 pstate & PSR_AA32_A_BIT ? 'A' : 'a', in print_pstate()
175 pstate & PSR_AA32_DIT_BIT ? '+' : '-', in print_pstate()
176 pstate & PSR_AA32_SSBS_BIT ? '+' : '-'); in print_pstate()
184 pstate & PSR_Z_BIT ? 'Z' : 'z', in print_pstate()
188 pstate & PSR_A_BIT ? 'A' : 'a', in print_pstate()
191 pstate & PSR_PAN_BIT ? '+' : '-', in print_pstate()
192 pstate & PSR_UAO_BIT ? '+' : '-', in print_pstate()
193 pstate & PSR_TCO_BIT ? '+' : '-', in print_pstate()
194 pstate & PSR_DIT_BIT ? '+' : '-', in print_pstate()
195 pstate & PSR_SSBS_BIT ? '+' : '-', in print_pstate()
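The print_pstate() hits above all use the same compact idiom: a condition-flag prints as an uppercase letter when set and lowercase when clear, while single-bit extensions (PAN, UAO, TCO, DIT, SSBS) print as '+' or '-'. A self-contained sketch of the uppercase/lowercase half of the idiom, using the architectural NZCV bit positions:

        #include <stdio.h>
        #include <stdint.h>

        #define PSR_N_BIT (1ULL << 31)
        #define PSR_Z_BIT (1ULL << 30)
        #define PSR_C_BIT (1ULL << 29)
        #define PSR_V_BIT (1ULL << 28)

        static void show_nzcv(uint64_t pstate)
        {
                /* set bits render uppercase, clear bits lowercase */
                printf("%c%c%c%c\n",
                       pstate & PSR_N_BIT ? 'N' : 'n',
                       pstate & PSR_Z_BIT ? 'Z' : 'z',
                       pstate & PSR_C_BIT ? 'C' : 'c',
                       pstate & PSR_V_BIT ? 'V' : 'v');
        }

        int main(void)
        {
                show_nzcv(PSR_N_BIT | PSR_C_BIT);       /* prints "NzCv" */
                return 0;
        }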
206 lr = regs->compat_lr; in __show_regs()
207 sp = regs->compat_sp; in __show_regs()
210 lr = regs->regs[30]; in __show_regs()
211 sp = regs->sp; in __show_regs()
219 printk("pc : %pS\n", (void *)regs->pc); in __show_regs()
222 printk("pc : %016llx\n", regs->pc); in __show_regs()
229 printk("pmr_save: %08llx\n", regs->pmr_save); in __show_regs()
234 printk("x%-2d: %016llx", i, regs->regs[i]); in __show_regs()
236 while (i-- % 3) in __show_regs()
237 pr_cont(" x%-2d: %016llx", i, regs->regs[i]); in __show_regs()
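Lines 234-237 come from the register dump loop in __show_regs(), which prints the general-purpose registers three per line, counting down from the highest. A self-contained reconstruction of the idiom (a hypothetical dump_regs() using printf in place of printk/pr_cont):

        #include <stdio.h>

        static void dump_regs(const unsigned long long *regs, int top)
        {
                int i = top;

                while (i >= 0) {
                        printf("x%-2d: %016llx", i, regs[i]);
                        /* i-- % 3: keep appending until the tested i hits a multiple of 3 */
                        while (i-- % 3)
                                printf(" x%-2d: %016llx", i, regs[i]);
                        printf("\n");
                }
        }

Called with top = 30, this puts x30 on a line of its own and then groups x29/x28/x27, x26/x25/x24, and so on down to x2/x1/x0, matching the layout of arm64 register dumps.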
256 current->thread.uw.tp_value = 0; in tls_thread_flush()
261 * with a stale shadow state during context switch. in tls_thread_flush()
289 if (current->mm) in arch_dup_task_struct()
305 dst->thread.sve_state = NULL; in arch_dup_task_struct()
309 * In the unlikely event that we create a new thread with ZA in arch_dup_task_struct()
310 * enabled we should retain the ZA and ZT state so duplicate in arch_dup_task_struct()
313 * confusing the rest of the code ensure that we have a in arch_dup_task_struct()
316 if (thread_za_enabled(&src->thread)) { in arch_dup_task_struct()
317 dst->thread.sve_state = kzalloc(sve_state_size(src), in arch_dup_task_struct()
319 if (!dst->thread.sve_state) in arch_dup_task_struct()
320 return -ENOMEM; in arch_dup_task_struct()
322 dst->thread.sme_state = kmemdup(src->thread.sme_state, in arch_dup_task_struct()
325 if (!dst->thread.sme_state) { in arch_dup_task_struct()
326 kfree(dst->thread.sve_state); in arch_dup_task_struct()
327 dst->thread.sve_state = NULL; in arch_dup_task_struct()
328 return -ENOMEM; in arch_dup_task_struct()
331 dst->thread.sme_state = NULL; in arch_dup_task_struct()
335 dst->thread.fp_type = FP_STATE_FPSIMD; in arch_dup_task_struct()
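The arch_dup_task_struct() hits show the standard two-allocation error-unwind pattern: sve_state is zero-allocated, sme_state is duplicated with kmemdup(), and if the second allocation fails the first is freed and nulled so the caller never sees half-initialized state. Reduced to its shape (the struct and field names are generic placeholders, not this file's):

        struct buf_pair { void *a; void *b; };  /* stands in for the SVE/SME buffers */

        static int dup_buf_pair(struct buf_pair *dst, const struct buf_pair *src,
                                size_t a_size, size_t b_size)
        {
                dst->a = kzalloc(a_size, GFP_KERNEL);
                if (!dst->a)
                        return -ENOMEM;

                dst->b = kmemdup(src->b, b_size, GFP_KERNEL);
                if (!dst->b) {
                        kfree(dst->a);  /* unwind so the caller never sees half state */
                        dst->a = NULL;
                        return -ENOMEM;
                }
                return 0;
        }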
347 unsigned long clone_flags = args->flags; in copy_thread()
348 unsigned long stack_start = args->stack; in copy_thread()
349 unsigned long tls = args->tls; in copy_thread()
352 memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); in copy_thread()
356 * other recently-exited task, make sure p is disassociated from in copy_thread()
357 * any cpu that may have run that now-exited task recently. in copy_thread()
365 if (likely(!args->fn)) { in copy_thread()
367 childregs->regs[0] = 0; in copy_thread()
371 * out-of-sync with the saved value. in copy_thread()
375 p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); in copy_thread()
379 childregs->compat_sp = stack_start; in copy_thread()
381 childregs->sp = stack_start; in copy_thread()
385 * If a TLS pointer was passed to clone, use it for the new in copy_thread()
389 p->thread.uw.tp_value = tls; in copy_thread()
390 p->thread.tpidr2_el0 = 0; in copy_thread()
394 * A kthread has no context to ERET to, so ensure any buggy in copy_thread()
397 * When a user task is created from a kthread, childregs will in copy_thread()
401 childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT; in copy_thread()
403 p->thread.cpu_context.x19 = (unsigned long)args->fn; in copy_thread()
404 p->thread.cpu_context.x20 = (unsigned long)args->fn_arg; in copy_thread()
406 p->thread.cpu_context.pc = (unsigned long)ret_from_fork; in copy_thread()
407 p->thread.cpu_context.sp = (unsigned long)childregs; in copy_thread()
409 * For the benefit of the unwinder, set up childregs->stackframe in copy_thread()
412 p->thread.cpu_context.fp = (unsigned long)childregs->stackframe; in copy_thread()
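For a kernel thread there are no user registers to restore, so copy_thread() parks the entry point and its argument in the callee-saved registers x19/x20 of the saved cpu_context, points pc at ret_from_fork, and aims fp at childregs->stackframe so the unwinder terminates cleanly. A conceptual C rendering of what the assembly trampoline effectively does on first schedule (the function and accessor are illustrative; the real ret_from_fork is assembly):

        /* conceptual C equivalent of the arm64 ret_from_fork trampoline */
        static void ret_from_fork_sketch(struct cpu_context *ctx)
        {
                int (*fn)(void *) = (int (*)(void *))ctx->x19;
                void *arg = (void *)ctx->x20;

                if (fn)                 /* kernel thread: run the payload */
                        fn(arg);
                /* otherwise fall through and ERET to the childregs saved on the stack */
        }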
423 current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); in tls_preserve_current_state()
431 write_sysreg(next->thread.uw.tp_value, tpidrro_el0); in tls_thread_switch()
437 write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0); in tls_thread_switch()
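The tls_thread_switch() hits show the asymmetry between native and compat tasks: a 32-bit task's TLS lives in the read-only tpidrro_el0, a 64-bit task owns tpidr_el0 directly, and TPIDR2_EL0 is switched only when SME exists. A simplified sketch assembled from the fragments above; the upstream function has additional handling (such as clearing tpidrro_el0 for native tasks) that is omitted here:

        static void tls_switch_sketch(struct task_struct *next)
        {
                /* 32-bit tasks read TLS from the read-only tpidrro_el0 */
                if (is_compat_thread(task_thread_info(next)))
                        write_sysreg(next->thread.uw.tp_value, tpidrro_el0);

                /* 64-bit tasks own tpidr_el0 directly */
                write_sysreg(next->thread.uw.tp_value, tpidr_el0);

                /* TPIDR2_EL0 only exists with SME */
                if (system_supports_tpidr2())
                        write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
        }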
441 * Force SSBS state on context-switch, since it may be lost after migrating
442 * from a CPU which treats the bit as RES0 in a heterogeneous system.
450 if (unlikely(next->flags & PF_KTHREAD)) in ssbs_thread_switch()
455 * context-switch the PSTATE field. in ssbs_thread_switch()
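The SSBS comment describes a heterogeneity hazard: on CPUs with the SSBS extension the bit is context-switched as part of PSTATE, but after migrating from a CPU that treats it as RES0 the saved value may be stale, so the kernel re-applies the task's mitigation in software. Schematically (the two helpers are placeholders for the real capability check and the Spectre-v4 mitigation call):

        static void ssbs_switch_sketch(struct task_struct *next)
        {
                /* kthreads never return to userspace; nothing to fix up */
                if (next->flags & PF_KTHREAD)
                        return;

                /* if every CPU implements SSBS, hardware switches the PSTATE bit */
                if (all_cpus_have_ssbs())               /* placeholder */
                        return;

                /* otherwise re-apply the task's mitigation choice by hand */
                apply_ssbs_mitigation(next);            /* placeholder */
        }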
464 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
468 * __switch_to() a user task.
479 * Ensure access is disabled when switching to a 32bit task, ensure
480 * access is enabled when switching to a 64bit task.
502 * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
511 * in-kernel PAC. It will be cleared on kernel exit if needed. in update_sctlr_el1()
539 * the thread migrates to a different CPU. in __switch_to()
552 if (prev->thread.sctlr_user != next->thread.sctlr_user) in __switch_to()
553 update_sctlr_el1(next->thread.sctlr_user); in __switch_to()
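The comparison at lines 552-553 implements the optimisation promised by the comment at line 502: SCTLR_EL1 is only rewritten when the incoming task's cached copy actually differs from the outgoing task's. The pattern in isolation (a sketch mirroring the two lines above, not extra code from this file):

        /* only touch the register when the incoming task differs from the outgoing */
        static void switch_sctlr(struct task_struct *prev, struct task_struct *next)
        {
                if (prev->thread.sctlr_user == next->thread.sctlr_user)
                        return;         /* common case: skip the sysreg write */
                update_sctlr_el1(next->thread.sctlr_user);
        }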
571 wchan_info->pc = pc; in get_wchan_cb()
574 return wchan_info->count++ < 16; in get_wchan_cb()
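The get_wchan_cb() hits are the consumer side of the arm64 stack walker: the callback records the first program counter outside scheduler code and gives up after 16 frames so a corrupted stack cannot loop forever. Reconstructed around the two fragments (in_sched_functions() is the kernel's scheduler-text test; the callback signature matches the stack_trace_consume_fn convention):

        struct wchan_info {
                unsigned long pc;
                int count;
        };

        static bool get_wchan_cb(void *arg, unsigned long pc)
        {
                struct wchan_info *wchan_info = arg;

                if (!in_sched_functions(pc)) {
                        wchan_info->pc = pc;    /* found the blocking function */
                        return false;           /* stop the walk */
                }
                return wchan_info->count++ < 16;        /* bound the unwind depth */
        }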
596 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) in arch_align_stack()
597 sp -= get_random_u32_below(PAGE_SIZE); in arch_align_stack()
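arch_align_stack() randomizes the initial user stack pointer by up to a page unless the task opted out via ADDR_NO_RANDOMIZE, then re-aligns the result. The surrounding function plausibly reads as follows; treat the closing alignment line as a reconstruction rather than a quoted hit (16-byte stack alignment is the AAPCS64 requirement):

        unsigned long arch_align_stack(unsigned long sp)
        {
                if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                        sp -= get_random_u32_below(PAGE_SIZE);
                return sp & ~0xf;       /* AAPCS64: keep sp 16-byte aligned */
        }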
607 if ((hdr)->e_machine != EM_ARM) in compat_elf_check_arch()
610 if (!((hdr)->e_flags & EF_ARM_EABI_MASK)) in compat_elf_check_arch()
614 * Prevent execve() of a 32-bit program from a deadline task in compat_elf_check_arch()
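compat_elf_check_arch() accepts a 32-bit binary only if its ELF header names the ARM machine type and advertises an EABI version; the deadline-task check that follows exists because a 32-bit task may be runnable on only a subset of CPUs, which SCHED_DEADLINE's admission control cannot model. The two header checks in isolation, as a sketch built from the fragments:

        /* gate for 32-bit (compat) ELF images */
        if (hdr->e_machine != EM_ARM)
                return false;
        if (!(hdr->e_flags & EF_ARM_EABI_MASK))
                return false;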
634 * Restrict the CPU affinity mask for a 32-bit task so that in arch_setup_new_exec()
635 * it contains only 32-bit-capable CPUs. in arch_setup_new_exec()
638 * what would happen if the 64-bit-only CPUs were hot-unplugged in arch_setup_new_exec()
639 * at the point of execve(), although we try a bit harder to in arch_setup_new_exec()
648 current->mm->context.flags = mmflags; in arch_setup_new_exec()
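When a 32-bit image execs on an asymmetric system, arch_setup_new_exec() narrows the task's affinity to the 32-bit-capable CPUs using a scheduler helper. A hedged sketch of the call site; the real code only invokes the helper when 64-bit-only CPUs actually exist, a guard elided here:

        /* sketch: after exec of a 32-bit image on an asymmetric system */
        if (is_compat_task())
                force_compatible_cpus_allowed_ptr(current);     /* scheduler helper */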
671 return -EINVAL; in set_tagged_addr_ctrl()
678 return -EINVAL; in set_tagged_addr_ctrl()
685 return -EINVAL; in set_tagged_addr_ctrl()
688 return -EINVAL; in set_tagged_addr_ctrl()
701 return -EINVAL; in get_tagged_addr_ctrl()
733 return -EINVAL; in tagged_addr_init()
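The set/get_tagged_addr_ctrl() hits are the kernel side of the tagged address ABI, reached from userspace via prctl(). A minimal userspace example that asks the kernel to ignore the top byte of user pointers (the fallback #define covers older libc headers; constants match the upstream uapi values):

        #include <stdio.h>
        #include <sys/prctl.h>

        #ifndef PR_SET_TAGGED_ADDR_CTRL
        #define PR_SET_TAGGED_ADDR_CTRL 55
        #define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
        #endif

        int main(void)
        {
                /* enable the relaxed tagged-address ABI for this task */
                if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
                        perror("PR_SET_TAGGED_ADDR_CTRL");
                return 0;
        }

The -EINVAL returns above are the kernel rejecting unsupported flag combinations, out-of-range tag masks, or use on hardware without the feature.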
752 if (!(state->flags & ARM64_ELF_BTI)) in arch_elf_adjust_prot()
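arch_elf_adjust_prot() bails out early when the ELF image was not built with BTI annotations; when it was, executable segments gain the extra PROT_BTI bit so indirect branch targets are enforced. The tail of the function plausibly continues as below (a reconstruction consistent with the hit above, not a quoted fragment):

        if (!(state->flags & ARM64_ELF_BTI))
                return prot;

        if (prot & PROT_EXEC)           /* guarded pages only make sense for code */
                prot |= PROT_BTI;

        return prot;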