Lines Matching +full:data +full:- +full:addr
1 // SPDX-License-Identifier: GPL-2.0
46 struct thread_struct *thread = &task->thread; in update_cr_regs()
60 if (task->thread.per_flags & PER_FLAG_NO_TE) in update_cr_regs()
64 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { in update_cr_regs()
65 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) in update_cr_regs()
74 if (task->thread.gs_cb) in update_cr_regs()
85 new.control = thread->per_user.control; in update_cr_regs()
86 new.start = thread->per_user.start; in update_cr_regs()
87 new.end = thread->per_user.end; in update_cr_regs()
101 new.end = -1UL; in update_cr_regs()
106 regs->psw.mask &= ~PSW_MASK_PER; in update_cr_regs()
109 regs->psw.mask |= PSW_MASK_PER; in update_cr_regs()
140 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); in ptrace_disable()
141 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); in ptrace_disable()
144 task->thread.per_flags = 0; in ptrace_disable()
150 addr_t addr) in __peek_user_per() argument
154 if (addr == (addr_t) &dummy->cr9) in __peek_user_per()
157 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per()
158 else if (addr == (addr_t) &dummy->cr10) in __peek_user_per()
161 0 : child->thread.per_user.start; in __peek_user_per()
162 else if (addr == (addr_t) &dummy->cr11) in __peek_user_per()
165 -1UL : child->thread.per_user.end; in __peek_user_per()
166 else if (addr == (addr_t) &dummy->bits) in __peek_user_per()
167 /* Single-step bit. */ in __peek_user_per()
169 (1UL << (BITS_PER_LONG - 1)) : 0; in __peek_user_per()
170 else if (addr == (addr_t) &dummy->starting_addr) in __peek_user_per()
172 return child->thread.per_user.start; in __peek_user_per()
173 else if (addr == (addr_t) &dummy->ending_addr) in __peek_user_per()
175 return child->thread.per_user.end; in __peek_user_per()
176 else if (addr == (addr_t) &dummy->perc_atmid) in __peek_user_per()
179 child->thread.per_event.cause << (BITS_PER_LONG - 16); in __peek_user_per()
180 else if (addr == (addr_t) &dummy->address) in __peek_user_per()
182 return child->thread.per_event.address; in __peek_user_per()
183 else if (addr == (addr_t) &dummy->access_id) in __peek_user_per()
186 child->thread.per_event.paid << (BITS_PER_LONG - 8); in __peek_user_per()
191 * Read the word at offset addr from the user area of a process. in __peek_user()
199 static unsigned long __peek_user(struct task_struct *child, addr_t addr) in __peek_user() argument
204 if (addr < (addr_t) &dummy->regs.acrs) { in __peek_user()
208 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); in __peek_user()
209 if (addr == (addr_t) &dummy->regs.psw.mask) { in __peek_user()
215 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { in __peek_user()
219 offset = addr - (addr_t) &dummy->regs.acrs; in __peek_user()
225 if (addr == (addr_t) &dummy->regs.acrs[15]) in __peek_user()
226 tmp = ((unsigned long) child->thread.acrs[15]) << 32; in __peek_user()
228 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); in __peek_user()
230 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { in __peek_user()
234 tmp = (addr_t) task_pt_regs(child)->orig_gpr2; in __peek_user()
236 } else if (addr < (addr_t) &dummy->regs.fp_regs) { in __peek_user()
243 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { in __peek_user()
247 tmp = child->thread.fpu.fpc; in __peek_user()
248 tmp <<= BITS_PER_LONG - 32; in __peek_user()
250 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { in __peek_user()
252 * floating point regs. are either in child->thread.fpu in __peek_user()
253 * or the child->thread.fpu.vxrs array in __peek_user()
255 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; in __peek_user()
258 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user()
261 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user()
263 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { in __peek_user()
267 addr -= (addr_t) &dummy->regs.per_info; in __peek_user()
268 tmp = __peek_user_per(child, addr); in __peek_user()
277 peek_user(struct task_struct *child, addr_t addr, addr_t data) in peek_user() argument
286 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && in peek_user()
287 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) in peek_user()
289 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in peek_user()
290 return -EIO; in peek_user()
292 tmp = __peek_user(child, addr); in peek_user()
293 return put_user(tmp, (addr_t __user *) data); in peek_user()
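For context, peek_user() is where a PTRACE_PEEKUSR request ends up: addr is an offset into struct user and the result is stored to the word the tracer passed in data. Below is a minimal tracer-side sketch, assuming glibc's ptrace() wrapper (which returns the peeked word directly and spells the request PTRACE_PEEKUSER) and the s390 struct user layout from <sys/user.h>.

/* Hypothetical tracer helper: read one word of the user area of a
 * stopped child. With the glibc wrapper the peeked value is the return
 * value, so errors are detected via errno. */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static long peek_word(pid_t pid, unsigned long offset)
{
        long val;

        errno = 0;
        val = ptrace(PTRACE_PEEKUSER, pid, (void *) offset, NULL);
        if (val == -1 && errno)
                perror("PTRACE_PEEKUSER");
        return val;
}

/* Example: dump gpr 2 and the PSW address of an attached, stopped child. */
static void dump_child(pid_t pid)
{
        printf("gpr2     = %#lx\n", peek_word(pid, offsetof(struct user, regs.gprs[2])));
        printf("psw.addr = %#lx\n", peek_word(pid, offsetof(struct user, regs.psw.addr)));
}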
297 addr_t addr, addr_t data) in __poke_user_per() argument
313 if (addr == (addr_t) &dummy->cr9) in __poke_user_per()
315 child->thread.per_user.control = in __poke_user_per()
316 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per()
317 else if (addr == (addr_t) &dummy->starting_addr) in __poke_user_per()
319 child->thread.per_user.start = data; in __poke_user_per()
320 else if (addr == (addr_t) &dummy->ending_addr) in __poke_user_per()
322 child->thread.per_user.end = data; in __poke_user_per()
325 static void fixup_int_code(struct task_struct *child, addr_t data) in fixup_int_code() argument
328 int ilc = regs->int_code >> 16; in fixup_int_code()
334 if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), in fixup_int_code()
342 regs->int_code = 0x20000 | (data & 0xffff); in fixup_int_code()
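fixup_int_code() exists because a tracer may replace the syscall number at the syscall-entry stop by writing gpr 2 (see the offsetof(struct user, regs.gprs[2]) checks in __poke_user() and __poke_user_compat() further down); the interruption code is then re-derived so it stays consistent with the new number. A hedged tracer-side sketch of that write, again assuming the glibc wrapper and <sys/user.h> layout:

/* Hypothetical: at a PTRACE_SYSCALL entry stop, rewrite the syscall the
 * child is about to execute by poking gpr 2. The kernel-side poke path
 * calls fixup_int_code() to keep regs->int_code in sync. */
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static int rewrite_syscall_nr(pid_t pid, long new_nr)
{
        return ptrace(PTRACE_POKEUSER, pid,
                      (void *) offsetof(struct user, regs.gprs[2]),
                      (void *) new_nr);
}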
345 * Write a word to the user area of a process at location addr. in __poke_user()
350 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) in __poke_user() argument
356 if (addr < (addr_t) &dummy->regs.acrs) { in __poke_user()
361 if (addr == (addr_t) &dummy->regs.psw.mask) { in __poke_user()
365 if ((data ^ PSW_USER_BITS) & ~mask) in __poke_user()
367 return -EINVAL; in __poke_user()
368 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) in __poke_user()
369 /* Invalid address-space-control bits */ in __poke_user()
370 return -EINVAL; in __poke_user()
371 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) in __poke_user()
373 return -EINVAL; in __poke_user()
377 addr == offsetof(struct user, regs.gprs[2])) in __poke_user()
378 fixup_int_code(child, data); in __poke_user()
379 *(addr_t *)((addr_t) &regs->psw + addr) = data; in __poke_user()
381 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { in __poke_user()
385 offset = addr - (addr_t) &dummy->regs.acrs; in __poke_user()
392 if (addr == (addr_t) &dummy->regs.acrs[15]) in __poke_user()
393 child->thread.acrs[15] = (unsigned int) (data >> 32); in __poke_user()
395 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; in __poke_user()
397 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { in __poke_user()
401 task_pt_regs(child)->orig_gpr2 = data; in __poke_user()
403 } else if (addr < (addr_t) &dummy->regs.fp_regs) { in __poke_user()
410 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { in __poke_user()
414 if ((unsigned int) data != 0 || in __poke_user()
415 test_fp_ctl(data >> (BITS_PER_LONG - 32))) in __poke_user()
416 return -EINVAL; in __poke_user()
417 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); in __poke_user()
419 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { in __poke_user()
421 * floating point regs. are either in child->thread.fpu in __poke_user()
422 * or the child->thread.fpu.vxrs array in __poke_user()
424 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; in __poke_user()
427 child->thread.fpu.vxrs + 2*offset) = data; in __poke_user()
430 child->thread.fpu.fprs + offset) = data; in __poke_user()
432 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { in __poke_user()
436 addr -= (addr_t) &dummy->regs.per_info; in __poke_user()
437 __poke_user_per(child, addr, data); in __poke_user()
444 static int poke_user(struct task_struct *child, addr_t addr, addr_t data) in poke_user() argument
453 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && in poke_user()
454 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) in poke_user()
456 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in poke_user()
457 return -EIO; in poke_user()
459 return __poke_user(child, addr, data); in poke_user()
463 unsigned long addr, unsigned long data) in arch_ptrace() argument
470 /* read the word at location addr in the USER area. */ in arch_ptrace()
471 return peek_user(child, addr, data); in arch_ptrace()
474 /* write the word at location addr in the USER area */ in arch_ptrace()
475 return poke_user(child, addr, data); in arch_ptrace()
479 if (copy_from_user(&parea, (void __force __user *) addr, in arch_ptrace()
481 return -EFAULT; in arch_ptrace()
482 addr = parea.kernel_addr; in arch_ptrace()
483 data = parea.process_addr; in arch_ptrace()
487 ret = peek_user(child, addr, data); in arch_ptrace()
491 (addr_t __force __user *) data)) in arch_ptrace()
492 return -EFAULT; in arch_ptrace()
493 ret = poke_user(child, addr, utmp); in arch_ptrace()
497 addr += sizeof(unsigned long); in arch_ptrace()
498 data += sizeof(unsigned long); in arch_ptrace()
503 put_user(child->thread.last_break, in arch_ptrace()
504 (unsigned long __user *) data); in arch_ptrace()
508 return -EIO; in arch_ptrace()
509 child->thread.per_flags &= ~PER_FLAG_NO_TE; in arch_ptrace()
513 return -EIO; in arch_ptrace()
514 child->thread.per_flags |= PER_FLAG_NO_TE; in arch_ptrace()
515 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
518 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) in arch_ptrace()
519 return -EIO; in arch_ptrace()
520 switch (data) { in arch_ptrace()
522 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
525 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
526 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
529 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
530 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
533 return -EINVAL; in arch_ptrace()
537 return ptrace_request(child, request, addr, data); in arch_ptrace()
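The PTRACE_PEEKUSR_AREA / PTRACE_POKEUSR_AREA cases above take a ptrace_area descriptor through addr: kernel_addr is the offset into struct user, process_addr points at the tracer's buffer, and the kernel walks the range one unsigned long at a time. A tracer-side sketch, assuming the s390-specific ptrace_area and PTRACE_PEEKUSR_AREA definitions from <asm/ptrace.h> (header interplay with <sys/ptrace.h> may need care on a real build):

/* Hypothetical: read all 16 GPRs of a stopped child with a single
 * PTRACE_PEEKUSR_AREA request instead of 16 PTRACE_PEEKUSER calls. */
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <asm/ptrace.h>         /* ptrace_area, PTRACE_PEEKUSR_AREA */

static int read_gprs(pid_t pid, unsigned long gprs[16])
{
        ptrace_area parea = {
                .len          = 16 * sizeof(unsigned long),
                .kernel_addr  = offsetof(struct user, regs.gprs),
                .process_addr = (unsigned long) gprs,
        };

        return ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL);
}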
553 * a 64 bit program is a no-no.
560 addr_t addr) in __peek_user_per_compat() argument
564 if (addr == (addr_t) &dummy32->cr9) in __peek_user_per_compat()
567 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per_compat()
568 else if (addr == (addr_t) &dummy32->cr10) in __peek_user_per_compat()
571 0 : child->thread.per_user.start; in __peek_user_per_compat()
572 else if (addr == (addr_t) &dummy32->cr11) in __peek_user_per_compat()
575 PSW32_ADDR_INSN : child->thread.per_user.end; in __peek_user_per_compat()
576 else if (addr == (addr_t) &dummy32->bits) in __peek_user_per_compat()
577 /* Single-step bit. */ in __peek_user_per_compat()
580 else if (addr == (addr_t) &dummy32->starting_addr) in __peek_user_per_compat()
582 return (__u32) child->thread.per_user.start; in __peek_user_per_compat()
583 else if (addr == (addr_t) &dummy32->ending_addr) in __peek_user_per_compat()
585 return (__u32) child->thread.per_user.end; in __peek_user_per_compat()
586 else if (addr == (addr_t) &dummy32->perc_atmid) in __peek_user_per_compat()
588 return (__u32) child->thread.per_event.cause << 16; in __peek_user_per_compat()
589 else if (addr == (addr_t) &dummy32->address) in __peek_user_per_compat()
591 return (__u32) child->thread.per_event.address; in __peek_user_per_compat()
592 else if (addr == (addr_t) &dummy32->access_id) in __peek_user_per_compat()
594 return (__u32) child->thread.per_event.paid << 24; in __peek_user_per_compat()
601 static u32 __peek_user_compat(struct task_struct *child, addr_t addr) in __peek_user_compat() argument
607 if (addr < (addr_t) &dummy32->regs.acrs) { in __peek_user_compat()
612 if (addr == (addr_t) &dummy32->regs.psw.mask) { in __peek_user_compat()
614 tmp = (__u32)(regs->psw.mask >> 32); in __peek_user_compat()
617 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { in __peek_user_compat()
619 tmp = (__u32) regs->psw.addr | in __peek_user_compat()
620 (__u32)(regs->psw.mask & PSW_MASK_BA); in __peek_user_compat()
622 /* gpr 0-15 */ in __peek_user_compat()
623 tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4); in __peek_user_compat()
625 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { in __peek_user_compat()
629 offset = addr - (addr_t) &dummy32->regs.acrs; in __peek_user_compat()
630 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); in __peek_user_compat()
632 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { in __peek_user_compat()
636 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); in __peek_user_compat()
638 } else if (addr < (addr_t) &dummy32->regs.fp_regs) { in __peek_user_compat()
645 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { in __peek_user_compat()
649 tmp = child->thread.fpu.fpc; in __peek_user_compat()
651 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { in __peek_user_compat()
653 * floating point regs. are either in child->thread.fpu in __peek_user_compat()
654 * or the child->thread.fpu.vxrs array in __peek_user_compat()
656 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; in __peek_user_compat()
659 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user_compat()
662 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user_compat()
664 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { in __peek_user_compat()
668 addr -= (addr_t) &dummy32->regs.per_info; in __peek_user_compat()
669 tmp = __peek_user_per_compat(child, addr); in __peek_user_compat()
678 addr_t addr, addr_t data) in peek_user_compat() argument
682 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) in peek_user_compat()
683 return -EIO; in peek_user_compat()
685 tmp = __peek_user_compat(child, addr); in peek_user_compat()
686 return put_user(tmp, (__u32 __user *) data); in peek_user_compat()
693 addr_t addr, __u32 data) in __poke_user_per_compat() argument
697 if (addr == (addr_t) &dummy32->cr9) in __poke_user_per_compat()
699 child->thread.per_user.control = in __poke_user_per_compat()
700 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per_compat()
701 else if (addr == (addr_t) &dummy32->starting_addr) in __poke_user_per_compat()
703 child->thread.per_user.start = data; in __poke_user_per_compat()
704 else if (addr == (addr_t) &dummy32->ending_addr) in __poke_user_per_compat()
706 child->thread.per_user.end = data; in __poke_user_per_compat()
713 addr_t addr, addr_t data) in __poke_user_compat() argument
716 __u32 tmp = (__u32) data; in __poke_user_compat()
719 if (addr < (addr_t) &dummy32->regs.acrs) { in __poke_user_compat()
724 if (addr == (addr_t) &dummy32->regs.psw.mask) { in __poke_user_compat()
731 return -EINVAL; in __poke_user_compat()
732 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) in __poke_user_compat()
733 /* Invalid address-space-control bits */ in __poke_user_compat()
734 return -EINVAL; in __poke_user_compat()
735 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | in __poke_user_compat()
736 (regs->psw.mask & PSW_MASK_BA) | in __poke_user_compat()
738 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { in __poke_user_compat()
740 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; in __poke_user_compat()
742 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | in __poke_user_compat()
747 addr == offsetof(struct compat_user, regs.gprs[2])) in __poke_user_compat()
748 fixup_int_code(child, data); in __poke_user_compat()
749 /* gpr 0-15 */ in __poke_user_compat()
750 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; in __poke_user_compat()
752 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { in __poke_user_compat()
756 offset = addr - (addr_t) &dummy32->regs.acrs; in __poke_user_compat()
757 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; in __poke_user_compat()
759 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { in __poke_user_compat()
763 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; in __poke_user_compat()
765 } else if (addr < (addr_t) &dummy32->regs.fp_regs) { in __poke_user_compat()
772 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { in __poke_user_compat()
777 return -EINVAL; in __poke_user_compat()
778 child->thread.fpu.fpc = data; in __poke_user_compat()
780 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { in __poke_user_compat()
782 * floating point regs. are either in child->thread.fpu in __poke_user_compat()
783 * or the child->thread.fpu.vxrs array in __poke_user_compat()
785 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; in __poke_user_compat()
788 child->thread.fpu.vxrs + 2*offset) = tmp; in __poke_user_compat()
791 child->thread.fpu.fprs + offset) = tmp; in __poke_user_compat()
793 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { in __poke_user_compat()
797 addr -= (addr_t) &dummy32->regs.per_info; in __poke_user_compat()
798 __poke_user_per_compat(child, addr, data); in __poke_user_compat()
805 addr_t addr, addr_t data) in poke_user_compat() argument
807 if (!is_compat_task() || (addr & 3) || in poke_user_compat()
808 addr > sizeof(struct compat_user) - 3) in poke_user_compat()
809 return -EIO; in poke_user_compat()
811 return __poke_user_compat(child, addr, data); in poke_user_compat()
817 unsigned long addr = caddr; in compat_arch_ptrace() local
818 unsigned long data = cdata; in compat_arch_ptrace() local
824 /* read the word at location addr in the USER area. */ in compat_arch_ptrace()
825 return peek_user_compat(child, addr, data); in compat_arch_ptrace()
828 /* write the word at location addr in the USER area */ in compat_arch_ptrace()
829 return poke_user_compat(child, addr, data); in compat_arch_ptrace()
833 if (copy_from_user(&parea, (void __force __user *) addr, in compat_arch_ptrace()
835 return -EFAULT; in compat_arch_ptrace()
836 addr = parea.kernel_addr; in compat_arch_ptrace()
837 data = parea.process_addr; in compat_arch_ptrace()
841 ret = peek_user_compat(child, addr, data); in compat_arch_ptrace()
845 (__u32 __force __user *) data)) in compat_arch_ptrace()
846 return -EFAULT; in compat_arch_ptrace()
847 ret = poke_user_compat(child, addr, utmp); in compat_arch_ptrace()
851 addr += sizeof(unsigned int); in compat_arch_ptrace()
852 data += sizeof(unsigned int); in compat_arch_ptrace()
857 put_user(child->thread.last_break, in compat_arch_ptrace()
858 (unsigned int __user *) data); in compat_arch_ptrace()
861 return compat_ptrace_request(child, request, addr, data); in compat_arch_ptrace()
867 unsigned long mask = -1UL; in do_syscall_trace_enter()
868 long ret = -1; in do_syscall_trace_enter()
892 sd.instruction_pointer = regs->psw.addr & 0x7fffffff; in do_syscall_trace_enter()
895 sd.instruction_pointer = regs->psw.addr; in do_syscall_trace_enter()
899 sd.nr = regs->int_code & 0xffff; in do_syscall_trace_enter()
900 sd.args[0] = regs->orig_gpr2 & mask; in do_syscall_trace_enter()
901 sd.args[1] = regs->gprs[3] & mask; in do_syscall_trace_enter()
902 sd.args[2] = regs->gprs[4] & mask; in do_syscall_trace_enter()
903 sd.args[3] = regs->gprs[5] & mask; in do_syscall_trace_enter()
904 sd.args[4] = regs->gprs[6] & mask; in do_syscall_trace_enter()
905 sd.args[5] = regs->gprs[7] & mask; in do_syscall_trace_enter()
907 if (__secure_computing(&sd) == -1) in do_syscall_trace_enter()
913 trace_sys_enter(regs, regs->int_code & 0xffff); in do_syscall_trace_enter()
916 audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask, in do_syscall_trace_enter()
917 regs->gprs[3] & mask, regs->gprs[4] & mask, in do_syscall_trace_enter()
918 regs->gprs[5] & mask); in do_syscall_trace_enter()
920 if ((signed long)regs->gprs[2] >= NR_syscalls) { in do_syscall_trace_enter()
921 regs->gprs[2] = -ENOSYS; in do_syscall_trace_enter()
922 ret = -ENOSYS; in do_syscall_trace_enter()
924 return regs->gprs[2]; in do_syscall_trace_enter()
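The seccomp_data fields populated above (an arch-dependent instruction pointer, sd.nr from the low 16 bits of int_code, and the six arguments taken from orig_gpr2 and gprs 3-7, masked to 31 bits for compat tasks) are exactly what a seccomp BPF filter installed by the traced task inspects in __secure_computing(). As a consumer-side illustration, here is a minimal sketch using the generic seccomp UAPI; it only checks the architecture and then allows everything, purely to show which seccomp_data offsets the assignments above feed:

/* Illustrative seccomp filter: load the fields do_syscall_trace_enter()
 * fills in (arch and nr here), kill on a foreign architecture, allow
 * otherwise. A real filter would also match on nr and args[]. */
#include <stddef.h>
#include <sys/prctl.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_filter(void)
{
        struct sock_filter insns[] = {
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, arch)),
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_S390X, 1, 0),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len    = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };

        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}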
935 trace_sys_exit(regs, regs->gprs[2]); in do_syscall_trace_exit()
951 save_access_regs(target->thread.acrs); in s390_regs_get()
966 save_access_regs(target->thread.acrs); in s390_regs_set()
972 count -= sizeof(*k); in s390_regs_set()
983 count -= sizeof(*u); in s390_regs_set()
989 restore_access_regs(target->thread.acrs); in s390_regs_set()
1003 fp_regs.fpc = target->thread.fpu.fpc; in s390_fpregs_get()
1004 fpregs_store(&fp_regs, &target->thread.fpu); in s390_fpregs_get()
1021 convert_vx_to_fp(fprs, target->thread.fpu.vxrs); in s390_fpregs_set()
1023 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs)); in s390_fpregs_set()
1027 u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; in s390_fpregs_set()
1033 return -EINVAL; in s390_fpregs_set()
1034 target->thread.fpu.fpc = ufpc[0]; in s390_fpregs_set()
1039 fprs, offsetof(s390_fp_regs, fprs), -1); in s390_fpregs_set()
1044 convert_fp_to_vx(target->thread.fpu.vxrs, fprs); in s390_fpregs_set()
1046 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); in s390_fpregs_set()
1055 return membuf_store(&to, target->thread.last_break); in s390_last_break_get()
1072 if (!(regs->int_code & 0x200)) in s390_tdb_get()
1073 return -ENODATA; in s390_tdb_get()
1074 return membuf_write(&to, target->thread.trap_tdb, 256); in s390_tdb_get()
1093 return -ENODEV; in s390_vxrs_low_get()
1097 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); in s390_vxrs_low_get()
1110 return -ENODEV; in s390_vxrs_low_set()
1115 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); in s390_vxrs_low_set()
1117 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); in s390_vxrs_low_set()
1120 *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i]; in s390_vxrs_low_set()
1130 return -ENODEV; in s390_vxrs_high_get()
1133 return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW, in s390_vxrs_high_get()
1145 return -ENODEV; in s390_vxrs_high_set()
1150 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1); in s390_vxrs_high_set()
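The two vector regsets above, exported as NT_S390_VXRS_LOW (the low 64-bit halves of V0-V15, whose high halves are already visible as FPRs) and NT_S390_VXRS_HIGH (V16-V31 in full), are reached from user space through PTRACE_GETREGSET/PTRACE_SETREGSET. A hedged sketch of the read side, assuming the NT_S390_VXRS_LOW constant from <elf.h>; both handlers return -ENODEV when the vector facility is absent:

/* Hypothetical: fetch the lower halves of V0-V15 from a stopped child.
 * The kernel side of this request is s390_vxrs_low_get() above. */
#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int read_vxrs_low(pid_t pid, uint64_t vxrs_low[16])
{
        struct iovec iov = {
                .iov_base = vxrs_low,
                .iov_len  = 16 * sizeof(uint64_t),
        };

        return ptrace(PTRACE_GETREGSET, pid,
                      (void *)(uintptr_t) NT_S390_VXRS_LOW, &iov);
}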
1158 return membuf_store(&to, target->thread.system_call); in s390_system_call_get()
1166 unsigned int *data = &target->thread.system_call; in s390_system_call_set() local
1168 data, 0, sizeof(unsigned int)); in s390_system_call_set()
1175 struct gs_cb *data = target->thread.gs_cb; in s390_gs_cb_get() local
1178 return -ENODEV; in s390_gs_cb_get()
1179 if (!data) in s390_gs_cb_get()
1180 return -ENODATA; in s390_gs_cb_get()
1182 save_gs_cb(data); in s390_gs_cb_get()
1183 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_cb_get()
1191 struct gs_cb gs_cb = { }, *data = NULL; in s390_gs_cb_set() local
1195 return -ENODEV; in s390_gs_cb_set()
1196 if (!target->thread.gs_cb) { in s390_gs_cb_set()
1197 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_cb_set()
1198 if (!data) in s390_gs_cb_set()
1199 return -ENOMEM; in s390_gs_cb_set()
1201 if (!target->thread.gs_cb) in s390_gs_cb_set()
1206 gs_cb = *target->thread.gs_cb; in s390_gs_cb_set()
1210 kfree(data); in s390_gs_cb_set()
1211 return -EFAULT; in s390_gs_cb_set()
1214 if (!target->thread.gs_cb) in s390_gs_cb_set()
1215 target->thread.gs_cb = data; in s390_gs_cb_set()
1216 *target->thread.gs_cb = gs_cb; in s390_gs_cb_set()
1219 restore_gs_cb(target->thread.gs_cb); in s390_gs_cb_set()
1229 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_get() local
1232 return -ENODEV; in s390_gs_bc_get()
1233 if (!data) in s390_gs_bc_get()
1234 return -ENODATA; in s390_gs_bc_get()
1235 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_bc_get()
1243 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_set() local
1246 return -ENODEV; in s390_gs_bc_set()
1247 if (!data) { in s390_gs_bc_set()
1248 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_bc_set()
1249 if (!data) in s390_gs_bc_set()
1250 return -ENOMEM; in s390_gs_bc_set()
1251 target->thread.gs_bc_cb = data; in s390_gs_bc_set()
1254 data, 0, sizeof(struct gs_cb)); in s390_gs_bc_set()
1259 return (cb->rca & 0x1f) == 0 && in is_ri_cb_valid()
1260 (cb->roa & 0xfff) == 0 && in is_ri_cb_valid()
1261 (cb->rla & 0xfff) == 0xfff && in is_ri_cb_valid()
1262 cb->s == 1 && in is_ri_cb_valid()
1263 cb->k == 1 && in is_ri_cb_valid()
1264 cb->h == 0 && in is_ri_cb_valid()
1265 cb->reserved1 == 0 && in is_ri_cb_valid()
1266 cb->ps == 1 && in is_ri_cb_valid()
1267 cb->qs == 0 && in is_ri_cb_valid()
1268 cb->pc == 1 && in is_ri_cb_valid()
1269 cb->qc == 0 && in is_ri_cb_valid()
1270 cb->reserved2 == 0 && in is_ri_cb_valid()
1271 cb->reserved3 == 0 && in is_ri_cb_valid()
1272 cb->reserved4 == 0 && in is_ri_cb_valid()
1273 cb->reserved5 == 0 && in is_ri_cb_valid()
1274 cb->reserved6 == 0 && in is_ri_cb_valid()
1275 cb->reserved7 == 0 && in is_ri_cb_valid()
1276 cb->reserved8 == 0 && in is_ri_cb_valid()
1277 cb->rla >= cb->roa && in is_ri_cb_valid()
1278 cb->rca >= cb->roa && in is_ri_cb_valid()
1279 cb->rca <= cb->rla+1 && in is_ri_cb_valid()
1280 cb->m < 3; in is_ri_cb_valid()
1287 struct runtime_instr_cb *data = target->thread.ri_cb; in s390_runtime_instr_get() local
1290 return -ENODEV; in s390_runtime_instr_get()
1291 if (!data) in s390_runtime_instr_get()
1292 return -ENODATA; in s390_runtime_instr_get()
1294 return membuf_write(&to, data, sizeof(struct runtime_instr_cb)); in s390_runtime_instr_get()
1302 struct runtime_instr_cb ri_cb = { }, *data = NULL; in s390_runtime_instr_set() local
1306 return -ENODEV; in s390_runtime_instr_set()
1308 if (!target->thread.ri_cb) { in s390_runtime_instr_set()
1309 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_runtime_instr_set()
1310 if (!data) in s390_runtime_instr_set()
1311 return -ENOMEM; in s390_runtime_instr_set()
1314 if (target->thread.ri_cb) { in s390_runtime_instr_set()
1318 ri_cb = *target->thread.ri_cb; in s390_runtime_instr_set()
1324 kfree(data); in s390_runtime_instr_set()
1325 return -EFAULT; in s390_runtime_instr_set()
1329 kfree(data); in s390_runtime_instr_set()
1330 return -EINVAL; in s390_runtime_instr_set()
1338 if (!target->thread.ri_cb) in s390_runtime_instr_set()
1339 target->thread.ri_cb = data; in s390_runtime_instr_set()
1340 *target->thread.ri_cb = ri_cb; in s390_runtime_instr_set()
1342 load_runtime_instr_cb(target->thread.ri_cb); in s390_runtime_instr_set()
1446 save_access_regs(target->thread.acrs); in s390_compat_regs_get()
1461 save_access_regs(target->thread.acrs); in s390_compat_regs_set()
1467 count -= sizeof(*k); in s390_compat_regs_set()
1478 count -= sizeof(*u); in s390_compat_regs_set()
1484 restore_access_regs(target->thread.acrs); in s390_compat_regs_set()
1496 gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs; in s390_compat_regs_high_get()
1511 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)]; in s390_compat_regs_high_set()
1517 count -= sizeof(*k); in s390_compat_regs_high_set()
1528 count -= sizeof(*u); in s390_compat_regs_high_set()
1539 compat_ulong_t last_break = target->thread.last_break; in s390_compat_last_break_get()
1669 return regs->gprs[offset]; in regs_get_register()
1677 return -EINVAL; in regs_query_register_offset()
1679 return -EINVAL; in regs_query_register_offset()
1681 return -EINVAL; in regs_query_register_offset()
1692 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) in regs_within_kernel_stack() argument
1696 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); in regs_within_kernel_stack()
1700 * regs_get_kernel_stack_nth() - get Nth entry of the stack
1710 unsigned long addr; in regs_get_kernel_stack_nth() local
1712 addr = kernel_stack_pointer(regs) + n * sizeof(long); in regs_get_kernel_stack_nth()
1713 if (!regs_within_kernel_stack(regs, addr)) in regs_get_kernel_stack_nth()
1715 return *(unsigned long *)addr; in regs_get_kernel_stack_nth()
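regs_within_kernel_stack() boils down to an alignment trick: kernel stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned, so two addresses lie on the same stack iff masking off the low bits yields the same base. A stand-alone illustration of that containment test (THREAD_SIZE below is a stand-in value; only its power-of-two property matters):

/* Stand-alone illustration of the containment check used by
 * regs_within_kernel_stack(). THREAD_SIZE here is a stand-in; the real
 * value comes from asm/thread_info.h and is a power of two. */
#include <stdbool.h>
#include <stdio.h>

#define THREAD_SIZE (16UL * 1024)       /* stand-in, power of two */

static bool within_same_stack(unsigned long ksp, unsigned long addr)
{
        return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

int main(void)
{
        unsigned long ksp = 0x3ffc1234000UL + 0x800;    /* pretend stack pointer */

        printf("%d\n", within_same_stack(ksp, ksp + 8 * sizeof(long)));  /* 1: still on this stack */
        printf("%d\n", within_same_stack(ksp, ksp + THREAD_SIZE));       /* 0: beyond the stack area */
        return 0;
}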