Lines Matching +full:data +full:- +full:addr

1 // SPDX-License-Identifier: GPL-2.0
44 struct thread_struct *thread = &task->thread; in update_cr_regs()
58 if (task->thread.per_flags & PER_FLAG_NO_TE) in update_cr_regs()
62 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { in update_cr_regs()
63 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) in update_cr_regs()
72 if (task->thread.gs_cb) in update_cr_regs()
83 new.control = thread->per_user.control; in update_cr_regs()
84 new.start = thread->per_user.start; in update_cr_regs()
85 new.end = thread->per_user.end; in update_cr_regs()
99 new.end = -1UL; in update_cr_regs()
104 regs->psw.mask &= ~PSW_MASK_PER; in update_cr_regs()
107 regs->psw.mask |= PSW_MASK_PER; in update_cr_regs()
138 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); in ptrace_disable()
139 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); in ptrace_disable()
142 task->thread.per_flags = 0; in ptrace_disable()
148 addr_t addr) in __peek_user_per() argument
152 if (addr == (addr_t) &dummy->cr9) in __peek_user_per()
155 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per()
156 else if (addr == (addr_t) &dummy->cr10) in __peek_user_per()
159 0 : child->thread.per_user.start; in __peek_user_per()
160 else if (addr == (addr_t) &dummy->cr11) in __peek_user_per()
163 -1UL : child->thread.per_user.end; in __peek_user_per()
164 else if (addr == (addr_t) &dummy->bits) in __peek_user_per()
165 /* Single-step bit. */ in __peek_user_per()
167 (1UL << (BITS_PER_LONG - 1)) : 0; in __peek_user_per()
168 else if (addr == (addr_t) &dummy->starting_addr) in __peek_user_per()
170 return child->thread.per_user.start; in __peek_user_per()
171 else if (addr == (addr_t) &dummy->ending_addr) in __peek_user_per()
173 return child->thread.per_user.end; in __peek_user_per()
174 else if (addr == (addr_t) &dummy->perc_atmid) in __peek_user_per()
177 child->thread.per_event.cause << (BITS_PER_LONG - 16); in __peek_user_per()
178 else if (addr == (addr_t) &dummy->address) in __peek_user_per()
180 return child->thread.per_event.address; in __peek_user_per()
181 else if (addr == (addr_t) &dummy->access_id) in __peek_user_per()
184 child->thread.per_event.paid << (BITS_PER_LONG - 8); in __peek_user_per()
189 * Read the word at offset addr from the user area of a process. The
197 static unsigned long __peek_user(struct task_struct *child, addr_t addr) in __peek_user() argument
202 if (addr < (addr_t) &dummy->regs.acrs) { in __peek_user()
206 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); in __peek_user()
207 if (addr == (addr_t) &dummy->regs.psw.mask) { in __peek_user()
213 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { in __peek_user()
217 offset = addr - (addr_t) &dummy->regs.acrs; in __peek_user()
223 if (addr == (addr_t) &dummy->regs.acrs[15]) in __peek_user()
224 tmp = ((unsigned long) child->thread.acrs[15]) << 32; in __peek_user()
226 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); in __peek_user()
228 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { in __peek_user()
232 tmp = (addr_t) task_pt_regs(child)->orig_gpr2; in __peek_user()
234 } else if (addr < (addr_t) &dummy->regs.fp_regs) { in __peek_user()
241 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { in __peek_user()
245 tmp = child->thread.fpu.fpc; in __peek_user()
246 tmp <<= BITS_PER_LONG - 32; in __peek_user()
248 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { in __peek_user()
250 * floating point regs. are either in child->thread.fpu in __peek_user()
251 * or the child->thread.fpu.vxrs array in __peek_user()
253 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; in __peek_user()
256 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user()
259 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user()
261 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { in __peek_user()
265 addr -= (addr_t) &dummy->regs.per_info; in __peek_user()
266 tmp = __peek_user_per(child, addr); in __peek_user()
275 peek_user(struct task_struct *child, addr_t addr, addr_t data) in peek_user() argument
284 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && in peek_user()
285 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) in peek_user()
287 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in peek_user()
288 return -EIO; in peek_user()
290 tmp = __peek_user(child, addr); in peek_user()
291 return put_user(tmp, (addr_t __user *) data); in peek_user()
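
The peek_user() matches above are the s390 PTRACE_PEEKUSR path: the offset into the user area is bounds- and alignment-checked, __peek_user() fetches the word, and put_user() copies it out to the tracer. A minimal tracer-side sketch follows, assuming an s390x target and the conventional <sys/user.h> layout for the gpr offset; it is an illustration, not the kernel's code.

/* Sketch: read gpr2 of a stopped child with PTRACE_PEEKUSER.
 * Assumes s390x and the usual <sys/user.h> struct user layout;
 * the offset must be adjusted for other architectures.
 */
#include <errno.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* tracee */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the tracer inspect us */
		_exit(0);
	}
	waitpid(pid, NULL, 0);			/* child is now stopped */

	errno = 0;
	long gpr2 = ptrace(PTRACE_PEEKUSER, pid,
			   (void *)offsetof(struct user, regs.gprs[2]), NULL);
	if (gpr2 == -1 && errno)		/* -1 is a legal word, so test errno */
		perror("PTRACE_PEEKUSER");
	else
		printf("gpr2 = %#lx\n", (unsigned long)gpr2);

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}
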
295 addr_t addr, addr_t data) in __poke_user_per() argument
311 if (addr == (addr_t) &dummy->cr9) in __poke_user_per()
313 child->thread.per_user.control = in __poke_user_per()
314 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per()
315 else if (addr == (addr_t) &dummy->starting_addr) in __poke_user_per()
317 child->thread.per_user.start = data; in __poke_user_per()
318 else if (addr == (addr_t) &dummy->ending_addr) in __poke_user_per()
320 child->thread.per_user.end = data; in __poke_user_per()
324 * Write a word to the user area of a process at location addr. This
329 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) in __poke_user() argument
335 if (addr < (addr_t) &dummy->regs.acrs) { in __poke_user()
340 if (addr == (addr_t) &dummy->regs.psw.mask) { in __poke_user()
344 if ((data ^ PSW_USER_BITS) & ~mask) in __poke_user()
346 return -EINVAL; in __poke_user()
347 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) in __poke_user()
348 /* Invalid address-space-control bits */ in __poke_user()
349 return -EINVAL; in __poke_user()
350 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) in __poke_user()
352 return -EINVAL; in __poke_user()
356 addr == offsetof(struct user, regs.gprs[2])) { in __poke_user()
359 regs->int_code = 0x20000 | (data & 0xffff); in __poke_user()
361 *(addr_t *)((addr_t) &regs->psw + addr) = data; in __poke_user()
362 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { in __poke_user()
366 offset = addr - (addr_t) &dummy->regs.acrs; in __poke_user()
373 if (addr == (addr_t) &dummy->regs.acrs[15]) in __poke_user()
374 child->thread.acrs[15] = (unsigned int) (data >> 32); in __poke_user()
376 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; in __poke_user()
378 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { in __poke_user()
382 task_pt_regs(child)->orig_gpr2 = data; in __poke_user()
384 } else if (addr < (addr_t) &dummy->regs.fp_regs) { in __poke_user()
391 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { in __poke_user()
395 if ((unsigned int) data != 0 || in __poke_user()
396 test_fp_ctl(data >> (BITS_PER_LONG - 32))) in __poke_user()
397 return -EINVAL; in __poke_user()
398 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); in __poke_user()
400 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { in __poke_user()
402 * floating point regs. are either in child->thread.fpu in __poke_user()
403 * or the child->thread.fpu.vxrs array in __poke_user()
405 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; in __poke_user()
408 child->thread.fpu.vxrs + 2*offset) = data; in __poke_user()
411 child->thread.fpu.fprs + offset) = data; in __poke_user()
413 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { in __poke_user()
417 addr -= (addr_t) &dummy->regs.per_info; in __poke_user()
418 __poke_user_per(child, addr, data); in __poke_user()
425 static int poke_user(struct task_struct *child, addr_t addr, addr_t data) in poke_user() argument
434 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && in poke_user()
435 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) in poke_user()
437 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in poke_user()
438 return -EIO; in poke_user()
440 return __poke_user(child, addr, data); in poke_user()
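
peek_user() and poke_user() share the same offset validation: inside the access-register window (regs.acrs up to regs.orig_gpr2) a 4-byte-aligned offset is accepted, everywhere else the offset must be 8-byte aligned, and anything reaching past struct user is rejected with -EIO. Below is a small standalone sketch of that check; the numeric offsets and size are placeholders, not the real struct user layout.

/* Illustration of the addr check in peek_user()/poke_user().
 * All offsets and sizes below are made-up placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

#define ADDR_MASK		7UL	/* 8-byte words outside the acrs window */
#define ACRS_OFFSET		144UL	/* placeholder for offsetof(struct user, regs.acrs) */
#define ORIG_GPR2_OFFSET	208UL	/* placeholder for offsetof(struct user, regs.orig_gpr2) */
#define USER_SIZE		464UL	/* placeholder for sizeof(struct user) */

static bool user_offset_ok(unsigned long addr)
{
	unsigned long mask = ADDR_MASK;

	/* The 32-bit access registers may be addressed with 4-byte alignment. */
	if (addr >= ACRS_OFFSET && addr < ORIG_GPR2_OFFSET)
		mask = 3UL;
	return !((addr & mask) || addr > USER_SIZE - ADDR_MASK);
}

int main(void)
{
	printf("%d %d %d\n",
	       user_offset_ok(16),	/* aligned gpr word: accepted */
	       user_offset_ok(148),	/* 4-byte aligned acr: accepted */
	       user_offset_ok(20));	/* 4-byte aligned outside acrs: rejected */
	return 0;
}
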
444 unsigned long addr, unsigned long data) in arch_ptrace() argument
451 /* read the word at location addr in the USER area. */ in arch_ptrace()
452 return peek_user(child, addr, data); in arch_ptrace()
455 /* write the word at location addr in the USER area */ in arch_ptrace()
456 return poke_user(child, addr, data); in arch_ptrace()
460 if (copy_from_user(&parea, (void __force __user *) addr, in arch_ptrace()
462 return -EFAULT; in arch_ptrace()
463 addr = parea.kernel_addr; in arch_ptrace()
464 data = parea.process_addr; in arch_ptrace()
468 ret = peek_user(child, addr, data); in arch_ptrace()
472 (addr_t __force __user *) data)) in arch_ptrace()
473 return -EFAULT; in arch_ptrace()
474 ret = poke_user(child, addr, utmp); in arch_ptrace()
478 addr += sizeof(unsigned long); in arch_ptrace()
479 data += sizeof(unsigned long); in arch_ptrace()
484 put_user(child->thread.last_break, in arch_ptrace()
485 (unsigned long __user *) data); in arch_ptrace()
489 return -EIO; in arch_ptrace()
490 child->thread.per_flags &= ~PER_FLAG_NO_TE; in arch_ptrace()
494 return -EIO; in arch_ptrace()
495 child->thread.per_flags |= PER_FLAG_NO_TE; in arch_ptrace()
496 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
499 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) in arch_ptrace()
500 return -EIO; in arch_ptrace()
501 switch (data) { in arch_ptrace()
503 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
506 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
507 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
510 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
511 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
514 return -EINVAL; in arch_ptrace()
518 return ptrace_request(child, request, addr, data); in arch_ptrace()
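
Beyond the generic requests, arch_ptrace() handles the s390-specific PTRACE_PEEKUSR_AREA/PTRACE_POKEUSR_AREA, which take a ptrace_area descriptor (len, kernel_addr = offset into the user area, process_addr = buffer in the tracer) and walk it one word at a time, plus PTRACE_GET_LAST_BREAK and the transactional-execution flags (PER_FLAG_NO_TE, PER_FLAG_TE_ABORT_RAND). Here is a hedged tracer-side sketch that bulk-reads the 16 GPRs; the ptrace_area type and the request constant are taken from the s390 UAPI <asm/ptrace.h> (some libcs expose them through <sys/ptrace.h> instead), and the starting offset assumes the usual struct user layout.

/* Sketch: bulk-read the tracee's general registers with the s390-only
 * PTRACE_PEEKUSR_AREA request. Header availability varies between
 * toolchains; adjust the includes if the constants clash.
 */
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <asm/ptrace.h>		/* ptrace_area, PTRACE_PEEKUSR_AREA */

static int read_gprs(pid_t pid, unsigned long gprs[16])
{
	ptrace_area parea = {
		.len          = 16 * sizeof(unsigned long),
		.kernel_addr  = offsetof(struct user, regs.gprs),  /* offset in USER area */
		.process_addr = (unsigned long)gprs,               /* buffer in the tracer */
	};

	/* addr points at the descriptor; data is unused for this request. */
	return ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL) < 0 ? -1 : 0;
}
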
534 * a 64 bit program is a no-no.
541 addr_t addr) in __peek_user_per_compat() argument
545 if (addr == (addr_t) &dummy32->cr9) in __peek_user_per_compat()
548 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per_compat()
549 else if (addr == (addr_t) &dummy32->cr10) in __peek_user_per_compat()
552 0 : child->thread.per_user.start; in __peek_user_per_compat()
553 else if (addr == (addr_t) &dummy32->cr11) in __peek_user_per_compat()
556 PSW32_ADDR_INSN : child->thread.per_user.end; in __peek_user_per_compat()
557 else if (addr == (addr_t) &dummy32->bits) in __peek_user_per_compat()
558 /* Single-step bit. */ in __peek_user_per_compat()
561 else if (addr == (addr_t) &dummy32->starting_addr) in __peek_user_per_compat()
563 return (__u32) child->thread.per_user.start; in __peek_user_per_compat()
564 else if (addr == (addr_t) &dummy32->ending_addr) in __peek_user_per_compat()
566 return (__u32) child->thread.per_user.end; in __peek_user_per_compat()
567 else if (addr == (addr_t) &dummy32->perc_atmid) in __peek_user_per_compat()
569 return (__u32) child->thread.per_event.cause << 16; in __peek_user_per_compat()
570 else if (addr == (addr_t) &dummy32->address) in __peek_user_per_compat()
572 return (__u32) child->thread.per_event.address; in __peek_user_per_compat()
573 else if (addr == (addr_t) &dummy32->access_id) in __peek_user_per_compat()
575 return (__u32) child->thread.per_event.paid << 24; in __peek_user_per_compat()
582 static u32 __peek_user_compat(struct task_struct *child, addr_t addr) in __peek_user_compat() argument
588 if (addr < (addr_t) &dummy32->regs.acrs) { in __peek_user_compat()
593 if (addr == (addr_t) &dummy32->regs.psw.mask) { in __peek_user_compat()
595 tmp = (__u32)(regs->psw.mask >> 32); in __peek_user_compat()
598 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { in __peek_user_compat()
600 tmp = (__u32) regs->psw.addr | in __peek_user_compat()
601 (__u32)(regs->psw.mask & PSW_MASK_BA); in __peek_user_compat()
603 /* gpr 0-15 */ in __peek_user_compat()
604 tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4); in __peek_user_compat()
606 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { in __peek_user_compat()
610 offset = addr - (addr_t) &dummy32->regs.acrs; in __peek_user_compat()
611 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); in __peek_user_compat()
613 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { in __peek_user_compat()
617 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); in __peek_user_compat()
619 } else if (addr < (addr_t) &dummy32->regs.fp_regs) { in __peek_user_compat()
626 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { in __peek_user_compat()
630 tmp = child->thread.fpu.fpc; in __peek_user_compat()
632 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { in __peek_user_compat()
634 * floating point regs. are either in child->thread.fpu in __peek_user_compat()
635 * or the child->thread.fpu.vxrs array in __peek_user_compat()
637 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; in __peek_user_compat()
640 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user_compat()
643 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user_compat()
645 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { in __peek_user_compat()
649 addr -= (addr_t) &dummy32->regs.per_info; in __peek_user_compat()
650 tmp = __peek_user_per_compat(child, addr); in __peek_user_compat()
659 addr_t addr, addr_t data) in peek_user_compat() argument
663 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) in peek_user_compat()
664 return -EIO; in peek_user_compat()
666 tmp = __peek_user_compat(child, addr); in peek_user_compat()
667 return put_user(tmp, (__u32 __user *) data); in peek_user_compat()
674 addr_t addr, __u32 data) in __poke_user_per_compat() argument
678 if (addr == (addr_t) &dummy32->cr9) in __poke_user_per_compat()
680 child->thread.per_user.control = in __poke_user_per_compat()
681 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per_compat()
682 else if (addr == (addr_t) &dummy32->starting_addr) in __poke_user_per_compat()
684 child->thread.per_user.start = data; in __poke_user_per_compat()
685 else if (addr == (addr_t) &dummy32->ending_addr) in __poke_user_per_compat()
687 child->thread.per_user.end = data; in __poke_user_per_compat()
694 addr_t addr, addr_t data) in __poke_user_compat() argument
697 __u32 tmp = (__u32) data; in __poke_user_compat()
700 if (addr < (addr_t) &dummy32->regs.acrs) { in __poke_user_compat()
705 if (addr == (addr_t) &dummy32->regs.psw.mask) { in __poke_user_compat()
712 return -EINVAL; in __poke_user_compat()
713 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) in __poke_user_compat()
714 /* Invalid address-space-control bits */ in __poke_user_compat()
715 return -EINVAL; in __poke_user_compat()
716 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | in __poke_user_compat()
717 (regs->psw.mask & PSW_MASK_BA) | in __poke_user_compat()
719 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { in __poke_user_compat()
721 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; in __poke_user_compat()
723 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | in __poke_user_compat()
727 addr == offsetof(struct compat_user, regs.gprs[2])) { in __poke_user_compat()
730 regs->int_code = 0x20000 | (data & 0xffff); in __poke_user_compat()
732 /* gpr 0-15 */ in __poke_user_compat()
733 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; in __poke_user_compat()
735 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { in __poke_user_compat()
739 offset = addr - (addr_t) &dummy32->regs.acrs; in __poke_user_compat()
740 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; in __poke_user_compat()
742 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { in __poke_user_compat()
746 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; in __poke_user_compat()
748 } else if (addr < (addr_t) &dummy32->regs.fp_regs) { in __poke_user_compat()
755 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { in __poke_user_compat()
760 return -EINVAL; in __poke_user_compat()
761 child->thread.fpu.fpc = data; in __poke_user_compat()
763 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { in __poke_user_compat()
765 * floating point regs. are either in child->thread.fpu in __poke_user_compat()
766 * or the child->thread.fpu.vxrs array in __poke_user_compat()
768 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; in __poke_user_compat()
771 child->thread.fpu.vxrs + 2*offset) = tmp; in __poke_user_compat()
774 child->thread.fpu.fprs + offset) = tmp; in __poke_user_compat()
776 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { in __poke_user_compat()
780 addr -= (addr_t) &dummy32->regs.per_info; in __poke_user_compat()
781 __poke_user_per_compat(child, addr, data); in __poke_user_compat()
788 addr_t addr, addr_t data) in poke_user_compat() argument
790 if (!is_compat_task() || (addr & 3) || in poke_user_compat()
791 addr > sizeof(struct compat_user) - 3) in poke_user_compat()
792 return -EIO; in poke_user_compat()
794 return __poke_user_compat(child, addr, data); in poke_user_compat()
800 unsigned long addr = caddr; in compat_arch_ptrace() local
801 unsigned long data = cdata; in compat_arch_ptrace() local
807 /* read the word at location addr in the USER area. */ in compat_arch_ptrace()
808 return peek_user_compat(child, addr, data); in compat_arch_ptrace()
811 /* write the word at location addr in the USER area */ in compat_arch_ptrace()
812 return poke_user_compat(child, addr, data); in compat_arch_ptrace()
816 if (copy_from_user(&parea, (void __force __user *) addr, in compat_arch_ptrace()
818 return -EFAULT; in compat_arch_ptrace()
819 addr = parea.kernel_addr; in compat_arch_ptrace()
820 data = parea.process_addr; in compat_arch_ptrace()
824 ret = peek_user_compat(child, addr, data); in compat_arch_ptrace()
828 (__u32 __force __user *) data)) in compat_arch_ptrace()
829 return -EFAULT; in compat_arch_ptrace()
830 ret = poke_user_compat(child, addr, utmp); in compat_arch_ptrace()
834 addr += sizeof(unsigned int); in compat_arch_ptrace()
835 data += sizeof(unsigned int); in compat_arch_ptrace()
840 put_user(child->thread.last_break, in compat_arch_ptrace()
841 (unsigned int __user *) data); in compat_arch_ptrace()
844 return compat_ptrace_request(child, request, addr, data); in compat_arch_ptrace()
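
The compat helpers translate 31-bit user-area offsets into the 64-bit pt_regs layout: within the psw/gpr window a compat offset addr maps to addr*2 + 4, because every 32-bit slot corresponds to the low half of an 8-byte slot (the compat PSW is 8 bytes, the native one 16). A tiny standalone sketch of that arithmetic, with the sizes stated as assumptions, just to make the mapping concrete.

/* Illustration of the offset scaling used by __peek_user_compat() and
 * __poke_user_compat(): a 31-bit user-area offset inside the psw/gpr
 * window maps to the low 4 bytes of the doubled 64-bit offset.
 */
#include <stdio.h>

#define COMPAT_PSW_SIZE		8UL	/* two 32-bit psw words (assumed) */
#define COMPAT_GPR_SIZE		4UL
#define NATIVE_PSW_SIZE		16UL	/* two 64-bit psw words (assumed) */
#define NATIVE_GPR_SIZE		8UL

static unsigned long compat_to_native(unsigned long addr32)
{
	return addr32 * 2 + 4;		/* low half of the doubled offset */
}

int main(void)
{
	for (unsigned int i = 0; i < 4; i++) {
		unsigned long addr32 = COMPAT_PSW_SIZE + i * COMPAT_GPR_SIZE;
		unsigned long expect = NATIVE_PSW_SIZE + i * NATIVE_GPR_SIZE + 4;

		printf("gpr%-2u: compat %#lx -> native %#lx (expected %#lx)\n",
		       i, addr32, compat_to_native(addr32), expect);
	}
	return 0;
}
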
858 save_access_regs(target->thread.acrs); in s390_regs_get()
873 save_access_regs(target->thread.acrs); in s390_regs_set()
879 count -= sizeof(*k); in s390_regs_set()
890 count -= sizeof(*u); in s390_regs_set()
896 restore_access_regs(target->thread.acrs); in s390_regs_set()
910 fp_regs.fpc = target->thread.fpu.fpc; in s390_fpregs_get()
911 fpregs_store(&fp_regs, &target->thread.fpu); in s390_fpregs_get()
928 convert_vx_to_fp(fprs, target->thread.fpu.vxrs); in s390_fpregs_set()
930 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs)); in s390_fpregs_set()
934 u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; in s390_fpregs_set()
940 return -EINVAL; in s390_fpregs_set()
941 target->thread.fpu.fpc = ufpc[0]; in s390_fpregs_set()
946 fprs, offsetof(s390_fp_regs, fprs), -1); in s390_fpregs_set()
951 convert_fp_to_vx(target->thread.fpu.vxrs, fprs); in s390_fpregs_set()
953 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); in s390_fpregs_set()
962 return membuf_store(&to, target->thread.last_break); in s390_last_break_get()
980 if (!(regs->int_code & 0x200)) in s390_tdb_get()
981 return -ENODATA; in s390_tdb_get()
982 size = sizeof(target->thread.trap_tdb.data); in s390_tdb_get()
983 return membuf_write(&to, target->thread.trap_tdb.data, size); in s390_tdb_get()
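
s390_tdb_get() exposes the transaction diagnostic block as a regset and returns -ENODATA when the last program interruption was not transactional (the 0x200 bit in int_code is clear). From userspace this is read with PTRACE_GETREGSET and the NT_S390_TDB note type; the sketch below treats ENODATA as "no TDB captured" rather than a hard error, and assumes the architected 256-byte TDB size.

/* Sketch: fetch the transaction diagnostic block of a stopped tracee
 * via PTRACE_GETREGSET / NT_S390_TDB.
 */
#include <elf.h>		/* NT_S390_TDB */
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>		/* struct iovec */

static int read_tdb(pid_t pid, unsigned char tdb[256])
{
	struct iovec iov = { .iov_base = tdb, .iov_len = 256 };

	if (ptrace(PTRACE_GETREGSET, pid, NT_S390_TDB, &iov) < 0) {
		if (errno == ENODATA)
			return 0;	/* stopped, but not in an aborted transaction */
		return -1;
	}
	return (int)iov.iov_len;	/* bytes actually provided by the kernel */
}
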
1002 return -ENODEV; in s390_vxrs_low_get()
1006 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); in s390_vxrs_low_get()
1019 return -ENODEV; in s390_vxrs_low_set()
1024 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); in s390_vxrs_low_set()
1026 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); in s390_vxrs_low_set()
1029 *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i]; in s390_vxrs_low_set()
1039 return -ENODEV; in s390_vxrs_high_get()
1042 return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW, in s390_vxrs_high_get()
1054 return -ENODEV; in s390_vxrs_high_set()
1059 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1); in s390_vxrs_high_set()
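
The vector regsets split the 32 vector registers: NT_S390_VXRS_LOW carries only the low 64 bits of v0-v15 (their high halves are the classic floating-point registers, available through the FP regset), while NT_S390_VXRS_HIGH carries v16-v31 in full. A hedged sketch that reads both halves and reassembles v0-v15; the fp-regset layout used here (fpc, pad, 16 fprs) is an assumption labeled as such.

/* Sketch: reconstruct the full 128-bit v0-v15 of a stopped tracee from
 * the FP regset (high halves) and NT_S390_VXRS_LOW (low halves).
 */
#include <elf.h>		/* NT_PRFPREG, NT_S390_VXRS_LOW */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct s390_fp_regs_sketch {	/* assumed layout: fpc, pad, 16 fprs */
	uint32_t fpc;
	uint32_t pad;
	uint64_t fprs[16];
};

static int read_vrs_0_15(pid_t pid, uint64_t vrs[16][2])
{
	struct s390_fp_regs_sketch fp;
	uint64_t low[16];
	struct iovec iov;

	iov = (struct iovec){ .iov_base = &fp, .iov_len = sizeof(fp) };
	if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) < 0)
		return -1;

	iov = (struct iovec){ .iov_base = low, .iov_len = sizeof(low) };
	if (ptrace(PTRACE_GETREGSET, pid, NT_S390_VXRS_LOW, &iov) < 0)
		return -1;	/* e.g. ENODEV if the machine has no vector facility */

	for (int i = 0; i < 16; i++) {
		vrs[i][0] = fp.fprs[i];	/* high 64 bits == fpr i */
		vrs[i][1] = low[i];	/* low 64 bits from VXRS_LOW */
	}
	return 0;
}
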
1067 return membuf_store(&to, target->thread.system_call); in s390_system_call_get()
1075 unsigned int *data = &target->thread.system_call; in s390_system_call_set() local
1077 data, 0, sizeof(unsigned int)); in s390_system_call_set()
1084 struct gs_cb *data = target->thread.gs_cb; in s390_gs_cb_get() local
1087 return -ENODEV; in s390_gs_cb_get()
1088 if (!data) in s390_gs_cb_get()
1089 return -ENODATA; in s390_gs_cb_get()
1091 save_gs_cb(data); in s390_gs_cb_get()
1092 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_cb_get()
1100 struct gs_cb gs_cb = { }, *data = NULL; in s390_gs_cb_set() local
1104 return -ENODEV; in s390_gs_cb_set()
1105 if (!target->thread.gs_cb) { in s390_gs_cb_set()
1106 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_cb_set()
1107 if (!data) in s390_gs_cb_set()
1108 return -ENOMEM; in s390_gs_cb_set()
1110 if (!target->thread.gs_cb) in s390_gs_cb_set()
1115 gs_cb = *target->thread.gs_cb; in s390_gs_cb_set()
1119 kfree(data); in s390_gs_cb_set()
1120 return -EFAULT; in s390_gs_cb_set()
1123 if (!target->thread.gs_cb) in s390_gs_cb_set()
1124 target->thread.gs_cb = data; in s390_gs_cb_set()
1125 *target->thread.gs_cb = gs_cb; in s390_gs_cb_set()
1128 restore_gs_cb(target->thread.gs_cb); in s390_gs_cb_set()
1138 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_get() local
1141 return -ENODEV; in s390_gs_bc_get()
1142 if (!data) in s390_gs_bc_get()
1143 return -ENODATA; in s390_gs_bc_get()
1144 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_bc_get()
1152 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_set() local
1155 return -ENODEV; in s390_gs_bc_set()
1156 if (!data) { in s390_gs_bc_set()
1157 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_bc_set()
1158 if (!data) in s390_gs_bc_set()
1159 return -ENOMEM; in s390_gs_bc_set()
1160 target->thread.gs_bc_cb = data; in s390_gs_bc_set()
1163 data, 0, sizeof(struct gs_cb)); in s390_gs_bc_set()
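
The guarded-storage regsets allocate their control blocks lazily: s390_gs_cb_set() kzallocs a gs_cb if the thread has none and installs it under lock, and s390_gs_bc_set() does the same for the broadcast control block. From the tracer these are ordinary regsets; below is a hedged sketch reading the current block with NT_S390_GS_CB, where ENODEV means the machine lacks the facility and ENODATA means the tracee never enabled guarded storage. The struct gs_cb layout is taken from the s390 UAPI header and should be treated as an assumption on non-s390 builds.

/* Sketch: read the tracee's guarded-storage control block via
 * PTRACE_GETREGSET / NT_S390_GS_CB.
 */
#include <asm/guarded_storage.h>	/* struct gs_cb */
#include <elf.h>			/* NT_S390_GS_CB */
#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static void dump_gs_cb(pid_t pid)
{
	struct gs_cb cb;
	struct iovec iov = { .iov_base = &cb, .iov_len = sizeof(cb) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_S390_GS_CB, &iov) < 0) {
		if (errno == ENODEV)
			fprintf(stderr, "no guarded-storage facility\n");
		else if (errno == ENODATA)
			fprintf(stderr, "guarded storage not enabled by tracee\n");
		else
			perror("PTRACE_GETREGSET");
		return;
	}
	printf("gsd=%#llx gssm=%#llx gs_epl_a=%#llx\n",
	       (unsigned long long)cb.gsd, (unsigned long long)cb.gssm,
	       (unsigned long long)cb.gs_epl_a);
}
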
1168 return (cb->rca & 0x1f) == 0 && in is_ri_cb_valid()
1169 (cb->roa & 0xfff) == 0 && in is_ri_cb_valid()
1170 (cb->rla & 0xfff) == 0xfff && in is_ri_cb_valid()
1171 cb->s == 1 && in is_ri_cb_valid()
1172 cb->k == 1 && in is_ri_cb_valid()
1173 cb->h == 0 && in is_ri_cb_valid()
1174 cb->reserved1 == 0 && in is_ri_cb_valid()
1175 cb->ps == 1 && in is_ri_cb_valid()
1176 cb->qs == 0 && in is_ri_cb_valid()
1177 cb->pc == 1 && in is_ri_cb_valid()
1178 cb->qc == 0 && in is_ri_cb_valid()
1179 cb->reserved2 == 0 && in is_ri_cb_valid()
1180 cb->reserved3 == 0 && in is_ri_cb_valid()
1181 cb->reserved4 == 0 && in is_ri_cb_valid()
1182 cb->reserved5 == 0 && in is_ri_cb_valid()
1183 cb->reserved6 == 0 && in is_ri_cb_valid()
1184 cb->reserved7 == 0 && in is_ri_cb_valid()
1185 cb->reserved8 == 0 && in is_ri_cb_valid()
1186 cb->rla >= cb->roa && in is_ri_cb_valid()
1187 cb->rca >= cb->roa && in is_ri_cb_valid()
1188 cb->rca <= cb->rla+1 && in is_ri_cb_valid()
1189 cb->m < 3; in is_ri_cb_valid()
1196 struct runtime_instr_cb *data = target->thread.ri_cb; in s390_runtime_instr_get() local
1199 return -ENODEV; in s390_runtime_instr_get()
1200 if (!data) in s390_runtime_instr_get()
1201 return -ENODATA; in s390_runtime_instr_get()
1203 return membuf_write(&to, data, sizeof(struct runtime_instr_cb)); in s390_runtime_instr_get()
1211 struct runtime_instr_cb ri_cb = { }, *data = NULL; in s390_runtime_instr_set() local
1215 return -ENODEV; in s390_runtime_instr_set()
1217 if (!target->thread.ri_cb) { in s390_runtime_instr_set()
1218 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_runtime_instr_set()
1219 if (!data) in s390_runtime_instr_set()
1220 return -ENOMEM; in s390_runtime_instr_set()
1223 if (target->thread.ri_cb) { in s390_runtime_instr_set()
1227 ri_cb = *target->thread.ri_cb; in s390_runtime_instr_set()
1233 kfree(data); in s390_runtime_instr_set()
1234 return -EFAULT; in s390_runtime_instr_set()
1238 kfree(data); in s390_runtime_instr_set()
1239 return -EINVAL; in s390_runtime_instr_set()
1247 if (!target->thread.ri_cb) in s390_runtime_instr_set()
1248 target->thread.ri_cb = data; in s390_runtime_instr_set()
1249 *target->thread.ri_cb = ri_cb; in s390_runtime_instr_set()
1251 load_runtime_instr_cb(target->thread.ri_cb); in s390_runtime_instr_set()
1355 save_access_regs(target->thread.acrs); in s390_compat_regs_get()
1370 save_access_regs(target->thread.acrs); in s390_compat_regs_set()
1376 count -= sizeof(*k); in s390_compat_regs_set()
1387 count -= sizeof(*u); in s390_compat_regs_set()
1393 restore_access_regs(target->thread.acrs); in s390_compat_regs_set()
1405 gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs; in s390_compat_regs_high_get()
1420 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)]; in s390_compat_regs_high_set()
1426 count -= sizeof(*k); in s390_compat_regs_high_set()
1437 count -= sizeof(*u); in s390_compat_regs_high_set()
1448 compat_ulong_t last_break = target->thread.last_break; in s390_compat_last_break_get()
1578 return regs->gprs[offset]; in regs_get_register()
1586 return -EINVAL; in regs_query_register_offset()
1588 return -EINVAL; in regs_query_register_offset()
1590 return -EINVAL; in regs_query_register_offset()
1601 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) in regs_within_kernel_stack() argument
1605 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); in regs_within_kernel_stack()
1609 * regs_get_kernel_stack_nth() - get Nth entry of the stack
1619 unsigned long addr; in regs_get_kernel_stack_nth() local
1621 addr = kernel_stack_pointer(regs) + n * sizeof(long); in regs_get_kernel_stack_nth()
1622 if (!regs_within_kernel_stack(regs, addr)) in regs_get_kernel_stack_nth()
1624 return *(unsigned long *)addr; in regs_get_kernel_stack_nth()
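
regs_within_kernel_stack() relies on the kernel stack being a THREAD_SIZE-aligned block: two addresses belong to the same stack exactly when they agree in all bits above log2(THREAD_SIZE), and regs_get_kernel_stack_nth() uses that test to bounds-check the Nth word above the saved stack pointer, returning 0 otherwise. A tiny standalone sketch of the masking test, with a hypothetical THREAD_SIZE.

/* Illustration of the THREAD_SIZE masking in regs_within_kernel_stack():
 * two addresses lie on the same aligned stack block iff they share all
 * bits above the stack size. THREAD_SIZE here is a made-up 16 KiB.
 */
#include <stdbool.h>
#include <stdio.h>

#define THREAD_SIZE	(16UL * 1024)	/* assumed; must be a power of two */

static bool within_same_stack(unsigned long sp, unsigned long addr)
{
	return (addr & ~(THREAD_SIZE - 1)) == (sp & ~(THREAD_SIZE - 1));
}

int main(void)
{
	unsigned long sp = 0xffffc00010004e60UL;	/* made-up stack pointer */

	printf("%d\n", within_same_stack(sp, sp + 5 * sizeof(long)));	/* 1: same block */
	printf("%d\n", within_same_stack(sp, sp + THREAD_SIZE));	/* 0: next block */
	return 0;
}
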