/Linux-v5.4/arch/x86/kvm/
  emulate.c
      216  int (*execute)(struct x86_emulate_ctxt *ctxt);
      225  int (*check_perm)(struct x86_emulate_ctxt *ctxt);
      264  static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)  in reg_read() argument
      266  if (!(ctxt->regs_valid & (1 << nr))) {  in reg_read()
      267  ctxt->regs_valid |= 1 << nr;  in reg_read()
      268  ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);  in reg_read()
      270  return ctxt->_regs[nr];  in reg_read()
      273  static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)  in reg_write() argument
      275  ctxt->regs_valid |= 1 << nr;  in reg_write()
      276  ctxt->regs_dirty |= 1 << nr;  in reg_write()
      [all …]
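The reg_read()/reg_write() hits above show the emulator's lazy register cache: a guest GPR is fetched through the ops->read_gpr() callback only on first use and marked valid, and writes additionally mark it dirty so only modified registers are pushed back to the vCPU afterwards. Below is a minimal, self-contained sketch of that valid/dirty-bitmask idiom; the struct layout and the fetch_gpr callback are illustrative stand-ins, not the kernel's x86_emulate_ctxt.

    #include <stdint.h>

    #define NR_GPRS 16u

    struct emu_ctxt {
        uint32_t regs_valid;                      /* bit n set: _regs[n] holds a fetched value */
        uint32_t regs_dirty;                      /* bit n set: _regs[n] must be written back  */
        unsigned long _regs[NR_GPRS];
        unsigned long (*fetch_gpr)(unsigned nr);  /* hypothetical backend callback */
    };

    static unsigned long reg_read(struct emu_ctxt *ctxt, unsigned nr)
    {
        if (!(ctxt->regs_valid & (1u << nr))) {   /* first access: pull from the backend */
            ctxt->regs_valid |= 1u << nr;
            ctxt->_regs[nr] = ctxt->fetch_gpr(nr);
        }
        return ctxt->_regs[nr];
    }

    static unsigned long *reg_write(struct emu_ctxt *ctxt, unsigned nr)
    {
        ctxt->regs_valid |= 1u << nr;             /* caller will store through the pointer */
        ctxt->regs_dirty |= 1u << nr;             /* remember to flush this register later */
        return &ctxt->_regs[nr];
    }

A write-back pass can then walk regs_dirty and push only the registers the emulated instruction actually touched, which keeps short emulations cheap.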
/Linux-v5.4/arch/arm64/kvm/hyp/
  sysreg-sr.c
      26  static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_common_state() argument
      28  ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);  in __sysreg_save_common_state()
      34  ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);  in __sysreg_save_common_state()
      37  static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_user_state() argument
      39  ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);  in __sysreg_save_user_state()
      40  ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);  in __sysreg_save_user_state()
      43  static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_el1_state() argument
      45  ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);  in __sysreg_save_el1_state()
      46  ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);  in __sysreg_save_el1_state()
      47  ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);  in __sysreg_save_el1_state()
      [all …]
  entry.S
      25  .macro save_callee_saved_regs ctxt
      26  stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
      27  stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
      28  stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
      29  stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
      30  stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
      31  stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
      34  .macro restore_callee_saved_regs ctxt
      35  ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
      36  ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
      [all …]
/Linux-v5.4/arch/arm/kvm/hyp/
  cp15-sr.c
      12  static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)  in cp15_64() argument
      14  return (u64 *)(ctxt->cp15 + idx);  in cp15_64()
      17  void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_state() argument
      19  ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);  in __sysreg_save_state()
      20  ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);  in __sysreg_save_state()
      21  ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);  in __sysreg_save_state()
      22  *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0);  in __sysreg_save_state()
      23  *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1);  in __sysreg_save_state()
      24  ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR);  in __sysreg_save_state()
      25  ctxt->cp15[c3_DACR] = read_sysreg(DACR);  in __sysreg_save_state()
      [all …]
  banked-sr.c
      18  void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)  in __banked_save_state() argument
      20  ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr);  in __banked_save_state()
      21  ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp);  in __banked_save_state()
      22  ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR);  in __banked_save_state()
      23  ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc);  in __banked_save_state()
      24  ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc);  in __banked_save_state()
      25  ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc);  in __banked_save_state()
      26  ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt);  in __banked_save_state()
      27  ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt);  in __banked_save_state()
      28  ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt);  in __banked_save_state()
      [all …]
/Linux-v5.4/net/sunrpc/xprtrdma/
  svc_rdma_recvfrom.c
      123  struct svc_rdma_recv_ctxt *ctxt;  in svc_rdma_recv_ctxt_alloc() local
      127  ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);  in svc_rdma_recv_ctxt_alloc()
      128  if (!ctxt)  in svc_rdma_recv_ctxt_alloc()
      138  ctxt->rc_recv_wr.next = NULL;  in svc_rdma_recv_ctxt_alloc()
      139  ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;  in svc_rdma_recv_ctxt_alloc()
      140  ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;  in svc_rdma_recv_ctxt_alloc()
      141  ctxt->rc_recv_wr.num_sge = 1;  in svc_rdma_recv_ctxt_alloc()
      142  ctxt->rc_cqe.done = svc_rdma_wc_receive;  in svc_rdma_recv_ctxt_alloc()
      143  ctxt->rc_recv_sge.addr = addr;  in svc_rdma_recv_ctxt_alloc()
      144  ctxt->rc_recv_sge.length = rdma->sc_max_req_size;  in svc_rdma_recv_ctxt_alloc()
      [all …]
  svc_rdma_sendto.c
      129  struct svc_rdma_send_ctxt *ctxt;  in svc_rdma_send_ctxt_alloc() local
      135  size = sizeof(*ctxt);  in svc_rdma_send_ctxt_alloc()
      137  ctxt = kmalloc(size, GFP_KERNEL);  in svc_rdma_send_ctxt_alloc()
      138  if (!ctxt)  in svc_rdma_send_ctxt_alloc()
      148  ctxt->sc_send_wr.next = NULL;  in svc_rdma_send_ctxt_alloc()
      149  ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;  in svc_rdma_send_ctxt_alloc()
      150  ctxt->sc_send_wr.sg_list = ctxt->sc_sges;  in svc_rdma_send_ctxt_alloc()
      151  ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;  in svc_rdma_send_ctxt_alloc()
      152  ctxt->sc_cqe.done = svc_rdma_wc_send;  in svc_rdma_send_ctxt_alloc()
      153  ctxt->sc_xprt_buf = buffer;  in svc_rdma_send_ctxt_alloc()
      [all …]
  svc_rdma_rw.c
      57  struct svc_rdma_rw_ctxt *ctxt;  in svc_rdma_get_rw_ctxt() local
      61  ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);  in svc_rdma_get_rw_ctxt()
      62  if (ctxt) {  in svc_rdma_get_rw_ctxt()
      63  list_del(&ctxt->rw_list);  in svc_rdma_get_rw_ctxt()
      67  ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),  in svc_rdma_get_rw_ctxt()
      69  if (!ctxt)  in svc_rdma_get_rw_ctxt()
      71  INIT_LIST_HEAD(&ctxt->rw_list);  in svc_rdma_get_rw_ctxt()
      74  ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;  in svc_rdma_get_rw_ctxt()
      75  if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,  in svc_rdma_get_rw_ctxt()
      76  ctxt->rw_sg_table.sgl,  in svc_rdma_get_rw_ctxt()
      [all …]
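svc_rdma_get_rw_ctxt() above follows a common recycling pattern: pop a previously used context off the transport's free list when one is cached, and only fall back to a fresh allocation when the list is empty. Here is a generic sketch of that pattern in plain C; the type and field names are hypothetical, not the sunrpc ones, and locking is omitted.

    #include <stdlib.h>

    struct rw_ctxt {
        struct rw_ctxt *next;             /* free-list linkage */
        /* ... per-I/O state ... */
    };

    struct transport {
        struct rw_ctxt *free_ctxts;       /* cache of recycled contexts */
    };

    /* Reuse a cached context when possible; allocate only on a cold cache. */
    static struct rw_ctxt *get_rw_ctxt(struct transport *t)
    {
        struct rw_ctxt *ctxt = t->free_ctxts;

        if (ctxt) {
            t->free_ctxts = ctxt->next;   /* fast path: unlink from the free list */
        } else {
            ctxt = calloc(1, sizeof(*ctxt));
            if (!ctxt)
                return NULL;              /* slow path: allocation failed */
        }
        ctxt->next = NULL;
        return ctxt;
    }

    /* Return a context to the cache instead of freeing it. */
    static void put_rw_ctxt(struct transport *t, struct rw_ctxt *ctxt)
    {
        ctxt->next = t->free_ctxts;
        t->free_ctxts = ctxt;
    }

Recycling contexts this way avoids repeating the allocation and scatterlist setup for every RDMA read/write chunk, at the cost of keeping the memory around between requests.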
  svc_rdma_backchannel.c
      116  struct svc_rdma_send_ctxt *ctxt)  in svc_rdma_bc_sendto() argument
      120  ret = svc_rdma_map_reply_msg(rdma, ctxt, &rqst->rq_snd_buf, NULL);  in svc_rdma_bc_sendto()
      128  ctxt->sc_send_wr.opcode = IB_WR_SEND;  in svc_rdma_bc_sendto()
      129  return svc_rdma_send(rdma, &ctxt->sc_send_wr);  in svc_rdma_bc_sendto()
      176  struct svc_rdma_send_ctxt *ctxt;  in rpcrdma_bc_send_request() local
      180  ctxt = svc_rdma_send_ctxt_get(rdma);  in rpcrdma_bc_send_request()
      181  if (!ctxt)  in rpcrdma_bc_send_request()
      184  p = ctxt->sc_xprt_buf;  in rpcrdma_bc_send_request()
      192  svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_MIN);  in rpcrdma_bc_send_request()
      198  rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);  in rpcrdma_bc_send_request()
      [all …]
/Linux-v5.4/arch/x86/include/asm/
  kvm_emulate.h
      96  ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
      103  void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
      112  int (*read_std)(struct x86_emulate_ctxt *ctxt,
      124  int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr,
      135  int (*write_std)(struct x86_emulate_ctxt *ctxt,
      145  int (*fetch)(struct x86_emulate_ctxt *ctxt,
      155  int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
      166  int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
      179  int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
      185  void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);
      [all …]
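The kvm_emulate.h hits above are callbacks in an ops table: the instruction emulator does not reach into KVM data structures directly but calls back through this set of function pointers for register and guest-memory access. A toy illustration of the same ops-table idiom follows; the names here are invented for the example, not the kernel's.

    /* A consumer that only knows about the ops interface, not the implementation. */
    struct emu_ops {
        unsigned long (*read_gpr)(void *priv, unsigned reg);
        void          (*write_gpr)(void *priv, unsigned reg, unsigned long val);
        int           (*read_mem)(void *priv, unsigned long addr, void *buf, unsigned len);
    };

    struct emulator {
        const struct emu_ops *ops;   /* backend-supplied callbacks */
        void *priv;                  /* opaque cookie handed back to every callback */
    };

    /* Example use: increment a general-purpose register through the ops table. */
    static void emulate_inc_reg(struct emulator *e, unsigned reg)
    {
        unsigned long val = e->ops->read_gpr(e->priv, reg);

        e->ops->write_gpr(e->priv, reg, val + 1);
    }

Because the emulator sees only the ops pointer, the same decode/execute core can be driven by different backends (or by tests) that simply fill in the table.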
/Linux-v5.4/arch/x86/power/
  cpu.c
      37  static void msr_save_context(struct saved_context *ctxt)  in msr_save_context() argument
      39  struct saved_msr *msr = ctxt->saved_msrs.array;  in msr_save_context()
      40  struct saved_msr *end = msr + ctxt->saved_msrs.num;  in msr_save_context()
      48  static void msr_restore_context(struct saved_context *ctxt)  in msr_restore_context() argument
      50  struct saved_msr *msr = ctxt->saved_msrs.array;  in msr_restore_context()
      51  struct saved_msr *end = msr + ctxt->saved_msrs.num;  in msr_restore_context()
      75  static void __save_processor_state(struct saved_context *ctxt)  in __save_processor_state() argument
      85  store_idt(&ctxt->idt);  in __save_processor_state()
      93  ctxt->gdt_desc.size = GDT_SIZE - 1;  in __save_processor_state()
      94  ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());  in __save_processor_state()
      [all …]
/Linux-v5.4/arch/x86/xen/
  smp_pv.c
      286  struct vcpu_guest_context *ctxt;  in cpu_initialize_context() local
      295  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in cpu_initialize_context()
      296  if (ctxt == NULL)  in cpu_initialize_context()
      302  ctxt->user_regs.fs = __KERNEL_PERCPU;  in cpu_initialize_context()
      303  ctxt->user_regs.gs = __KERNEL_STACK_CANARY;  in cpu_initialize_context()
      305  memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));  in cpu_initialize_context()
      312  ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;  in cpu_initialize_context()
      313  ctxt->flags = VGCF_IN_KERNEL;  in cpu_initialize_context()
      314  ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */  in cpu_initialize_context()
      315  ctxt->user_regs.ds = __USER_DS;  in cpu_initialize_context()
      [all …]
  pmu.c
      30  #define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \  argument
      31  (uintptr_t)ctxt->field))
      193  struct xen_pmu_intel_ctxt *ctxt;  in xen_intel_pmu_emulate() local
      203  ctxt = &xenpmu_data->pmu.c.intel;  in xen_intel_pmu_emulate()
      207  reg = &ctxt->global_ovf_ctrl;  in xen_intel_pmu_emulate()
      210  reg = &ctxt->global_status;  in xen_intel_pmu_emulate()
      213  reg = &ctxt->global_ctrl;  in xen_intel_pmu_emulate()
      216  reg = &ctxt->fixed_ctrl;  in xen_intel_pmu_emulate()
      221  fix_counters = field_offset(ctxt, fixed_counters);  in xen_intel_pmu_emulate()
      225  arch_cntr_pair = field_offset(ctxt, arch_counters);  in xen_intel_pmu_emulate()
      [all …]
/Linux-v5.4/drivers/net/wireless/intel/iwlwifi/mvm/
  phy-ctxt.c
      126  static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_cmd_hdr() argument
      132  cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,  in iwl_mvm_phy_ctxt_cmd_hdr()
      133  ctxt->color));  in iwl_mvm_phy_ctxt_cmd_hdr()
      189  struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_apply() argument
      199  iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);  in iwl_mvm_phy_ctxt_apply()
      214  int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_add() argument
      219  ctxt->ref);  in iwl_mvm_phy_ctxt_add()
      222  ctxt->channel = chandef->chan;  in iwl_mvm_phy_ctxt_add()
      224  return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,  in iwl_mvm_phy_ctxt_add()
      233  void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)  in iwl_mvm_phy_ctxt_ref() argument
      [all …]
/Linux-v5.4/drivers/infiniband/hw/hfi1/
  trace_rx.h
      70  __field(u32, ctxt)
      79  __entry->ctxt = packet->rcd->ctxt;
      89  __entry->ctxt,
      103  __field(u32, ctxt)
      108  __entry->ctxt = rcd->ctxt;
      125  __entry->ctxt,
      132  TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
      134  TP_ARGS(ctxt, subctxt, type, start, end),
      136  __field(unsigned int, ctxt)
      143  __entry->ctxt = ctxt;
      [all …]
  trace_tx.h
      210  TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
      211  TP_ARGS(dd, ctxt, subctxt),
      213  __field(u16, ctxt)
      217  __entry->ctxt = ctxt;
      222  __entry->ctxt,
      228  TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
      230  TP_ARGS(dd, ctxt, subctxt, comp_idx),
      232  __field(u16, ctxt)
      237  __entry->ctxt = ctxt;
      243  __entry->ctxt,
      [all …]
  trace_ctxts.h
      66  __field(unsigned int, ctxt)
      78  __entry->ctxt = uctxt->ctxt;
      91  __entry->ctxt,
      107  TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
      110  TP_ARGS(dd, ctxt, subctxt, cinfo),
      112  __field(unsigned int, ctxt)
      121  __entry->ctxt = ctxt;
      131  __entry->ctxt,
  init.c
      155  if (rcd->ctxt == HFI1_CTRL_CTXT)  in hfi1_create_kctxt()
      219  rcd->dd->rcd[rcd->ctxt] = NULL;  in hfi1_rcd_free()
      269  u16 ctxt;  in allocate_rcd_index() local
      272  for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)  in allocate_rcd_index()
      273  if (!dd->rcd[ctxt])  in allocate_rcd_index()
      276  if (ctxt < dd->num_rcv_contexts) {  in allocate_rcd_index()
      277  rcd->ctxt = ctxt;  in allocate_rcd_index()
      278  dd->rcd[ctxt] = rcd;  in allocate_rcd_index()
      283  if (ctxt >= dd->num_rcv_contexts)  in allocate_rcd_index()
      286  *index = ctxt;  in allocate_rcd_index()
      [all …]
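allocate_rcd_index() above is the usual fixed-table slot allocator: scan the receive-context array for the first NULL slot, install the new context there, and hand the index back to the caller. A stand-alone sketch of the pattern, with hypothetical names in place of the hfi1 structures and with locking left out:

    #define MAX_CONTEXTS 16

    struct rcv_ctxt {
        unsigned int index;                  /* slot this context occupies */
        /* ... receive-side state ... */
    };

    struct devdata {
        struct rcv_ctxt *rcd[MAX_CONTEXTS];  /* NULL means the slot is free */
    };

    /* Claim the first free slot; return 0 and fill *index, or -1 if the table is full. */
    static int allocate_ctxt_index(struct devdata *dd, struct rcv_ctxt *rcd,
                                   unsigned int *index)
    {
        unsigned int i;

        for (i = 0; i < MAX_CONTEXTS; i++) {
            if (!dd->rcd[i]) {
                rcd->index = i;
                dd->rcd[i] = rcd;
                *index = i;
                return 0;
            }
        }
        return -1;                           /* no free receive context */
    }

A real allocator would run the scan under a lock so two callers cannot claim the same slot; the matching release path simply clears the slot again, as the hfi1_rcd_free() hit at line 219 above shows.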
  file_ops.c
      171  #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \  argument
      174  HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
      315  trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);  in hfi1_write_iter()
      351  u16 ctxt;  in hfi1_file_mmap() local
      359  ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);  in hfi1_file_mmap()
      362  if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {  in hfi1_file_mmap()
      465  + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);  in hfi1_file_mmap()
      555  uctxt->ctxt, fd->subctxt,  in hfi1_file_mmap()
      564  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,  in hfi1_file_mmap()
      640  hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);  in hfi1_file_close()
      [all …]
/Linux-v5.4/fs/nilfs2/
  btnode.c
      157  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_prepare_change_key() argument
      161  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;  in nilfs_btnode_prepare_change_key()
      167  obh = ctxt->bh;  in nilfs_btnode_prepare_change_key()
      168  ctxt->newbh = NULL;  in nilfs_btnode_prepare_change_key()
      207  ctxt->newbh = nbh;  in nilfs_btnode_prepare_change_key()
      220  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_commit_change_key() argument
      222  struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;  in nilfs_btnode_commit_change_key()
      223  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;  in nilfs_btnode_commit_change_key()
      250  ctxt->bh = nbh;  in nilfs_btnode_commit_change_key()
      260  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_abort_change_key() argument
      [all …]
/Linux-v5.4/drivers/net/ethernet/intel/ice/
  ice_lib.c
      421  struct ice_vsi_ctx *ctxt;  in ice_vsi_delete() local
      424  ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_delete()
      425  if (!ctxt)  in ice_vsi_delete()
      429  ctxt->vf_num = vsi->vf_id;  in ice_vsi_delete()
      430  ctxt->vsi_num = vsi->vsi_num;  in ice_vsi_delete()
      432  memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));  in ice_vsi_delete()
      434  status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);  in ice_vsi_delete()
      439  devm_kfree(&pf->pdev->dev, ctxt);  in ice_vsi_delete()
      829  static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)  in ice_set_dflt_vsi_ctx() argument
      833  memset(&ctxt->info, 0, sizeof(ctxt->info));  in ice_set_dflt_vsi_ctx()
      [all …]
/Linux-v5.4/arch/arm64/include/asm/
  kvm_hyp.h
      63  void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
      64  void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
      65  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
      66  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
      67  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
      68  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
/Linux-v5.4/arch/arm/kvm/
  emulate.c
      103  unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;  in vcpu_reg()
      138  return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;  in __vcpu_spsr()
      140  return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;  in __vcpu_spsr()
      142  return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;  in __vcpu_spsr()
      144  return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;  in __vcpu_spsr()
      146  return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;  in __vcpu_spsr()
/Linux-v5.4/fs/ocfs2/
  xattr.c
      266  struct ocfs2_xattr_set_ctxt *ctxt);
      271  struct ocfs2_xattr_set_ctxt *ctxt);
      704  struct ocfs2_xattr_set_ctxt *ctxt)  in ocfs2_xattr_extend_allocation() argument
      707  handle_t *handle = ctxt->handle;  in ocfs2_xattr_extend_allocation()
      730  ctxt->data_ac,  in ocfs2_xattr_extend_allocation()
      731  ctxt->meta_ac,  in ocfs2_xattr_extend_allocation()
      769  struct ocfs2_xattr_set_ctxt *ctxt)  in __ocfs2_remove_xattr_range() argument
      773  handle_t *handle = ctxt->handle;  in __ocfs2_remove_xattr_range()
      785  ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,  in __ocfs2_remove_xattr_range()
      786  &ctxt->dealloc);  in __ocfs2_remove_xattr_range()
      [all …]
/Linux-v5.4/arch/arm64/kvm/
  handle_exit.c
      177  struct kvm_cpu_context *ctxt;  in kvm_arm_vcpu_ptrauth_trap() local
      181  ctxt = vcpu->arch.host_cpu_context;  in kvm_arm_vcpu_ptrauth_trap()
      182  __ptrauth_save_key(ctxt->sys_regs, APIA);  in kvm_arm_vcpu_ptrauth_trap()
      183  __ptrauth_save_key(ctxt->sys_regs, APIB);  in kvm_arm_vcpu_ptrauth_trap()
      184  __ptrauth_save_key(ctxt->sys_regs, APDA);  in kvm_arm_vcpu_ptrauth_trap()
      185  __ptrauth_save_key(ctxt->sys_regs, APDB);  in kvm_arm_vcpu_ptrauth_trap()
      186  __ptrauth_save_key(ctxt->sys_regs, APGA);  in kvm_arm_vcpu_ptrauth_trap()