
Searched refs:ctxt (Results 1 – 25 of 104) sorted by relevance


/Linux-v4.19/arch/x86/kvm/
emulate.c
218 int (*execute)(struct x86_emulate_ctxt *ctxt);
227 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
266 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) in reg_read() argument
268 if (!(ctxt->regs_valid & (1 << nr))) { in reg_read()
269 ctxt->regs_valid |= 1 << nr; in reg_read()
270 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); in reg_read()
272 return ctxt->_regs[nr]; in reg_read()
275 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) in reg_write() argument
277 ctxt->regs_valid |= 1 << nr; in reg_write()
278 ctxt->regs_dirty |= 1 << nr; in reg_write()
[all …]
/Linux-v4.19/arch/arm64/kvm/hyp/
sysreg-sr.c
36 static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt) in __sysreg_save_common_state() argument
38 ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); in __sysreg_save_common_state()
44 ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); in __sysreg_save_common_state()
47 static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt) in __sysreg_save_user_state() argument
49 ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); in __sysreg_save_user_state()
50 ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); in __sysreg_save_user_state()
53 static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) in __sysreg_save_el1_state() argument
55 ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); in __sysreg_save_el1_state()
56 ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); in __sysreg_save_el1_state()
57 ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr); in __sysreg_save_el1_state()
[all …]
entry.S
34 .macro save_callee_saved_regs ctxt
35 stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
36 stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
37 stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
38 stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
39 stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
40 stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
43 .macro restore_callee_saved_regs ctxt
44 ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
45 ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
[all …]
/Linux-v4.19/arch/arm/kvm/hyp/
cp15-sr.c
23 static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx) in cp15_64() argument
25 return (u64 *)(ctxt->cp15 + idx); in cp15_64()
28 void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) in __sysreg_save_state() argument
30 ctxt->cp15[c0_MPIDR] = read_sysreg(VMPIDR); in __sysreg_save_state()
31 ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR); in __sysreg_save_state()
32 ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR); in __sysreg_save_state()
33 ctxt->cp15[c1_CPACR] = read_sysreg(CPACR); in __sysreg_save_state()
34 *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0); in __sysreg_save_state()
35 *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1); in __sysreg_save_state()
36 ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR); in __sysreg_save_state()
[all …]
banked-sr.c
29 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) in __banked_save_state() argument
31 ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr); in __banked_save_state()
32 ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp); in __banked_save_state()
33 ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR); in __banked_save_state()
34 ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc); in __banked_save_state()
35 ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc); in __banked_save_state()
36 ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc); in __banked_save_state()
37 ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt); in __banked_save_state()
38 ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt); in __banked_save_state()
39 ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt); in __banked_save_state()
[all …]
/Linux-v4.19/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
123 struct svc_rdma_recv_ctxt *ctxt; in svc_rdma_recv_ctxt_alloc() local
127 ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL); in svc_rdma_recv_ctxt_alloc()
128 if (!ctxt) in svc_rdma_recv_ctxt_alloc()
138 ctxt->rc_recv_wr.next = NULL; in svc_rdma_recv_ctxt_alloc()
139 ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe; in svc_rdma_recv_ctxt_alloc()
140 ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge; in svc_rdma_recv_ctxt_alloc()
141 ctxt->rc_recv_wr.num_sge = 1; in svc_rdma_recv_ctxt_alloc()
142 ctxt->rc_cqe.done = svc_rdma_wc_receive; in svc_rdma_recv_ctxt_alloc()
143 ctxt->rc_recv_sge.addr = addr; in svc_rdma_recv_ctxt_alloc()
144 ctxt->rc_recv_sge.length = rdma->sc_max_req_size; in svc_rdma_recv_ctxt_alloc()
[all …]
svc_rdma_sendto.c
129 struct svc_rdma_send_ctxt *ctxt; in svc_rdma_send_ctxt_alloc() local
135 size = sizeof(*ctxt); in svc_rdma_send_ctxt_alloc()
137 ctxt = kmalloc(size, GFP_KERNEL); in svc_rdma_send_ctxt_alloc()
138 if (!ctxt) in svc_rdma_send_ctxt_alloc()
148 ctxt->sc_send_wr.next = NULL; in svc_rdma_send_ctxt_alloc()
149 ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe; in svc_rdma_send_ctxt_alloc()
150 ctxt->sc_send_wr.sg_list = ctxt->sc_sges; in svc_rdma_send_ctxt_alloc()
151 ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED; in svc_rdma_send_ctxt_alloc()
152 ctxt->sc_cqe.done = svc_rdma_wc_send; in svc_rdma_send_ctxt_alloc()
153 ctxt->sc_xprt_buf = buffer; in svc_rdma_send_ctxt_alloc()
[all …]
svc_rdma_rw.c
57 struct svc_rdma_rw_ctxt *ctxt; in svc_rdma_get_rw_ctxt() local
61 ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts); in svc_rdma_get_rw_ctxt()
62 if (ctxt) { in svc_rdma_get_rw_ctxt()
63 list_del(&ctxt->rw_list); in svc_rdma_get_rw_ctxt()
67 ctxt = kmalloc(sizeof(*ctxt) + in svc_rdma_get_rw_ctxt()
70 if (!ctxt) in svc_rdma_get_rw_ctxt()
72 INIT_LIST_HEAD(&ctxt->rw_list); in svc_rdma_get_rw_ctxt()
75 ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl; in svc_rdma_get_rw_ctxt()
76 if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges, in svc_rdma_get_rw_ctxt()
77 ctxt->rw_sg_table.sgl)) { in svc_rdma_get_rw_ctxt()
[all …]
svc_rdma_backchannel.c
119 struct svc_rdma_send_ctxt *ctxt) in svc_rdma_bc_sendto() argument
123 ret = svc_rdma_map_reply_msg(rdma, ctxt, &rqst->rq_snd_buf, NULL); in svc_rdma_bc_sendto()
131 ctxt->sc_send_wr.opcode = IB_WR_SEND; in svc_rdma_bc_sendto()
132 return svc_rdma_send(rdma, &ctxt->sc_send_wr); in svc_rdma_bc_sendto()
179 struct svc_rdma_send_ctxt *ctxt; in rpcrdma_bc_send_request() local
183 ctxt = svc_rdma_send_ctxt_get(rdma); in rpcrdma_bc_send_request()
184 if (!ctxt) in rpcrdma_bc_send_request()
187 p = ctxt->sc_xprt_buf; in rpcrdma_bc_send_request()
195 svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_MIN); in rpcrdma_bc_send_request()
201 rc = svc_rdma_bc_sendto(rdma, rqst, ctxt); in rpcrdma_bc_send_request()
[all …]
/Linux-v4.19/arch/x86/power/
cpu.c
37 static void msr_save_context(struct saved_context *ctxt) in msr_save_context() argument
39 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_save_context()
40 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_save_context()
48 static void msr_restore_context(struct saved_context *ctxt) in msr_restore_context() argument
50 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_restore_context()
51 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_restore_context()
75 static void __save_processor_state(struct saved_context *ctxt) in __save_processor_state() argument
85 store_idt(&ctxt->idt); in __save_processor_state()
93 ctxt->gdt_desc.size = GDT_SIZE - 1; in __save_processor_state()
94 ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id()); in __save_processor_state()
[all …]
/Linux-v4.19/arch/x86/include/asm/
kvm_emulate.h
96 ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
103 void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
112 int (*read_std)(struct x86_emulate_ctxt *ctxt,
124 int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr,
135 int (*write_std)(struct x86_emulate_ctxt *ctxt,
145 int (*fetch)(struct x86_emulate_ctxt *ctxt,
155 int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
166 int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
179 int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
185 void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);
[all …]
/Linux-v4.19/drivers/infiniband/hw/hfi1/
trace_rx.h
70 __field(u32, ctxt)
79 __entry->ctxt = packet->rcd->ctxt;
89 __entry->ctxt,
103 __field(u32, ctxt)
108 __entry->ctxt = rcd->ctxt;
125 __entry->ctxt,
133 TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr,
136 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
138 __field(unsigned int, ctxt)
147 __entry->ctxt = ctxt;
[all …]
trace_tx.h
202 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
203 TP_ARGS(dd, ctxt, subctxt),
205 __field(u16, ctxt)
209 __entry->ctxt = ctxt;
214 __entry->ctxt,
220 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
222 TP_ARGS(dd, ctxt, subctxt, comp_idx),
224 __field(u16, ctxt)
229 __entry->ctxt = ctxt;
235 __entry->ctxt,
[all …]
trace_ctxts.h
66 __field(unsigned int, ctxt)
78 __entry->ctxt = uctxt->ctxt;
91 __entry->ctxt,
107 TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
110 TP_ARGS(dd, ctxt, subctxt, cinfo),
112 __field(unsigned int, ctxt)
121 __entry->ctxt = ctxt;
131 __entry->ctxt,
init.c
153 if (rcd->ctxt == HFI1_CTRL_CTXT) in hfi1_create_kctxt()
219 rcd->dd->rcd[rcd->ctxt] = NULL; in hfi1_rcd_free()
264 u16 ctxt; in allocate_rcd_index() local
267 for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++) in allocate_rcd_index()
268 if (!dd->rcd[ctxt]) in allocate_rcd_index()
271 if (ctxt < dd->num_rcv_contexts) { in allocate_rcd_index()
272 rcd->ctxt = ctxt; in allocate_rcd_index()
273 dd->rcd[ctxt] = rcd; in allocate_rcd_index()
278 if (ctxt >= dd->num_rcv_contexts) in allocate_rcd_index()
281 *index = ctxt; in allocate_rcd_index()
[all …]
file_ops.c
171 #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \ argument
174 HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
315 trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); in hfi1_write_iter()
351 u16 ctxt; in hfi1_file_mmap() local
359 ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token); in hfi1_file_mmap()
362 if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) { in hfi1_file_mmap()
465 + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE); in hfi1_file_mmap()
555 uctxt->ctxt, fd->subctxt, in hfi1_file_mmap()
564 ctxt, subctxt, type, mapio, vmf, memaddr, memlen, in hfi1_file_mmap()
640 hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); in hfi1_file_close()
[all …]
/Linux-v4.19/arch/x86/xen/
smp_pv.c
282 struct vcpu_guest_context *ctxt; in cpu_initialize_context() local
291 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); in cpu_initialize_context()
292 if (ctxt == NULL) in cpu_initialize_context()
298 ctxt->user_regs.fs = __KERNEL_PERCPU; in cpu_initialize_context()
299 ctxt->user_regs.gs = __KERNEL_STACK_CANARY; in cpu_initialize_context()
301 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); in cpu_initialize_context()
308 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; in cpu_initialize_context()
309 ctxt->flags = VGCF_IN_KERNEL; in cpu_initialize_context()
310 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ in cpu_initialize_context()
311 ctxt->user_regs.ds = __USER_DS; in cpu_initialize_context()
[all …]
pmu.c
29 #define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \ argument
30 (uintptr_t)ctxt->field))
186 struct xen_pmu_intel_ctxt *ctxt; in xen_intel_pmu_emulate() local
196 ctxt = &xenpmu_data->pmu.c.intel; in xen_intel_pmu_emulate()
200 reg = &ctxt->global_ovf_ctrl; in xen_intel_pmu_emulate()
203 reg = &ctxt->global_status; in xen_intel_pmu_emulate()
206 reg = &ctxt->global_ctrl; in xen_intel_pmu_emulate()
209 reg = &ctxt->fixed_ctrl; in xen_intel_pmu_emulate()
214 fix_counters = field_offset(ctxt, fixed_counters); in xen_intel_pmu_emulate()
218 arch_cntr_pair = field_offset(ctxt, arch_counters); in xen_intel_pmu_emulate()
[all …]
/Linux-v4.19/drivers/net/wireless/intel/iwlwifi/mvm/
phy-ctxt.c
125 static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_cmd_hdr() argument
131 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, in iwl_mvm_phy_ctxt_cmd_hdr()
132 ctxt->color)); in iwl_mvm_phy_ctxt_cmd_hdr()
191 struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_apply() argument
200 iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time); in iwl_mvm_phy_ctxt_apply()
217 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_add() argument
222 ctxt->ref); in iwl_mvm_phy_ctxt_add()
225 ctxt->channel = chandef->chan; in iwl_mvm_phy_ctxt_add()
227 return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, in iwl_mvm_phy_ctxt_add()
236 void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) in iwl_mvm_phy_ctxt_ref() argument
[all …]
/Linux-v4.19/fs/nilfs2/
btnode.c
157 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_prepare_change_key() argument
161 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; in nilfs_btnode_prepare_change_key()
167 obh = ctxt->bh; in nilfs_btnode_prepare_change_key()
168 ctxt->newbh = NULL; in nilfs_btnode_prepare_change_key()
214 ctxt->newbh = nbh; in nilfs_btnode_prepare_change_key()
227 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_commit_change_key() argument
229 struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh; in nilfs_btnode_commit_change_key()
230 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; in nilfs_btnode_commit_change_key()
258 ctxt->bh = nbh; in nilfs_btnode_commit_change_key()
268 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_abort_change_key() argument
[all …]
/Linux-v4.19/arch/arm/kvm/
emulate.c
115 unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs; in vcpu_reg()
150 return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr; in __vcpu_spsr()
152 return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr; in __vcpu_spsr()
154 return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr; in __vcpu_spsr()
156 return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr; in __vcpu_spsr()
158 return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr; in __vcpu_spsr()
/Linux-v4.19/arch/arm64/include/asm/
kvm_hyp.h
136 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
137 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
138 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
139 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
140 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
141 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
/Linux-v4.19/fs/ocfs2/
xattr.c
274 struct ocfs2_xattr_set_ctxt *ctxt);
279 struct ocfs2_xattr_set_ctxt *ctxt);
712 struct ocfs2_xattr_set_ctxt *ctxt) in ocfs2_xattr_extend_allocation() argument
715 handle_t *handle = ctxt->handle; in ocfs2_xattr_extend_allocation()
738 ctxt->data_ac, in ocfs2_xattr_extend_allocation()
739 ctxt->meta_ac, in ocfs2_xattr_extend_allocation()
777 struct ocfs2_xattr_set_ctxt *ctxt) in __ocfs2_remove_xattr_range() argument
781 handle_t *handle = ctxt->handle; in __ocfs2_remove_xattr_range()
793 ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac, in __ocfs2_remove_xattr_range()
794 &ctxt->dealloc); in __ocfs2_remove_xattr_range()
[all …]
/Linux-v4.19/drivers/scsi/be2iscsi/
be_cmds.c
789 void *ctxt = &req->context; in beiscsi_cmd_cq_create() local
803 ctxt, coalesce_wm); in beiscsi_cmd_cq_create()
804 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); in beiscsi_cmd_cq_create()
805 AMAP_SET_BITS(struct amap_cq_context, count, ctxt, in beiscsi_cmd_cq_create()
807 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); in beiscsi_cmd_cq_create()
808 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); in beiscsi_cmd_cq_create()
809 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); in beiscsi_cmd_cq_create()
810 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); in beiscsi_cmd_cq_create()
811 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); in beiscsi_cmd_cq_create()
812 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, in beiscsi_cmd_cq_create()
[all …]
/Linux-v4.19/drivers/infiniband/hw/qib/
qib_file_ops.c
193 kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt; in qib_get_base_info()
242 kinfo->spi_ctxt = rcd->ctxt; in qib_get_base_info()
308 ctxttid = rcd->ctxt * dd->rcvtidcnt; in qib_tid_update()
503 ctxttid = rcd->ctxt * dd->rcvtidcnt; in qib_tid_free()
675 dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt); in qib_manage_rcvq()
745 what, rcd->ctxt, pfn, len, ret); in qib_mmap_mem()
1022 ureg = dd->uregbase + dd->ureg_align * rcd->ctxt; in qib_mmapf()
1115 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt); in qib_poll_next()
1284 static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, in setup_ctxt() argument
1300 rcd = qib_create_ctxtdata(ppd, ctxt, numa_id); in setup_ctxt()
[all …]
