
Searched refs:ctxt (Results 26 – 50 of 118) sorted by relevance


/Linux-v6.1/drivers/infiniband/hw/hfi1/
netdev_rx.c
59 struct hfi1_ctxtdata **ctxt) in hfi1_netdev_allocate_ctxt() argument
85 dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt); in hfi1_netdev_allocate_ctxt()
86 *ctxt = uctxt; in hfi1_netdev_allocate_ctxt()
122 struct hfi1_ctxtdata **ctxt) in hfi1_netdev_allot_ctxt() argument
127 rc = hfi1_netdev_allocate_ctxt(dd, ctxt); in hfi1_netdev_allot_ctxt()
133 rc = hfi1_netdev_setup_ctxt(rx, *ctxt); in hfi1_netdev_allot_ctxt()
136 hfi1_netdev_deallocate_ctxt(dd, *ctxt); in hfi1_netdev_allot_ctxt()
137 *ctxt = NULL; in hfi1_netdev_allot_ctxt()
213 i, rxq->rcd->ctxt); in hfi1_netdev_rxq_init()
271 rxq->rcd->ctxt); in enable_queues()
[all …]
file_ops.c
131 #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \ argument
134 HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
282 trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); in hfi1_write_iter()
321 u16 ctxt; in hfi1_file_mmap() local
329 ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token); in hfi1_file_mmap()
332 if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) { in hfi1_file_mmap()
435 + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE); in hfi1_file_mmap()
525 uctxt->ctxt, fd->subctxt, in hfi1_file_mmap()
534 ctxt, subctxt, type, mapio, vmf, memaddr, memlen, in hfi1_file_mmap()
610 hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); in hfi1_file_close()
[all …]
trace_misc.h
75 __field(u32, ctxt)
83 __entry->ctxt = packet->rcd->ctxt;
92 __entry->ctxt,
chip.h
583 static inline u64 read_kctxt_csr(const struct hfi1_devdata *dd, int ctxt, in read_kctxt_csr() argument
587 return read_csr(dd, offset0 + (0x100 * ctxt)); in read_kctxt_csr()
590 static inline void write_kctxt_csr(struct hfi1_devdata *dd, int ctxt, in write_kctxt_csr() argument
594 write_csr(dd, offset0 + (0x100 * ctxt), value); in write_kctxt_csr()
606 int ctxt, in get_kctxt_csr_addr() argument
609 return get_csr_addr(dd, offset0 + (0x100 * ctxt)); in get_kctxt_csr_addr()
618 static inline u64 read_uctxt_csr(const struct hfi1_devdata *dd, int ctxt, in read_uctxt_csr() argument
622 return read_csr(dd, offset0 + (0x1000 * ctxt)); in read_uctxt_csr()
625 static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt, in write_uctxt_csr() argument
629 write_csr(dd, offset0 + (0x1000 * ctxt), value); in write_uctxt_csr()
[all …]
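Aside: the chip.h hits above capture hfi1's per-context CSR addressing in one place: each kernel receive context's registers sit at a 0x100-byte stride from a base offset, while user contexts use a 0x1000-byte stride. Below is a minimal userspace sketch of that offset arithmetic, assuming only the strides shown in the snippets; the helper names and the stride macros are ours, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Strides copied from the read_kctxt_csr()/read_uctxt_csr() hits above;
 * the macro names KCTXT_STRIDE/UCTXT_STRIDE are illustrative only. */
#define KCTXT_STRIDE 0x100
#define UCTXT_STRIDE 0x1000

static uint64_t kctxt_csr_offset(uint32_t offset0, int ctxt)
{
    /* register for kernel receive context `ctxt` */
    return (uint64_t)offset0 + (uint64_t)KCTXT_STRIDE * ctxt;
}

static uint64_t uctxt_csr_offset(uint32_t offset0, int ctxt)
{
    /* register for user context `ctxt` */
    return (uint64_t)offset0 + (uint64_t)UCTXT_STRIDE * ctxt;
}

int main(void)
{
    for (int ctxt = 0; ctxt < 3; ctxt++)
        printf("ctxt %d: kctxt +0x%llx, uctxt +0x%llx\n", ctxt,
               (unsigned long long)kctxt_csr_offset(0, ctxt),
               (unsigned long long)uctxt_csr_offset(0, ctxt));
    return 0;
}

The same stride-per-context pattern shows up in the file_ops.c hit at line 435, where the mmap base advances by RXE_PER_CONTEXT_SIZE for each context.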
user_sdma.c
147 pq->ctxt = uctxt->ctxt; in hfi1_user_sdma_alloc_queues()
168 snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt, in hfi1_user_sdma_alloc_queues()
177 uctxt->ctxt); in hfi1_user_sdma_alloc_queues()
240 trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt); in hfi1_user_sdma_free_queues()
325 dd->unit, uctxt->ctxt, fd->subctxt, in hfi1_user_sdma_process_request()
332 dd->unit, uctxt->ctxt, fd->subctxt, ret); in hfi1_user_sdma_process_request()
336 trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt, in hfi1_user_sdma_process_request()
341 dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx); in hfi1_user_sdma_process_request()
352 dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx, in hfi1_user_sdma_process_request()
360 dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx); in hfi1_user_sdma_process_request()
[all …]
msix.c
141 rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64; in msix_request_rcd_irq_common()
142 rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64); in msix_request_rcd_irq_common()
144 remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr); in msix_request_rcd_irq_common()
159 rcd->dd->unit, rcd->ctxt); in msix_request_rcd_irq()
175 rcd->dd->unit, rcd->ctxt); in msix_netdev_request_rcd_irq()
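Aside: the msix.c hits show the standard split of a flat interrupt-source number across an array of 64-bit status registers: the quotient by 64 selects the register (rcd->ireg), the remainder selects the bit (rcd->imask). A hedged sketch of just that arithmetic follows; the IS_RCVAVAIL_START value below is a placeholder, not the hardware constant.

#include <stdint.h>
#include <stdio.h>

#define IS_RCVAVAIL_START 128 /* placeholder base source number, not the real value */

struct irq_src {
    uint32_t ireg;   /* index of the 64-bit interrupt status register */
    uint64_t imask;  /* bit within that register */
};

static struct irq_src irq_src_for_ctxt(unsigned int ctxt)
{
    unsigned int src = IS_RCVAVAIL_START + ctxt;
    struct irq_src s = {
        .ireg  = src / 64,                   /* which 64-bit word */
        .imask = UINT64_C(1) << (src % 64),  /* which bit in that word */
    };
    return s;
}

int main(void)
{
    struct irq_src s = irq_src_for_ctxt(5);
    printf("ctxt 5 -> reg %u, mask 0x%016llx\n", s.ireg,
           (unsigned long long)s.imask);
    return 0;
}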
/Linux-v6.1/arch/arm64/include/asm/
kvm_hyp.h
74 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
75 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
77 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
78 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
79 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
80 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
/Linux-v6.1/drivers/net/ethernet/intel/ice/
ice_lib.c
291 struct ice_vsi_ctx *ctxt; in ice_vsi_delete() local
294 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); in ice_vsi_delete()
295 if (!ctxt) in ice_vsi_delete()
299 ctxt->vf_num = vsi->vf->vf_id; in ice_vsi_delete()
300 ctxt->vsi_num = vsi->vsi_num; in ice_vsi_delete()
302 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); in ice_vsi_delete()
304 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); in ice_vsi_delete()
309 kfree(ctxt); in ice_vsi_delete()
859 static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) in ice_set_dflt_vsi_ctx() argument
863 memset(&ctxt->info, 0, sizeof(ctxt->info)); in ice_set_dflt_vsi_ctx()
[all …]
/Linux-v6.1/drivers/scsi/be2iscsi/
be_cmds.c
784 void *ctxt = &req->context; in beiscsi_cmd_cq_create() local
798 ctxt, coalesce_wm); in beiscsi_cmd_cq_create()
799 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); in beiscsi_cmd_cq_create()
800 AMAP_SET_BITS(struct amap_cq_context, count, ctxt, in beiscsi_cmd_cq_create()
802 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); in beiscsi_cmd_cq_create()
803 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); in beiscsi_cmd_cq_create()
804 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); in beiscsi_cmd_cq_create()
805 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); in beiscsi_cmd_cq_create()
806 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); in beiscsi_cmd_cq_create()
807 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, in beiscsi_cmd_cq_create()
[all …]
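Aside: the AMAP_SET_BITS calls above pack named fields (watermark, queue depth, EQ id, valid/armed flags) into a context blob handed to the adapter. A generic LSB-first bit-packing helper in that spirit, as a sketch; the field offsets in the example are invented, not the be2iscsi layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write `width` bits of `val` at bit offset `bit_off` into buf (LSB-first). */
static void set_bits(uint8_t *buf, unsigned int bit_off, unsigned int width,
                     uint64_t val)
{
    for (unsigned int i = 0; i < width; i++) {
        unsigned int b = bit_off + i;
        uint8_t mask = (uint8_t)(1u << (b % 8));
        if ((val >> i) & 1)
            buf[b / 8] |= mask;
        else
            buf[b / 8] &= (uint8_t)~mask;
    }
}

int main(void)
{
    uint8_t ctxt[8];
    memset(ctxt, 0, sizeof(ctxt));
    set_bits(ctxt, 0, 3, 5);   /* hypothetical 3-bit field at bit 0 */
    set_bits(ctxt, 12, 1, 1);  /* hypothetical "valid" flag at bit 12 */
    for (size_t i = 0; i < sizeof(ctxt); i++)
        printf("%02x ", ctxt[i]);
    printf("\n");
    return 0;
}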
/Linux-v6.1/drivers/infiniband/hw/qib/
qib_file_ops.c
193 kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt; in qib_get_base_info()
242 kinfo->spi_ctxt = rcd->ctxt; in qib_get_base_info()
308 ctxttid = rcd->ctxt * dd->rcvtidcnt; in qib_tid_update()
503 ctxttid = rcd->ctxt * dd->rcvtidcnt; in qib_tid_free()
675 dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt); in qib_manage_rcvq()
745 what, rcd->ctxt, pfn, len, ret); in qib_mmap_mem()
1022 ureg = dd->uregbase + dd->ureg_align * rcd->ctxt; in qib_mmapf()
1115 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt); in qib_poll_next()
1284 static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, in setup_ctxt() argument
1300 rcd = qib_create_ctxtdata(ppd, ctxt, numa_id); in setup_ctxt()
[all …]
qib_tx.c
135 unsigned ctxt; in find_ctxt() local
139 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { in find_ctxt()
140 rcd = dd->rcd[ctxt]; in find_ctxt()
459 unsigned ctxt; in qib_cancel_sends() local
471 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { in qib_cancel_sends()
473 rcd = dd->rcd[ctxt]; in qib_cancel_sends()
qib_init.c
165 struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt, in qib_create_ctxtdata() argument
178 rcd->ctxt = ctxt; in qib_create_ctxtdata()
179 dd->rcd[ctxt] = rcd; in qib_create_ctxtdata()
181 if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */ in qib_create_ctxtdata()
1292 int ctxt; in cleanup_device_data() local
1330 for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) { in cleanup_device_data()
1331 int ctxt_tidbase = ctxt * dd->rcvtidcnt; in cleanup_device_data()
1361 for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) { in cleanup_device_data()
1362 struct qib_ctxtdata *rcd = tmp[ctxt]; in cleanup_device_data()
1364 tmp[ctxt] = NULL; /* debugging paranoia */ in cleanup_device_data()
[all …]
/Linux-v6.1/arch/arm64/kvm/hyp/include/hyp/
debug-sr.h
92 struct kvm_cpu_context *ctxt) in __debug_save_state() argument
106 ctxt_sys_reg(ctxt, MDCCINT_EL1) = read_sysreg(mdccint_el1); in __debug_save_state()
110 struct kvm_cpu_context *ctxt) in __debug_restore_state() argument
125 write_sysreg(ctxt_sys_reg(ctxt, MDCCINT_EL1), mdccint_el1); in __debug_restore_state()
139 guest_ctxt = &vcpu->arch.ctxt; in __debug_switch_to_guest_common()
158 guest_ctxt = &vcpu->arch.ctxt; in __debug_switch_to_host_common()
/Linux-v6.1/arch/x86/kvm/
x86.c
97 #define emul_to_vcpu(ctxt) \ argument
98 ((struct kvm_vcpu *)(ctxt)->vcpu)
342 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
7227 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, in kvm_fetch_guest_virt() argument
7231 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_fetch_guest_virt()
7272 static int emulator_read_std(struct x86_emulate_ctxt *ctxt, in emulator_read_std() argument
7276 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_std()
7287 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, in kvm_read_guest_phys_system() argument
7290 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_phys_system()
7326 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, in emulator_write_std() argument
[all …]
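Aside: the x86.c hits all funnel through emul_to_vcpu(), which recovers the owning kvm_vcpu from a back-pointer stored in the emulator context, so emulator callbacks can be written against the context alone. A self-contained sketch of that back-pointer pattern, with trivial stand-in types in place of the real kernel structs:

#include <stdio.h>

struct kvm_vcpu { int id; };              /* stand-in for the real struct */
struct x86_emulate_ctxt { void *vcpu; };  /* back-pointer, as in the hit at line 98 */

#define emul_to_vcpu(ctxt) ((struct kvm_vcpu *)(ctxt)->vcpu)

int main(void)
{
    struct kvm_vcpu v = { .id = 7 };
    struct x86_emulate_ctxt e = { .vcpu = &v };
    printf("emulating on vcpu %d\n", emul_to_vcpu(&e)->id);
    return 0;
}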
/Linux-v6.1/arch/arm64/kvm/
reset.c
310 memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs)); in kvm_reset_vcpu()
311 vcpu->arch.ctxt.spsr_abt = 0; in kvm_reset_vcpu()
312 vcpu->arch.ctxt.spsr_und = 0; in kvm_reset_vcpu()
313 vcpu->arch.ctxt.spsr_irq = 0; in kvm_reset_vcpu()
314 vcpu->arch.ctxt.spsr_fiq = 0; in kvm_reset_vcpu()
arch_timer.c
55 u32 timer_get_ctl(struct arch_timer_context *ctxt) in timer_get_ctl() argument
57 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_get_ctl()
59 switch(arch_timer_ctx_index(ctxt)) { in timer_get_ctl()
70 u64 timer_get_cval(struct arch_timer_context *ctxt) in timer_get_cval() argument
72 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_get_cval()
74 switch(arch_timer_ctx_index(ctxt)) { in timer_get_cval()
85 static u64 timer_get_offset(struct arch_timer_context *ctxt) in timer_get_offset() argument
87 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_get_offset()
89 switch(arch_timer_ctx_index(ctxt)) { in timer_get_offset()
97 static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) in timer_set_ctl() argument
[all …]
guest.c
136 return &vcpu->arch.ctxt.regs.regs[off]; in core_reg_addr()
139 return &vcpu->arch.ctxt.regs.sp; in core_reg_addr()
142 return &vcpu->arch.ctxt.regs.pc; in core_reg_addr()
145 return &vcpu->arch.ctxt.regs.pstate; in core_reg_addr()
148 return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1); in core_reg_addr()
151 return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1); in core_reg_addr()
154 return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1); in core_reg_addr()
157 return &vcpu->arch.ctxt.spsr_abt; in core_reg_addr()
160 return &vcpu->arch.ctxt.spsr_und; in core_reg_addr()
163 return &vcpu->arch.ctxt.spsr_irq; in core_reg_addr()
[all …]
/Linux-v6.1/arch/x86/kernel/cpu/mtrr/
mtrr.h
49 void set_mtrr_done(struct set_mtrr_context *ctxt);
50 void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
51 void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
/Linux-v6.1/net/sunrpc/xprtrdma/
svc_rdma_backchannel.c
149 struct svc_rdma_send_ctxt *ctxt; in rpcrdma_bc_send_request() local
153 ctxt = svc_rdma_send_ctxt_get(rdma); in rpcrdma_bc_send_request()
154 if (!ctxt) in rpcrdma_bc_send_request()
157 p = xdr_reserve_space(&ctxt->sc_stream, RPCRDMA_HDRLEN_MIN); in rpcrdma_bc_send_request()
169 rc = svc_rdma_bc_sendto(rdma, rqst, ctxt); in rpcrdma_bc_send_request()
175 svc_rdma_send_ctxt_put(rdma, ctxt); in rpcrdma_bc_send_request()
/Linux-v6.1/include/linux/
wwan.h
164 int (*newlink)(void *ctxt, struct net_device *dev,
166 void (*dellink)(void *ctxt, struct net_device *dev,
171 void *ctxt, u32 def_link_id);
/Linux-v6.1/arch/arm64/kvm/hyp/nvhe/
switch.c
59 struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; in __activate_traps() local
67 write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR); in __activate_traps()
69 write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR); in __activate_traps()
261 guest_ctxt = &vcpu->arch.ctxt; in __kvm_vcpu_run()
/Linux-v6.1/include/trace/events/
rpcrdma.h
1538 const struct svc_rdma_recv_ctxt *ctxt,
1543 TP_ARGS(ctxt, p, hdrlen),
1556 __entry->cq_id = ctxt->rc_cid.ci_queue_id;
1557 __entry->completion_id = ctxt->rc_cid.ci_completion_id;
1573 const struct svc_rdma_recv_ctxt *ctxt,
1577 TP_ARGS(ctxt, hdrlen),
1586 __entry->cq_id = ctxt->rc_cid.ci_queue_id;
1587 __entry->completion_id = ctxt->rc_cid.ci_completion_id;
1598 const struct svc_rdma_recv_ctxt *ctxt,
1602 TP_ARGS(ctxt, p),
[all …]
/Linux-v6.1/drivers/net/ethernet/intel/i40e/
i40e_client.c
675 struct i40e_vsi_context ctxt; in i40e_client_update_vsi_ctxt() local
683 ctxt.seid = pf->main_vsi_seid; in i40e_client_update_vsi_ctxt()
684 ctxt.pf_num = pf->hw.pf_id; in i40e_client_update_vsi_ctxt()
685 err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_client_update_vsi_ctxt()
686 ctxt.flags = I40E_AQ_VSI_TYPE_PF; in i40e_client_update_vsi_ctxt()
698 ctxt.info.valid_sections = in i40e_client_update_vsi_ctxt()
700 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; in i40e_client_update_vsi_ctxt()
703 ctxt.info.valid_sections = in i40e_client_update_vsi_ctxt()
705 ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA; in i40e_client_update_vsi_ctxt()
714 err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_client_update_vsi_ctxt()
i40e_main.c
1925 struct i40e_vsi_context *ctxt, in i40e_vsi_setup_queue_map_mqprio() argument
1974 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in i40e_vsi_setup_queue_map_mqprio()
1975 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); in i40e_vsi_setup_queue_map_mqprio()
1976 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map_mqprio()
1977 ctxt->info.valid_sections |= cpu_to_le16(sections); in i40e_vsi_setup_queue_map_mqprio()
2013 struct i40e_vsi_context *ctxt, in i40e_vsi_setup_queue_map() argument
2030 memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping)); in i40e_vsi_setup_queue_map()
2132 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); in i40e_vsi_setup_queue_map()
2144 ctxt->info.up_enable_bits = enabled_tc; in i40e_vsi_setup_queue_map()
2147 ctxt->info.mapping_flags |= in i40e_vsi_setup_queue_map()
[all …]
/Linux-v6.1/drivers/hwtracing/coresight/
coresight-etm-perf.c
431 struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); in etm_event_start() local
432 struct perf_output_handle *handle = &ctxt->handle; in etm_event_start()
440 if (WARN_ON(ctxt->event_data)) in etm_event_start()
484 ctxt->event_data = event_data; in etm_event_start()
509 struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); in etm_event_stop() local
510 struct perf_output_handle *handle = &ctxt->handle; in etm_event_stop()
519 WARN_ON(perf_get_aux(handle) != ctxt->event_data)) in etm_event_stop()
522 event_data = ctxt->event_data; in etm_event_stop()
524 ctxt->event_data = NULL; in etm_event_stop()
