/Linux-v5.4/drivers/gpu/drm/i915/gvt/

execlist.c
   42: #define execlist_ring_mmio(gvt, ring_id, offset) \    [argument]
   43: (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
   57: static int ring_id_to_context_switch_event(unsigned int ring_id)    [in ring_id_to_context_switch_event(), argument]
   59: if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))    [in ring_id_to_context_switch_event()]
   62: return context_switch_events[ring_id];    [in ring_id_to_context_switch_event()]
   96: int ring_id = execlist->ring_id;    [in emulate_execlist_status(), local]
   98: ring_id, _EL_OFFSET_STATUS);    [in emulate_execlist_status()]
  131: int ring_id = execlist->ring_id;    [in emulate_csb_update(), local]
  138: ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,    [in emulate_csb_update()]
  140: ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,    [in emulate_csb_update()]
  [all …]

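The execlist_ring_mmio() hit above shows the core pattern: a per-engine register address is the engine's MMIO base plus a fixed register offset, indexed by ring_id. A minimal, self-contained sketch of the same computation; the struct layout, offsets and base addresses here are hypothetical stand-ins, not the real i915 values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-engine state that carries an MMIO base. */
struct engine {
    uint32_t mmio_base;              /* base of this engine's register block */
};

#define EL_OFFSET_STATUS 0x234       /* example per-engine register offset */

/* Same shape as execlist_ring_mmio(): per-ring register = base + offset. */
#define ring_mmio(engines, ring_id, offset) \
    ((engines)[(ring_id)].mmio_base + (offset))

int main(void)
{
    struct engine engines[] = {
        { .mmio_base = 0x2000 },     /* ring 0 */
        { .mmio_base = 0x12000 },    /* ring 1 */
    };

    /* Resolve the (made-up) status register of ring 1. */
    printf("ring 1 status reg: 0x%x\n", ring_mmio(engines, 1, EL_OFFSET_STATUS));
    return 0;
}
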
mmio_context.c
  165: int ring_id, i;    [in load_render_mocs(), local]
  171: for (ring_id = 0; ring_id < cnt; ring_id++) {    [in load_render_mocs()]
  172: if (!HAS_ENGINE(dev_priv, ring_id))    [in load_render_mocs()]
  174: offset.reg = regs[ring_id];    [in load_render_mocs()]
  176: gen9_render_mocs.control_table[ring_id][i] =    [in load_render_mocs()]
  199: int ring_id = req->engine->id;    [in restore_context_mmio_for_inhibit(), local]
  200: int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];    [in restore_context_mmio_for_inhibit()]
  216: if (mmio->ring_id != ring_id ||    [in restore_context_mmio_for_inhibit()]
  224: *(cs-2), *(cs-1), vgpu->id, ring_id);    [in restore_context_mmio_for_inhibit()]
  346: static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)    [in handle_tlb_pending_event(), argument]
  [all …]

scheduler.c
  100: if (workload->ring_id != RCS0)    [in sr_oa_regs()]
  130: int ring_id = workload->ring_id;    [in populate_shadow_context(), local]
  156: if (ring_id == RCS0) {    [in populate_shadow_context()]
  177: gvt_dbg_sched("ring id %d workload lrca %x", ring_id,    [in populate_shadow_context()]
  180: context_page_num = gvt->dev_priv->engine[ring_id]->context_size;    [in populate_shadow_context()]
  184: if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)    [in populate_shadow_context()]
  212: static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)    [in save_ring_hw_state(), argument]
  215: u32 ring_base = dev_priv->engine[ring_id]->mmio_base;    [in save_ring_hw_state()]
  233: enum intel_engine_id ring_id = req->engine->id;    [in shadow_context_status_change(), local]
  240: scheduler->engine_owner[ring_id]) {    [in shadow_context_status_change()]
  [all …]

trace.h
  116: TP_PROTO(int id, char *type, int ring_id, int root_entry_type,
  119: TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
  128: id, type, ring_id, root_entry_type, gma, gpa);
  229: TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
  233: TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
  238: __field(u8, ring_id)
  250: __entry->ring_id = ring_id;
  263: __entry->ring_id,

scheduler.h
   82: int ring_id;    [member]
  132: #define workload_q_head(vgpu, ring_id) \    [argument]
  133: (&(vgpu->submission.workload_q_head[ring_id]))
  158: intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,

cmd_parser.c
  463: int ring_id;    [member]
  636: static inline u32 get_opcode(u32 cmd, int ring_id)    [in get_opcode(), argument]
  640: d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];    [in get_opcode()]
  648: unsigned int opcode, int ring_id)    [in find_cmd_entry(), argument]
  653: if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))    [in find_cmd_entry()]
  660: u32 cmd, int ring_id)    [in get_cmd_info(), argument]
  664: opcode = get_opcode(cmd, ring_id);    [in get_cmd_info()]
  668: return find_cmd_entry(gvt, opcode, ring_id);    [in get_cmd_info()]
  676: static inline void print_opcode(u32 cmd, int ring_id)    [in print_opcode(), argument]
  681: d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];    [in print_opcode()]
  [all …]

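The find_cmd_entry() hit illustrates the command-table lookup pattern: an entry matches only when its opcode matches and its rings bitmask has the bit for the current ring set. Below is a minimal sketch of that per-ring mask test; the table contents and the find_cmd() helper are invented for illustration and do not reproduce the real i915 decode tables:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Hypothetical command descriptor: an opcode plus a bitmask of rings
 * on which the command is legal (bit i set => valid on ring i). */
struct cmd_info {
    uint32_t opcode;
    uint32_t rings;
};

static const struct cmd_info cmd_table[] = {
    { .opcode = 0x11, .rings = BIT(0) },            /* ring 0 only */
    { .opcode = 0x26, .rings = BIT(0) | BIT(2) },   /* rings 0 and 2 */
};

/* Same shape as find_cmd_entry(): match the opcode, then check the ring bit. */
static const struct cmd_info *find_cmd(uint32_t opcode, int ring_id)
{
    for (size_t i = 0; i < sizeof(cmd_table) / sizeof(cmd_table[0]); i++) {
        if (opcode == cmd_table[i].opcode &&
            (cmd_table[i].rings & BIT(ring_id)))
            return &cmd_table[i];
    }
    return NULL;   /* unknown command, or not allowed on this ring */
}

int main(void)
{
    printf("0x26 on ring 2: %s\n", find_cmd(0x26, 2) ? "found" : "rejected");
    printf("0x26 on ring 1: %s\n", find_cmd(0x26, 1) ? "found" : "rejected");
    return 0;
}
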
mmio_context.h
   40: int ring_id;    [member]
   48: struct intel_vgpu *next, int ring_id);

sched_policy.c
  447: int ring_id;    [in intel_vgpu_stop_schedule(), local]
  470: for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {    [in intel_vgpu_stop_schedule()]
  471: if (scheduler->engine_owner[ring_id] == vgpu) {    [in intel_vgpu_stop_schedule()]
  472: intel_gvt_switch_mmio(vgpu, NULL, ring_id);    [in intel_vgpu_stop_schedule()]
  473: scheduler->engine_owner[ring_id] = NULL;    [in intel_vgpu_stop_schedule()]

interrupt.h
  229: int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
  230: int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
  231: int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);

execlist.h
  171: int ring_id;    [member]
  180: int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);

handlers.c
  512: int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);    [in force_nonpriv_write(), local]
  517: if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {    [in force_nonpriv_write()]
  519: vgpu->id, ring_id, offset, bytes);    [in force_nonpriv_write()]
  523: ring_base = dev_priv->engine[ring_id]->mmio_base;    [in force_nonpriv_write()]
 1473: int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);    [in hws_pga_write(), local]
 1485: if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {    [in hws_pga_write()]
 1490: vgpu->hws_pga[ring_id] = value;    [in hws_pga_write()]
 1640: int ring_id;    [in mmio_read_from_hw(), local]
 1643: ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);    [in mmio_read_from_hw()]
 1650: if (ring_id >= 0)    [in mmio_read_from_hw()]
  [all …]

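The handlers.c hits share one pattern: map a trapped MMIO offset back to a ring_id, then validate it before indexing per-ring state. The real mapping helper, intel_gvt_render_mmio_to_ring_id(), is not shown in these results, so the sketch below substitutes a simple base-range scan as an assumed stand-in; the bases, block length and per-ring array are all hypothetical:

#include <stdint.h>
#include <stdio.h>

#define NUM_ENGINES     4
#define ENGINE_MMIO_LEN 0x1000   /* assumed length of each engine's register block */

/* Hypothetical per-engine MMIO bases. */
static const uint32_t engine_mmio_base[NUM_ENGINES] = {
    0x2000, 0x12000, 0x1c000, 0x22000,
};

/* Assumed stand-in for the offset -> ring_id reverse lookup: return the
 * engine whose register block contains the offset, or -1 if none does. */
static int mmio_to_ring_id(uint32_t offset)
{
    for (int i = 0; i < NUM_ENGINES; i++) {
        if (offset >= engine_mmio_base[i] &&
            offset < engine_mmio_base[i] + ENGINE_MMIO_LEN)
            return i;
    }
    return -1;
}

/* Mirror of the validation seen in hws_pga_write(): reject out-of-range ids
 * before touching a per-ring array. */
static int handle_write(uint32_t offset, uint32_t value, uint32_t *per_ring)
{
    int ring_id = mmio_to_ring_id(offset);

    if (ring_id < 0 || ring_id >= NUM_ENGINES)
        return -1;   /* not a per-engine register we track */

    per_ring[ring_id] = value;
    return 0;
}

int main(void)
{
    uint32_t hws_pga[NUM_ENGINES] = { 0 };

    printf("write @0x12080: %d\n", handle_write(0x12080, 0xdead0000, hws_pga));
    printf("write @0x90000: %d\n", handle_write(0x90000, 0x1, hws_pga));
    return 0;
}
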
/Linux-v5.4/drivers/gpu/drm/amd/amdkfd/

cik_event_interrupt.c
   53: tmp_ihre->ring_id &= 0x000000ff;    [in cik_event_interrupt_isr()]
   54: tmp_ihre->ring_id |= vmid << 8;    [in cik_event_interrupt_isr()]
   55: tmp_ihre->ring_id |= pasid << 16;    [in cik_event_interrupt_isr()]
   63: vmid = (ihre->ring_id & 0x0000ff00) >> 8;    [in cik_event_interrupt_isr()]
   69: pasid = (ihre->ring_id & 0xffff0000) >> 16;    [in cik_event_interrupt_isr()]
   90: unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;    [in cik_event_interrupt_wq()]
   91: unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;    [in cik_event_interrupt_wq()]

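In the amdkfd CIK interrupt path, ring_id doubles as a packed field: bits 0-7 keep the original ring id, bits 8-15 carry the VMID and bits 16-31 the PASID, as the masks and shifts above show. A standalone sketch of the same pack/unpack arithmetic (the field layout is taken from those masks; the helper name is made up):

#include <stdint.h>
#include <stdio.h>

/* Pack vmid (bits 8-15) and pasid (bits 16-31) into ring_id, keeping the
 * original ring id in bits 0-7, as in cik_event_interrupt_isr(). */
static uint32_t pack_ring_id(uint32_t ring_id, uint32_t vmid, uint32_t pasid)
{
    ring_id &= 0x000000ff;
    ring_id |= vmid << 8;
    ring_id |= pasid << 16;
    return ring_id;
}

int main(void)
{
    uint32_t ring_id = pack_ring_id(0x07, 0x0a, 0x1234);

    /* Unpack with the same masks and shifts used on the read side. */
    uint32_t vmid  = (ring_id & 0x0000ff00) >> 8;
    uint32_t pasid = (ring_id & 0xffff0000) >> 16;

    printf("ring %u vmid %u pasid 0x%x\n", ring_id & 0xff, vmid, pasid);
    return 0;
}
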
kfd_int_process_v9.c
  111: uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);    [in event_interrupt_wq_v9(), local]
  117: info.prot_valid = ring_id & 0x08;    [in event_interrupt_wq_v9()]
  118: info.prot_read = ring_id & 0x10;    [in event_interrupt_wq_v9()]
  119: info.prot_write = ring_id & 0x20;    [in event_interrupt_wq_v9()]

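Here the ring_id field of the IH entry is reused as a set of fault flags, with bits 3, 4 and 5 (masks 0x08, 0x10, 0x20) marking protection-valid, read and write respectively. A minimal sketch of that flag decode; the fault_info struct and decode_fault_flags() helper are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical container for the decoded fault flags. */
struct fault_info {
    bool prot_valid;  /* bit 3: protection fault information is valid */
    bool prot_read;   /* bit 4: faulting access was a read */
    bool prot_write;  /* bit 5: faulting access was a write */
};

static struct fault_info decode_fault_flags(uint16_t ring_id)
{
    struct fault_info info = {
        .prot_valid = ring_id & 0x08,
        .prot_read  = ring_id & 0x10,
        .prot_write = ring_id & 0x20,
    };
    return info;
}

int main(void)
{
    struct fault_info info = decode_fault_flags(0x28); /* valid + write */

    printf("valid=%d read=%d write=%d\n",
           info.prot_valid, info.prot_read, info.prot_write);
    return 0;
}
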
cik_int.h
   31: uint32_t ring_id;    [member]

/Linux-v5.4/drivers/net/wireless/ath/wil6210/

txrx_edma.c
  119: int ring_id = wil_find_free_sring(wil);    [in wil_tx_init_edma(), local]
  131: status_ring_size, ring_id);    [in wil_tx_init_edma()]
  133: if (ring_id < 0)    [in wil_tx_init_edma()]
  134: return ring_id;    [in wil_tx_init_edma()]
  139: sring = &wil->srings[ring_id];    [in wil_tx_init_edma()]
  148: rc = wil_wmi_tx_sring_cfg(wil, ring_id);    [in wil_tx_init_edma()]
  153: wil->tx_sring_idx = ring_id;    [in wil_tx_init_edma()]
  357: u16 ring_id)    [in wil_init_rx_sring(), argument]
  359: struct wil_status_ring *sring = &wil->srings[ring_id];    [in wil_init_rx_sring()]
  363: status_ring_size, ring_id);    [in wil_init_rx_sring()]
  [all …]

trace.h
  271: __field(u8, ring_id)
  279: __entry->ring_id = msg->ring_id;
  285: __entry->ring_id, __entry->index, __entry->len,

wmi.c
 2775: .ring_id = ringid,    [in wmi_addba()]
 2790: .ring_id = ringid,    [in wmi_delba_tx()]
 3699: int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id)    [in wil_wmi_tx_sring_cfg(), argument]
 3703: struct wil_status_ring *sring = &wil->srings[ring_id];    [in wil_wmi_tx_sring_cfg()]
 3717: cmd.ring_cfg.ring_id = ring_id;    [in wil_wmi_tx_sring_cfg()]
 3775: int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id)    [in wil_wmi_rx_sring_add(), argument]
 3779: struct wil_status_ring *sring = &wil->srings[ring_id];    [in wil_wmi_rx_sring_add()]
 3784: .ring_id = ring_id,    [in wil_wmi_rx_sring_add()]
 3827: .ring_id = WIL_RX_DESC_RING_ID,    [in wil_wmi_rx_desc_ring_add()]
 3860: int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,    [in wil_wmi_tx_desc_ring_add(), argument]
  [all …]

txrx.c
 1268: static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,    [in wil_tx_vring_modify(), argument]
 1281: .ringid = ring_id,    [in wil_tx_vring_modify()]
 1299: struct wil_ring *vring = &wil->ring_tx[ring_id];    [in wil_tx_vring_modify()]
 1300: struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];    [in wil_tx_vring_modify()]
 1302: wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,    [in wil_tx_vring_modify()]
 1307: wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);    [in wil_tx_vring_modify()]
 1311: if (wil->ring2cid_tid[ring_id][0] != cid ||    [in wil_tx_vring_modify()]
 1312: wil->ring2cid_tid[ring_id][1] != tid) {    [in wil_tx_vring_modify()]
 1314: wil->ring2cid_tid[ring_id][0],    [in wil_tx_vring_modify()]
 1315: wil->ring2cid_tid[ring_id][1]);    [in wil_tx_vring_modify()]
  [all …]

/Linux-v5.4/drivers/crypto/inside-secure/

safexcel_ring.c
  116: int ring_id,    [in safexcel_add_cdesc(), argument]
  124: cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);    [in safexcel_add_cdesc()]
  169: int ring_id,    [in safexcel_add_rdesc(), argument]
  175: rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);    [in safexcel_add_rdesc()]

/Linux-v5.4/drivers/net/ethernet/apm/xgene/

xgene_enet_ring2.c
  120: u32 ring_id;    [in xgene_enet_clr_desc_ring_id(), local]
  122: ring_id = ring->id | OVERWRITE;    [in xgene_enet_clr_desc_ring_id()]
  123: xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);    [in xgene_enet_clr_desc_ring_id()]

xgene_enet_main.c
 1212: enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)    [in xgene_enet_create_desc_ring(), argument]
 1232: ring->id = ring_id;    [in xgene_enet_create_desc_ring()]
 1313: u16 ring_id, slots;    [in xgene_enet_create_desc_rings(), local]
 1323: ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);    [in xgene_enet_create_desc_rings()]
 1326: ring_id);    [in xgene_enet_create_desc_rings()]
 1334: ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);    [in xgene_enet_create_desc_rings()]
 1337: ring_id);    [in xgene_enet_create_desc_rings()]
 1365: ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);    [in xgene_enet_create_desc_rings()]
 1368: ring_id);    [in xgene_enet_create_desc_rings()]
 1398: ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);    [in xgene_enet_create_desc_rings()]
  [all …]

/Linux-v5.4/drivers/crypto/qat/qat_common/

adf_transport_debug.c
  207: int ring_id = *((int *)v) - 1;    [in adf_bank_show(), local]
  208: struct adf_etr_ring_data *ring = &bank->rings[ring_id];    [in adf_bank_show()]
  212: if (!(bank->ring_mask & 1 << ring_id))    [in adf_bank_show()]

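adf_bank_show() only reports a ring when its bit is set in the bank's ring_mask, i.e. a per-bank bitmap records which rings are actually allocated. A small self-contained sketch of that bitmap check; the bank layout, ring count and mask value are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define RINGS_PER_BANK 16

/* Hypothetical, simplified bank: just a bitmap of allocated rings. */
struct bank {
    uint32_t ring_mask;   /* bit i set => ring i is in use */
};

int main(void)
{
    struct bank bank = { .ring_mask = (1u << 0) | (1u << 3) };

    for (int ring_id = 0; ring_id < RINGS_PER_BANK; ring_id++) {
        /* Same test as adf_bank_show(): skip rings that are not allocated. */
        if (!(bank.ring_mask & (1u << ring_id)))
            continue;
        printf("ring %d is allocated\n", ring_id);
    }
    return 0;
}
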
/Linux-v5.4/drivers/net/ethernet/intel/i40e/

i40e_debugfs.c
  494: static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,    [in i40e_dbg_dump_desc(), argument]
  508: if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {    [in i40e_dbg_dump_desc()]
  509: dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);    [in i40e_dbg_dump_desc()]
  520: ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id],    [in i40e_dbg_dump_desc()]
  527: vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);    [in i40e_dbg_dump_desc()]
  554: vsi_seid, ring_id, desc_n,    [in i40e_dbg_dump_desc()]
  560: vsi_seid, ring_id, desc_n,    [in i40e_dbg_dump_desc()]
  918: int ring_id, desc_n;    [in i40e_dbg_command_write(), local]
  921: &vsi_seid, &ring_id, &desc_n);    [in i40e_dbg_command_write()]
  922: i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,    [in i40e_dbg_command_write()]
  [all …]

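i40e_dbg_dump_desc() treats ring_id as a user-supplied debugfs index: it is range-checked against the VSI's queue-pair count before being used to pick either the rx or the tx ring array. A minimal sketch of that validate-then-select pattern; the vsi/ring structs and lookup_ring() helper are made-up stand-ins:

#include <stdio.h>

#define NUM_QUEUE_PAIRS 4

/* Hypothetical, simplified VSI: parallel arrays of rx and tx rings. */
struct ring { int id; };
struct vsi {
    int num_queue_pairs;
    struct ring rx_rings[NUM_QUEUE_PAIRS];
    struct ring tx_rings[NUM_QUEUE_PAIRS];
};

/* Validate the user-supplied ring_id first, then select rx vs tx,
 * mirroring the checks seen in i40e_dbg_dump_desc(). */
static struct ring *lookup_ring(struct vsi *vsi, int ring_id, int is_rx_ring)
{
    if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
        fprintf(stderr, "ring %d not found\n", ring_id);
        return NULL;
    }
    return is_rx_ring ? &vsi->rx_rings[ring_id] : &vsi->tx_rings[ring_id];
}

int main(void)
{
    struct vsi vsi = { .num_queue_pairs = NUM_QUEUE_PAIRS };

    printf("rx ring 2: %p\n", (void *)lookup_ring(&vsi, 2, 1));
    printf("tx ring 9: %p\n", (void *)lookup_ring(&vsi, 9, 0));
    return 0;
}
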
/Linux-v5.4/drivers/gpu/drm/amd/amdgpu/

amdgpu_trace.h
   83: __field(unsigned, ring_id)
   95: __entry->ring_id = iv->ring_id;
  109: __entry->ring_id, __entry->vmid,

amdgpu_irq.h
   49: unsigned ring_id;    [member]
|