/Linux-v4.19/drivers/gpu/drm/i915/gvt/ — occurrences of the vgpu identifier, with source excerpts per file:
vgpu.c
   38  void populate_pvinfo_page(struct intel_vgpu *vgpu)
   41          vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
   42          vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
   43          vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
   44          vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
   45          vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
   47          vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
   48          vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
   49          vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
   51          vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
  [all …]
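The vgpu.c excerpt shows populate_pvinfo_page() advertising features to the guest by writing capability bits into the vgt_caps field of the shared PVINFO page (first assigned, then OR-ed in). A rough illustration of that flag pattern only; the DEMO_CAPS_* values below are invented, not the kernel's VGT_CAPS_* encodings.

#include <stdint.h>
#include <stdio.h>

/* Invented bit positions for the sketch; the real VGT_CAPS_* values differ. */
#define DEMO_CAPS_FULL_48BIT_PPGTT (1u << 2)
#define DEMO_CAPS_HWSP_EMULATION   (1u << 3)
#define DEMO_CAPS_HUGE_GTT         (1u << 4)

int main(void)
{
	uint32_t vgt_caps;

	/* host side: advertise what this vGPU supports, as in the excerpt */
	vgt_caps  = DEMO_CAPS_FULL_48BIT_PPGTT;
	vgt_caps |= DEMO_CAPS_HWSP_EMULATION;
	vgt_caps |= DEMO_CAPS_HUGE_GTT;

	/* guest side: feature-test a bit before relying on the capability */
	if (vgt_caps & DEMO_CAPS_HUGE_GTT)
		printf("huge GTT entries supported\n");
	return 0;
}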
|
display.c
   38  static int get_edp_pipe(struct intel_vgpu *vgpu)
   40          u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
   58  static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
   60          struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
   62          if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
   65          if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
   70  int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
   72          struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
   77          if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
   80          if (edp_pipe_is_enabled(vgpu) &&
  [all …]
|
cfg_space.c
   64  static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
   67          u8 *cfg_base = vgpu_cfg_space(vgpu);
   98  int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
  104          if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
  107          memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
  111  static int map_aperture(struct intel_vgpu *vgpu, bool map)
  113          phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
  114          unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
  119          if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
  122          val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
  [all …]
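The cfg_space.c matches show PCI configuration space being emulated from a per-vGPU buffer: a read is a bounds check followed by a memcpy out of vgpu_cfg_space(vgpu). Below is a minimal user-space sketch of that read path; every name and the 256-byte size are invented for illustration.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_CFG_SPACE_SIZE 256   /* assumed config-space size for the sketch */

struct demo_vgpu {
	uint8_t virtual_cfg_space[DEMO_CFG_SPACE_SIZE];  /* per-vGPU PCI config image */
};

/* Mirrors the shape of intel_vgpu_emulate_cfg_read(): bounds-check the access,
 * then copy straight out of the vGPU's private config-space image. */
static int demo_emulate_cfg_read(struct demo_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes)
{
	if (offset + bytes > DEMO_CFG_SPACE_SIZE)
		return -EINVAL;
	memcpy(p_data, vgpu->virtual_cfg_space + offset, bytes);
	return 0;
}

int main(void)
{
	/* vendor id 0x8086 stored little-endian at offset 0 */
	struct demo_vgpu vgpu = { .virtual_cfg_space = { 0x86, 0x80 } };
	uint16_t vendor = 0;

	demo_emulate_cfg_read(&vgpu, 0, &vendor, sizeof(vendor));
	printf("vendor id: %#06x\n", vendor);
	return 0;
}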
|
mmio.c
   46  int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
   48          u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
   59  static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
   66          if (!vgpu || !p_data)
   69          gvt = vgpu->gvt;
   70          mutex_lock(&vgpu->vgpu_lock);
   71          offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
   74                  intel_vgpu_default_mmio_read(vgpu, offset, p_data,
   77                  intel_vgpu_default_mmio_write(vgpu, offset, p_data,
   81                  pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
  [all …]
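mmio.c converts a trapped guest physical address into an MMIO register offset by subtracting the guest-visible BAR0 base (intel_vgpu_get_bar_gpa()), and the failsafe path then services the access from the virtual registers. A hedged sketch of just that translation step; the helper name and the BAR values in main() are made up.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the idea behind intel_vgpu_gpa_to_mmio_offset(): the MMIO offset
 * is the guest physical address minus the guest-programmed BAR0 base. */
static int64_t demo_gpa_to_mmio_offset(uint64_t bar0_gpa, uint64_t bar0_size,
					uint64_t gpa)
{
	if (gpa < bar0_gpa || gpa >= bar0_gpa + bar0_size)
		return -1;	/* not an MMIO access for this vGPU */
	return (int64_t)(gpa - bar0_gpa);
}

int main(void)
{
	/* assume the guest mapped BAR0 at 0xf0000000 with a 16 MiB window */
	printf("offset = %lld\n",
	       (long long)demo_gpa_to_mmio_offset(0xf0000000ULL, 16ULL << 20,
						   0xf0002340ULL));
	return 0;
}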
|
aperture_gm.c
   40  static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
   42          struct intel_gvt *gvt = vgpu->gvt;
   50                  node = &vgpu->gm.high_gm_node;
   51                  size = vgpu_hidden_sz(vgpu);
   56                  node = &vgpu->gm.low_gm_node;
   57                  size = vgpu_aperture_sz(vgpu);
   76  static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
   78          struct intel_gvt *gvt = vgpu->gvt;
   82          ret = alloc_gm(vgpu, false);
   86          ret = alloc_gm(vgpu, true);
  [all …]
|
edid.c
   49  static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
   51          struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
   68          if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
   70                  intel_vgpu_port(vgpu, edid->port)->edid;
  110  static void reset_gmbus_controller(struct intel_vgpu *vgpu)
  112          vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
  113          if (!vgpu->display.i2c_edid.edid_available)
  114                  vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
  115          vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
  119  static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
  [all …]
|
gvt.h
  116  #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
  132  #define vgpu_opregion(vgpu) (&(vgpu->opregion))
  151          int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
  152          void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
  153          void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
  399  #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
  400  #define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
  401  #define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
  402  #define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
  404  #define vgpu_aperture_pa_base(vgpu) \
  [all …]
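gvt.h wraps the per-vGPU state behind small accessor macros: config space, OpRegion, the vGPU ops table, and the two graphics-memory partitions (the CPU-mappable aperture / low GM and the hidden / high GM range). A toy version of the graphics-memory accessors follows, with a simplified stand-in struct rather than the real struct intel_vgpu.

#include <stdio.h>

/* Illustrative stand-in for the per-vGPU graphics-memory bookkeeping that the
 * gvt.h macros wrap; field names and layout are invented. */
struct demo_gm {
	unsigned long low_start, low_size;	/* aperture (mappable) */
	unsigned long high_start, high_size;	/* hidden (non-mappable) */
};
struct demo_vgpu { struct demo_gm gm; };

#define demo_aperture_offset(v) ((v)->gm.low_start)
#define demo_aperture_sz(v)     ((v)->gm.low_size)
#define demo_hidden_offset(v)   ((v)->gm.high_start)
#define demo_hidden_sz(v)       ((v)->gm.high_size)

int main(void)
{
	struct demo_vgpu v = { { 0x0, 64 << 20, 0x40000000, 384 << 20 } };

	printf("aperture: [%#lx, %#lx)\n", demo_aperture_offset(&v),
	       demo_aperture_offset(&v) + demo_aperture_sz(&v));
	printf("hidden:   [%#lx, %#lx)\n", demo_hidden_offset(&v),
	       demo_hidden_offset(&v) + demo_hidden_sz(&v));
	return 0;
}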
|
mpt.h
   82  static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
   88          return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
   98  static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
  104          intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
  118  static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
  120          unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
  125          control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
  126          addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
  127          data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
  136          trace_inject_msi(vgpu->id, addr, data);
  [all …]
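mpt.h is the mediated pass-through layer: core GVT-g code calls thin intel_gvt_hypervisor_*() inline wrappers, which dispatch through a table of function pointers (intel_gvt_host.mpt) supplied by the hypervisor backend. A minimal sketch of that indirection pattern; the ops table, wrapper, and trivial backend below are all invented names.

#include <stdio.h>

/* Function-pointer table standing in for the MPT ops: each hypervisor backend
 * fills it with its own callbacks. */
struct demo_mpt_ops {
	int  (*attach_vgpu)(void *vgpu, unsigned long *handle);
	void (*detach_vgpu)(unsigned long handle);
};

static struct { const struct demo_mpt_ops *mpt; } demo_host;

/* Thin wrapper, analogous in shape to intel_gvt_hypervisor_attach_vgpu(). */
static int demo_hypervisor_attach_vgpu(void *vgpu, unsigned long *handle)
{
	if (!demo_host.mpt || !demo_host.mpt->attach_vgpu)
		return -1;
	return demo_host.mpt->attach_vgpu(vgpu, handle);
}

/* A trivial backend implementation, playing the role of e.g. KVMGT. */
static int fake_attach(void *vgpu, unsigned long *handle)
{
	(void)vgpu;
	*handle = 42;
	return 0;
}
static void fake_detach(unsigned long handle) { (void)handle; }
static const struct demo_mpt_ops fake_ops = { fake_attach, fake_detach };

int main(void)
{
	unsigned long handle = 0;

	demo_host.mpt = &fake_ops;
	demo_hypervisor_attach_vgpu(NULL, &handle);
	printf("handle = %lu\n", handle);
	return 0;
}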
|
kvmgt.c
   64          size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
   66          void (*release)(struct intel_vgpu *vgpu,
   86          struct intel_vgpu *vgpu;
   95          struct intel_vgpu *vgpu;
  113  static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
  125          ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
  131  static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
  148          ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
  176          gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
  180  static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
  [all …]
|
sched_policy.c
   37  static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
   42          for_each_engine(engine, vgpu->gvt->dev_priv, i) {
   43                  if (!list_empty(workload_q_head(vgpu, i)))
   55          struct intel_vgpu *vgpu;
   75  static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
   80          if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
   83          vgpu_data = vgpu->sched_data;
  179          struct intel_vgpu *vgpu = NULL;
  187                  if (!vgpu_has_pending_workload(vgpu_data->vgpu))
  192                  vgpu = vgpu_data->vgpu;
  [all …]
|
gtt.c
   54  bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
   56          if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
   57                          && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
   66  int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
   68          if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
   72          if (vgpu_gmadr_is_aperture(vgpu, g_addr))
   73                  *h_addr = vgpu_aperture_gmadr_base(vgpu)
   74                          + (g_addr - vgpu_aperture_offset(vgpu));
   76                  *h_addr = vgpu_hidden_gmadr_base(vgpu)
   77                          + (g_addr - vgpu_hidden_offset(vgpu));
  [all …]
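gtt.c validates guest graphics-memory addresses and translates guest to host: the vGPU's aperture and hidden partitions each live at some host offset, so the conversion in intel_gvt_ggtt_gmadr_g2h() is base-plus-delta within whichever partition the address falls in. A self-contained sketch of that calculation with an invented layout:

#include <stdint.h>
#include <stdio.h>

/* Simplified guest-to-host graphics address translation in the spirit of
 * intel_gvt_ggtt_gmadr_g2h(): h = host_base + (g - guest_offset), chosen per
 * partition. All sizes and bases below are made up. */
struct demo_layout {
	uint64_t g_ap_off,  g_ap_sz,  h_ap_base;	/* aperture */
	uint64_t g_hid_off, g_hid_sz, h_hid_base;	/* hidden   */
};

static int demo_gmadr_g2h(const struct demo_layout *l, uint64_t g, uint64_t *h)
{
	if (g >= l->g_ap_off && g < l->g_ap_off + l->g_ap_sz)
		*h = l->h_ap_base + (g - l->g_ap_off);
	else if (g >= l->g_hid_off && g < l->g_hid_off + l->g_hid_sz)
		*h = l->h_hid_base + (g - l->g_hid_off);
	else
		return -1;	/* invalid guest graphics address */
	return 0;
}

int main(void)
{
	struct demo_layout l = { 0, 64 << 20, 128 << 20,
				 1ULL << 30, 384 << 20, 2ULL << 30 };
	uint64_t h;

	if (!demo_gmadr_g2h(&l, 0x1000, &h))
		printf("guest 0x1000 -> host %#llx\n", (unsigned long long)h);
	return 0;
}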
|
page_track.c
   35                  struct intel_vgpu *vgpu, unsigned long gfn)
   37          return radix_tree_lookup(&vgpu->page_track_tree, gfn);
   48  int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
   54          track = intel_vgpu_find_page_track(vgpu, gfn);
   65          ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
   80  void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
   85          track = radix_tree_delete(&vgpu->page_track_tree, gfn);
   88                  intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
  101  int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
  106          track = intel_vgpu_find_page_track(vgpu, gfn);
  [all …]
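page_track.c keys write-protection trackers by guest frame number (gfn) in a per-vGPU radix tree: register inserts a tracker, find looks it up, unregister deletes it and drops the hypervisor protection. Below is a sketch of the same register/find life cycle, using a plain linked list in place of the kernel's radix tree; all names are invented.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct intel_vgpu_page_track: a write handler bound to one gfn.
 * A linked list plays the role of the kernel's per-vGPU radix tree. */
struct demo_track {
	unsigned long gfn;
	void (*handler)(unsigned long gfn);
	struct demo_track *next;
};

static struct demo_track *track_list;

static struct demo_track *demo_find_track(unsigned long gfn)
{
	struct demo_track *t;

	for (t = track_list; t; t = t->next)
		if (t->gfn == gfn)
			return t;
	return NULL;
}

/* Analogous shape to intel_vgpu_register_page_track(): refuse duplicates, insert. */
static int demo_register_track(unsigned long gfn, void (*handler)(unsigned long))
{
	struct demo_track *t;

	if (demo_find_track(gfn))
		return -1;
	t = malloc(sizeof(*t));
	if (!t)
		return -1;
	t->gfn = gfn;
	t->handler = handler;
	t->next = track_list;
	track_list = t;
	return 0;
}

static void demo_write_hit(unsigned long gfn)
{
	printf("tracked write to gfn %#lx\n", gfn);
}

int main(void)
{
	struct demo_track *t;

	demo_register_track(0x1234, demo_write_hit);
	t = demo_find_track(0x1234);	/* what a write fault handler would do */
	if (t)
		t->handler(t->gfn);
	return 0;
}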
|
execlist.c
   95          struct intel_vgpu *vgpu = execlist->vgpu;
   98          u32 status_reg = execlist_ring_mmio(vgpu->gvt,
  101          status.ldw = vgpu_vreg(vgpu, status_reg);
  102          status.udw = vgpu_vreg(vgpu, status_reg + 4);
  120          vgpu_vreg(vgpu, status_reg) = status.ldw;
  121          vgpu_vreg(vgpu, status_reg + 4) = status.udw;
  124                  vgpu->id, status_reg, status.ldw, status.udw);
  131          struct intel_vgpu *vgpu = execlist->vgpu;
  137          struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  139          ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
  [all …]
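In execlist.c, emulate_execlist_status() handles the 64-bit execlist status as two 32-bit virtual registers: the low dword at status_reg and the high dword at status_reg + 4. A tiny sketch of that low/high split; the union name, values, and little-endian layout assumption are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* The 64-bit status viewed as a low/high dword pair, mirroring the
 * status.ldw / status.udw accesses in the excerpt (assumes little endian). */
union demo_execlist_status {
	uint64_t qw;
	struct { uint32_t ldw; uint32_t udw; };
};

int main(void)
{
	uint32_t vreg[2] = { 0x00000301, 0x00000007 };	/* status_reg, status_reg + 4 */
	union demo_execlist_status status;

	status.ldw = vreg[0];
	status.udw = vreg[1];
	printf("status = %#llx\n", (unsigned long long)status.qw);

	vreg[0] = status.ldw;	/* write back, as the emulation path does */
	vreg[1] = status.udw;
	return 0;
}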
|
scheduler.c
   82          struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
  124          struct intel_vgpu *vgpu = workload->vgpu;
  125          struct intel_gvt *gvt = vgpu->gvt;
  148          context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
  158          intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
  169          intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
  172          intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
  189          intel_gvt_hypervisor_read_gpa(vgpu,
  206  static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
  208          struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  [all …]
|
interrupt.c
   51  static void update_upstream_irq(struct intel_vgpu *vgpu,
  175  int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
  178          struct intel_gvt *gvt = vgpu->gvt;
  182          trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg),
  183                         (vgpu_vreg(vgpu, reg) ^ imr));
  185          vgpu_vreg(vgpu, reg) = imr;
  187          ops->check_pending_irq(vgpu);
  205  int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
  208          struct intel_gvt *gvt = vgpu->gvt;
  211          u32 virtual_ier = vgpu_vreg(vgpu, reg);
  [all …]
|
debugfs.c
   29          struct intel_vgpu *vgpu;
   67          vreg = vgpu_vreg(param->vgpu, offset);
   87          struct intel_vgpu *vgpu = s->private;
   88          struct intel_gvt *gvt = vgpu->gvt;
   90                  .vgpu = vgpu,
  130          struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
  131          *val = vgpu->scan_nonprivbb;
  144          struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
  145          struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  152          if (vgpu->scan_nonprivbb == val)
  [all …]
|
dmabuf.c
   89          struct intel_vgpu *vgpu = obj->vgpu;
   93          if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
   94                  list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
   98                          intel_gvt_hypervisor_put_vfio_device(vgpu);
   99                          idr_remove(&vgpu->object_idr,
  130          struct intel_vgpu *vgpu = obj->vgpu;
  132          if (vgpu) {
  133                  mutex_lock(&vgpu->dmabuf_lock);
  136                  mutex_unlock(&vgpu->dmabuf_lock);
  208          struct intel_vgpu *vgpu,
  [all …]
|
fb_decoder.c
  146  static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
  149          struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  151          u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
  184  static int get_active_pipe(struct intel_vgpu *vgpu)
  189                  if (pipe_is_enabled(vgpu, i))
  204  int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
  208          struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  211          pipe = get_active_pipe(vgpu);
  215          val = vgpu_vreg_t(vgpu, DSPCNTR(pipe));
  251          plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
  [all …]
|
handlers.c
   70  static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
   73          memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
   76  static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
   79          memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
  171  void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
  186          pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
  187          vgpu->failsafe = true;
  190  static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
  193          unsigned int max_fence = vgpu_fence_sz(vgpu);
  203          if (!vgpu->pv_notified)
  [all …]
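handlers.c shows the default MMIO handlers: read_vreg() and write_vreg() are size-agnostic memcpy helpers over the vGPU's virtual register file, so 1-, 2-, 4- and 8-byte guest accesses share one path, while enter_failsafe_mode() flags a vGPU whose guest misbehaves. A sketch of the memcpy-based accessors with an invented register-file size:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_vgpu {
	uint8_t vreg[0x1000];	/* per-vGPU virtual register file (size invented) */
};

/* Analogous to read_vreg()/write_vreg(): copy `bytes` bytes at `offset`, so
 * accesses of any supported width go through the same helpers. */
static void demo_read_vreg(struct demo_vgpu *vgpu, unsigned int offset,
			   void *p_data, unsigned int bytes)
{
	memcpy(p_data, vgpu->vreg + offset, bytes);
}

static void demo_write_vreg(struct demo_vgpu *vgpu, unsigned int offset,
			    void *p_data, unsigned int bytes)
{
	memcpy(vgpu->vreg + offset, p_data, bytes);
}

int main(void)
{
	struct demo_vgpu vgpu = { { 0 } };
	uint32_t val = 0xdeadbeef, out = 0;

	demo_write_vreg(&vgpu, 0x100, &val, sizeof(val));
	demo_read_vreg(&vgpu, 0x100, &out, sizeof(out));
	printf("readback: %#x\n", out);
	return 0;
}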
|
opregion.c
  222  int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
  229          gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
  230          vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
  233          if (!vgpu_opregion(vgpu)->va) {
  239          buf = (u8 *)vgpu_opregion(vgpu)->va;
  260  static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
  266          mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
  272          ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
  273                  vgpu_opregion(vgpu)->gfn[i],
  282          vgpu_opregion(vgpu)->mapped = map;
  [all …]
|
display.h
   41  #define intel_vgpu_port(vgpu, port) \
   42          (&(vgpu->display.ports[port]))
   44  #define intel_vgpu_has_monitor_on_port(vgpu, port) \
   45          (intel_vgpu_port(vgpu, port)->edid && \
   46           intel_vgpu_port(vgpu, port)->edid->data_valid)
   48  #define intel_vgpu_port_is_dp(vgpu, port) \
   49          ((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
   50           (intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
   51           (intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
   52           (intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
  [all …]
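display.h layers its helpers: intel_vgpu_port() indexes the vGPU's virtual port array, and the monitor-present and is-DP checks are built on top of it. A reduced version of that macro layering, with simplified port types and struct layout; none of these are the kernel's definitions.

#include <stdio.h>

enum demo_port_type { DEMO_HDMI_A, DEMO_DP_A, DEMO_DP_B };

struct demo_edid { int data_valid; };
struct demo_port { enum demo_port_type type; struct demo_edid *edid; };
struct demo_vgpu { struct demo_port ports[4]; };

/* Layered macros in the spirit of display.h: the later checks reuse the first. */
#define demo_port(vgpu, p)		(&(vgpu)->ports[(p)])
#define demo_has_monitor_on_port(vgpu, p) \
	(demo_port(vgpu, p)->edid && demo_port(vgpu, p)->edid->data_valid)
#define demo_port_is_dp(vgpu, p) \
	(demo_port(vgpu, p)->type == DEMO_DP_A || \
	 demo_port(vgpu, p)->type == DEMO_DP_B)

int main(void)
{
	struct demo_edid edid = { .data_valid = 1 };
	struct demo_vgpu vgpu = { .ports = { [1] = { DEMO_DP_A, &edid } } };

	printf("port 1: monitor=%d dp=%d\n",
	       demo_has_monitor_on_port(&vgpu, 1), demo_port_is_dp(&vgpu, 1));
	return 0;
}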
|
gtt.h
   55                  struct intel_vgpu *vgpu);
   61                  struct intel_vgpu *vgpu);
  138          struct intel_vgpu *vgpu;
  166  struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
  203  extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
  204  extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
  205  void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
  206  void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
  209  void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
  212  extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
  [all …]
|
sched_policy.h
   40          int (*init_vgpu)(struct intel_vgpu *vgpu);
   41          void (*clean_vgpu)(struct intel_vgpu *vgpu);
   42          void (*start_schedule)(struct intel_vgpu *vgpu);
   43          void (*stop_schedule)(struct intel_vgpu *vgpu);
   52  int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
   54  void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
   56  void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
   58  void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
|
mmio.h
   80  int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
   81  void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
   82  void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
   84  int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
   86  int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
   88  int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
   91  int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
   93  int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   99  int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
  102  int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
scheduler.h
   81          struct intel_vgpu *vgpu;
  130  #define workload_q_head(vgpu, ring_id) \
  131          (&(vgpu->submission.workload_q_head[ring_id]))
  139  void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
  141  int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
  143  void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
  146  void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
  148  int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
  156          intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
  161  void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
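scheduler.h shows the submission bookkeeping: each vGPU keeps one workload queue per engine ring, and workload_q_head(vgpu, ring_id) returns the list head for that ring. A compact sketch of per-ring queues built on the same idea; the ring count and list type are stand-ins, not the kernel's.

#include <stdio.h>

#define DEMO_NUM_RINGS 4	/* stand-in for the number of engine rings */

struct demo_workload { int id; struct demo_workload *next; };

struct demo_submission {
	struct demo_workload *workload_q_head[DEMO_NUM_RINGS];
};

struct demo_vgpu { struct demo_submission submission; };

/* Analogous to workload_q_head(vgpu, ring_id): pick the per-ring queue head. */
#define demo_q_head(vgpu, ring_id) \
	(&(vgpu)->submission.workload_q_head[(ring_id)])

int main(void)
{
	struct demo_vgpu vgpu = { .submission = { .workload_q_head = { NULL } } };
	struct demo_workload w = { .id = 1, .next = NULL };

	*demo_q_head(&vgpu, 2) = &w;	/* queue one workload on ring 2 */
	printf("ring 2 head workload id: %d\n", (*demo_q_head(&vgpu, 2))->id);
	return 0;
}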
|