/Linux-v5.4/drivers/gpu/drm/i915/gvt/

gvt.c:
     49: static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
     55: &gvt->dev_priv->drm.pdev->dev);
     57: for (i = 0; i < gvt->num_types; i++) {
     58: t = &gvt->types[i];
     72: void *gvt = kdev_to_i915(dev)->gvt;
     74: type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
     93: void *gvt = kdev_to_i915(dev)->gvt;
     95: type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
    131: static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
    137: for (i = 0; i < gvt->num_types; i++) {
    [all …]

vgpu.c:
    106: int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
    124: low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
    125: high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
    128: gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
    130: if (!gvt->types)
    138: gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
    139: gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
    140: gvt->types[i].fence = vgpu_types[i].fence;
    146: gvt->types[i].weight = vgpu_types[i].weight;
    147: gvt->types[i].resolution = vgpu_types[i].edid;
    [all …]

sched_policy.c:
     42: for_each_engine(engine, vgpu->gvt->dev_priv, i) {
     68: struct intel_gvt *gvt;
     80: if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
    132: static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
    134: struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
    155: for_each_engine(engine, gvt->dev_priv, i) {
    172: for_each_engine(engine, gvt->dev_priv, i)
    213: struct intel_gvt *gvt = sched_data->gvt;
    214: struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
    233: scheduler->next_vgpu = gvt->idle_vgpu;
    [all …]

gvt.h:
    169: struct intel_gvt *gvt;
    348: return i915->gvt;
    361: static inline void intel_gvt_request_service(struct intel_gvt *gvt,
    364: set_bit(service, (void *)&gvt->service_request);
    365: wake_up(&gvt->service_thread_wq);
    368: void intel_gvt_free_firmware(struct intel_gvt *gvt);
    369: int intel_gvt_load_firmware(struct intel_gvt *gvt);
    380: #define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
    381: #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
    383: #define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
    [all …]

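The intel_gvt_request_service() helper shown above follows the usual kernel request/wake pattern: a request bit is set in gvt->service_request and the service thread's wait queue is woken. Below is a minimal sketch of the consuming side, assuming a request bit named INTEL_GVT_REQUEST_EMULATE_VBLANK and the intel_gvt_emulate_vblank() helper declared in display.h; it illustrates the pattern and is not the actual service thread in gvt.c.

    #include <linux/bitops.h>
    #include <linux/kthread.h>
    #include <linux/wait.h>
    #include "gvt.h"

    /* Sketch: consumer side of intel_gvt_request_service().  A service
     * kthread sleeps on service_thread_wq until a request bit is set,
     * then clears the bit and handles it.  Illustrative only; the real
     * thread in gvt.c handles several request types. */
    static int gvt_service_thread_sketch(void *data)
    {
            struct intel_gvt *gvt = data;

            while (!kthread_should_stop()) {
                    if (wait_event_interruptible(gvt->service_thread_wq,
                                    kthread_should_stop() ||
                                    gvt->service_request))
                            continue;       /* interrupted; retry */

                    if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
                                           (void *)&gvt->service_request))
                            intel_gvt_emulate_vblank(gvt);
            }
            return 0;
    }

The set_bit()/wake_up() pair on the producer side and the wait_event()/test_and_clear_bit() pair on the consumer side form a common lightweight handshake; the bit itself carries the request.
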
mmio.c:
     53: #define reg_is_mmio(gvt, reg) \
     54: (reg >= 0 && reg < gvt->device_info.mmio_size)
     56: #define reg_is_gtt(gvt, reg) \
     57: (reg >= gvt->device_info.gtt_start_offset \
     58: && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
     63: struct intel_gvt *gvt = NULL;
     70: gvt = vgpu->gvt;
     73: if (reg_is_mmio(gvt, offset)) {
     80: } else if (reg_is_gtt(gvt, offset)) {
     81: offset -= gvt->device_info.gtt_start_offset;
    [all …]

aperture_gm.c:
     43: struct intel_gvt *gvt = vgpu->gvt;
     44: struct drm_i915_private *dev_priv = gvt->dev_priv;
     53: start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
     54: end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
     59: start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
     60: end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
     81: struct intel_gvt *gvt = vgpu->gvt;
     82: struct drm_i915_private *dev_priv = gvt->dev_priv;
    109: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    130: struct intel_gvt *gvt = vgpu->gvt;
    [all …]

debugfs.c:
     58: static inline int mmio_diff_handler(struct intel_gvt *gvt,
     61: struct drm_i915_private *i915 = gvt->dev_priv;
     88: struct intel_gvt *gvt = vgpu->gvt;
     98: mutex_lock(&gvt->lock);
     99: spin_lock_bh(&gvt->scheduler.mmio_context_lock);
    101: mmio_hw_access_pre(gvt->dev_priv);
    103: intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
    104: mmio_hw_access_post(gvt->dev_priv);
    106: spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
    107: mutex_unlock(&gvt->lock);
    [all …]

firmware.c:
     69: static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
     71: struct drm_i915_private *i915 = gvt->dev_priv;
     78: static int expose_firmware_sysfs(struct intel_gvt *gvt)
     80: struct intel_gvt_device_info *info = &gvt->device_info;
     81: struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
    107: memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
    112: intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
    114: memcpy(gvt->firmware.mmio, p, info->mmio_size);
    130: static void clean_firmware_sysfs(struct intel_gvt *gvt)
    132: struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
    [all …]

mmio_context.c:
    161: struct intel_gvt *gvt = dev_priv->gvt;
    163: u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
    164: u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
    198: struct intel_gvt *gvt = vgpu->gvt;
    200: int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
    214: for (mmio = gvt->engine_mmio_list.mmio;
    348: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    351: u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
    352: u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
    407: dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
    [all …]

gtt.c:
     90: if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
     94: if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
     96: + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
     99: + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
    305: const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
    318: e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
    330: const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
    343: write_pte64(vgpu->gvt->dev_priv, index, e->val64);
    553: struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
    580: struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
    [all …]

interrupt.c:
    147: struct intel_gvt *gvt,
    150: struct intel_gvt_irq *irq = &gvt->irq;
    178: struct intel_gvt *gvt = vgpu->gvt;
    179: struct intel_gvt_irq_ops *ops = gvt->irq.ops;
    208: struct intel_gvt *gvt = vgpu->gvt;
    209: struct intel_gvt_irq_ops *ops = gvt->irq.ops;
    247: struct intel_gvt *gvt = vgpu->gvt;
    248: struct intel_gvt_irq_ops *ops = gvt->irq.ops;
    257: info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
    285: struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
    [all …]

sched_policy.h:
     38: int (*init)(struct intel_gvt *gvt);
     39: void (*clean)(struct intel_gvt *gvt);
     46: void intel_gvt_schedule(struct intel_gvt *gvt);
     48: int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
     50: void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
     60: void intel_gvt_kick_schedule(struct intel_gvt *gvt);

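The init/clean function pointers at lines 38 and 39 belong to the scheduling-policy vtable; a policy implementation supplies its callbacks and the core invokes them through the table via intel_gvt_init_sched_policy() / intel_gvt_clean_sched_policy() above. A minimal illustrative instance follows; it assumes the vtable type is struct intel_gvt_sched_policy_ops, and the example_* callbacks are placeholders rather than the in-tree time-based scheduler from sched_policy.c.

    #include "gvt.h"

    /* Placeholder policy: hypothetical callbacks wired into the ops
     * table.  A real policy (e.g. the time-based scheduler) would set
     * up its per-GVT scheduling state in init and tear it down in
     * clean. */
    static int example_sched_init(struct intel_gvt *gvt)
    {
            return 0;               /* nothing to allocate in this sketch */
    }

    static void example_sched_clean(struct intel_gvt *gvt)
    {
    }

    static struct intel_gvt_sched_policy_ops example_sched_ops = {
            .init  = example_sched_init,
            .clean = example_sched_clean,
    };
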
mmio.h:
     70: int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
     72: unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
     73: bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
     75: int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
     76: void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
     77: int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
     78: int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
     97: bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,

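intel_gvt_for_each_tracked_mmio() (lines 77 and 78) walks every MMIO register GVT tracks and calls the supplied handler with each register offset; firmware.c and debugfs.c above use it to snapshot and diff registers. A small hedged example follows; the counting callback is hypothetical, but its signature is the handler type declared here.

    #include "gvt.h"

    /* Hypothetical callback: count how many MMIO registers GVT tracks.
     * The signature matches the handler type taken by
     * intel_gvt_for_each_tracked_mmio(). */
    static int count_tracked_mmio(struct intel_gvt *gvt, u32 offset, void *data)
    {
            (*(u32 *)data)++;
            return 0;               /* 0 keeps the walk going */
    }

    static u32 count_all_tracked_mmio(struct intel_gvt *gvt)
    {
            u32 count = 0;

            intel_gvt_for_each_tracked_mmio(gvt, count_tracked_mmio, &count);
            return count;
    }
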
scheduler.c:
     86: struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
    129: struct intel_gvt *gvt = vgpu->gvt;
    180: context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
    184: if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
    214: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    230: struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
    232: struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
    388: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    418: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    451: struct intel_gvt *gvt = workload->vgpu->gvt;
    [all …]

display.c:
     60: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
     72: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    171: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    361: void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
    363: struct intel_gvt_irq *irq = &gvt->irq;
    368: mutex_lock(&gvt->lock);
    369: for_each_active_vgpu(gvt, vgpu, id) {
    387: mutex_unlock(&gvt->lock);
    392: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    424: for_each_pipe(vgpu->gvt->dev_priv, pipe)
    [all …]

cmd_parser.c:
    502: (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
    647: static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
    652: hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
    659: static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
    668: return find_cmd_entry(gvt, opcode, ring_id);
    837: struct intel_gvt *gvt = s->vgpu->gvt;
    841: struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
    854: if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
    883: struct intel_gvt *gvt = vgpu->gvt;
    886: if (offset + 4 > gvt->device_info.mmio_size) {
    [all …]

cfg_space.c:
    112: if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
    305: if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
    361: struct intel_gvt *gvt = vgpu->gvt;
    362: const struct intel_gvt_device_info *info = &gvt->device_info;
    365: memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
    380: gvt_aperture_pa_base(gvt), true);
    394: pci_resource_len(gvt->dev_priv->drm.pdev, 0);
    396: pci_resource_len(gvt->dev_priv->drm.pdev, 2);

handlers.c:
     50: unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
     52: if (IS_BROADWELL(gvt->dev_priv))
     54: else if (IS_SKYLAKE(gvt->dev_priv))
     56: else if (IS_KABYLAKE(gvt->dev_priv))
     58: else if (IS_BROXTON(gvt->dev_priv))
     60: else if (IS_COFFEELAKE(gvt->dev_priv))
     66: bool intel_gvt_match_device(struct intel_gvt *gvt,
     69: return intel_gvt_get_device_type(gvt) & device;
     84: static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
     89: hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
    [all …]

execlist.c:
     42: #define execlist_ring_mmio(gvt, ring_id, offset) \
     43: (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
     97: u32 status_reg = execlist_ring_mmio(vgpu->gvt,
    136: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    138: ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
    140: ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
    265: u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
    521: ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
    532: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    547: struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

cmd_parser.h:
     41: void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
     43: int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);

Makefile:
      2: GVT_DIR := gvt
      3: GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \

mpt.h:
     53: void *gvt, const void *ops)
     58: return intel_gvt_host.mpt->host_init(dev, gvt, ops);
    118: unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;

kvmgt.c:
    190: struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
    213: struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
    651: void *gvt;
    655: gvt = kdev_to_i915(pdev)->gvt;
    657: type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
    665: vgpu = intel_gvt_ops->vgpu_create(gvt, type);
    916: aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
    985: struct intel_gvt *gvt = vgpu->gvt;
    995: return (offset >= gvt->device_info.gtt_start_offset &&
    996: offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
    [all …]

scheduler.h:
    137: int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
    139: void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

display.h:
    200: void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
    201: void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);