Searched refs:gvt (Results 1 – 25 of 36) sorted by relevance

/Linux-v5.15/drivers/gpu/drm/i915/gvt/
gvt.c
66 static void init_device_info(struct intel_gvt *gvt) in init_device_info() argument
68 struct intel_gvt_device_info *info = &gvt->device_info; in init_device_info()
69 struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); in init_device_info()
83 static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) in intel_gvt_test_and_emulate_vblank() argument
88 mutex_lock(&gvt->lock); in intel_gvt_test_and_emulate_vblank()
89 idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { in intel_gvt_test_and_emulate_vblank()
91 (void *)&gvt->service_request)) { in intel_gvt_test_and_emulate_vblank()
96 mutex_unlock(&gvt->lock); in intel_gvt_test_and_emulate_vblank()
101 struct intel_gvt *gvt = (struct intel_gvt *)data; in gvt_service_thread() local
107 ret = wait_event_interruptible(gvt->service_thread_wq, in gvt_service_thread()
[all …]
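
The gvt.c hits above show GVT's service-thread pattern: callers set a bit in gvt->service_request and wake service_thread_wq, while gvt_service_thread() sleeps in wait_event_interruptible() until a request arrives (here, vblank emulation for every vGPU registered in the idr). Below is a minimal userspace sketch of that shape using pthreads and C11 atomics; the names (service_request, REQ_EMULATE_VBLANK) mirror the driver's, but the code is illustrative, not the kernel implementation.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { REQ_EMULATE_VBLANK = 0 };        /* request bit index */

static atomic_ulong service_request;    /* models gvt->service_request */
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq_cond = PTHREAD_COND_INITIALIZER; /* models service_thread_wq */

/* models intel_gvt_request_service(): set the bit, then wake the thread */
static void request_service(int bit)
{
        atomic_fetch_or(&service_request, 1UL << bit);
        pthread_mutex_lock(&wq_lock);
        pthread_cond_signal(&wq_cond);  /* wake_up(&gvt->service_thread_wq) */
        pthread_mutex_unlock(&wq_lock);
}

/* models gvt_service_thread(): sleep until a request bit is set, handle it */
static void *service_thread(void *data)
{
        (void)data;
        pthread_mutex_lock(&wq_lock);
        while (atomic_load(&service_request) == 0)
                pthread_cond_wait(&wq_cond, &wq_lock); /* wait_event_interruptible() */
        pthread_mutex_unlock(&wq_lock);

        unsigned long req = atomic_exchange(&service_request, 0);
        if (req & (1UL << REQ_EMULATE_VBLANK))
                puts("service thread: emulate vblank for each vGPU");
        return NULL;    /* the real thread loops until kthread_should_stop() */
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, service_thread, NULL);
        request_service(REQ_EMULATE_VBLANK);
        pthread_join(t, NULL);
        return 0;
}
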
vgpu.c
40 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in populate_pvinfo_page()
107 int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) in intel_gvt_init_vgpu_types() argument
125 low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; in intel_gvt_init_vgpu_types()
126 high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; in intel_gvt_init_vgpu_types()
129 gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type), in intel_gvt_init_vgpu_types()
131 if (!gvt->types) in intel_gvt_init_vgpu_types()
139 gvt->types[i].low_gm_size = vgpu_types[i].low_mm; in intel_gvt_init_vgpu_types()
140 gvt->types[i].high_gm_size = vgpu_types[i].high_mm; in intel_gvt_init_vgpu_types()
141 gvt->types[i].fence = vgpu_types[i].fence; in intel_gvt_init_vgpu_types()
147 gvt->types[i].weight = vgpu_types[i].weight; in intel_gvt_init_vgpu_types()
[all …]
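
Per the vgpu.c hits, intel_gvt_init_vgpu_types() computes how much low (mappable) and high (hidden) graphics memory remains after the host's reservation, then fills gvt->types from a static table of low_mm/high_mm/fence/weight tuples. A self-contained sketch of that bookkeeping follows; every size here is invented for illustration, not the driver's real HOST_LOW_GM_SIZE/HOST_HIGH_GM_SIZE values.

#include <stdio.h>

struct vgpu_type {      /* models struct intel_vgpu_type (trimmed) */
        const char *name;
        unsigned long long low_gm_size, high_gm_size;
        unsigned int fence, weight;
};

int main(void)
{
        /* Illustrative numbers; the driver derives these from the aperture. */
        unsigned long long aperture_sz = 256ULL << 20, hidden_sz = 2048ULL << 20;
        unsigned long long host_low = 64ULL << 20, host_high = 384ULL << 20;
        unsigned long long low_avail = aperture_sz - host_low;   /* gvt_aperture_sz - HOST_LOW_GM_SIZE */
        unsigned long long high_avail = hidden_sz - host_high;   /* gvt_hidden_sz - HOST_HIGH_GM_SIZE */

        /* models the static vgpu_types[] table: low_mm, high_mm, fence, weight */
        struct vgpu_type types[] = {
                { "V4", 64ULL << 20,  384ULL << 20, 4, 16 },
                { "V2", 128ULL << 20, 512ULL << 20, 4, 8  },
        };
        size_t n = sizeof(types) / sizeof(types[0]);

        for (size_t i = 0; i < n; i++) {
                /* a type is only usable while low_avail/high_avail can host
                   at least one more instance of it */
                unsigned long long fit_low  = low_avail  / types[i].low_gm_size;
                unsigned long long fit_high = high_avail / types[i].high_gm_size;
                unsigned long long fits = fit_low < fit_high ? fit_low : fit_high;
                printf("%s: low=%lluM high=%lluM fence=%u weight=%u, fits %llu instances\n",
                       types[i].name, types[i].low_gm_size >> 20,
                       types[i].high_gm_size >> 20, types[i].fence,
                       types[i].weight, fits);
        }
        return 0;
}
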
sched_policy.c
42 for_each_engine(engine, vgpu->gvt->gt, i) { in vgpu_has_pending_workload()
68 struct intel_gvt *gvt; member
80 if (!vgpu || vgpu == vgpu->gvt->idle_vgpu) in vgpu_update_timeslice()
132 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) in try_to_schedule_next_vgpu() argument
134 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in try_to_schedule_next_vgpu()
155 for_each_engine(engine, gvt->gt, i) { in try_to_schedule_next_vgpu()
172 for_each_engine(engine, gvt->gt, i) in try_to_schedule_next_vgpu()
213 struct intel_gvt *gvt = sched_data->gvt; in tbs_sched_func() local
214 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in tbs_sched_func()
233 scheduler->next_vgpu = gvt->idle_vgpu; in tbs_sched_func()
[all …]
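
tbs_sched_func() above is the time-based scheduler: walk the run list, pick the next vGPU with pending workloads, and fall back to gvt->idle_vgpu when nothing is runnable. The sketch below keeps only that round-robin/fallback shape; the real policy also maintains per-vGPU timeslices and weights (vgpu_update_timeslice), which is omitted here.

#include <stdbool.h>
#include <stdio.h>

struct vgpu { const char *name; bool has_pending; };

/* models tbs_sched_func(): round-robin over the run list, first vGPU with
   pending workloads wins; otherwise schedule the idle vGPU */
static struct vgpu *pick_next(struct vgpu *list, int n, int *cursor, struct vgpu *idle)
{
        for (int i = 0; i < n; i++) {
                struct vgpu *v = &list[(*cursor + i) % n];
                if (v->has_pending) {
                        *cursor = (*cursor + i + 1) % n;
                        return v;
                }
        }
        return idle;    /* scheduler->next_vgpu = gvt->idle_vgpu */
}

int main(void)
{
        struct vgpu idle = { "idle", false };
        struct vgpu run[] = { { "vgpu1", false }, { "vgpu2", true }, { "vgpu3", true } };
        int cursor = 0;
        for (int i = 0; i < 4; i++)
                printf("tick %d -> %s\n", i, pick_next(run, 3, &cursor, &idle)->name);
        run[1].has_pending = run[2].has_pending = false;
        printf("drained -> %s\n", pick_next(run, 3, &cursor, &idle)->name);
        return 0;
}
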
gvt.h
180 struct intel_gvt *gvt; member
347 return i915->gvt; in to_gvt()
363 static inline void intel_gvt_request_service(struct intel_gvt *gvt, in intel_gvt_request_service() argument
366 set_bit(service, (void *)&gvt->service_request); in intel_gvt_request_service()
367 wake_up(&gvt->service_thread_wq); in intel_gvt_request_service()
370 void intel_gvt_free_firmware(struct intel_gvt *gvt);
371 int intel_gvt_load_firmware(struct intel_gvt *gvt);
381 #define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt) argument
384 #define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end argument
385 #define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start argument
[all …]
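
gvt.h wraps GGTT geometry in a macro layer (gvt_to_ggtt(), gvt_aperture_sz(), gvt_aperture_pa_base()) so callers never spell out gvt->gt->ggtt themselves. A standalone sketch of the same layering with stub struct definitions; the field names follow the hits above, the sizes are invented.

#include <stdio.h>

struct ggtt { unsigned long long mappable_end; struct { unsigned long long start; } gmadr; };
struct gt   { struct ggtt *ggtt; };
struct gvt  { struct gt *gt; };

/* same shape as the gvt.h macros above */
#define gvt_to_ggtt(gvt)          ((gvt)->gt->ggtt)
#define gvt_aperture_sz(gvt)      (gvt_to_ggtt(gvt)->mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt_to_ggtt(gvt)->gmadr.start)

int main(void)
{
        struct ggtt ggtt = { .mappable_end = 256ULL << 20, .gmadr = { 0xA0000000ULL } };
        struct gt gt = { &ggtt };
        struct gvt gvt = { &gt };
        printf("aperture: %lluM at %#llx\n",
               gvt_aperture_sz(&gvt) >> 20, gvt_aperture_pa_base(&gvt));
        return 0;
}
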
aperture_gm.c
43 struct intel_gvt *gvt = vgpu->gvt; in alloc_gm() local
44 struct intel_gt *gt = gvt->gt; in alloc_gm()
53 start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE); in alloc_gm()
54 end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE); in alloc_gm()
59 start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE); in alloc_gm()
60 end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE); in alloc_gm()
81 struct intel_gvt *gvt = vgpu->gvt; in alloc_vgpu_gm() local
82 struct intel_gt *gt = gvt->gt; in alloc_vgpu_gm()
109 struct intel_gvt *gvt = vgpu->gvt; in free_vgpu_gm() local
110 struct intel_gt *gt = gvt->gt; in free_vgpu_gm()
[all …]
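
alloc_gm() selects either the mappable aperture window or the hidden window and aligns both ends to the GTT page size before carving out the vGPU's block (the real allocation then goes through the drm_mm range allocator, omitted here). A sketch of the range setup; the window addresses are invented.

#include <stdbool.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096ULL   /* stands in for I915_GTT_PAGE_SIZE */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

int main(void)
{
        bool high_gm = true;    /* allocate from the hidden (non-mappable) window */
        unsigned long long start, end;

        if (high_gm) {          /* gvt_hidden_gmadr_base/end in the driver */
                start = ALIGN(0x10000123ULL, GTT_PAGE_SIZE);
                end   = ALIGN(0x20000000ULL, GTT_PAGE_SIZE);
        } else {                /* gvt_aperture_gmadr_base/end */
                start = ALIGN(0x00000000ULL, GTT_PAGE_SIZE);
                end   = ALIGN(0x10000000ULL, GTT_PAGE_SIZE);
        }
        printf("search window: [%#llx, %#llx), %llu pages\n",
               start, end, (end - start) / GTT_PAGE_SIZE);
        return 0;
}
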
mmio.c
53 #define reg_is_mmio(gvt, reg) \ argument
54 (reg >= 0 && reg < gvt->device_info.mmio_size)
56 #define reg_is_gtt(gvt, reg) \ argument
57 (reg >= gvt->device_info.gtt_start_offset \
58 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
63 struct intel_gvt *gvt = NULL; in failsafe_emulate_mmio_rw() local
70 gvt = vgpu->gvt; in failsafe_emulate_mmio_rw()
73 if (reg_is_mmio(gvt, offset)) { in failsafe_emulate_mmio_rw()
80 } else if (reg_is_gtt(gvt, offset)) { in failsafe_emulate_mmio_rw()
81 offset -= gvt->device_info.gtt_start_offset; in failsafe_emulate_mmio_rw()
[all …]
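
reg_is_mmio() and reg_is_gtt() classify a BAR offset: below mmio_size it is a register access; inside [gtt_start_offset, gtt_start_offset + GGTT size) it is a GTT entry, and failsafe_emulate_mmio_rw() rebases the offset before use. A self-contained sketch of that dispatch; the sizes are invented.

#include <stdio.h>

struct device_info { unsigned int mmio_size, gtt_start_offset, gtt_size; };

/* models the reg_is_mmio()/reg_is_gtt() split plus the GTT rebase */
static const char *classify(const struct device_info *info, unsigned int *offset)
{
        if (*offset < info->mmio_size)
                return "mmio";
        if (*offset >= info->gtt_start_offset &&
            *offset < info->gtt_start_offset + info->gtt_size) {
                *offset -= info->gtt_start_offset;  /* rebase into the GTT */
                return "gtt";
        }
        return "invalid";
}

int main(void)
{
        struct device_info info = { .mmio_size = 2 << 20,
                                    .gtt_start_offset = 8 << 20,
                                    .gtt_size = 8 << 20 };
        unsigned int offs[] = { 0x2030, (8 << 20) + 0x40, 0x7fffff0 };
        for (int i = 0; i < 3; i++) {
                unsigned int o = offs[i];
                printf("%#x -> %s (offset now %#x)\n", offs[i], classify(&info, &o), o);
        }
        return 0;
}
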
debugfs.c
58 static inline int mmio_diff_handler(struct intel_gvt *gvt, in mmio_diff_handler() argument
65 preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset)); in mmio_diff_handler()
87 struct intel_gvt *gvt = vgpu->gvt; in vgpu_mmio_diff_show() local
97 mutex_lock(&gvt->lock); in vgpu_mmio_diff_show()
98 spin_lock_bh(&gvt->scheduler.mmio_context_lock); in vgpu_mmio_diff_show()
100 mmio_hw_access_pre(gvt->gt); in vgpu_mmio_diff_show()
102 intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param); in vgpu_mmio_diff_show()
103 mmio_hw_access_post(gvt->gt); in vgpu_mmio_diff_show()
105 spin_unlock_bh(&gvt->scheduler.mmio_context_lock); in vgpu_mmio_diff_show()
106 mutex_unlock(&gvt->lock); in vgpu_mmio_diff_show()
[all …]
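
vgpu_mmio_diff_show() holds gvt->lock and the mmio_context spinlock while mmio_diff_handler() compares each tracked register's virtual value against the value read back from hardware, counting differences. A lock-free sketch of just the diff accumulation, with fabricated register values; the real handler indexes by MMIO offset, not array slot.

#include <stdio.h>

struct diff_param { int total, diff; }; /* models struct mmio_diff_param (trimmed) */

static unsigned int vreg[4] = { 1, 2, 3, 4 };   /* virtual (per-vGPU) values */
static unsigned int preg[4] = { 1, 9, 3, 7 };   /* "hardware" values */

static int mmio_diff(unsigned int idx, struct diff_param *param) /* models mmio_diff_handler() */
{
        param->total++;
        if (vreg[idx] != preg[idx]) {
                param->diff++;
                printf("reg %u: virtual %#x != hw %#x\n", idx, vreg[idx], preg[idx]);
        }
        return 0;
}

int main(void)
{
        struct diff_param param = { 0, 0 };
        for (unsigned int i = 0; i < 4; i++)
                mmio_diff(i, &param);
        printf("total %d, differing %d\n", param.total, param.diff);
        return 0;
}
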
firmware.c
69 static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) in mmio_snapshot_handler() argument
71 *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore, in mmio_snapshot_handler()
76 static int expose_firmware_sysfs(struct intel_gvt *gvt) in expose_firmware_sysfs() argument
78 struct intel_gvt_device_info *info = &gvt->device_info; in expose_firmware_sysfs()
79 struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); in expose_firmware_sysfs()
105 memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size); in expose_firmware_sysfs()
110 intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p); in expose_firmware_sysfs()
112 memcpy(gvt->firmware.mmio, p, info->mmio_size); in expose_firmware_sysfs()
128 static void clean_firmware_sysfs(struct intel_gvt *gvt) in clean_firmware_sysfs() argument
130 struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); in clean_firmware_sysfs()
[all …]
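
firmware.c builds its MMIO snapshot by passing mmio_snapshot_handler() to intel_gvt_for_each_tracked_mmio(), which writes each register's value at its own offset inside a flat buffer. A sketch of that visitor pattern; the tracked-offset list and fake_read() are stand-ins for the driver's register table and uncore read.

#include <stdio.h>
#include <stdlib.h>

typedef int (*mmio_handler)(unsigned int offset, void *data);

static unsigned int fake_read(unsigned int offset) { return offset ^ 0xdeadbeef; }

/* models intel_gvt_for_each_tracked_mmio(): walk the tracked offsets and
   hand each one to the caller's handler, stopping on error */
static int for_each_tracked_mmio(mmio_handler handler, void *data)
{
        static const unsigned int tracked[] = { 0x2030, 0x2358, 0xa188 };
        for (size_t i = 0; i < sizeof(tracked) / sizeof(tracked[0]); i++) {
                int ret = handler(tracked[i], data);
                if (ret)
                        return ret;
        }
        return 0;
}

static int snapshot_handler(unsigned int offset, void *data) /* models mmio_snapshot_handler() */
{
        *(unsigned int *)((char *)data + offset) = fake_read(offset);
        return 0;
}

int main(void)
{
        char *buf = calloc(1, 1 << 20);         /* models the firmware mmio image */
        if (!buf)
                return 1;
        for_each_tracked_mmio(snapshot_handler, buf);
        printf("snapshot[0x2030] = %#x\n", *(unsigned int *)(buf + 0x2030));
        free(buf);
        return 0;
}
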
mmio.h
73 intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
74 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
75 bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
77 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
78 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
79 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
80 int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
83 struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
102 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
111 void intel_gvt_restore_fence(struct intel_gvt *gvt);
[all …]
gtt.c
74 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_gvt_ggtt_gmadr_g2h()
92 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_gvt_ggtt_gmadr_h2g()
94 if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr), in intel_gvt_ggtt_gmadr_h2g()
98 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr)) in intel_gvt_ggtt_gmadr_h2g()
100 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt)); in intel_gvt_ggtt_gmadr_h2g()
103 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt)); in intel_gvt_ggtt_gmadr_h2g()
308 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; in gtt_get_entry64()
321 e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index); in gtt_get_entry64()
333 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; in gtt_set_entry64()
346 write_pte64(vgpu->gvt->gt->ggtt, index, e->val64); in gtt_set_entry64()
[all …]
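
The gtt.c hits implement guest/host graphics-address translation: intel_gvt_ggtt_gmadr_h2g() validates a host address, decides whether it falls in the aperture or the hidden window, and rebases it onto the guest's view of that window. A sketch with invented window layouts; the driver's versions also WARN on invalid addresses.

#include <stdio.h>

struct win { unsigned long long host_base, guest_base, size; };

/* models intel_gvt_ggtt_gmadr_h2g(): validate, pick the window, rebase */
static int h2g(const struct win *ap, const struct win *hid,
               unsigned long long h, unsigned long long *g)
{
        if (h >= ap->host_base && h < ap->host_base + ap->size)
                *g = ap->guest_base + (h - ap->host_base);
        else if (h >= hid->host_base && h < hid->host_base + hid->size)
                *g = hid->guest_base + (h - hid->host_base);
        else
                return -1;      /* the drm_WARN(!gvt_gmadr_is_valid(...)) path */
        return 0;
}

int main(void)
{
        struct win aperture = { 0x00000000, 0x00000000, 64ULL << 20 };
        struct win hidden   = { 0x40000000, 0x04000000, 64ULL << 20 };
        unsigned long long g;
        if (!h2g(&aperture, &hidden, 0x40001000ULL, &g))
                printf("host 0x40001000 -> guest %#llx\n", g);
        return 0;
}
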
interrupt.c
147 struct intel_gvt *gvt, in regbase_to_irq_info() argument
150 struct intel_gvt_irq *irq = &gvt->irq; in regbase_to_irq_info()
178 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_reg_imr_handler() local
179 struct intel_gvt_irq_ops *ops = gvt->irq.ops; in intel_vgpu_reg_imr_handler()
208 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_reg_master_irq_handler() local
209 struct intel_gvt_irq_ops *ops = gvt->irq.ops; in intel_vgpu_reg_master_irq_handler()
247 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_reg_ier_handler() local
248 struct drm_i915_private *i915 = gvt->gt->i915; in intel_vgpu_reg_ier_handler()
249 struct intel_gvt_irq_ops *ops = gvt->irq.ops; in intel_vgpu_reg_ier_handler()
258 info = regbase_to_irq_info(gvt, ier_to_regbase(reg)); in intel_vgpu_reg_ier_handler()
[all …]
sched_policy.h
41 int (*init)(struct intel_gvt *gvt);
42 void (*clean)(struct intel_gvt *gvt);
49 void intel_gvt_schedule(struct intel_gvt *gvt);
51 int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
53 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
63 void intel_gvt_kick_schedule(struct intel_gvt *gvt);
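
sched_policy.h exposes the scheduler as an ops table (init/clean plus per-vGPU hooks), which is what lets a policy such as the time-based one be swapped in behind intel_gvt_init_sched_policy(). A trimmed sketch of wiring one policy through such a table; only the two hooks visible above are modeled.

#include <stdio.h>

struct gvt;                             /* opaque in this sketch */
struct sched_policy_ops {               /* models the ops declared above */
        int  (*init)(struct gvt *gvt);
        void (*clean)(struct gvt *gvt);
};

static int  tbs_init(struct gvt *gvt)  { (void)gvt; puts("tbs: init");  return 0; }
static void tbs_clean(struct gvt *gvt) { (void)gvt; puts("tbs: clean"); }

static const struct sched_policy_ops tbs_ops = { tbs_init, tbs_clean };

int main(void)
{
        const struct sched_policy_ops *ops = &tbs_ops;  /* policy selection */
        if (ops->init(NULL) == 0)
                ops->clean(NULL);
        return 0;
}
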
mmio_context.c
165 struct intel_gvt *gvt = engine->i915->gvt; in load_render_mocs() local
167 u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt; in load_render_mocs()
168 u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list; in load_render_mocs()
204 struct intel_gvt *gvt = vgpu->gvt; in restore_context_mmio_for_inhibit() local
206 int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id]; in restore_context_mmio_for_inhibit()
220 for (mmio = gvt->engine_mmio_list.mmio; in restore_context_mmio_for_inhibit()
355 u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list; in handle_tlb_pending_event()
356 u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt; in handle_tlb_pending_event()
482 for (mmio = engine->i915->gvt->engine_mmio_list.mmio; in switch_mmio()
581 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) in intel_gvt_init_engine_mmio_context() argument
[all …]
scheduler.c
85 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915; in sr_oa_regs()
128 struct intel_gvt *gvt = vgpu->gvt; in populate_shadow_context() local
216 if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0) in populate_shadow_context()
288 struct intel_gvt *gvt = container_of(nb, struct intel_gvt, in shadow_context_status_change() local
290 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in shadow_context_status_change()
522 struct intel_gvt *gvt = workload->vgpu->gvt; in prepare_shadow_batch_buffer() local
523 const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd; in prepare_shadow_batch_buffer()
846 pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine) in pick_next_workload() argument
848 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in pick_next_workload()
851 mutex_lock(&gvt->sched_lock); in pick_next_workload()
[all …]
handlers.c
51 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) in intel_gvt_get_device_type() argument
53 struct drm_i915_private *i915 = gvt->gt->i915; in intel_gvt_get_device_type()
69 bool intel_gvt_match_device(struct intel_gvt *gvt, in intel_gvt_match_device() argument
72 return intel_gvt_get_device_type(gvt) & device; in intel_gvt_match_device()
87 struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, in intel_gvt_find_mmio_info() argument
92 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) { in intel_gvt_find_mmio_info()
99 static int new_mmio_info(struct intel_gvt *gvt, in new_mmio_info() argument
107 if (!intel_gvt_match_device(gvt, device)) in new_mmio_info()
122 p = intel_gvt_find_mmio_info(gvt, info->offset); in new_mmio_info()
139 gvt->mmio.mmio_attribute[info->offset / 4] = flags; in new_mmio_info()
[all …]
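
handlers.c keeps per-register metadata in a hashtable keyed by MMIO offset: intel_gvt_find_mmio_info() probes one bucket with hash_for_each_possible() and compares offsets, and new_mmio_info() records attribute flags for each entry. A compact sketch of the same chained-hash insert/lookup (flags and handlers omitted).

#include <stdio.h>
#include <stdlib.h>

#define BUCKETS 64

struct mmio_info {      /* models struct intel_gvt_mmio_info (trimmed) */
        unsigned int offset;
        struct mmio_info *next;
};

static struct mmio_info *table[BUCKETS];

static unsigned int hash(unsigned int offset) { return (offset >> 2) % BUCKETS; }

static void add_mmio_info(unsigned int offset)  /* models the new_mmio_info() insert */
{
        struct mmio_info *e = calloc(1, sizeof(*e));
        if (!e)
                exit(1);
        e->offset = offset;
        e->next = table[hash(offset)];
        table[hash(offset)] = e;
}

static struct mmio_info *find_mmio_info(unsigned int offset) /* models intel_gvt_find_mmio_info() */
{
        for (struct mmio_info *e = table[hash(offset)]; e; e = e->next)
                if (e->offset == offset)
                        return e;
        return NULL;
}

int main(void)
{
        add_mmio_info(0x2030);
        add_mmio_info(0xa188);
        printf("0xa188 %s\n", find_mmio_info(0xa188) ? "found" : "missing");
        printf("0x1234 %s\n", find_mmio_info(0x1234) ? "found" : "missing");
        return 0;
}
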
cfg_space.c
119 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_emulate_cfg_read()
125 offset + bytes > vgpu->gvt->device_info.cfg_space_size)) in intel_vgpu_emulate_cfg_read()
313 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_emulate_cfg_write()
320 offset + bytes > vgpu->gvt->device_info.cfg_space_size)) in intel_vgpu_emulate_cfg_write()
376 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_init_cfg_space() local
377 struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); in intel_vgpu_init_cfg_space()
378 const struct intel_gvt_device_info *info = &gvt->device_info; in intel_vgpu_init_cfg_space()
382 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, in intel_vgpu_init_cfg_space()
397 gvt_aperture_pa_base(gvt), true); in intel_vgpu_init_cfg_space()
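
Both cfg-space emulation paths above reject accesses where offset + bytes would run past device_info.cfg_space_size before touching the vGPU's config image (which intel_vgpu_init_cfg_space() seeds from the firmware snapshot). A sketch of that bounds guard; the 256-byte size and fake vendor ID are illustrative, and the driver additionally checks alignment.

#include <stdio.h>
#include <string.h>

#define CFG_SPACE_SIZE 256      /* illustrative; the driver reads device_info */

static unsigned char cfg_space[CFG_SPACE_SIZE];

static int emulate_cfg_read(unsigned int offset, void *buf, unsigned int bytes)
{
        /* models the bounds check in intel_vgpu_emulate_cfg_read() */
        if (bytes > 4 || offset + bytes > CFG_SPACE_SIZE)
                return -1;
        memcpy(buf, cfg_space + offset, bytes);
        return 0;
}

int main(void)
{
        unsigned int val = 0;
        cfg_space[0] = 0x86;    /* fake vendor ID 0x8086, little-endian */
        cfg_space[1] = 0x80;
        if (!emulate_cfg_read(0, &val, 2))
                printf("vendor id: %#x\n", val);
        printf("oob read: %s\n", emulate_cfg_read(255, &val, 4) ? "rejected" : "allowed");
        return 0;
}
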
mpt.h
55 void *gvt, const void *ops) in intel_gvt_hypervisor_host_init() argument
60 return intel_gvt_host.mpt->host_init(dev, gvt, ops); in intel_gvt_hypervisor_host_init()
66 static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) in intel_gvt_hypervisor_host_exit() argument
72 intel_gvt_host.mpt->host_exit(dev, gvt); in intel_gvt_hypervisor_host_exit()
120 unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; in intel_gvt_hypervisor_inject_msi()
kvmgt.c
153 struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt; in available_instances_show() local
155 type = &gvt->types[mtype_get_type_group_id(mtype)]; in available_instances_show()
174 struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt; in description_show() local
176 type = &gvt->types[mtype_get_type_group_id(mtype)]; in description_show()
204 static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) in intel_gvt_init_vgpu_type_groups() argument
210 for (i = 0; i < gvt->num_types; i++) { in intel_gvt_init_vgpu_type_groups()
211 type = &gvt->types[i]; in intel_gvt_init_vgpu_type_groups()
233 static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) in intel_gvt_cleanup_vgpu_type_groups() argument
238 for (i = 0; i < gvt->num_types; i++) { in intel_gvt_cleanup_vgpu_type_groups()
252 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in gvt_unpin_guest_page()
[all …]
cmd_parser.c
515 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
661 find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode, in find_cmd_entry() argument
666 hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) { in find_cmd_entry()
675 get_cmd_info(struct intel_gvt *gvt, u32 cmd, in get_cmd_info() argument
684 return find_cmd_entry(gvt, opcode, engine); in get_cmd_info()
891 struct intel_gvt *gvt = vgpu->gvt; in cmd_reg_handler() local
895 if (offset + 4 > gvt->device_info.mmio_size) { in cmd_reg_handler()
904 intel_gvt_mmio_set_cmd_accessible(gvt, offset); in cmd_reg_handler()
905 mmio_info = intel_gvt_find_mmio_info(gvt, offset); in cmd_reg_handler()
907 intel_gvt_mmio_set_cmd_write_patch(gvt, offset); in cmd_reg_handler()
[all …]
cmd_parser.h
46 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
48 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
Makefile
2 GVT_DIR := gvt
3 GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
display.c
60 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in edp_pipe_is_enabled()
72 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in pipe_is_enabled()
172 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in emulate_monitor_status_change()
528 intel_gvt_request_service(vgpu->gvt, in vblank_timer_fn()
537 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in setup_virtual_dp_monitor()
620 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in emulate_vblank_on_pipe()
652 for_each_pipe(vgpu->gvt->gt->i915, pipe) in intel_vgpu_emulate_vblank()
667 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_emulate_hotplug()
756 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in intel_vgpu_clean_display()
782 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in intel_vgpu_init_display()
hypercall.h
51 int (*host_init)(struct device *dev, void *gvt, const void *ops);
52 void (*host_exit)(struct device *dev, void *gvt);
gtt.h
226 int intel_gvt_init_gtt(struct intel_gvt *gvt);
228 void intel_gvt_clean_gtt(struct intel_gvt *gvt);
295 void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
/Linux-v5.15/drivers/gpu/drm/i915/
intel_gvt.c
133 return dev_priv->gvt; in intel_gvt_active()
163 intel_gvt_pm_resume(dev_priv->gvt); in intel_gvt_resume()
