Lines Matching +full:south +full:- +full:field
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
42 #define iir_to_regbase(iir) (iir - 0x8)
43 #define ier_to_regbase(ier) (ier - 0xC)
45 #define get_event_virt_handler(irq, e) (irq->events[e].v_handler)
46 #define get_irq_info(irq, e) (irq->events[e].info)
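The two *_to_regbase() macros above undo the fixed per-bank layout: on GEN8 each interrupt bank packs ISR, IMR, IIR and IER at base + 0x0, 0x4, 0x8 and 0xC, which is why IIR subtracts 0x8 and IER subtracts 0xC. A minimal sketch of recovering the sibling registers from a trapped IIR offset, assuming the matching forward helpers (regbase_to_imr()/regbase_to_ier(), defined alongside in this file):

        u32 base = iir_to_regbase(reg);   /* strip the +0x8 IIR offset */
        u32 imr  = regbase_to_imr(base);  /* sibling IMR at base + 0x4 */
        u32 ier  = regbase_to_ier(base);  /* sibling IER at base + 0xC */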
90 [PIPE_A_ODD_FIELD] = "Pipe A odd field",
91 [PIPE_A_EVEN_FIELD] = "Pipe A even field",
98 [PIPE_B_ODD_FIELD] = "Pipe B odd field",
99 [PIPE_B_EVEN_FIELD] = "Pipe B even field",
129 [ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
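irq_name[] is a designated-initializer string table indexed by enum intel_gvt_event_type; the trace and debug paths later in this listing use it to print readable event names, e.g.:

        trace_propagate_event(vgpu->id, irq_name[event], bit);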
150 struct intel_gvt_irq *irq = &gvt->irq; in regbase_to_irq_info()
153 for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) { in regbase_to_irq_info()
154 if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg) in regbase_to_irq_info()
155 return irq->info[i]; in regbase_to_irq_info()
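regbase_to_irq_info() is the reverse lookup: given an ISR-base MMIO offset it scans only the groups registered in irq_info_bitmap and returns the matching intel_gvt_irq_info, or NULL. A usage sketch mirroring the IER/IIR handlers below:

        /* map a trapped IER write back to its bank's info structure */
        struct intel_gvt_irq_info *info =
                regbase_to_irq_info(gvt, ier_to_regbase(reg));
        if (!info)
                return -EINVAL;  /* offset owned by no registered group */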
162 * intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler
178 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_reg_imr_handler()
179 struct intel_gvt_irq_ops *ops = gvt->irq.ops; in intel_vgpu_reg_imr_handler()
182 trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg), in intel_vgpu_reg_imr_handler()
187 ops->check_pending_irq(vgpu); in intel_vgpu_reg_imr_handler()
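The IMR handler only needs to latch the guest's value and re-evaluate; trace_write_ir() logs the old value, the new value and their XOR (the changed bits). The elided middle of the function reduces to roughly this sketch:

        u32 imr = *(u32 *)p_data;      /* value the guest wrote */

        vgpu_vreg(vgpu, reg) = imr;    /* update the virtual IMR only */
        ops->check_pending_irq(vgpu);  /* may re-propagate and inject */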
193 * intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler
208 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_reg_master_irq_handler()
209 struct intel_gvt_irq_ops *ops = gvt->irq.ops; in intel_vgpu_reg_master_irq_handler()
213 trace_write_ir(vgpu->id, "MASTER_IRQ", reg, ier, virtual_ier, in intel_vgpu_reg_master_irq_handler()
226 ops->check_pending_irq(vgpu); in intel_vgpu_reg_master_irq_handler()
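GEN8_MASTER_IRQ is special-cased because only bit 31 (GEN8_MASTER_IRQ_CONTROL, the master interrupt enable) is guest-writable; the handler treats that single bit as an IER and leaves the per-domain status bits alone. A sketch of the elided update:

        /* only the master-enable bit may change; status bits stay */
        ier &= GEN8_MASTER_IRQ_CONTROL;
        vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
        vgpu_vreg(vgpu, reg) |= ier;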
232 * intel_vgpu_reg_ier_handler - Generic IER write emulation handler
247 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_reg_ier_handler()
248 struct drm_i915_private *i915 = gvt->gt->i915; in intel_vgpu_reg_ier_handler()
249 struct intel_gvt_irq_ops *ops = gvt->irq.ops; in intel_vgpu_reg_ier_handler()
253 trace_write_ir(vgpu->id, "IER", reg, ier, vgpu_vreg(vgpu, reg), in intel_vgpu_reg_ier_handler()
259 if (drm_WARN_ON(&i915->drm, !info)) in intel_vgpu_reg_ier_handler()
260 return -EINVAL; in intel_vgpu_reg_ier_handler()
262 if (info->has_upstream_irq) in intel_vgpu_reg_ier_handler()
265 ops->check_pending_irq(vgpu); in intel_vgpu_reg_ier_handler()
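After storing the new IER the handler resolves the owning bank via ier_to_regbase(), and because enabling a bit can make an already-latched IIR bit visible upstream, it re-runs the propagation before the final pending-IRQ check, roughly:

        vgpu_vreg(vgpu, reg) = ier;
        info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
        if (info && info->has_upstream_irq)
                update_upstream_irq(vgpu, info);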
271 * intel_vgpu_reg_iir_handler - Generic IIR write emulation handler
286 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_reg_iir_handler()
287 struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt, in intel_vgpu_reg_iir_handler()
291 trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg), in intel_vgpu_reg_iir_handler()
294 if (drm_WARN_ON(&i915->drm, !info)) in intel_vgpu_reg_iir_handler()
295 return -EINVAL; in intel_vgpu_reg_iir_handler()
299 if (info->has_upstream_irq) in intel_vgpu_reg_iir_handler()
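IIR follows the hardware's write-one-to-clear convention, so unlike the IMR/IER paths the written value clears bits instead of being stored, and the cleared bank is then re-propagated upstream, roughly:

        vgpu_vreg(vgpu, reg) &= ~iir;  /* W1C: written 1s ack the bits */
        if (info->has_upstream_irq)
                update_upstream_irq(vgpu, info);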
318 { -1, -1, ~0 },
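Line 318 is the sentinel terminating gen8_irq_map[]; both loops below stop on up_irq_bit == -1. Each real entry is an {upstream group, upstream bit, downstream group, downstream 32-bit mask} route. An illustrative entry (values assumed, check the full table):

        /* if any of GT0's low 16 pending+enabled bits is set, reflect
         * it as bit 0 of the master register (illustrative values) */
        { INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff },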
324 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in update_upstream_irq()
325 struct intel_gvt_irq *irq = &vgpu->gvt->irq; in update_upstream_irq()
326 struct intel_gvt_irq_map *map = irq->irq_map; in update_upstream_irq()
332 regbase_to_iir(i915_mmio_reg_offset(info->reg_base))) in update_upstream_irq()
334 regbase_to_ier(i915_mmio_reg_offset(info->reg_base))); in update_upstream_irq()
336 if (!info->has_upstream_irq) in update_upstream_irq()
339 for (map = irq->irq_map; map->up_irq_bit != -1; map++) { in update_upstream_irq()
340 if (info->group != map->down_irq_group) in update_upstream_irq()
344 up_irq_info = irq->info[map->up_irq_group]; in update_upstream_irq()
346 drm_WARN_ON(&i915->drm, up_irq_info != in update_upstream_irq()
347 irq->info[map->up_irq_group]); in update_upstream_irq()
349 bit = map->up_irq_bit; in update_upstream_irq()
351 if (val & map->down_irq_bitmask) in update_upstream_irq()
357 if (drm_WARN_ON(&i915->drm, !up_irq_info)) in update_upstream_irq()
360 if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) { in update_upstream_irq()
361 u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base); in update_upstream_irq()
367 i915_mmio_reg_offset(up_irq_info->reg_base)); in update_upstream_irq()
369 i915_mmio_reg_offset(up_irq_info->reg_base)); in update_upstream_irq()
374 if (up_irq_info->has_upstream_irq) in update_upstream_irq()
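update_upstream_irq() in full: val is the child bank's IIR & IER (lines 332-334), every map entry routing this downstream group contributes its upstream bit to set_bits or clear_bits, the MASTER group keeps status in the register itself (lines 360-361) while intermediate levels OR set_bits into the upstream IIR filtered by its IMR, and the walk recurses while the upstream bank has a parent of its own. A worked example with assumed values:

        /* child IIR = 0x00010002, child IER = 0x00010000
         *   -> val = 0x00010000
         * map entry { up bit 4, this down group, mask 0xffff0000 }:
         *   (val & 0xffff0000) != 0, so bit 4 joins set_bits and is
         *   ORed into the upstream register; a miss would instead put
         *   bit 4 into clear_bits. */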
384 for (map = irq->irq_map; map->up_irq_bit != -1; map++) { in init_irq_map()
385 up_info = irq->info[map->up_irq_group]; in init_irq_map()
386 up_bit = map->up_irq_bit; in init_irq_map()
387 down_info = irq->info[map->down_irq_group]; in init_irq_map()
389 set_bit(up_bit, up_info->downstream_irq_bitmap); in init_irq_map()
390 down_info->has_upstream_irq = true; in init_irq_map()
392 gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n", in init_irq_map()
393 up_info->group, up_bit, in init_irq_map()
394 down_info->group, map->down_irq_bitmask); in init_irq_map()
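init_irq_map() denormalizes the static table into per-group state: each upstream bank learns which of its bits have children (downstream_irq_bitmap) and each downstream bank gets has_upstream_irq set, the exact flag the IER/IIR handlers and gen8_check_pending_irq() test at runtime. The debug line prints one route per entry, e.g. (illustrative output):

        [up] grp 0 bit 0 -> [down] grp 1 bitmask ffff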
415 reg_base = i915_mmio_reg_offset(info->reg_base); in propagate_event()
416 bit = irq->events[event].bit; in propagate_event()
420 trace_propagate_event(vgpu->id, irq_name[event], bit); in propagate_event()
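propagate_event() is the common injection primitive: it resolves the event's bank and bit, then latches the bit into the virtual IIR only when the matching IMR bit is clear, roughly (assuming the regbase_to_imr() helper from the top of the file):

        if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
                                regbase_to_imr(reg_base)))) {
                trace_propagate_event(vgpu->id, irq_name[event], bit);
                set_bit(bit, (void *)&vgpu_vreg(vgpu,
                                regbase_to_iir(reg_base)));
        }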
430 if (!vgpu->irq.irq_warn_once[event]) { in handle_default_event_virt()
432 vgpu->id, event, irq_name[event]); in handle_default_event_virt()
433 vgpu->irq.irq_warn_once[event] = true; in handle_default_event_virt()
443 .name = #regname"-IRQ", \
445 .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \
462 .name = "PCH-IRQ",
464 .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
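Lines 443-445 belong to a DEFINE_*_IRQ_INFO(regname, regbase) macro that stamps out one intel_gvt_irq_info per register bank, every bit initially bound to a reserved event until gen8_init_irq() assigns the real ones; the PCH-IRQ instance at lines 462-464 is written out by hand. A sketch of one expansion (bank name and register assumed):

        static struct intel_gvt_irq_info gen8_gt0_info = {
                .name = "gt0-IRQ",
                .reg_base = GEN8_GT_ISR(0),  /* assumed bank base */
                .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
                        INTEL_GVT_EVENT_RESERVED},
        };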
470 struct intel_gvt_irq *irq = &vgpu->gvt->irq; in gen8_check_pending_irq()
477 for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) { in gen8_check_pending_irq()
478 struct intel_gvt_irq_info *info = irq->info[i]; in gen8_check_pending_irq()
481 if (!info->has_upstream_irq) in gen8_check_pending_irq()
484 reg_base = i915_mmio_reg_offset(info->reg_base); in gen8_check_pending_irq()
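gen8_check_pending_irq() bails out while the master enable (bit 31) is clear; otherwise, for every group with an upstream link it re-propagates any bank that still has enabled pending bits, and finally injects a virtual MSI if any master status bit besides the control bit is set. The per-bank test elided after line 484 is roughly:

        if (vgpu_vreg(vgpu, regbase_to_iir(reg_base)) &
            vgpu_vreg(vgpu, regbase_to_ier(reg_base)))
                update_upstream_irq(vgpu, info);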
502 s->events[e].bit = b; \ in gen8_init_irq()
503 s->events[e].info = s->info[i]; \ in gen8_init_irq()
504 s->info[i]->bit_to_event[b] = e;\ in gen8_init_irq()
509 s->info[g] = i; \ in gen8_init_irq()
510 (i)->group = g; \ in gen8_init_irq()
511 set_bit(g, s->irq_info_bitmap); \ in gen8_init_irq()
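The two helper macros build the event mapping in both directions: SET_IRQ_GROUP registers a bank under its group id and marks it live in irq_info_bitmap; SET_BIT_INFO binds one event to a (bank, bit) pair and records the reverse bit_to_event entry. A usage sketch (the info name and bit number are assumed for illustration):

        SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info);
        SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A);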
543 if (HAS_ENGINE(gvt->gt, VCS1)) { in gen8_init_irq()
575 if (IS_BROADWELL(gvt->gt->i915)) { in gen8_init_irq()
588 } else if (GRAPHICS_VER(gvt->gt->i915) >= 9) { in gen8_init_irq()
613 * intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU
625 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_trigger_virtual_event()
626 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_trigger_virtual_event()
627 struct intel_gvt_irq *irq = &gvt->irq; in intel_vgpu_trigger_virtual_event()
629 struct intel_gvt_irq_ops *ops = gvt->irq.ops; in intel_vgpu_trigger_virtual_event()
632 drm_WARN_ON(&i915->drm, !handler); in intel_vgpu_trigger_virtual_event()
636 ops->check_pending_irq(vgpu); in intel_vgpu_trigger_virtual_event()
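intel_vgpu_trigger_virtual_event() is the entry point the device model calls: it fetches the event's virtual handler (handle_default_event_virt unless overridden), runs it to latch the IIR bit, then lets check_pending_irq() decide whether a virtual MSI is due. A hypothetical caller:

        /* e.g. from display flip emulation (event name illustrative) */
        intel_vgpu_trigger_virtual_event(vgpu, PRIMARY_A_FLIP_DONE);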
645 irq->events[i].info = NULL; in init_events()
646 irq->events[i].v_handler = handle_default_event_virt; in init_events()
651 * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
654 * This function is called at driver loading stage, to initialize the GVT-g IRQ
662 struct intel_gvt_irq *irq = &gvt->irq; in intel_gvt_init_irq()
666 irq->ops = &gen8_irq_ops; in intel_gvt_init_irq()
667 irq->irq_map = gen8_irq_map; in intel_gvt_init_irq()
673 irq->ops->init_irq(irq); in intel_gvt_init_irq()
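The initialization order matters: init_events() runs first so every slot carries the warn-once default handler, then the gen-specific binding; in the full file init_irq_map() follows immediately, so the upstream links derived from gen8_irq_map exist before any event fires. Compressed:

        init_events(irq);         /* all events -> handle_default_event_virt */
        irq->ops->init_irq(irq);  /* gen8_init_irq(): groups + bit bindings */
        init_irq_map(irq);        /* derive has_upstream_irq from the map */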