Lines Matching +full:edid +full:- +full:emulation

2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
50 #include "edid.h"
62 /* Describe per-platform limitations. */
110 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
124 #define vgpu_opregion(vgpu) (&(vgpu->opregion))
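vgpu_cfg_space() above resolves to the vGPU's in-memory virtual PCI configuration space, which is what trapped config-space accesses are served from. A minimal sketch of peeking at the emulated PCI command word through it; the helper name is hypothetical and PCI_COMMAND is the standard 0x04 config-space offset:

    /* Hypothetical helper: read the emulated PCI_COMMAND word of a vGPU
     * straight out of its virtual config space buffer. */
    static u16 read_virtual_pci_command(struct intel_vgpu *vgpu)
    {
        return *(u16 *)(vgpu_cfg_space(vgpu) + PCI_COMMAND);
    }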
240 (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
278 /* This reg is in GVT's mmio save-restore list and in hardware
309 enum intel_vgpu_edid edid; member
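The edid member records which canned EDID (and thus which resolution) the vGPU's virtual display advertises. A small sketch of switching on it, assuming the GVT_EDID_* enumerators declared by the edid.h included above; the function name is illustrative:

    /* Illustrative mapping from the emulated EDID id to a resolution string.
     * Assumes the GVT_EDID_* values from "edid.h". */
    static const char *edid_res_str(enum intel_vgpu_edid edid)
    {
        switch (edid) {
        case GVT_EDID_1024_768:
            return "1024x768";
        case GVT_EDID_1920_1200:
            return "1920x1200";
        default:
            return "unknown";
        }
    }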
369 return i915->gvt; in to_gvt()
379 /* per-vGPU vblank emulation request */
388 set_bit(service, (void *)&gvt->service_request); in intel_gvt_request_service()
389 wake_up(&gvt->service_thread_wq); in intel_gvt_request_service()
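intel_gvt_request_service() is the producer side of the GVT service thread: it sets the request bit and wakes the thread, which handles the work outside the caller's context. A sketch of raising the per-vGPU vblank emulation request that the comment above refers to; the INTEL_GVT_REQUEST_EMULATE_VBLANK base value is assumed from the enum that comment belongs to, and the wrapper name is hypothetical:

    /* Hypothetical wrapper: ask the service thread to emulate a vblank for
     * one vGPU.  Assumes the per-vGPU request id is
     * INTEL_GVT_REQUEST_EMULATE_VBLANK + vgpu->id, as the "per-vGPU vblank
     * emulation request" comment above suggests. */
    static void request_vblank_emulation(struct intel_vgpu *vgpu)
    {
        intel_gvt_request_service(vgpu->gvt,
                                  INTEL_GVT_REQUEST_EMULATE_VBLANK + vgpu->id);
    }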
403 #define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)
406 #define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end
407 #define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start
409 #define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total
410 #define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
411 #define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
415 + gvt_aperture_sz(gvt) - 1)
420 + gvt_hidden_sz(gvt) - 1)
422 #define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
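Together these macros carve the host GGTT into the CPU-mappable aperture (low GM) and the remaining hidden (high) GM, plus the fence register count. A sketch that logs the resulting layout, assuming the gvt_aperture_gmadr_base()/gvt_hidden_gmadr_base() and matching *_end macros whose continuation lines appear above, and the driver's gvt_dbg_core() debug macro:

    /* Sketch: dump the host graphics-memory partition derived from the
     * macros above (aperture = low GM, hidden = high GM). */
    static void dump_gm_layout(struct intel_gvt *gvt)
    {
        gvt_dbg_core("aperture: [0x%llx, 0x%llx], size 0x%llx\n",
                     (u64)gvt_aperture_gmadr_base(gvt),
                     (u64)gvt_aperture_gmadr_end(gvt),
                     (u64)gvt_aperture_sz(gvt));
        gvt_dbg_core("hidden:   [0x%llx, 0x%llx], size 0x%llx, fences %u\n",
                     (u64)gvt_hidden_gmadr_base(gvt),
                     (u64)gvt_hidden_gmadr_end(gvt),
                     (u64)gvt_hidden_sz(gvt),
                     gvt_fence_sz(gvt));
    }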
425 #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
426 #define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
427 #define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
428 #define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
431 (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
433 #define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
436 (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
440 (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
444 (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
446 #define vgpu_fence_base(vgpu) (vgpu->fence.base)
447 #define vgpu_fence_sz(vgpu) (vgpu->fence.size)
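The per-vGPU counterparts describe the slice of each GM range allocated to one vGPU (gm.low_gm_node / gm.high_gm_node) and its fence registers. A hypothetical range check built from the base/end macros whose bodies appear above; the real driver has its own validation helpers, this is only to show how the macros compose:

    /* Hypothetical helper: does a guest graphics-memory address fall inside
     * either of this vGPU's assigned ranges (aperture or hidden GM)? */
    static bool vgpu_gmadr_in_range(struct intel_vgpu *vgpu, u64 gmadr)
    {
        return (gmadr >= vgpu_aperture_gmadr_base(vgpu) &&
                gmadr <= vgpu_aperture_gmadr_end(vgpu)) ||
               (gmadr >= vgpu_hidden_gmadr_base(vgpu) &&
                gmadr <= vgpu_hidden_gmadr_end(vgpu));
    }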
462 (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
464 (*(u32 *)(vgpu->mmio.vreg + (offset)))
466 (*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
468 (*(u64 *)(vgpu->mmio.vreg + (offset)))
471 idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
472 for_each_if(vgpu->active)
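The vreg accessors above read or write a register's value inside the vGPU's virtual MMIO block (mmio.vreg) rather than touching hardware, and the idr walk is the body of the driver's iterator over active vGPUs. A sketch combining the two, assuming the conventional names vgpu_vreg() (offset-based 32-bit accessor) and for_each_active_vgpu() for the constructs whose bodies are shown, and a caller that already serializes against vGPU creation/destruction:

    /* Sketch: store the same value into one virtual register of every
     * active vGPU.  Assumes the caller holds the lock protecting
     * gvt->vgpu_idr. */
    static void set_vreg_for_all_vgpus(struct intel_gvt *gvt,
                                       u32 offset, u32 val)
    {
        struct intel_vgpu *vgpu;
        int id;

        for_each_active_vgpu(gvt, vgpu, id)
            vgpu_vreg(vgpu, offset) = val;
    }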
485  * only update bits 31 - 4, in intel_vgpu_write_pci_bar()
486  * leave bits 3 - 0 unchanged. in intel_vgpu_write_pci_bar()
560 return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) & in intel_vgpu_get_bar_gpa()
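The two comment fragments above describe the masking applied when the guest rewrites a BAR: the writable address bits (31:4) are taken from the guest value, while the low type/flag bits (3:0) of the emulated BAR are preserved. A minimal sketch of that masking, using GENMASK() from <linux/bits.h>; the function name and calling convention are illustrative only:

    /* Sketch of the BAR-write masking described above: take bits 31:4 from
     * the value the guest wrote, keep bits 3:0 of the current BAR. */
    static u32 mask_bar_write(u32 cur_bar, u32 guest_val)
    {
        return (guest_val & GENMASK(31, 4)) | (cur_bar & GENMASK(3, 0));
    }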
583 intel_runtime_pm_get(gt->uncore->rpm); in mmio_hw_access_pre()
588 intel_runtime_pm_put_unchecked(gt->uncore->rpm); in mmio_hw_access_post()
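mmio_hw_access_pre()/mmio_hw_access_post() bracket a passthrough access to real hardware with a runtime-PM reference so the device cannot runtime-suspend in the middle of it. A usage sketch, assuming intel_uncore_read() as the underlying hardware read and a caller that already has the intel_gt pointer:

    /* Sketch: read a register from real hardware on behalf of a vGPU while
     * holding a runtime-PM reference for the duration of the access. */
    static u32 read_hw_reg(struct intel_gt *gt, i915_reg_t reg)
    {
        u32 val;

        mmio_hw_access_pre(gt);
        val = intel_uncore_read(gt->uncore, reg);
        mmio_hw_access_post(gt);

        return val;
    }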
592  * intel_gvt_mmio_set_accessed - mark an MMIO as accessed
600 gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED; in intel_gvt_mmio_set_accessed()
604  * intel_gvt_mmio_is_cmd_accessible - check if an MMIO can be accessed by command
614 return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS; in intel_gvt_mmio_is_cmd_accessible()
618 * intel_gvt_mmio_set_cmd_accessible -
627 gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS; in intel_gvt_mmio_set_cmd_accessible()
631  * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed unaligned
639 return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN; in intel_gvt_mmio_is_unalign()
643  * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
654 return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; in intel_gvt_mmio_has_mode_mask()
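All of these helpers share one per-register attribute table: every DWORD-aligned MMIO offset maps to the entry at mmio_attribute[offset >> 2], and the F_* flags (F_ACCESSED, F_CMD_ACCESS, F_UNALIGN, F_MODE_MASK, ...) are OR'd into or tested against that entry. A sketch of the common lookup they all reduce to; the consolidated helper is hypothetical:

    /* Hypothetical consolidation of the accessors above: one attribute
     * entry per 4-byte register offset, F_* flags as bits within it. */
    static bool mmio_has_flag(struct intel_gvt *gvt, unsigned int offset,
                              unsigned int flag)
    {
        return gvt->mmio.mmio_attribute[offset >> 2] & flag;
    }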
658 * intel_gvt_mmio_is_sr_in_ctx -
670 return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX; in intel_gvt_mmio_is_sr_in_ctx()
674 * intel_gvt_mmio_set_sr_in_ctx -
675 * mask an MMIO in GVT's mmio save-restore list and also
684 gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX; in intel_gvt_mmio_set_sr_in_ctx()
689 * intel_gvt_mmio_set_cmd_write_patch -
699 gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH; in intel_gvt_mmio_set_cmd_write_patch()
703  * intel_gvt_mmio_is_cmd_write_patch - check if an MMIO's cmd access needs to
714 return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH; in intel_gvt_mmio_is_cmd_write_patch()
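F_SR_IN_CTX and F_CMD_WRITE_PATCH are set once, when the MMIO handlers are registered, and consulted later by the context save/restore and command-scanning paths. A sketch of how a registration routine might tag a register with both properties; the routine name is hypothetical:

    /* Hypothetical registration-time tagging: the register is in GVT's
     * save-restore list and in the hardware logical context, and command
     * writes to it need to be patched before submission. */
    static void tag_ctx_patched_reg(struct intel_gvt *gvt, unsigned int offset)
    {
        intel_gvt_mmio_set_sr_in_ctx(gvt, offset);
        intel_gvt_mmio_set_cmd_write_patch(gvt, offset);
    }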
718 * intel_gvt_read_gpa - copy data from GPA to host data buffer
730 if (!vgpu->attached) in intel_gvt_read_gpa()
731 return -ESRCH; in intel_gvt_read_gpa()
732 return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false); in intel_gvt_read_gpa()
736 * intel_gvt_write_gpa - copy data from host data buffer to GPA
748 if (!vgpu->attached) in intel_gvt_write_gpa()
749 return -ESRCH; in intel_gvt_write_gpa()
750 return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true); in intel_gvt_write_gpa()
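Both helpers forward to vfio_dma_rw() on the vGPU's vfio_device, with the final argument selecting the direction, and return -ESRCH when no mediated device is attached. A round-trip sketch that reads a guest-resident descriptor, updates one field, and writes it back; the structure and its fields are purely illustrative:

    /* Illustrative guest-visible structure; not a real GVT descriptor. */
    struct guest_desc {
        u32 status;
        u32 payload;
    };

    /* Sketch: read a descriptor from guest physical memory, mark it done,
     * and write the status back.  Returns 0 or a negative errno. */
    static int complete_guest_desc(struct intel_vgpu *vgpu, unsigned long gpa)
    {
        struct guest_desc desc;
        int ret;

        ret = intel_gvt_read_gpa(vgpu, gpa, &desc, sizeof(desc));
        if (ret)
            return ret;

        desc.status = 1;    /* hypothetical "done" flag */

        return intel_gvt_write_gpa(vgpu, gpa, &desc, sizeof(desc));
    }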