
Searched full:vm (Results 1 – 25 of 1085) sorted by relevance


/Linux-v5.10/tools/testing/selftests/kvm/lib/aarch64/
processor.c
19 static uint64_t page_align(struct kvm_vm *vm, uint64_t v) in page_align() argument
21 return (v + vm->page_size) & ~(vm->page_size - 1); in page_align()
24 static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) in pgd_index() argument
26 unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; in pgd_index()
27 uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; in pgd_index()
32 static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) in pud_index() argument
34 unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; in pud_index()
35 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; in pud_index()
37 TEST_ASSERT(vm->pgtable_levels == 4, in pud_index()
38 "Mode %d does not have 4 page table levels", vm->mode); in pud_index()
[all …]
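The pgd_index() arithmetic above is self-contained and easy to verify outside the harness. A minimal user-space sketch, assuming the common 4K-page, 4-level, 48-bit-VA configuration (variable names here are illustrative, not the selftest's):

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical config: 4K pages, 4 translation levels, 48 VA bits. */
	unsigned int page_shift = 12, pgtable_levels = 4, va_bits = 48;

	/* Each level resolves page_shift - 3 bits: a 4K table holds
	 * 2^9 = 512 eight-byte descriptors. */
	unsigned int shift = (pgtable_levels - 1) * (page_shift - 3) + page_shift;
	uint64_t mask = (1ULL << (va_bits - shift)) - 1;
	uint64_t gva = 0x0000004012345000ULL;	/* arbitrary guest VA */

	printf("shift=%u mask=0x%" PRIx64 " pgd_index=%" PRIu64 "\n",
	       shift, mask, (gva >> shift) & mask);
	return 0;
}
```

For this configuration the shift comes out to 39 and the mask to 0x1ff, i.e. nine index bits per level.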
/Linux-v5.10/drivers/virtio/
virtio_mem.c
168 static int register_virtio_mem_device(struct virtio_mem *vm) in register_virtio_mem_device() argument
177 list_add_rcu(&vm->next, &virtio_mem_devices); in register_virtio_mem_device()
187 static void unregister_virtio_mem_device(struct virtio_mem *vm) in unregister_virtio_mem_device() argument
191 list_del_rcu(&vm->next); in unregister_virtio_mem_device()
218 static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm, in virtio_mem_phys_to_sb_id() argument
224 return (addr - mb_addr) / vm->subblock_size; in virtio_mem_phys_to_sb_id()
230 static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id, in virtio_mem_mb_set_state() argument
233 const unsigned long idx = mb_id - vm->first_mb_id; in virtio_mem_mb_set_state()
236 old_state = vm->mb_state[idx]; in virtio_mem_mb_set_state()
237 vm->mb_state[idx] = state; in virtio_mem_mb_set_state()
[all …]
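virtio_mem_phys_to_sb_id() is pure address arithmetic: locate the enclosing memory block, then divide the offset into it by the subblock size. A standalone sketch (block and subblock sizes are hypothetical placeholders, not virtio-mem's actual defaults):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical geometry: 128 MiB memory blocks split into 4 MiB subblocks. */
#define MB_SIZE		(128ULL << 20)
#define SB_SIZE		(4ULL << 20)
#define FIRST_MB_ID	10ULL	/* id of the first managed memory block */

int main(void)
{
	uint64_t addr = FIRST_MB_ID * MB_SIZE + 37 * SB_SIZE + 123;

	uint64_t mb_id   = addr / MB_SIZE;             /* which memory block */
	uint64_t mb_addr = mb_id * MB_SIZE;            /* its base address   */
	uint64_t sb_id   = (addr - mb_addr) / SB_SIZE; /* subblock within it */
	uint64_t idx     = mb_id - FIRST_MB_ID;        /* state-array index  */

	printf("mb_id=%llu sb_id=%llu idx=%llu\n",
	       (unsigned long long)mb_id, (unsigned long long)sb_id,
	       (unsigned long long)idx);
	return 0;
}
```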
/Linux-v5.10/tools/testing/selftests/kvm/include/
kvm_util.h
22 * structure kvm_util is using to maintain the state of a VM.
65 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
66 int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
68 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
74 void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
75 void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
78 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
81 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
84 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
87 * VM VCPU Dump
[all …]
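These declarations form the harness's VM-lifecycle API. A hedged usage sketch: vm_create_default() and kvm_vm_free() are assumed from the same header, and the capability and poll time are placeholders chosen only for illustration:

```c
#include <stdio.h>
#include "kvm_util.h"

#define VCPU_ID 0

static void guest_code(void)
{
	/* Placeholder guest; a real test would GUEST_SYNC/GUEST_DONE here. */
}

int main(void)
{
	struct kvm_vm *vm = vm_create_default(VCPU_ID, 0, guest_code);

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HALT_POLL,	/* any VM-scoped capability */
		.args[0] = 100000,		/* max halt-poll time, ns */
	};
	vm_enable_cap(vm, &cap);

	vm_dump(stdout, vm, 0);
	kvm_vm_free(vm);
	return 0;
}
```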
/Linux-v5.10/tools/testing/selftests/kvm/lib/
kvm_util.c
66 /* VM Enable Capability
69 * vm - Virtual Machine
76 * Enables a capability (KVM_CAP_*) on the VM.
78 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) in vm_enable_cap() argument
82 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); in vm_enable_cap()
92 * vm - Virtual Machine
102 int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, in vcpu_enable_cap() argument
105 struct vcpu *vcpu = vcpu_find(vm, vcpu_id); in vcpu_enable_cap()
117 static void vm_open(struct kvm_vm *vm, int perm) in vm_open() argument
119 vm->kvm_fd = open(KVM_DEV_PATH, perm); in vm_open()
[all …]
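As the ioctl at line 82 shows, vm_enable_cap() boils down to a single KVM_ENABLE_CAP ioctl on the VM fd; the same pattern works without the harness. A minimal sketch (error handling mostly elided, capability chosen only for illustration):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int vm_fd = ioctl(kvm, KVM_CREATE_VM, 0);	/* 0: default machine type */

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HALT_POLL,	/* illustrative VM-scoped cap */
		.args[0] = 100000,
	};

	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
		perror("KVM_ENABLE_CAP");

	return 0;
}
```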
/Linux-v5.10/tools/testing/selftests/kvm/lib/s390x/
processor.c
18 void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot) in virt_pgd_alloc() argument
22 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", in virt_pgd_alloc()
23 vm->page_size); in virt_pgd_alloc()
25 if (vm->pgd_created) in virt_pgd_alloc()
28 paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION, in virt_pgd_alloc()
30 memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_pgd_alloc()
32 vm->pgd = paddr; in virt_pgd_alloc()
33 vm->pgd_created = true; in virt_pgd_alloc()
41 static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot) in virt_alloc_region() argument
45 taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1, in virt_alloc_region()
[all …]
/Linux-v5.10/drivers/gpu/drm/lima/
lima_vm.c
18 struct lima_vm *vm; member
35 static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end) in lima_vm_unmap_range() argument
43 vm->bts[pbe].cpu[bte] = 0; in lima_vm_unmap_range()
47 static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va) in lima_vm_map_page() argument
52 if (!vm->bts[pbe].cpu) { in lima_vm_map_page()
57 vm->bts[pbe].cpu = dma_alloc_wc( in lima_vm_map_page()
58 vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT, in lima_vm_map_page()
59 &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); in lima_vm_map_page()
60 if (!vm->bts[pbe].cpu) in lima_vm_map_page()
63 pts = vm->bts[pbe].dma; in lima_vm_map_page()
[all …]
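lima_vm_map_page() resolves a GPU virtual address in two steps: an index (pbe) selecting a block of page tables in bts[], then an entry index (bte) within that block. A generic sketch of the split; the shift values are hypothetical, not lima's actual constants:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical two-level layout: 4K pages and 1024 PTEs per table, so
 * the low 12 bits are the page offset, the next 10 bits pick the PTE
 * (bte), and the remaining high bits pick the table block (pbe). */
#define PAGE_SHIFT	12
#define PT_SHIFT	10

int main(void)
{
	uint32_t va = 0x01234000;
	uint32_t pbe = va >> (PAGE_SHIFT + PT_SHIFT);
	uint32_t bte = (va >> PAGE_SHIFT) & ((1u << PT_SHIFT) - 1);

	printf("va=0x%08x -> pbe=%u bte=%u\n", va, pbe, bte);
	return 0;
}
```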
/Linux-v5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.c
46 * for the entire GPU, there are multiple VM page tables active
47 * at any given time. The VM page tables can contain a mix
51 * Each VM has an ID associated with it and there is a page table
88 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
92 static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) in amdgpu_vm_eviction_lock() argument
94 mutex_lock(&vm->eviction_lock); in amdgpu_vm_eviction_lock()
95 vm->saved_flags = memalloc_nofs_save(); in amdgpu_vm_eviction_lock()
98 static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) in amdgpu_vm_eviction_trylock() argument
100 if (mutex_trylock(&vm->eviction_lock)) { in amdgpu_vm_eviction_trylock()
101 vm->saved_flags = memalloc_nofs_save(); in amdgpu_vm_eviction_trylock()
[all …]
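The eviction_lock comment is the key detail: the lock can be taken from an MMU notifier, i.e. from reclaim context, so while it is held no allocation may recurse into the filesystem. memalloc_nofs_save()/memalloc_nofs_restore() scope that restriction to the critical section. A kernel-context sketch of the pattern (not the amdgpu code itself):

```c
#include <linux/mutex.h>
#include <linux/sched/mm.h>	/* memalloc_nofs_save/restore */

/* Any allocation between save and restore is implicitly GFP_NOFS, so
 * reclaim under this lock cannot re-enter filesystem code and deadlock. */
static void touch_eviction_state(struct mutex *eviction_lock)
{
	unsigned int nofs_flags;

	mutex_lock(eviction_lock);
	nofs_flags = memalloc_nofs_save();

	/* ... update eviction state, possibly allocating memory ... */

	memalloc_nofs_restore(nofs_flags);
	mutex_unlock(eviction_lock);
}
```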
/Linux-v5.10/drivers/gpu/drm/i915/selftests/
mock_gtt.c
27 static void mock_insert_page(struct i915_address_space *vm, in mock_insert_page() argument
35 static void mock_insert_entries(struct i915_address_space *vm, in mock_insert_entries() argument
41 static void mock_bind_ppgtt(struct i915_address_space *vm, in mock_bind_ppgtt() argument
51 static void mock_unbind_ppgtt(struct i915_address_space *vm, in mock_unbind_ppgtt() argument
56 static void mock_cleanup(struct i915_address_space *vm) in mock_cleanup() argument
60 static void mock_clear_range(struct i915_address_space *vm, in mock_clear_range() argument
73 ppgtt->vm.gt = &i915->gt; in mock_ppgtt()
74 ppgtt->vm.i915 = i915; in mock_ppgtt()
75 ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); in mock_ppgtt()
76 ppgtt->vm.file = ERR_PTR(-ENODEV); in mock_ppgtt()
[all …]
i915_gem_gtt.c
160 if (!ppgtt->vm.allocate_va_range) in igt_ppgtt_alloc()
171 limit = min(ppgtt->vm.total, limit); in igt_ppgtt_alloc()
177 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size); in igt_ppgtt_alloc()
181 err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); in igt_ppgtt_alloc()
183 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in igt_ppgtt_alloc()
187 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size); in igt_ppgtt_alloc()
190 ppgtt->vm.clear_range(&ppgtt->vm, 0, size); in igt_ppgtt_alloc()
192 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in igt_ppgtt_alloc()
199 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last); in igt_ppgtt_alloc()
203 err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); in igt_ppgtt_alloc()
[all …]
/Linux-v5.10/drivers/gpu/drm/radeon/
radeon_vm.c
37 * for the entire GPU, there are multiple VM page tables active
38 * at any given time. The VM page tables can contain a mix
42 * Each VM has an ID associated with it and there is a page table
78 * radeon_vm_manager_init - init the vm manager
82 * Init the vm manager (cayman+).
100 * radeon_vm_manager_fini - tear down the vm manager
104 * Tear down the VM manager (cayman+).
120 * radeon_vm_get_bos - add the vm BOs to a validation list
122 * @vm: vm providing the BOs
129 struct radeon_vm *vm, in radeon_vm_get_bos() argument
[all …]
/Linux-v5.10/sound/pci/ctxfi/
ctvmem.c
26 * Find or create vm block based on requested @size.
30 get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc) in get_vm_block() argument
36 if (size > vm->size) { in get_vm_block()
42 mutex_lock(&vm->lock); in get_vm_block()
43 list_for_each(pos, &vm->unused) { in get_vm_block()
48 if (pos == &vm->unused) in get_vm_block()
52 /* Move the vm node from unused list to used list directly */ in get_vm_block()
53 list_move(&entry->list, &vm->used); in get_vm_block()
54 vm->size -= size; in get_vm_block()
65 list_add(&block->list, &vm->used); in get_vm_block()
[all …]
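get_vm_block() is a first-fit allocator: walk the vm->unused list, take the first entry of at least @size, and move it to vm->used (the elided part splits larger entries). A minimal user-space sketch of the first-fit step, with splitting and locking omitted:

```c
#include <stddef.h>
#include <stdio.h>

struct block {
	unsigned int addr, size;
	struct block *next;
};

/* First fit: unlink and return the first free block with size >= want. */
static struct block *get_block(struct block **free_list, unsigned int want)
{
	struct block **pp, *b;

	for (pp = free_list; (b = *pp) != NULL; pp = &b->next) {
		if (b->size >= want) {
			*pp = b->next;	/* take it off the free list */
			b->next = NULL;
			return b;	/* caller puts it on a used list */
		}
	}
	return NULL;			/* nothing large enough */
}

int main(void)
{
	struct block b2 = { 0x2000, 64, NULL };
	struct block b1 = { 0x1000, 16, &b2 };
	struct block *free_list = &b1;

	struct block *got = get_block(&free_list, 32);
	if (got)
		printf("got block at 0x%x size %u\n", got->addr, got->size);
	return 0;
}
```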
/Linux-v5.10/drivers/gpu/drm/i915/gt/
gen8_ppgtt.c
55 struct drm_i915_private *i915 = ppgtt->vm.i915; in gen8_ppgtt_notify_vgt()
56 struct intel_uncore *uncore = ppgtt->vm.gt->uncore; in gen8_ppgtt_notify_vgt()
67 if (i915_vm_is_4lvl(&ppgtt->vm)) { in gen8_ppgtt_notify_vgt()
146 gen8_pd_top_count(const struct i915_address_space *vm) in gen8_pd_top_count() argument
148 unsigned int shift = __gen8_pte_shift(vm->top); in gen8_pd_top_count()
149 return (vm->total + (1ull << shift) - 1) >> shift; in gen8_pd_top_count()
153 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) in gen8_pdp_for_page_index() argument
155 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); in gen8_pdp_for_page_index()
157 if (vm->top == 2) in gen8_pdp_for_page_index()
160 return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top)); in gen8_pdp_for_page_index()
[all …]
intel_ggtt.c
44 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
46 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
48 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
51 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
54 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
60 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
112 mutex_lock(&ggtt->vm.mutex); in i915_ggtt_suspend()
115 open = atomic_xchg(&ggtt->vm.open, 0); in i915_ggtt_suspend()
117 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { in i915_ggtt_suspend()
130 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_ggtt_suspend()
[all …]
intel_gtt.c
14 struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz) in alloc_pt_dma() argument
16 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) in alloc_pt_dma()
17 i915_gem_shrink_all(vm->i915); in alloc_pt_dma()
19 return i915_gem_object_create_internal(vm->i915, sz); in alloc_pt_dma()
22 int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) in pin_pt_dma() argument
34 void __i915_vm_close(struct i915_address_space *vm) in __i915_vm_close() argument
38 if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex)) in __i915_vm_close()
41 list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { in __i915_vm_close()
54 GEM_BUG_ON(!list_empty(&vm->bound_list)); in __i915_vm_close()
56 mutex_unlock(&vm->mutex); in __i915_vm_close()
[all …]
intel_gtt.h
61 #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
192 void (*bind_vma)(struct i915_address_space *vm,
201 void (*unbind_vma)(struct i915_address_space *vm,
233 * Since the vm may be shared between multiple contexts, we count how
261 (*alloc_pt_dma)(struct i915_address_space *vm, int sz);
268 void (*allocate_va_range)(struct i915_address_space *vm,
271 void (*clear_range)(struct i915_address_space *vm,
273 void (*insert_page)(struct i915_address_space *vm,
278 void (*insert_entries)(struct i915_address_space *vm,
282 void (*cleanup)(struct i915_address_space *vm);
[all …]
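i915_address_space is a classic C vtable: each backend (GGTT, gen6/gen8 ppGTT, the mock GTT in the selftests above) fills in the same function pointers and callers dispatch through them. A stripped-down sketch of the pattern; the struct and names are abbreviated stand-ins, not the real i915 definitions:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Hypothetical miniature of the vm ops table. */
struct address_space {
	u64 total;
	void (*clear_range)(struct address_space *vm, u64 start, u64 length);
	void (*cleanup)(struct address_space *vm);
};

static void mock_clear_range(struct address_space *vm, u64 start, u64 length)
{
	printf("clear [%llu, %llu)\n", (unsigned long long)start,
	       (unsigned long long)(start + length));
}

static void mock_cleanup(struct address_space *vm)
{
	(void)vm;	/* nothing to free in the mock */
}

int main(void)
{
	struct address_space vm = {
		.total = 1ULL << 32,
		.clear_range = mock_clear_range,
		.cleanup = mock_cleanup,
	};

	vm.clear_range(&vm, 0, vm.total);	/* dispatch via the table */
	vm.cleanup(&vm);
	return 0;
}
```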
gen6_ppgtt.c
19 dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]); in gen6_write_pde()
79 static void gen6_ppgtt_clear_range(struct i915_address_space *vm, in gen6_ppgtt_clear_range() argument
82 struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); in gen6_ppgtt_clear_range()
84 const gen6_pte_t scratch_pte = vm->scratch[0]->encode; in gen6_ppgtt_clear_range()
116 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, in gen6_ppgtt_insert_entries() argument
121 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); in gen6_ppgtt_insert_entries()
126 const u32 pte_encode = vm->pte_encode(0, cache_level, flags); in gen6_ppgtt_insert_entries()
174 gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt); in gen6_flush_pd()
180 static void gen6_alloc_va_range(struct i915_address_space *vm, in gen6_alloc_va_range() argument
184 struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); in gen6_alloc_va_range()
[all …]
intel_ppgtt.c
13 struct i915_page_table *alloc_pt(struct i915_address_space *vm) in alloc_pt() argument
21 pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); in alloc_pt()
49 struct i915_page_directory *alloc_pd(struct i915_address_space *vm) in alloc_pd() argument
57 pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); in alloc_pd()
67 void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl) in free_px() argument
173 trace_i915_ppgtt_create(&ppgtt->vm); in i915_ppgtt_create()
178 void ppgtt_bind_vma(struct i915_address_space *vm, in ppgtt_bind_vma() argument
187 vm->allocate_va_range(vm, stash, vma->node.start, vma->size); in ppgtt_bind_vma()
196 vm->insert_entries(vm, vma, cache_level, pte_flags); in ppgtt_bind_vma()
200 void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) in ppgtt_unbind_vma() argument
[all …]
/Linux-v5.10/tools/testing/selftests/kvm/lib/x86_64/
processor.c
212 void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot) in virt_pgd_alloc() argument
214 TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " in virt_pgd_alloc()
215 "unknown or unsupported guest mode, mode: 0x%x", vm->mode); in virt_pgd_alloc()
218 if (!vm->pgd_created) { in virt_pgd_alloc()
219 vm_paddr_t paddr = vm_phy_page_alloc(vm, in virt_pgd_alloc()
221 vm->pgd = paddr; in virt_pgd_alloc()
222 vm->pgd_created = true; in virt_pgd_alloc()
226 void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_pg_map() argument
232 TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " in virt_pg_map()
233 "unknown or unsupported guest mode, mode: 0x%x", vm->mode); in virt_pg_map()
[all …]
vmx.c
46 int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id) in vcpu_enable_evmcs() argument
55 vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap); in vcpu_enable_evmcs()
69 * vm - The VM to allocate guest-virtual addresses in.
78 vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) in vcpu_alloc_vmx() argument
80 vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0); in vcpu_alloc_vmx()
81 struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva); in vcpu_alloc_vmx()
84 vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0); in vcpu_alloc_vmx()
85 vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon); in vcpu_alloc_vmx()
86 vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon); in vcpu_alloc_vmx()
89 vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0); in vcpu_alloc_vmx()
[all …]
/Linux-v5.10/tools/testing/selftests/kvm/x86_64/
vmx_set_nested_state_test.c
30 void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state) in test_nested_state() argument
32 vcpu_nested_state_set(vm, VCPU_ID, state, false); in test_nested_state()
35 void test_nested_state_expect_errno(struct kvm_vm *vm, in test_nested_state_expect_errno() argument
41 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true); in test_nested_state_expect_errno()
48 void test_nested_state_expect_einval(struct kvm_vm *vm, in test_nested_state_expect_einval() argument
51 test_nested_state_expect_errno(vm, state, EINVAL); in test_nested_state_expect_einval()
54 void test_nested_state_expect_efault(struct kvm_vm *vm, in test_nested_state_expect_efault() argument
57 test_nested_state_expect_errno(vm, state, EFAULT); in test_nested_state_expect_efault()
89 void test_vmx_nested_state(struct kvm_vm *vm) in test_vmx_nested_state() argument
99 test_nested_state_expect_einval(vm, state); in test_vmx_nested_state()
[all …]
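The elided body of test_nested_state_expect_errno() follows the usual selftest idiom: issue the ioctl with failure allowed, then assert on the return value and errno together. A hedged reconstruction of that shape (the assertion message is illustrative):

```c
/* Context assumed from the test above: VCPU_ID, vcpu_nested_state_set()
 * and TEST_ASSERT() come from the selftest harness headers. */
#include <errno.h>

void test_nested_state_expect_errno(struct kvm_vm *vm,
				    struct kvm_nested_state *state,
				    int expected_errno)
{
	int rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);

	TEST_ASSERT(rv == -1 && errno == expected_errno,
		    "Expected -1 with errno %d, got rv: %i errno: %i",
		    expected_errno, rv, errno);
}
```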
platform_info_test.c
38 static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable) in set_msr_platform_info_enabled() argument
45 vm_enable_cap(vm, &cap); in set_msr_platform_info_enabled()
48 static void test_msr_platform_info_enabled(struct kvm_vm *vm) in test_msr_platform_info_enabled() argument
50 struct kvm_run *run = vcpu_state(vm, VCPU_ID); in test_msr_platform_info_enabled()
53 set_msr_platform_info_enabled(vm, true); in test_msr_platform_info_enabled()
54 vcpu_run(vm, VCPU_ID); in test_msr_platform_info_enabled()
59 get_ucall(vm, VCPU_ID, &uc); in test_msr_platform_info_enabled()
68 static void test_msr_platform_info_disabled(struct kvm_vm *vm) in test_msr_platform_info_disabled() argument
70 struct kvm_run *run = vcpu_state(vm, VCPU_ID); in test_msr_platform_info_disabled()
72 set_msr_platform_info_enabled(vm, false); in test_msr_platform_info_disabled()
[all …]
/Linux-v5.10/drivers/gpu/drm/imx/dcss/
dcss-ss.c
126 void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm, in dcss_ss_sync_set() argument
135 lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len + in dcss_ss_sync_set()
136 vm->hactive - 1; in dcss_ss_sync_set()
137 lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len + in dcss_ss_sync_set()
138 vm->vactive - 1; in dcss_ss_sync_set()
142 hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len + in dcss_ss_sync_set()
143 vm->hactive - 1; in dcss_ss_sync_set()
144 hsync_end = vm->hsync_len - 1; in dcss_ss_sync_set()
150 vsync_start = vm->vfront_porch - 1; in dcss_ss_sync_set()
151 vsync_end = vm->vfront_porch + vm->vsync_len - 1; in dcss_ss_sync_set()
[all …]
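The register values are derived from standard struct videomode fields, so the arithmetic can be checked in isolation. A sketch using a local mirror of the fields involved; the 1080p-style timings are an arbitrary example:

```c
#include <stdint.h>
#include <stdio.h>

/* Local mirror of the struct videomode fields used above. */
struct vmode {
	unsigned long pixelclock;	/* Hz */
	uint32_t hactive, hfront_porch, hback_porch, hsync_len;
	uint32_t vactive, vfront_porch, vback_porch, vsync_len;
};

int main(void)
{
	struct vmode vm = {
		.pixelclock = 148500000,
		.hactive = 1920, .hfront_porch = 88, .hback_porch = 148, .hsync_len = 44,
		.vactive = 1080, .vfront_porch = 4,  .vback_porch = 36,  .vsync_len = 5,
	};

	uint32_t htotal = vm.hactive + vm.hfront_porch + vm.hback_porch + vm.hsync_len;
	uint32_t vtotal = vm.vactive + vm.vfront_porch + vm.vback_porch + vm.vsync_len;

	/* lrc_x/lrc_y above are the last pixel/line, i.e. total - 1. */
	printf("lrc_x=%u lrc_y=%u refresh=%lu Hz\n",
	       htotal - 1, vtotal - 1,
	       vm.pixelclock / (htotal * (unsigned long)vtotal));
	return 0;
}
```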
/Linux-v5.10/Documentation/virt/
ne_overview.rst
14 For example, an application that processes sensitive data and runs in a VM,
15 can be separated from other applications running in the same VM. This
16 application then runs in a separate VM than the primary VM, namely an enclave.
18 An enclave runs alongside the VM that spawned it. This setup matches low latency
20 memory and CPUs, are carved out of the primary VM. Each enclave is mapped to a
21 process running in the primary VM, that communicates with the NE driver via an
27 VM guest that uses the provided ioctl interface of the NE driver to spawn an
28 enclave VM (that's 2 below).
30 There is a NE emulated PCI device exposed to the primary VM. The driver for this
36 hypervisor running on the host where the primary VM is running. The Nitro
[all …]
/Linux-v5.10/arch/x86/include/asm/
vmxfeatures.h
16 /* Pin-Based VM-Execution Controls, EPT/VPID, APIC and VM-Functions, word 0 */
17 #define VMX_FEATURE_INTR_EXITING ( 0*32+ 0) /* "" VM-Exit on vectored interrupts */
18 #define VMX_FEATURE_NMI_EXITING ( 0*32+ 3) /* "" VM-Exit on NMIs */
33 /* VM-Functions, shifted to bits 28-31 */
36 /* Primary Processor-Based VM-Execution Controls, word 1 */
37 #define VMX_FEATURE_INTR_WINDOW_EXITING ( 1*32+ 2) /* "" VM-Exit if INTRs are unblocked in guest */
39 #define VMX_FEATURE_HLT_EXITING ( 1*32+ 7) /* "" VM-Exit on HLT */
40 #define VMX_FEATURE_INVLPG_EXITING ( 1*32+ 9) /* "" VM-Exit on INVLPG */
41 #define VMX_FEATURE_MWAIT_EXITING ( 1*32+ 10) /* "" VM-Exit on MWAIT */
42 #define VMX_FEATURE_RDPMC_EXITING ( 1*32+ 11) /* "" VM-Exit on RDPMC */
[all …]
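Each feature define packs its capability-word index and bit position as word*32 + bit, so the value indexes a flat bitmap and decodes with a division and a remainder. A standalone sketch mirroring two of the defines above:

```c
#include <stdio.h>

/* Mirrors the encoding above: (word * 32 + bit). */
#define VMX_FEATURE_NMI_EXITING		(0 * 32 + 3)
#define VMX_FEATURE_HLT_EXITING		(1 * 32 + 7)

static void decode(int feature)
{
	printf("feature %3d -> word %d, bit %2d\n",
	       feature, feature / 32, feature % 32);
}

int main(void)
{
	decode(VMX_FEATURE_NMI_EXITING);	/* word 0, bit 3 */
	decode(VMX_FEATURE_HLT_EXITING);	/* word 1, bit 7 */
	return 0;
}
```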
/Linux-v5.10/drivers/video/fbdev/omap2/omapfb/dss/
display.c
259 void videomode_to_omap_video_timings(const struct videomode *vm, in videomode_to_omap_video_timings() argument
264 ovt->pixelclock = vm->pixelclock; in videomode_to_omap_video_timings()
265 ovt->x_res = vm->hactive; in videomode_to_omap_video_timings()
266 ovt->hbp = vm->hback_porch; in videomode_to_omap_video_timings()
267 ovt->hfp = vm->hfront_porch; in videomode_to_omap_video_timings()
268 ovt->hsw = vm->hsync_len; in videomode_to_omap_video_timings()
269 ovt->y_res = vm->vactive; in videomode_to_omap_video_timings()
270 ovt->vbp = vm->vback_porch; in videomode_to_omap_video_timings()
271 ovt->vfp = vm->vfront_porch; in videomode_to_omap_video_timings()
272 ovt->vsw = vm->vsync_len; in videomode_to_omap_video_timings()
[all …]
