/Linux-v5.15/tools/testing/selftests/kvm/lib/aarch64/ |
D | processor.c |
    19  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
    21          return (v + vm->page_size) & ~(vm->page_size - 1);
    24  static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
    26          unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
    27          uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
    32  static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
    34          unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
    35          uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
    37          TEST_ASSERT(vm->pgtable_levels == 4,
    38                  "Mode %d does not have 4 page table levels", vm->mode);
    [all …]
|
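The pgd_index()/pud_index() helpers above are the usual radix-walk arithmetic: every level below the top resolves page_shift - 3 bits (a page worth of 8-byte descriptors), and the top level covers whatever VA bits remain. A minimal userspace sketch of the same math, assuming 4K pages, 48 VA bits and 4 translation levels; struct fake_vm is a stand-in for the selftest's struct kvm_vm:

    /* Sketch of the selftest's top-level index computation, under the
     * assumptions named above. */
    #include <stdint.h>
    #include <stdio.h>

    struct fake_vm {
            unsigned int page_shift;        /* log2(page size) */
            unsigned int pgtable_levels;
            unsigned int va_bits;
    };

    static uint64_t pgd_index(const struct fake_vm *vm, uint64_t gva)
    {
            /* Each lower level resolves (page_shift - 3) bits. */
            unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3)
                                 + vm->page_shift;
            uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

            return (gva >> shift) & mask;
    }

    int main(void)
    {
            struct fake_vm vm = {
                    .page_shift = 12, .pgtable_levels = 4, .va_bits = 48,
            };

            /* shift = 3 * 9 + 12 = 39: the top level indexes VA bits [47:39],
             * so an address with only bit 47 set lands in entry 256. */
            printf("pgd_index = %llu\n",
                   (unsigned long long)pgd_index(&vm, 0x0000800000000000ULL));
            return 0;
    }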
/Linux-v5.15/drivers/virtio/ |
D | virtio_mem.c |
   262  static void virtio_mem_retry(struct virtio_mem *vm);
   268  static int register_virtio_mem_device(struct virtio_mem *vm)
   277          list_add_rcu(&vm->next, &virtio_mem_devices);
   287  static void unregister_virtio_mem_device(struct virtio_mem *vm)
   291          list_del_rcu(&vm->next);
   318  static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
   321          return addr / vm->bbm.bb_size;
   327  static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
   330          return bb_id * vm->bbm.bb_size;
   336  static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
   [all …]
|
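virtio_mem_phys_to_bb_id() and virtio_mem_bb_id_to_phys() are plain integer division and multiplication by the big-block size, so ids and block start addresses round-trip exactly. A small sketch with a hypothetical 2 GiB big-block size (the real driver computes bb_size at probe time, so the constant here is only an assumption for illustration):

    #include <assert.h>
    #include <stdint.h>

    static const uint64_t bb_size = 2ULL << 30;   /* assumed, not from the driver */

    static unsigned long phys_to_bb_id(uint64_t addr) { return addr / bb_size; }
    static uint64_t bb_id_to_phys(unsigned long id)   { return id * bb_size; }

    int main(void)
    {
            /* Any address inside a big block maps to that block's id... */
            assert(phys_to_bb_id(bb_size + 4096) == 1);
            /* ...and the id maps back to the block's start address. */
            assert(bb_id_to_phys(1) == bb_size);
            return 0;
    }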
/Linux-v5.15/tools/testing/selftests/kvm/include/ |
D | kvm_util.h |
    24   * structure kvm_util is using to maintain the state of a VM.
    85  int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
    86  int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
    88  void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
    95  void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
    96  void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
    98  uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm);
   100  int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
   103  void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
   105  void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
   [all …]
|
/Linux-v5.15/tools/testing/selftests/kvm/lib/ |
D | kvm_util.c |
    93  /* VM Enable Capability
    96   *   vm - Virtual Machine
   103   * Enables a capability (KVM_CAP_*) on the VM.
   105  int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
   109          ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
   119   *   vm - Virtual Machine
   129  int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
   132          struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
   144  void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
   150          vm_enable_cap(vm, &cap);
   [all …]
|
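vm_enable_cap() is a thin wrapper around the KVM_ENABLE_CAP ioctl on the VM fd, and vm_enable_dirty_ring() plausibly builds its struct kvm_enable_cap along these lines. A hedged sketch, assuming the selftest's kvm_util.h for struct kvm_vm and vm_enable_cap(); error handling is left to the wrapper:

    #include <stdint.h>
    #include <linux/kvm.h>
    #include "kvm_util.h"        /* assumed: the selftest library header */

    static void enable_dirty_ring_sketch(struct kvm_vm *vm, uint32_t ring_size)
    {
            struct kvm_enable_cap cap = { 0 };

            cap.cap = KVM_CAP_DIRTY_LOG_RING;   /* needs a kernel with dirty-ring support */
            cap.args[0] = ring_size;            /* ring size in bytes, per KVM's API docs */
            vm_enable_cap(vm, &cap);            /* wraps ioctl(vm->fd, KVM_ENABLE_CAP, &cap) */
    }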
/Linux-v5.15/tools/testing/selftests/kvm/lib/s390x/ |
D | processor.c |
    14  void virt_pgd_alloc(struct kvm_vm *vm)
    18          TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
    19                      vm->page_size);
    21          if (vm->pgd_created)
    24          paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
    26          memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
    28          vm->pgd = paddr;
    29          vm->pgd_created = true;
    37  static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
    41          taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
    [all …]
|
/Linux-v5.15/drivers/gpu/drm/lima/ |
D | lima_vm.c |
    18          struct lima_vm *vm;
    35  static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
    43                  vm->bts[pbe].cpu[bte] = 0;
    47  static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
    52          if (!vm->bts[pbe].cpu) {
    57                  vm->bts[pbe].cpu = dma_alloc_wc(
    58                          vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
    59                          &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
    60                  if (!vm->bts[pbe].cpu)
    63                  pts = vm->bts[pbe].dma;
    [all …]
|
/Linux-v5.15/drivers/virt/acrn/ |
D | vm.c |
    21   * is written in the VM creation ioctl. Use the rwlock mechanism to protect it.
    25  struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
    33                          "Failed to create VM! Error: %d\n", ret);
    37          mutex_init(&vm->regions_mapping_lock);
    38          INIT_LIST_HEAD(&vm->ioreq_clients);
    39          spin_lock_init(&vm->ioreq_clients_lock);
    40          vm->vmid = vm_param->vmid;
    41          vm->vcpu_num = vm_param->vcpu_num;
    43          if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) {
    45                  vm->vmid = ACRN_INVALID_VMID;
    [all …]
|
D | irqfd.c |
    24   * @vm: Associated VM pointer
    28   * @list: Entry within &acrn_vm.irqfds of irqfds of a VM
    33          struct acrn_vm *vm;
    44          struct acrn_vm *vm = irqfd->vm;
    46          acrn_msi_inject(vm, irqfd->msi.msi_addr,
    54          lockdep_assert_held(&irqfd->vm->irqfds_lock);
    66          struct acrn_vm *vm;
    69          vm = irqfd->vm;
    70          mutex_lock(&vm->irqfds_lock);
    73          mutex_unlock(&vm->irqfds_lock);
    [all …]
|
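The irqfd above is driven by eventfd(2): userspace registers an eventfd for an MSI, and each signal on it ends up in acrn_irqfd_inject(). The ACRN registration ioctl itself is not shown here; this standalone sketch only demonstrates the eventfd signal/consume cycle the mechanism is built on:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
            uint64_t val = 1, out;
            int fd = eventfd(0, 0);

            if (fd < 0)
                    return 1;
            write(fd, &val, sizeof(val));   /* what a VMM does to "kick" */
            read(fd, &out, sizeof(out));    /* what the kernel-side poll consumes */
            printf("eventfd fired %llu time(s)\n", (unsigned long long)out);
            close(fd);
            return 0;
    }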
D | acrn_drv.h |
    28   * @user_vm_pa: Physical address of User VM to be mapped.
    29   * @service_vm_pa: Physical address of Service VM to be mapped.
    33   * to manage the EPT mappings of a single memory region of the User VM. Several
    47   * @vmid: A User VM ID.
    53   * multiple memory regions of a User VM. A &struct vm_memory_region_batch
    65   * struct vm_memory_mapping - Memory map between a User VM and the Service VM
    66   * @pages: Pages in Service VM kernel.
    68   * @service_vm_va: Virtual address in Service VM kernel.
    69   * @user_vm_pa: Physical address in User VM.
    72   * HSM maintains memory mappings between a User VM GPA and the Service VM
    [all …]
|
D | mm.c |
    18  static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
    27          regions->vmid = vm->vmid;
    34                          "Failed to set memory region for VM[%u]!\n", vm->vmid);
    42   * @vm: User VM.
    43   * @user_gpa: A GPA of User VM.
    44   * @service_gpa: A GPA of Service VM.
    51  int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
    67          ret = modify_region(vm, region);
    78   * @vm: User VM.
    79   * @user_gpa: A GPA of the User VM.
    [all …]
|
D | ioeventfd.c |
    19   * @list: Entry within &acrn_vm.ioeventfds of ioeventfds of a VM
    43  static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)
    45          lockdep_assert_held(&vm->ioeventfds_lock);
    52  static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,
    57          lockdep_assert_held(&vm->ioeventfds_lock);
    60          list_for_each_entry(p, &vm->ioeventfds, list)
    72   * Assign an eventfd to a VM and create a HSM ioeventfd associated with the
    76  static int acrn_ioeventfd_assign(struct acrn_vm *vm,
   121          mutex_lock(&vm->ioeventfds_lock);
   123          if (hsm_ioeventfd_is_conflict(vm, p)) {
    [all …]
|
D | ioreq.c |
    39  static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
    52           * in which User VMs and Service VM are bound to dedicated CPU cores.
    64          ret = hcall_notify_req_finish(vm->vmid, vcpu);
    79          if (vcpu >= client->vm->vcpu_num)
    84          acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
    88          ret = ioreq_complete_request(client->vm, vcpu, acrn_req);
    93  int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
    97          spin_lock_bh(&vm->ioreq_clients_lock);
    98          if (vm->default_client)
    99                  ret = acrn_ioreq_complete_request(vm->default_client,
    [all …]
|
/Linux-v5.15/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_vm.c |
    50   * for the entire GPU, there are multiple VM page tables active
    51   * at any given time. The VM page tables can contain a mix
    55   * Each VM has an ID associated with it and there is a page table
    92   * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
    95   * @vm: amdgpu_vm pointer
    96   * @pasid: the pasid the VM is using on this GPU
    98   * Set the pasid this VM is using on this GPU, can also be used to remove the
   102  int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
   107          if (vm->pasid == pasid)
   110          if (vm->pasid) {
    [all …]
|
/Linux-v5.15/drivers/gpu/drm/radeon/ |
D | radeon_vm.c |
    37   * for the entire GPU, there are multiple VM page tables active
    38   * at any given time. The VM page tables can contain a mix
    42   * Each VM has an ID associated with it and there is a page table
    78   * radeon_vm_manager_init - init the vm manager
    82   * Init the vm manager (cayman+).
   100   * radeon_vm_manager_fini - tear down the vm manager
   104   * Tear down the VM manager (cayman+).
   120   * radeon_vm_get_bos - add the vm BOs to a validation list
   123   * @vm: vm providing the BOs
   130                            struct radeon_vm *vm,
    [all …]
|
/Linux-v5.15/drivers/gpu/drm/i915/selftests/ |
D | mock_gtt.c |
    27  static void mock_insert_page(struct i915_address_space *vm,
    35  static void mock_insert_entries(struct i915_address_space *vm,
    41  static void mock_bind_ppgtt(struct i915_address_space *vm,
    51  static void mock_unbind_ppgtt(struct i915_address_space *vm,
    56  static void mock_cleanup(struct i915_address_space *vm)
    60  static void mock_clear_range(struct i915_address_space *vm,
    73          ppgtt->vm.gt = &i915->gt;
    74          ppgtt->vm.i915 = i915;
    75          ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
    76          ppgtt->vm.dma = i915->drm.dev;
    [all …]
|
/Linux-v5.15/drivers/gpu/drm/i915/gt/ |
D | intel_ggtt.c |
    47          struct drm_i915_private *i915 = ggtt->vm.i915;
    49          i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
    51          ggtt->vm.is_ggtt = true;
    54          ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
    57                  ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
    63          ggtt->vm.cleanup(&ggtt->vm);
   124          mutex_lock(&ggtt->vm.mutex);
   127          open = atomic_xchg(&ggtt->vm.open, 0);
   129          list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
   142          ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
    [all …]
|
D | gen8_ppgtt.c |
    60          struct drm_i915_private *i915 = ppgtt->vm.i915;
    61          struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
    72          if (i915_vm_is_4lvl(&ppgtt->vm)) {
   150  static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
   152          unsigned int shift = __gen8_pte_shift(vm->top);
   154          return (vm->total + (1ull << shift) - 1) >> shift;
   158  gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
   160          struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
   162          if (vm->top == 2)
   165          return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
    [all …]
|
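gen8_pd_top_count() rounds vm->total up to a whole number of top-level descriptors. A worked example, assuming __gen8_pte_shift(lvl) evaluates to 12 + 9 * lvl (4K pages, 512 entries per level), which is the gen8 layout the surrounding code implies:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int pd_top_count(uint64_t total, unsigned int top)
    {
            unsigned int shift = 12 + 9 * top;  /* assumed shift helper */

            /* Round total up to a whole number of top-level entries. */
            return (total + (1ull << shift) - 1) >> shift;
    }

    int main(void)
    {
            /* 4-level (top == 3), 48-bit address space: 2^48 >> 39 = 512. */
            printf("%u\n", pd_top_count(1ull << 48, 3));
            /* 3-level (top == 2), 32-bit address space: 2^32 >> 30 = 4. */
            printf("%u\n", pd_top_count(1ull << 32, 2));
            return 0;
    }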
D | intel_gtt.c |
    15  struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
    31          obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz, 0);
    33           * Ensure all paging structures for this vm share the same dma-resv
    38                  obj->base.resv = i915_vm_resv_get(vm);
    39                  obj->shares_resv_from = vm;
    45  struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
    49          if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
    50                  i915_gem_shrink_all(vm->i915);
    52          obj = i915_gem_object_create_internal(vm->i915, sz);
    54           * Ensure all paging structures for this vm share the same dma-resv
    [all …]
|
D | intel_gtt.h |
    61  #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
   198          void (*bind_vma)(struct i915_address_space *vm,
   207          void (*unbind_vma)(struct i915_address_space *vm,
   229           * Since the vm may be shared between multiple contexts, we count how
   264                  (*alloc_pt_dma)(struct i915_address_space *vm, int sz);
   272          void (*allocate_va_range)(struct i915_address_space *vm,
   275          void (*clear_range)(struct i915_address_space *vm,
   277          void (*insert_page)(struct i915_address_space *vm,
   282          void (*insert_entries)(struct i915_address_space *vm,
   286          void (*cleanup)(struct i915_address_space *vm);
    [all …]
|
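intel_gtt.h shows that an i915_address_space is parameterised by a table of function pointers, and mock_gtt.c above fills the same slots with no-ops for selftests. A minimal standalone sketch of the idiom, with hypothetical names:

    #include <stdio.h>

    struct addr_space;

    struct addr_space {
            void (*clear_range)(struct addr_space *vm,
                                unsigned long start, unsigned long length);
    };

    static void noop_clear_range(struct addr_space *vm,
                                 unsigned long start, unsigned long length)
    {
            /* A mock backend, like mock_clear_range(), does nothing. */
            (void)vm; (void)start; (void)length;
    }

    int main(void)
    {
            struct addr_space vm = { .clear_range = noop_clear_range };

            vm.clear_range(&vm, 0, 4096);   /* dispatches through the table */
            printf("dispatched through ops table\n");
            return 0;
    }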
/Linux-v5.15/sound/pci/ctxfi/ |
D | ctvmem.c |
    26   * Find or create vm block based on requested @size.
    30  get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
    36          if (size > vm->size) {
    42          mutex_lock(&vm->lock);
    43          list_for_each(pos, &vm->unused) {
    48          if (pos == &vm->unused)
    52                  /* Move the vm node from unused list to used list directly */
    53                  list_move(&entry->list, &vm->used);
    54                  vm->size -= size;
    65          list_add(&block->list, &vm->used);
    [all …]
|
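get_vm_block() is a first-fit allocator: it walks vm->unused for the first block at least @size bytes long, moves an exact fit straight onto vm->used, and otherwise splits the block. A self-contained sketch of that policy, using plain arrays in place of the driver's list_head bookkeeping:

    #include <stddef.h>
    #include <stdio.h>

    struct block { unsigned int addr, size; };

    /* Returns the start address of the carved-out range, or -1 on failure. */
    static long first_fit(struct block *free_list, size_t n, unsigned int size)
    {
            for (size_t i = 0; i < n; i++) {
                    if (free_list[i].size < size)
                            continue;

                    unsigned int addr = free_list[i].addr;

                    free_list[i].addr += size;  /* split: shrink the remainder */
                    free_list[i].size -= size;  /* an exact fit leaves size 0 */
                    return addr;
            }
            return -1;
    }

    int main(void)
    {
            struct block free_list[] = { { 0, 64 }, { 128, 256 } };

            printf("%ld\n", first_fit(free_list, 2, 100)); /* 128: block 0 too small */
            printf("%ld\n", first_fit(free_list, 2, 64));  /* 0: exact fit of block 0 */
            return 0;
    }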
/Linux-v5.15/tools/testing/selftests/kvm/x86_64/ |
D | set_boot_cpu_id.c |
    41  static void test_set_boot_busy(struct kvm_vm *vm)
    45          res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID0);
    47                      "KVM_SET_BOOT_CPU_ID set while running vm");
    50  static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
    57                  vcpu_run(vm, vcpuid);
    59                  switch (get_ucall(vm, vcpuid, &uc)) {
    65                          test_set_boot_busy(vm);
    78                                    exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
    85          struct kvm_vm *vm;
    91          vm = vm_create(VM_MODE_DEFAULT, pages, O_RDWR);
    [all …]
|
D | userspace_msr_exit_test.c |
   398  static void run_guest(struct kvm_vm *vm)
   402          rc = _vcpu_run(vm, VCPU_ID);
   406  static void check_for_guest_assert(struct kvm_vm *vm)
   408          struct kvm_run *run = vcpu_state(vm, VCPU_ID);
   412              get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
   418  static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
   420          struct kvm_run *run = vcpu_state(vm, VCPU_ID);
   422          check_for_guest_assert(vm);
   453  static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
   455          struct kvm_run *run = vcpu_state(vm, VCPU_ID);
    [all …]
|
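process_rdmsr() above exercises KVM's user-space MSR handling: a filtered RDMSR exits to the VMM with KVM_EXIT_X86_RDMSR, and the VMM answers through run->msr before re-entering the guest. A hedged sketch of that userspace side; the MSR index below is a hypothetical test value, not one taken from the test:

    #include <linux/kvm.h>

    static void handle_rdmsr_exit(struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_X86_RDMSR)
                    return;

            switch (run->msr.index) {
            case 0xdeadbeef:                /* hypothetical test MSR */
                    run->msr.data = 0;      /* value the guest's RDMSR returns */
                    break;
            default:
                    run->msr.error = 1;     /* injects #GP into the guest */
                    break;
            }
    }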
D | emulator_error_test.c |
    31  static void run_guest(struct kvm_vm *vm)
    35          rc = _vcpu_run(vm, VCPU_ID);
    60  static void process_exit_on_emulation_error(struct kvm_vm *vm)
    62          struct kvm_run *run = vcpu_state(vm, VCPU_ID);
    96                  vcpu_regs_get(vm, VCPU_ID, &regs);
    98                  vcpu_regs_set(vm, VCPU_ID, &regs);
   103  static void do_guest_assert(struct kvm_vm *vm, struct ucall *uc)
   109  static void check_for_guest_assert(struct kvm_vm *vm)
   111          struct kvm_run *run = vcpu_state(vm, VCPU_ID);
   115              get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
    [all …]
|
/Linux-v5.15/tools/testing/selftests/kvm/lib/x86_64/ |
D | vmx.c |
    46  int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
    55          vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
    69   *   vm - The VM to allocate guest-virtual addresses in.
    78  vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
    80          vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
    81          struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
    84          vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
    85          vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
    86          vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
    89          vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
    [all …]
|
D | processor.c |
   177  void virt_pgd_alloc(struct kvm_vm *vm)
   179          TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
   180                      "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
   183          if (!vm->pgd_created) {
   184                  vm->pgd = vm_alloc_page_table(vm);
   185                  vm->pgd_created = true;
   189  static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
   192          uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);
   193          int index = vaddr >> (vm->page_shift + level * 9) & 0x1ffu;
   198  static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
    [all …]
|
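The index expression in virt_get_pte() is the standard x86_64 4-level walk: above the 12-bit page offset, each paging level consumes 9 bits of the virtual address (level 0 = PTE through level 3 = PML4 in the usual naming). A standalone illustration of the same computation:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int pte_index(uint64_t vaddr, unsigned int page_shift,
                                  unsigned int level)
    {
            /* Shift away the page offset plus 9 bits per lower level,
             * then keep the 9-bit index for this level. */
            return (vaddr >> (page_shift + level * 9)) & 0x1ffu;
    }

    int main(void)
    {
            uint64_t vaddr = 0x00007f1234567000ULL;

            for (unsigned int level = 0; level < 4; level++)
                    printf("level %u index: %u\n", level,
                           pte_index(vaddr, 12, level));
            return 0;
    }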