Lines matching refs: vm

66 	struct kvm_vm *vm;  member
149 #define kvm_for_each_vcpu(vm, i, vcpu) \ argument
150 for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
151 if (!((vcpu) = vm->vcpus[i])) \
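
A minimal usage sketch for the kvm_for_each_vcpu() iterator above, assuming a populated struct kvm_vm *vm from the selftests' kvm_util header:

	struct kvm_vcpu *vcpu;
	int i;

	/* Visits every index up to last_vcpu_id and skips NULL vm->vcpus[] slots. */
	kvm_for_each_vcpu(vm, i, vcpu)
		printf("vcpu index %d is present\n", i);
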
156 memslot2region(struct kvm_vm *vm, uint32_t memslot);
158 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, in vm_get_mem_region() argument
162 return memslot2region(vm, vm->memslots[type]); in vm_get_mem_region()
274 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } in static_assert_is_vm() argument
276 #define __vm_ioctl(vm, cmd, arg) \ argument
278 static_assert_is_vm(vm); \
279 kvm_do_ioctl((vm)->fd, cmd, arg); \
282 #define _vm_ioctl(vm, cmd, name, arg) \ argument
284 int ret = __vm_ioctl(vm, cmd, arg); \
289 #define vm_ioctl(vm, cmd, arg) \ argument
290 _vm_ioctl(vm, cmd, #cmd, arg)
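
The three wrappers layer on top of each other: __vm_ioctl() issues the ioctl on vm->fd and returns its result, _vm_ioctl() asserts success and reports the given name, and vm_ioctl() stringifies the command for that report. A hedged sketch of typical use (the capability and argument values are only examples):

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HALT_POLL,
		.args = { 100000 },
	};

	/* Asserting form: the test fails with "KVM_ENABLE_CAP" on error. */
	vm_ioctl(vm, KVM_ENABLE_CAP, &cap);

	/* Raw form: the ioctl's return value is handed back to the caller. */
	int supported = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_HALT_POLL);
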
315 static inline int vm_check_cap(struct kvm_vm *vm, long cap) in vm_check_cap() argument
317 int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap); in vm_check_cap()
323 static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in __vm_enable_cap() argument
327 return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); in __vm_enable_cap()
329 static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in vm_enable_cap() argument
333 vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); in vm_enable_cap()
336 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
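
vm_check_cap() and vm_enable_cap() are thin convenience wrappers around those ioctls for per-VM capabilities. A sketch; the dirty-ring size below is an arbitrary example value and its units are an assumption here:

	/* Returns the KVM_CHECK_EXTENSION result for this VM. */
	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING))
		vm_enable_dirty_ring(vm, 65536);	/* ring size, example value */
	else
		vm_enable_cap(vm, KVM_CAP_HALT_POLL, 100000);
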
342 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
344 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
347 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
349 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) in kvm_vm_get_dirty_log() argument
353 vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args); in kvm_vm_get_dirty_log()
356 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, in kvm_vm_clear_dirty_log() argument
366 vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args); in kvm_vm_clear_dirty_log()
369 static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) in kvm_vm_reset_dirty_ring() argument
371 return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL); in kvm_vm_reset_dirty_ring()
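
A sketch of harvesting dirty pages from a memslot; the slot number, page count, the bitmap_zalloc() helper, and the trailing first_page/num_pages arguments of kvm_vm_clear_dirty_log() are assumptions for illustration:

	unsigned long *bitmap = bitmap_zalloc(host_num_pages);

	/* One bit per page in the slot; KVM fills the caller-owned buffer. */
	kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT, bitmap);

	/* With KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 enabled, write-protect the
	 * harvested range again so that future guest writes are tracked. */
	kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT, bitmap, 0, host_num_pages);
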
374 static inline int vm_get_stats_fd(struct kvm_vm *vm) in vm_get_stats_fd() argument
376 int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL); in vm_get_stats_fd()
421 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
424 static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name) in vm_get_stat() argument
428 __vm_get_stat(vm, stat_name, &data, 1); in vm_get_stat()
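
vm_get_stat() reads one value through the VM's binary stats fd (KVM_GET_STATS_FD); the stat name below is an example of a generic VM stat:

	/* Reads a single u64 element of the named stat. */
	uint64_t flushes = vm_get_stat(vm, "remote_tlb_flush");
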
432 void vm_create_irqchip(struct kvm_vm *vm);
434 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
436 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
438 void vm_userspace_mem_region_add(struct kvm_vm *vm,
443 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
444 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
445 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
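
A sketch of adding and manipulating a userspace memory slot; the backing type, GPA, slot number and page count are illustrative, and the parameters of vm_userspace_mem_region_add() beyond vm are assumed from its full declaration:

	#define TEST_SLOT	10	/* example slot number */

	/* 512 x 4 KiB pages of anonymous memory at GPA 0x10000000 (assumed
	 * argument order: backing src type, guest paddr, slot, npages, flags). */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    0x10000000, TEST_SLOT, 512, 0);

	vm_mem_region_set_flags(vm, TEST_SLOT, KVM_MEM_LOG_DIRTY_PAGES);
	vm_mem_region_move(vm, TEST_SLOT, 0x20000000);
	vm_mem_region_delete(vm, TEST_SLOT);
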
446 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
447 void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
448 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
449 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
450 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
452 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
453 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
455 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
457 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
459 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
460 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
461 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
462 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
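
These helpers allocate guest virtual address space and translate between guest and host views of the same memory. A sketch that touches a fresh guest page from the host; vm->page_size and TEST_ASSERT() from test_util.h are assumed:

	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
	void *hva = addr_gva2hva(vm, gva);

	/* A host-side write becomes visible to the guest at gva. */
	memset(hva, 0xaa, vm->page_size);

	vm_paddr_t gpa = addr_gva2gpa(vm, gva);
	TEST_ASSERT(addr_gpa2hva(vm, gpa) == hva, "GPA and GVA should alias");
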
659 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
660 int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
662 static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type) in kvm_create_device() argument
664 int fd = __kvm_create_device(vm, type); in kvm_create_device()
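
kvm_create_device() asserts on failure and returns the new device fd, while the __-prefixed variants hand the error back to the caller. A sketch with an arbitrary device type; 0-on-success semantics for the probe variant are assumed:

	/* KVM_CREATE_DEVICE with the TEST flag only probes for support. */
	if (!__kvm_test_create_device(vm, KVM_DEV_TYPE_VFIO)) {
		int dev_fd = kvm_create_device(vm, KVM_DEV_TYPE_VFIO);

		close(dev_fd);
	}
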
691 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
692 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
699 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
700 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
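
A sketch of pulsing an interrupt line; the IRQ number is an arbitrary example. kvm_irq_line() asserts success, _kvm_irq_line() returns the ioctl result, and the GSI routing helpers behave the same way for a caller-built struct kvm_irq_routing table:

	kvm_irq_line(vm, 32, 1);	/* assert the line */
	kvm_irq_line(vm, 32, 0);	/* deassert it */
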
704 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
706 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
708 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
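
Physical allocation hands out guest-physical pages from a memslot. A sketch; the minimum address is an example and the trailing memslot argument is assumed from the full declaration:

	/* One page at or above 1 MiB, carved out of memslot 0. */
	vm_paddr_t data_gpa = vm_phy_page_alloc(vm, 0x100000, 0);

	/* Page-table pages come from the VM's dedicated page-table memslot. */
	vm_paddr_t pt_gpa = vm_alloc_page_table(vm);
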
756 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
763 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
780 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
783 #define sync_global_to_guest(vm, g) ({ \ argument
784 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
788 #define sync_global_from_guest(vm, g) ({ \ argument
789 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
799 #define write_guest_global(vm, g, val) ({ \ argument
800 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
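
These macros copy a global variable between the host's copy and the guest's copy of the same symbol, resolving the guest address via addr_gva2hva(). A sketch with a hypothetical shared global:

	static int iterations;	/* file-scope global linked into the guest image */

	iterations = 5;
	sync_global_to_guest(vm, iterations);	/* push the host value to the guest */

	/* run guest code that updates the variable, then pull it back */
	sync_global_from_guest(vm, iterations);

	write_guest_global(vm, iterations, 0);	/* overwrite only the guest copy */
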
825 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
828 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, in vm_vcpu_add() argument
831 return vm_arch_vcpu_add(vm, vcpu_id, guest_code); in vm_vcpu_add()
835 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
837 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm, in vm_vcpu_recreate() argument
840 return vm_arch_vcpu_recreate(vm, vcpu_id); in vm_vcpu_recreate()
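
vm_vcpu_add() wires a new vCPU to the given guest entry point through the arch hook. A sketch with a trivial guest function; GUEST_DONE() from the ucall helpers is assumed:

	static void guest_code(void)
	{
		GUEST_DONE();
	}

	struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 0, guest_code);
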
845 void virt_arch_pgd_alloc(struct kvm_vm *vm);
847 static inline void virt_pgd_alloc(struct kvm_vm *vm) in virt_pgd_alloc() argument
849 virt_arch_pgd_alloc(vm); in virt_pgd_alloc()
868 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
870 static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) in virt_pg_map() argument
872 virt_arch_pg_map(vm, vaddr, paddr); in virt_pg_map()
891 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
893 static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2gpa() argument
895 return addr_arch_gva2gpa(vm, gva); in addr_gva2gpa()
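
The virt_* hooks let a test build guest page tables by hand when vm_vaddr_alloc()/virt_map() are too coarse. A sketch; the addresses are examples, TEST_ASSERT() is from test_util.h, and the memslot argument of vm_phy_page_alloc() is assumed:

	virt_pgd_alloc(vm);				/* root page table, if not created yet */

	vm_paddr_t gpa = vm_phy_page_alloc(vm, 0x100000, 0);
	virt_pg_map(vm, 0x40000000, gpa);		/* map one guest page */

	TEST_ASSERT(addr_gva2gpa(vm, 0x40000000) == gpa,
		    "Translation should return the freshly mapped GPA");
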
913 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
915 static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in virt_dump() argument
917 virt_arch_dump(stream, vm, indent); in virt_dump()
921 static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm) in __vm_disable_nx_huge_pages() argument
923 return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0); in __vm_disable_nx_huge_pages()
933 void kvm_arch_vm_post_create(struct kvm_vm *vm);