/Linux-v6.1/arch/powerpc/include/asm/

kvm_book3s_uvmem.h
    9  int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
   10  void kvmppc_uvmem_slot_free(struct kvm *kvm,
   12  unsigned long kvmppc_h_svm_page_in(struct kvm *kvm,
   16  unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
   20  unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
   21  unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
   22  int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
   23  unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
   25  struct kvm *kvm, bool skip_page_out);
   26  int kvmppc_uvmem_memslot_create(struct kvm *kvm,
       [all …]

kvm_ppc.h
  159  extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
  160  extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
  162  extern void kvmppc_rmap_reset(struct kvm *kvm);
  166  extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
  168  extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
  170  extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
  171  extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
  172  extern void kvmppc_setup_partition_table(struct kvm *kvm);
  174  extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
  192  extern int kvmppc_core_init_vm(struct kvm *kvm);
       [all …]

/Linux-v6.1/include/linux/

kvm_host.h
  177  bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
  179  bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
  180  bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
  182  bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
  219  int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
  221  int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
  223  struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
  255  bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
  256  bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
  257  bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
       [all …]

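The kvm_host.h hits above are the generic cross-vCPU request API: a caller sets a request bit on the target vCPUs and kicks them out of guest mode so the request is serviced before the next guest entry. A minimal sketch of the calling pattern (the wrapper name is hypothetical; KVM_REQ_TLB_FLUSH is a real request bit):

```c
/*
 * Hedged sketch: ask every vCPU of a VM to flush its TLB.
 * kvm_make_all_cpus_request() sets KVM_REQ_TLB_FLUSH on each vCPU and
 * IPIs the ones currently running guest code; each vCPU then notices
 * the bit via kvm_check_request() before re-entering the guest.
 */
#include <linux/kvm_host.h>

static void example_flush_all_guest_tlbs(struct kvm *kvm)
{
	/* Return value (true if at least one vCPU was kicked) ignored here. */
	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
}
```

In-tree, kvm_flush_remote_tlbs() in virt/kvm/kvm_main.c, matched further down in this listing, is built around exactly this call.
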
/Linux-v6.1/virt/kvm/

eventfd.c
   36  kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) in kvm_arch_irqfd_allowed() argument
   46  struct kvm *kvm = irqfd->kvm; in irqfd_inject() local
   49  kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, in irqfd_inject()
   51  kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, in irqfd_inject()
   54  kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_inject()
   67  struct kvm *kvm; in irqfd_resampler_ack() local
   73  kvm = resampler->kvm; in irqfd_resampler_ack()
   75  kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_resampler_ack()
   78  idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_resampler_ack()
   81  srcu_read_lock_held(&kvm->irq_srcu)) in irqfd_resampler_ack()
       [all …]

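These eventfd.c hits are the kernel half of the irqfd mechanism: when the eventfd fires, irqfd_inject() pulses the routed GSI through kvm_set_irq(). The userspace half is the KVM_IRQFD ioctl; a hedged sketch follows (error handling reduced to returning -1, vm_fd assumed to be a VM file descriptor with an in-kernel irqchip already set up):

```c
/*
 * Hedged userspace sketch: attach an eventfd to a guest GSI so that
 * writing to the eventfd injects that interrupt (the kernel-side path
 * is irqfd_inject() -> kvm_set_irq() shown above).
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_wire_irqfd(int vm_fd, uint32_t gsi)
{
	struct kvm_irqfd irqfd;
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd = efd;
	irqfd.gsi = gsi;
	if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
		close(efd);
		return -1;
	}

	/* An 8-byte write to efd now injects GSI 'gsi' into the guest. */
	return efd;
}
```
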
kvm_mm.h
   14  #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) argument
   15  #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) argument
   16  #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) argument
   17  #define KVM_MMU_READ_LOCK(kvm) read_lock(&(kvm)->mmu_lock) argument
   18  #define KVM_MMU_READ_UNLOCK(kvm) read_unlock(&(kvm)->mmu_lock) argument
   20  #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) argument
   21  #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) argument
   22  #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) argument
   23  #define KVM_MMU_READ_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) argument
   24  #define KVM_MMU_READ_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) argument
       [all …]

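The two macro blocks above are the rwlock flavour of mmu_lock (lines 14-18, used when the architecture defines KVM_HAVE_MMU_RWLOCK) and the plain-spinlock fallback (lines 20-24), where the READ variants degrade to the exclusive spinlock. Generic KVM code only ever goes through the wrappers; a sketch of the usage pattern (the function body is illustrative, not from the tree):

```c
/*
 * Hedged sketch: generic MMU code takes kvm->mmu_lock through the
 * wrappers so the same source compiles whether mmu_lock is an rwlock
 * or a spinlock on the given architecture.
 */
static void example_update_mappings(struct kvm *kvm)
{
	KVM_MMU_LOCK(kvm);		/* write_lock() or spin_lock() */
	/* ... modify stage-2 / shadow page tables ... */
	KVM_MMU_UNLOCK(kvm);		/* write_unlock() or spin_unlock() */
}
```
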
kvm_main.c
  156  static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
  162  __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, in kvm_arch_mmu_notifier_invalidate_range() argument
  167  __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) in kvm_arch_guest_memory_reclaimed() argument
  300  bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, in kvm_make_vcpus_request_mask() argument
  314  vcpu = kvm_get_vcpu(kvm, i); in kvm_make_vcpus_request_mask()
  326  bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, in kvm_make_all_cpus_request_except() argument
  340  kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_all_cpus_request_except()
  352  bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) in kvm_make_all_cpus_request() argument
  354  return kvm_make_all_cpus_request_except(kvm, req, NULL); in kvm_make_all_cpus_request()
  359  void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
       [all …]

irqchip.c
   22  int kvm_irq_map_gsi(struct kvm *kvm, in kvm_irq_map_gsi() argument
   29  irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, in kvm_irq_map_gsi()
   30  lockdep_is_held(&kvm->irq_lock)); in kvm_irq_map_gsi()
   41  int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_irq_map_chip_pin() argument
   45  irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); in kvm_irq_map_chip_pin()
   49  int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi) in kvm_send_userspace_msi() argument
   53  if (!irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID)) in kvm_send_userspace_msi()
   62  return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false); in kvm_send_userspace_msi()
   71  int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, in kvm_set_irq() argument
   83  idx = srcu_read_lock(&kvm->irq_srcu); in kvm_set_irq()
       [all …]

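kvm_send_userspace_msi() above is the kernel-side handler for the KVM_SIGNAL_MSI ioctl: it builds a one-off routing entry and hands it to kvm_set_msi(). A hedged userspace sketch of triggering it; the address/data values are placeholders in x86 MSI format, not values to copy:

```c
/*
 * Hedged userspace sketch: inject one MSI directly, without first
 * installing a routing entry.  Real address/data values come from the
 * emulated device model; the ones below are placeholders.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_signal_msi(int vm_fd)
{
	struct kvm_msi msi;

	memset(&msi, 0, sizeof(msi));
	msi.address_lo = 0xfee00000;	/* placeholder: x86 MSI address, dest 0 */
	msi.address_hi = 0;
	msi.data       = 0x0030;	/* placeholder: vector 0x30, fixed delivery */

	/* Returns > 0 if delivered, 0 if the guest blocked it, -1 on error. */
	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
}
```
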
/Linux-v6.1/arch/x86/kvm/mmu/

tdp_mmu.h
   15  void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
   18  bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
   20  bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
   21  void kvm_tdp_mmu_zap_all(struct kvm *kvm);
   22  void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
   23  void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
   27  bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
   29  bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
   30  bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
   31  bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
       [all …]

tdp_mmu.c
   17  int kvm_mmu_init_tdp_mmu(struct kvm *kvm) in kvm_mmu_init_tdp_mmu() argument
   29  kvm->arch.tdp_mmu_enabled = true; in kvm_mmu_init_tdp_mmu()
   30  INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); in kvm_mmu_init_tdp_mmu()
   31  spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); in kvm_mmu_init_tdp_mmu()
   32  INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages); in kvm_mmu_init_tdp_mmu()
   33  kvm->arch.tdp_mmu_zap_wq = wq; in kvm_mmu_init_tdp_mmu()
   38  static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, in kvm_lockdep_assert_mmu_lock_held() argument
   42  lockdep_assert_held_read(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
   44  lockdep_assert_held_write(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
   49  void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) in kvm_mmu_uninit_tdp_mmu() argument
       [all …]

/Linux-v6.1/arch/x86/kvm/

irq_comm.c
   30  struct kvm *kvm, int irq_source_id, int level, in kvm_set_pic_irq() argument
   33  struct kvm_pic *pic = kvm->arch.vpic; in kvm_set_pic_irq()
   38  struct kvm *kvm, int irq_source_id, int level, in kvm_set_ioapic_irq() argument
   41  struct kvm_ioapic *ioapic = kvm->arch.vioapic; in kvm_set_ioapic_irq()
   46  int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, in kvm_irq_delivery_to_apic() argument
   54  if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map)) in kvm_irq_delivery_to_apic()
   65  kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_irq_delivery_to_apic()
   94  lowest = kvm_get_vcpu(kvm, idx); in kvm_irq_delivery_to_apic()
  103  void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, in kvm_set_msi_irq() argument
  110  trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ? in kvm_set_msi_irq()
       [all …]

irq.h
   25  struct kvm;
   53  struct kvm *kvm; member
   62  int kvm_pic_init(struct kvm *kvm);
   63  void kvm_pic_destroy(struct kvm *kvm);
   64  int kvm_pic_read_irq(struct kvm *kvm);
   67  static inline int irqchip_split(struct kvm *kvm) in irqchip_split() argument
   69  int mode = kvm->arch.irqchip_mode; in irqchip_split()
   76  static inline int irqchip_kernel(struct kvm *kvm) in irqchip_kernel() argument
   78  int mode = kvm->arch.irqchip_mode; in irqchip_kernel()
   85  static inline int pic_in_kernel(struct kvm *kvm) in pic_in_kernel() argument
       [all …]

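The irq.h helpers above (irqchip_split(), irqchip_kernel(), pic_in_kernel()) all inspect kvm->arch.irqchip_mode, which userspace sets in one of two ways on x86: KVM_CREATE_IRQCHIP for the full in-kernel PIC/IOAPIC/LAPIC, or KVM_CAP_SPLIT_IRQCHIP to keep only the local APIC in the kernel. A hedged sketch of both paths (function names are hypothetical; vm_fd is an existing VM descriptor):

```c
/*
 * Hedged userspace sketch: the two ioctls that determine what the
 * irqchip_split()/irqchip_kernel() helpers later report.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_full_irqchip(int vm_fd)
{
	return ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);
}

static int example_split_irqchip(int vm_fd, int nr_ioapic_pins)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_SPLIT_IRQCHIP;
	cap.args[0] = nr_ioapic_pins;	/* routes reserved for the userspace IOAPIC */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
```
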
xen.h
   21  int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
   22  int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
   23  int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
   25  int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
   26  void kvm_xen_init_vm(struct kvm *kvm);
   27  void kvm_xen_destroy_vm(struct kvm *kvm);
   31  struct kvm *kvm);
   32  int kvm_xen_setup_evtchn(struct kvm *kvm,
   36  static inline bool kvm_xen_msr_enabled(struct kvm *kvm) in kvm_xen_msr_enabled() argument
   39  kvm->arch.xen_hvm_config.msr; in kvm_xen_msr_enabled()
       [all …]

/Linux-v6.1/arch/powerpc/kvm/

book3s_hv_uvmem.c
  233  struct kvm *kvm; member
  248  int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) in kvmppc_uvmem_slot_init() argument
  263  mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
  264  list_add(&p->list, &kvm->arch.uvmem_pfns); in kvmppc_uvmem_slot_init()
  265  mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
  273  void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) in kvmppc_uvmem_slot_free() argument
  277  mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
  278  list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) { in kvmppc_uvmem_slot_free()
  286  mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
  289  static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, in kvmppc_mark_gfn() argument
       [all …]

Makefile
    6  ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
    8  include $(srctree)/virt/kvm/Makefile.kvm
   16  kvm-e500-objs := \
   26  kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
   28  kvm-e500mc-objs := \
   38  kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
   40  kvm-pr-y := \
   53  kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
   58  kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
   62  kvm-hv-y += \
       [all …]

book3s_64_mmu_radix.c
   98  int lpid = vcpu->kvm->arch.lpid; in kvmhv_copy_tofrom_guest_radix()
  140  struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_walk_radix_tree() local
  174  ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte)); in kvmppc_mmu_walk_radix_tree()
  236  struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_radix_translate_table() local
  252  ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry)); in kvmppc_mmu_radix_translate_table()
  283  vcpu->kvm->arch.process_table, pid, &pte); in kvmppc_mmu_radix_xlate()
  309  void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, in kvmppc_radix_tlbie_page() argument
  347  static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) in kvmppc_radix_flush_pwc() argument
  368  static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, in kvmppc_radix_update_pte() argument
  375  static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr, in kvmppc_radix_set_pte_at() argument
       [all …]

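The radix walker above fetches guest page-table entries from guest-physical memory with kvm_read_guest(). A hedged sketch of that access pattern in isolation (helper name is illustrative; the big-endian conversion mirrors what the Book3S walker does with the value it reads):

```c
/*
 * Hedged sketch: read one 64-bit guest PTE at a guest-physical address.
 * kvm_read_guest() returns 0 on success and a negative error otherwise;
 * guest radix tables on Book3S are big-endian, hence the conversion.
 */
#include <linux/kvm_host.h>

static int example_read_guest_pte(struct kvm *kvm, gpa_t gpa, u64 *pte_out)
{
	__be64 rpte;
	int ret;

	ret = kvm_read_guest(kvm, gpa, &rpte, sizeof(rpte));
	if (ret)
		return ret;

	*pte_out = be64_to_cpu(rpte);
	return 0;
}
```
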
book3s_64_mmu_hv.c
   46  static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
   52  struct kvm *kvm; member
  117  void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info) in kvmppc_set_hpt() argument
  119  atomic64_set(&kvm->arch.mmio_update, 0); in kvmppc_set_hpt()
  120  kvm->arch.hpt = *info; in kvmppc_set_hpt()
  121  kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18); in kvmppc_set_hpt()
  124  info->virt, (long)info->order, kvm->arch.lpid); in kvmppc_set_hpt()
  127  long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) in kvmppc_alloc_reset_hpt() argument
  132  mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_alloc_reset_hpt()
  133  if (kvm->arch.mmu_ready) { in kvmppc_alloc_reset_hpt()
       [all …]

/Linux-v6.1/arch/s390/kvm/

kvm-s390.c
  265  static int sca_switch_to_extended(struct kvm *kvm);
  298  struct kvm *kvm; in kvm_clock_sync() local
  303  list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_clock_sync()
  304  kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
  307  kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
  308  kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
  545  int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
  587  if (hpage && !kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_check_extension()
  654  void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
  659  struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
       [all …]

pv.c
   21  static void kvm_s390_clear_pv_state(struct kvm *kvm) in kvm_s390_clear_pv_state() argument
   23  kvm->arch.pv.handle = 0; in kvm_s390_clear_pv_state()
   24  kvm->arch.pv.guest_len = 0; in kvm_s390_clear_pv_state()
   25  kvm->arch.pv.stor_base = 0; in kvm_s390_clear_pv_state()
   26  kvm->arch.pv.stor_var = NULL; in kvm_s390_clear_pv_state()
   38  KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x", in kvm_s390_pv_destroy_cpu()
   80  uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm); in kvm_s390_pv_create_cpu()
   96  KVM_UV_EVENT(vcpu->kvm, 3, in kvm_s390_pv_create_cpu()
  111  vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm); in kvm_s390_pv_create_cpu()
  118  static void kvm_s390_pv_dealloc_vm(struct kvm *kvm) in kvm_s390_pv_dealloc_vm() argument
       [all …]

kvm-s390.h
   54  debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
   82  return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask); in is_vcpu_idle()
   85  static inline int kvm_is_ucontrol(struct kvm *kvm) in kvm_is_ucontrol() argument
   88  if (kvm->arch.gmap) in kvm_is_ucontrol()
  182  static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) in test_kvm_facility() argument
  184  return __test_facility(nr, kvm->arch.model.fac_mask) && in test_kvm_facility()
  185  __test_facility(nr, kvm->arch.model.fac_list); in test_kvm_facility()
  199  static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr) in test_kvm_cpu_feat() argument
  202  return test_bit_inv(nr, kvm->arch.cpu_feat); in test_kvm_cpu_feat()
  206  static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) in kvm_s390_user_cpu_state_ctrl() argument
       [all …]

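test_kvm_facility() above answers "may this guest use facility nr?" by requiring the bit in both the host facility mask and the configured CPU model. A hedged sketch of a typical caller inside arch/s390/kvm (kernel-internal header; the facility number is only a placeholder):

```c
/*
 * Hedged sketch: gate an optional feature on a machine facility.
 * The helper name is illustrative, and 129 (vector facility on real
 * machines) is used purely as a placeholder value here.
 */
#include "kvm-s390.h"

static int example_guest_has_facility(struct kvm *kvm)
{
	return test_kvm_facility(kvm, 129);	/* placeholder facility number */
}
```
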
/Linux-v6.1/arch/arm64/kvm/vgic/

vgic.h
  103  return vcpu->kvm->arch.vgic.implementation_rev; in vgic_get_implementation_rev()
  167  struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
  169  void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
  170  void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
  174  bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
  176  void vgic_kick_vcpus(struct kvm *kvm);
  180  int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
  198  int vgic_v2_map_resources(struct kvm *kvm);
  199  int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
  227  int vgic_v3_map_resources(struct kvm *kvm);
       [all …]

vgic-init.c
   52  void kvm_vgic_early_init(struct kvm *kvm) in kvm_vgic_early_init() argument
   54  struct vgic_dist *dist = &kvm->arch.vgic; in kvm_vgic_early_init()
   71  int kvm_vgic_create(struct kvm *kvm, u32 type) in kvm_vgic_create() argument
   77  if (irqchip_in_kernel(kvm)) in kvm_vgic_create()
   91  if (!lock_all_vcpus(kvm)) in kvm_vgic_create()
   94  kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vgic_create()
  101  kvm->max_vcpus = VGIC_V2_MAX_CPUS; in kvm_vgic_create()
  103  kvm->max_vcpus = VGIC_V3_MAX_CPUS; in kvm_vgic_create()
  105  if (atomic_read(&kvm->online_vcpus) > kvm->max_vcpus) { in kvm_vgic_create()
  110  kvm->arch.vgic.in_kernel = true; in kvm_vgic_create()
       [all …]

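kvm_vgic_create() above is what runs in the kernel when userspace asks for an in-kernel GIC; on arm64 the usual path is KVM_CREATE_DEVICE with a vGIC device type. A hedged userspace sketch (function name is hypothetical; error handling trimmed):

```c
/*
 * Hedged userspace sketch: create an in-kernel vGICv3 for a VM.
 * The returned device fd is then configured (distributor and
 * redistributor base addresses, etc.) via KVM_SET_DEVICE_ATTR.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_create_vgic_v3(int vm_fd)
{
	struct kvm_create_device dev;

	memset(&dev, 0, sizeof(dev));
	dev.type = KVM_DEV_TYPE_ARM_VGIC_V3;	/* or KVM_DEV_TYPE_ARM_VGIC_V2 */

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &dev) < 0)
		return -1;

	return dev.fd;	/* device fd for further KVM_SET_DEVICE_ATTR calls */
}
```
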
/Linux-v6.1/tools/perf/

builtin-kvm.c
  104  static const char *get_exit_reason(struct perf_kvm_stat *kvm, in get_exit_reason() argument
  115  (unsigned long long)exit_code, kvm->exit_reasons_isa); in get_exit_reason()
  119  void exit_event_decode_key(struct perf_kvm_stat *kvm, in exit_event_decode_key()
  123  const char *exit_reason = get_exit_reason(kvm, key->exit_reasons, in exit_event_decode_key()
  129  static bool register_kvm_events_ops(struct perf_kvm_stat *kvm) in register_kvm_events_ops() argument
  134  if (!strcmp(events_ops->name, kvm->report_event)) { in register_kvm_events_ops()
  135  kvm->events_ops = events_ops->ops; in register_kvm_events_ops()
  150  static void init_kvm_event_record(struct perf_kvm_stat *kvm) in init_kvm_event_record() argument
  155  INIT_LIST_HEAD(&kvm->kvm_events_cache[i]); in init_kvm_event_record()
  227  static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm, in find_create_kvm_event() argument
       [all …]

/Linux-v6.1/arch/riscv/kvm/

mmu.c
   97  static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr, in gstage_get_leaf_entry() argument
  104  ptep = (pte_t *)kvm->arch.pgd; in gstage_get_leaf_entry()
  126  static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr) in gstage_remote_tlb_flush() argument
  134  kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order); in gstage_remote_tlb_flush()
  137  static int gstage_set_pte(struct kvm *kvm, u32 level, in gstage_set_pte() argument
  142  pte_t *next_ptep = (pte_t *)kvm->arch.pgd; in gstage_set_pte()
  172  gstage_remote_tlb_flush(kvm, current_level, addr); in gstage_set_pte()
  177  static int gstage_map_page(struct kvm *kvm, in gstage_map_page() argument
  219  return gstage_set_pte(kvm, level, pcache, gpa, &new_pte); in gstage_map_page()
  228  static void gstage_op_pte(struct kvm *kvm, gpa_t addr, in gstage_op_pte() argument
       [all …]

Makefile
    8  include $(srctree)/virt/kvm/Makefile.kvm
   10  obj-$(CONFIG_KVM) += kvm.o
   12  kvm-y += main.o
   13  kvm-y += vm.o
   14  kvm-y += vmid.o
   15  kvm-y += tlb.o
   16  kvm-y += mmu.o
   17  kvm-y += vcpu.o
   18  kvm-y += vcpu_exit.o
   19  kvm-y += vcpu_fp.o
       [all …]

/Linux-v6.1/arch/x86/include/asm/

kvm_page_track.h
   45  void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
   49  int kvm_page_track_init(struct kvm *kvm);
   50  void kvm_page_track_cleanup(struct kvm *kvm);
   52  bool kvm_page_track_write_tracking_enabled(struct kvm *kvm);
   56  int kvm_page_track_create_memslot(struct kvm *kvm,
   60  void kvm_slot_page_track_add_page(struct kvm *kvm,
   63  void kvm_slot_page_track_remove_page(struct kvm *kvm,
   66  bool kvm_slot_page_track_is_active(struct kvm *kvm,
   71  kvm_page_track_register_notifier(struct kvm *kvm,
   74  kvm_page_track_unregister_notifier(struct kvm *kvm,
       [all …]

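The kvm_page_track.h hits above are the x86 write-tracking API: a consumer (KVMGT is the in-tree user) registers a notifier node and gets a callback when the guest writes to a tracked page. A hedged sketch of hooking it up, assuming the v6.1 callback signatures; everything named example_* is illustrative:

```c
/*
 * Hedged sketch: register a page-track notifier whose track_write hook
 * fires when the guest writes to a write-tracked gfn.  The callback
 * body is intentionally empty; a real consumer would re-parse the
 * shadowed structure at 'gpa'.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

static void example_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
				const u8 *new, int bytes,
				struct kvm_page_track_notifier_node *node)
{
	/* Guest wrote 'bytes' bytes at 'gpa' to a tracked page. */
}

static struct kvm_page_track_notifier_node example_node = {
	.track_write = example_track_write,
};

static void example_start_tracking(struct kvm *kvm)
{
	kvm_page_track_register_notifier(kvm, &example_node);
}
```
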