
Searched refs:mmu_lock (Results 1 – 25 of 30) sorted by relevance


/Linux-v5.15/virt/kvm/
mmu_lock.h
14 #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
15 #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
16 #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
18 #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
19 #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
20 #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
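The macros above let arch-neutral KVM code take kvm->mmu_lock without knowing whether the architecture compiled it as an rwlock or a spinlock (the rwlock variants sit under the KVM_HAVE_MMU_RWLOCK conditional in this header). A minimal sketch of how common code would use them; the helper name is hypothetical, not a symbol from the tree:

    /*
     * Sketch only: a hypothetical arch-neutral helper built on the wrappers
     * above.  Under KVM_HAVE_MMU_RWLOCK these expand to write_lock()/
     * write_unlock(); otherwise to spin_lock()/spin_unlock().
     */
    #include <linux/kvm_host.h>
    #include "mmu_lock.h"

    static void kvm_zap_gfn_range_sketch(struct kvm *kvm, gfn_t start, gfn_t end)
    {
            KVM_MMU_LOCK(kvm);      /* exclusive access to the shadow/stage-2 tables */
            /* ... walk and zap the [start, end) range here ... */
            KVM_MMU_UNLOCK(kvm);
    }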
/Linux-v5.15/arch/powerpc/kvm/
book3s_hv_nested.c
724 spin_lock(&kvm->mmu_lock); in kvmhv_remove_nested()
735 spin_unlock(&kvm->mmu_lock); in kvmhv_remove_nested()
754 spin_lock(&kvm->mmu_lock); in kvmhv_release_all_nested()
766 spin_unlock(&kvm->mmu_lock); in kvmhv_release_all_nested()
783 spin_lock(&kvm->mmu_lock); in kvmhv_flush_nested()
785 spin_unlock(&kvm->mmu_lock); in kvmhv_flush_nested()
801 spin_lock(&kvm->mmu_lock); in kvmhv_get_nested()
805 spin_unlock(&kvm->mmu_lock); in kvmhv_get_nested()
813 spin_lock(&kvm->mmu_lock); in kvmhv_get_nested()
826 spin_unlock(&kvm->mmu_lock); in kvmhv_get_nested()
[all …]
book3s_mmu_hpte.c
63 spin_lock(&vcpu3s->mmu_lock); in kvmppc_mmu_hpte_cache_map()
92 spin_unlock(&vcpu3s->mmu_lock); in kvmppc_mmu_hpte_cache_map()
110 spin_lock(&vcpu3s->mmu_lock); in invalidate_pte()
114 spin_unlock(&vcpu3s->mmu_lock); in invalidate_pte()
127 spin_unlock(&vcpu3s->mmu_lock); in invalidate_pte()
369 spin_lock_init(&vcpu3s->mmu_lock); in kvmppc_mmu_hpte_init()
book3s_64_mmu_radix.c
635 spin_lock(&kvm->mmu_lock); in kvmppc_create_pte()
771 spin_unlock(&kvm->mmu_lock); in kvmppc_create_pte()
859 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
864 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
994 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
998 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
1104 spin_lock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1116 spin_unlock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1131 spin_unlock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1176 spin_lock(&kvm->mmu_lock); in kvmppc_radix_flush_memslot()
[all …]
book3s_hv_rm_mmu.c
245 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_enter()
260 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_enter()
274 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_enter()
935 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_zero()
947 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_zero()
963 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_copy()
978 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_copy()
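The .rlock.raw_lock hits above stand out: these handlers run in real mode, where the regular spin_lock() wrappers (lockdep, preemption and IRQ accounting) cannot be used, so they reach through the spinlock_t to the underlying arch_spinlock_t. A hedged sketch of that pattern, assuming mmu_lock is a spinlock as it is on Book3S HV in v5.15; the helper name is invented:

    /*
     * Sketch of the real-mode pattern from book3s_hv_rm_mmu.c: bypass the
     * spinlock_t debug layers and operate on the raw arch_spinlock_t.
     * Assumes kvm->mmu_lock is a spinlock (no KVM_HAVE_MMU_RWLOCK); the
     * HPTE/rmap manipulation is elided.
     */
    #include <linux/kvm_host.h>

    static long kvmppc_rm_op_sketch(struct kvm *kvm)
    {
            arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
            /* ... update HPTEs / rmap chains while the lock is held ... */
            arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);

            return 0;       /* the real handlers return H_SUCCESS etc. */
    }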
book3s_64_mmu_host.c
153 spin_lock(&kvm->mmu_lock); in kvmppc_mmu_map_page()
205 spin_unlock(&kvm->mmu_lock); in kvmppc_mmu_map_page()
book3s_64_vio_hv.c
543 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_rm_h_put_tce_indirect()
588 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_rm_h_put_tce_indirect()
book3s_64_mmu_hv.c
609 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
614 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
743 spin_lock(&kvm->mmu_lock); in kvmppc_rmap_reset()
750 spin_unlock(&kvm->mmu_lock); in kvmppc_rmap_reset()
1383 spin_lock(&kvm->mmu_lock); in resize_hpt_pivot()
1390 spin_unlock(&kvm->mmu_lock); in resize_hpt_pivot()
e500_mmu_host.c
462 spin_lock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
502 spin_unlock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
/Linux-v5.15/arch/arm64/kvm/
mmu.c
61 cond_resched_lock(&kvm->mmu_lock); in stage2_apply_range()
182 assert_spin_locked(&kvm->mmu_lock); in __unmap_stage2_range()
216 spin_lock(&kvm->mmu_lock); in stage2_flush_vm()
222 spin_unlock(&kvm->mmu_lock); in stage2_flush_vm()
602 spin_lock(&kvm->mmu_lock); in stage2_unmap_vm()
608 spin_unlock(&kvm->mmu_lock); in stage2_unmap_vm()
618 spin_lock(&kvm->mmu_lock); in kvm_free_stage2_pgd()
625 spin_unlock(&kvm->mmu_lock); in kvm_free_stage2_pgd()
662 spin_lock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
665 spin_unlock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
[all …]
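In the arm64 hits above, kvm->mmu_lock is still a spinlock in v5.15, and stage2_apply_range() periodically calls cond_resched_lock() so a long stage-2 walk does not hold the lock across the whole range. A sketch of that chunking pattern, with an illustrative function name and chunk size:

    /*
     * Sketch of the stage2_apply_range() chunking pattern: operate on a
     * large IPA range in pieces and let cond_resched_lock() drop and
     * retake kvm->mmu_lock between pieces if it is contended or a
     * reschedule is due.  The 2 MiB step is illustrative.
     */
    #include <linux/kvm_host.h>
    #include <linux/sizes.h>

    static void stage2_range_sketch(struct kvm *kvm, phys_addr_t addr,
                                    phys_addr_t end)
    {
            while (addr < end) {
                    phys_addr_t next = min_t(phys_addr_t, addr + SZ_2M, end);

                    /* ... unmap/flush [addr, next) with mmu_lock held ... */

                    if (next != end)
                            cond_resched_lock(&kvm->mmu_lock);
                    addr = next;
            }
    }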
/Linux-v5.15/arch/x86/kvm/mmu/
tdp_mmu.c
36 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
38 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
155 lockdep_is_held_type(&kvm->mmu_lock, 0) || \
198 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_get_vcpu_root_hpa()
286 lockdep_assert_held_write(&kvm->mmu_lock); in tdp_mmu_unlink_page()
506 lockdep_assert_held_read(&kvm->mmu_lock); in tdp_mmu_set_spte_atomic_no_dirty_log()
616 lockdep_assert_held_write(&kvm->mmu_lock); in __tdp_mmu_set_spte()
697 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { in tdp_mmu_iter_cond_resched()
704 cond_resched_rwlock_read(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
706 cond_resched_rwlock_write(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
[all …]
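The x86 TDP MMU hits show the rwlock conventions: each path documents the mmu_lock mode it expects with lockdep_assert_held_read()/lockdep_assert_held_write(), and tdp_mmu_iter_cond_resched() yields with the matching cond_resched_rwlock_*() helper. A sketch of that pairing; the function name and the bare 'shared' flag are illustrative:

    /*
     * Sketch of the TDP MMU convention visible above: assert the expected
     * mmu_lock mode with lockdep and reschedule with the matching helper.
     * 'shared' mirrors the flag passed around by tdp_mmu_iter_cond_resched().
     */
    #include <linux/kvm_host.h>
    #include <linux/sched.h>

    static void tdp_mmu_yield_sketch(struct kvm *kvm, bool shared)
    {
            if (shared)
                    lockdep_assert_held_read(&kvm->mmu_lock);
            else
                    lockdep_assert_held_write(&kvm->mmu_lock);

            if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
                    if (shared)
                            cond_resched_rwlock_read(&kvm->mmu_lock);
                    else
                            cond_resched_rwlock_write(&kvm->mmu_lock);
            }
    }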
page_track.c
188 write_lock(&kvm->mmu_lock); in kvm_page_track_register_notifier()
190 write_unlock(&kvm->mmu_lock); in kvm_page_track_register_notifier()
206 write_lock(&kvm->mmu_lock); in kvm_page_track_unregister_notifier()
208 write_unlock(&kvm->mmu_lock); in kvm_page_track_unregister_notifier()
mmu.c
2056 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { in mmu_sync_children()
2063 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); in mmu_sync_children()
2529 write_lock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2540 write_unlock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2551 write_lock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
2559 write_unlock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
3348 write_lock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3374 write_unlock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3436 write_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3471 write_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
[all …]
tdp_mmu.h
42 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_zap_sp()
paging_tmpl.h
930 write_lock(&vcpu->kvm->mmu_lock); in FNAME()
943 write_unlock(&vcpu->kvm->mmu_lock); in FNAME()
981 write_lock(&vcpu->kvm->mmu_lock); in FNAME()
1016 write_unlock(&vcpu->kvm->mmu_lock); in FNAME()
/Linux-v5.15/arch/mips/kvm/
mmu.c
522 spin_lock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
557 spin_unlock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
639 spin_lock(&kvm->mmu_lock); in kvm_mips_map_page()
647 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
677 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
mips.c
227 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
232 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
267 spin_lock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
273 spin_unlock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
/Linux-v5.15/arch/x86/kvm/
debugfs.c
107 write_lock(&kvm->mmu_lock); in kvm_mmu_rmaps_stat_show()
127 write_unlock(&kvm->mmu_lock); in kvm_mmu_rmaps_stat_show()
/Linux-v5.15/drivers/misc/habanalabs/common/mmu/
mmu.c
84 mutex_init(&ctx->mmu_lock); in hl_mmu_ctx_init()
121 mutex_destroy(&ctx->mmu_lock); in hl_mmu_ctx_fini()
575 mutex_lock(&ctx->mmu_lock); in hl_mmu_get_tlb_info()
584 mutex_unlock(&ctx->mmu_lock); in hl_mmu_get_tlb_info()
/Linux-v5.15/drivers/misc/habanalabs/common/
command_buffer.c
65 mutex_lock(&ctx->mmu_lock); in cb_map_mem()
85 mutex_unlock(&ctx->mmu_lock); in cb_map_mem()
102 mutex_unlock(&ctx->mmu_lock); in cb_map_mem()
119 mutex_lock(&ctx->mmu_lock); in cb_unmap_mem()
131 mutex_unlock(&ctx->mmu_lock); in cb_unmap_mem()
memory.c
1185 mutex_lock(&ctx->mmu_lock); in map_device_va()
1189 mutex_unlock(&ctx->mmu_lock); in map_device_va()
1198 mutex_unlock(&ctx->mmu_lock); in map_device_va()
1333 mutex_lock(&ctx->mmu_lock); in unmap_device_va()
1347 mutex_unlock(&ctx->mmu_lock); in unmap_device_va()
2124 mutex_lock(&ctx->mmu_lock); in hl_vm_ctx_fini()
2130 mutex_unlock(&ctx->mmu_lock); in hl_vm_ctx_fini()
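The habanalabs hits are an unrelated mmu_lock: a mutex in the driver's per-context structure (ctx->mmu_lock) that serializes device-MMU map/unmap and TLB queries, not KVM's kvm->mmu_lock. A sketch of the shape of that usage, with the actual hl_mmu_* calls elided and the helper name invented for illustration:

    /*
     * Sketch of the habanalabs usage: ctx->mmu_lock is a mutex in
     * struct hl_ctx guarding the device MMU of one context.  The real
     * mapping and MMU-cache-invalidation calls are elided; the helper
     * name and return convention are illustrative.
     */
    #include "habanalabs.h"

    static int hl_map_page_sketch(struct hl_ctx *ctx, u64 virt_addr,
                                  u64 phys_addr, u32 page_size)
    {
            int rc = 0;

            mutex_lock(&ctx->mmu_lock);
            /* ... map virt_addr -> phys_addr and invalidate the MMU cache ... */
            mutex_unlock(&ctx->mmu_lock);

            return rc;
    }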
/Linux-v5.15/Documentation/virt/kvm/
locking.rst
34 - kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and
35 kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
36 cannot be taken without already holding kvm->arch.mmu_lock (typically with
232 :Name: kvm->mmu_lock
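The locking.rst hit states the ordering rule: the x86 arch spinlocks (tdp_mmu_pages_lock, mmu_unsync_pages_lock) nest strictly inside mmu_lock and are never taken on their own. A sketch of the legal nesting on the TDP MMU read-lock path; the function name and the elided list manipulation are illustrative:

    /*
     * Sketch of the ordering rule from locking.rst: the x86 arch spinlock
     * kvm->arch.tdp_mmu_pages_lock is only taken with kvm->mmu_lock
     * already held (read mode on the TDP MMU fast paths), never on its own.
     */
    #include <linux/kvm_host.h>

    static void tdp_mmu_list_update_sketch(struct kvm *kvm)
    {
            read_lock(&kvm->mmu_lock);                  /* outer lock first */

            spin_lock(&kvm->arch.tdp_mmu_pages_lock);   /* inner lock, never taken alone */
            /* ... add or remove a kvm_mmu_page on the TDP MMU lists ... */
            spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

            read_unlock(&kvm->mmu_lock);
    }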
/Linux-v5.15/arch/powerpc/include/asm/
kvm_book3s_64.h
662 VM_WARN(!spin_is_locked(&kvm->mmu_lock), in find_kvm_secondary_pte()
674 VM_WARN(!spin_is_locked(&kvm->mmu_lock), in find_kvm_host_pte()
/Linux-v5.15/drivers/gpu/drm/i915/gvt/
kvmgt.c
1809 write_lock(&kvm->mmu_lock); in kvmgt_page_track_add()
1818 write_unlock(&kvm->mmu_lock); in kvmgt_page_track_add()
1843 write_lock(&kvm->mmu_lock); in kvmgt_page_track_remove()
1852 write_unlock(&kvm->mmu_lock); in kvmgt_page_track_remove()
1878 write_lock(&kvm->mmu_lock); in kvmgt_page_track_flush_slot()
1887 write_unlock(&kvm->mmu_lock); in kvmgt_page_track_flush_slot()
/Linux-v5.15/include/linux/
kvm_host.h
539 rwlock_t mmu_lock; member
541 spinlock_t mmu_lock;
1535 lockdep_assert_held(&kvm->mmu_lock); in mmu_notifier_retry_hva()
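Finally, kvm_host.h declares the member as rwlock_t or spinlock_t depending on the architecture, and mmu_notifier_retry_hva() asserts that mmu_lock is held while it checks for a racing invalidation. A sketch of the usual fault-path sequence around it, assuming the rwlock (x86) flavour; pfn resolution and the actual mapping step are elided:

    /*
     * Sketch of the usual consumer of mmu_notifier_retry_hva(): sample
     * mmu_notifier_seq before resolving the pfn outside the lock, then
     * recheck under mmu_lock before installing the translation.
     */
    #include <linux/kvm_host.h>

    static bool install_mapping_sketch(struct kvm_vcpu *vcpu, unsigned long hva)
    {
            struct kvm *kvm = vcpu->kvm;
            unsigned long mmu_seq = kvm->mmu_notifier_seq;

            smp_rmb();                      /* pairs with the invalidate side */
            /* ... translate hva to a pfn without mmu_lock held ... */

            write_lock(&kvm->mmu_lock);     /* spin_lock() on spinlock architectures */
            if (mmu_notifier_retry_hva(kvm, mmu_seq, hva)) {
                    write_unlock(&kvm->mmu_lock);
                    return false;           /* invalidation raced; caller retries the fault */
            }
            /* ... install the translation ... */
            write_unlock(&kvm->mmu_lock);

            return true;
    }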
