Lines matching refs: sptep
213 u64 *sptep; member
243 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
305 static void mmu_spte_set(u64 *sptep, u64 spte);
454 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
467 trace_mark_mmio_spte(sptep, gfn, access, gen); in mark_mmio_spte()
468 mmu_spte_set(sptep, mask); in mark_mmio_spte()
486 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
490 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte()
641 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
643 WRITE_ONCE(*sptep, spte); in __set_spte()
646 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
648 WRITE_ONCE(*sptep, spte); in __update_clear_spte_fast()
651 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
653 return xchg(sptep, spte); in __update_clear_spte_slow()
656 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless() argument
658 return READ_ONCE(*sptep); in __get_spte_lockless()
669 static void count_spte_clear(u64 *sptep, u64 spte) in count_spte_clear() argument
671 struct kvm_mmu_page *sp = page_header(__pa(sptep)); in count_spte_clear()
681 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
685 ssptep = (union split_spte *)sptep; in __set_spte()
700 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
704 ssptep = (union split_spte *)sptep; in __update_clear_spte_fast()
716 count_spte_clear(sptep, spte); in __update_clear_spte_fast()
719 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
723 ssptep = (union split_spte *)sptep; in __update_clear_spte_slow()
730 count_spte_clear(sptep, spte); in __update_clear_spte_slow()
753 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless() argument
755 struct kvm_mmu_page *sp = page_header(__pa(sptep)); in __get_spte_lockless()
756 union split_spte spte, *orig = (union split_spte *)sptep; in __get_spte_lockless()
828 static void mmu_spte_set(u64 *sptep, u64 new_spte) in mmu_spte_set() argument
830 WARN_ON(is_shadow_present_pte(*sptep)); in mmu_spte_set()
831 __set_spte(sptep, new_spte); in mmu_spte_set()
838 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte) in mmu_spte_update_no_track() argument
840 u64 old_spte = *sptep; in mmu_spte_update_no_track()
845 mmu_spte_set(sptep, new_spte); in mmu_spte_update_no_track()
850 __update_clear_spte_fast(sptep, new_spte); in mmu_spte_update_no_track()
852 old_spte = __update_clear_spte_slow(sptep, new_spte); in mmu_spte_update_no_track()
870 static bool mmu_spte_update(u64 *sptep, u64 new_spte) in mmu_spte_update() argument
873 u64 old_spte = mmu_spte_update_no_track(sptep, new_spte); in mmu_spte_update()
911 static int mmu_spte_clear_track_bits(u64 *sptep) in mmu_spte_clear_track_bits() argument
914 u64 old_spte = *sptep; in mmu_spte_clear_track_bits()
917 __update_clear_spte_fast(sptep, 0ull); in mmu_spte_clear_track_bits()
919 old_spte = __update_clear_spte_slow(sptep, 0ull); in mmu_spte_clear_track_bits()
947 static void mmu_spte_clear_no_track(u64 *sptep) in mmu_spte_clear_no_track() argument
949 __update_clear_spte_fast(sptep, 0ull); in mmu_spte_clear_no_track()
952 static u64 mmu_spte_get_lockless(u64 *sptep) in mmu_spte_get_lockless() argument
954 return __get_spte_lockless(sptep); in mmu_spte_get_lockless()
1004 static bool mmu_spte_age(u64 *sptep) in mmu_spte_age() argument
1006 u64 spte = mmu_spte_get_lockless(sptep); in mmu_spte_age()
1013 (unsigned long *)sptep); in mmu_spte_age()
1023 mmu_spte_update_no_track(sptep, spte); in mmu_spte_age()
1458 static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep) in pte_list_remove() argument
1460 mmu_spte_clear_track_bits(sptep); in pte_list_remove()
1461 __pte_list_remove(sptep, rmap_head); in pte_list_remove()
1535 u64 *sptep; in rmap_get_first() local
1542 sptep = (u64 *)rmap_head->val; in rmap_get_first()
1548 sptep = iter->desc->sptes[iter->pos]; in rmap_get_first()
1550 BUG_ON(!is_shadow_present_pte(*sptep)); in rmap_get_first()
1551 return sptep; in rmap_get_first()
1561 u64 *sptep; in rmap_get_next() local
1566 sptep = iter->desc->sptes[iter->pos]; in rmap_get_next()
1567 if (sptep) in rmap_get_next()
1576 sptep = iter->desc->sptes[iter->pos]; in rmap_get_next()
1583 BUG_ON(!is_shadow_present_pte(*sptep)); in rmap_get_next()
1584 return sptep; in rmap_get_next()
1591 static void drop_spte(struct kvm *kvm, u64 *sptep) in drop_spte() argument
1593 if (mmu_spte_clear_track_bits(sptep)) in drop_spte()
1594 rmap_remove(kvm, sptep); in drop_spte()
1598 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) in __drop_large_spte() argument
1600 if (is_large_pte(*sptep)) { in __drop_large_spte()
1601 WARN_ON(page_header(__pa(sptep))->role.level == in __drop_large_spte()
1603 drop_spte(kvm, sptep); in __drop_large_spte()
1611 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) in drop_large_spte() argument
1613 if (__drop_large_spte(vcpu->kvm, sptep)) { in drop_large_spte()
1614 struct kvm_mmu_page *sp = page_header(__pa(sptep)); in drop_large_spte()
1634 static bool spte_write_protect(u64 *sptep, bool pt_protect) in spte_write_protect() argument
1636 u64 spte = *sptep; in spte_write_protect()
1642 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); in spte_write_protect()
1648 return mmu_spte_update(sptep, spte); in spte_write_protect()
1655 u64 *sptep; in __rmap_write_protect() local
1659 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_write_protect()
1660 flush |= spte_write_protect(sptep, pt_protect); in __rmap_write_protect()
1665 static bool spte_clear_dirty(u64 *sptep) in spte_clear_dirty() argument
1667 u64 spte = *sptep; in spte_clear_dirty()
1669 rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); in spte_clear_dirty()
1673 return mmu_spte_update(sptep, spte); in spte_clear_dirty()
1676 static bool spte_wrprot_for_clear_dirty(u64 *sptep) in spte_wrprot_for_clear_dirty() argument
1679 (unsigned long *)sptep); in spte_wrprot_for_clear_dirty()
1680 if (was_writable && !spte_ad_enabled(*sptep)) in spte_wrprot_for_clear_dirty()
1681 kvm_set_pfn_dirty(spte_to_pfn(*sptep)); in spte_wrprot_for_clear_dirty()
1694 u64 *sptep; in __rmap_clear_dirty() local
1698 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_clear_dirty()
1699 if (spte_ad_need_write_protect(*sptep)) in __rmap_clear_dirty()
1700 flush |= spte_wrprot_for_clear_dirty(sptep); in __rmap_clear_dirty()
1702 flush |= spte_clear_dirty(sptep); in __rmap_clear_dirty()
1707 static bool spte_set_dirty(u64 *sptep) in spte_set_dirty() argument
1709 u64 spte = *sptep; in spte_set_dirty()
1711 rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); in spte_set_dirty()
1720 return mmu_spte_update(sptep, spte); in spte_set_dirty()
1725 u64 *sptep; in __rmap_set_dirty() local
1729 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_set_dirty()
1730 if (spte_ad_enabled(*sptep)) in __rmap_set_dirty()
1731 flush |= spte_set_dirty(sptep); in __rmap_set_dirty()
1850 u64 *sptep; in kvm_zap_rmapp() local
1854 while ((sptep = rmap_get_first(rmap_head, &iter))) { in kvm_zap_rmapp()
1855 rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); in kvm_zap_rmapp()
1857 pte_list_remove(rmap_head, sptep); in kvm_zap_rmapp()
1875 u64 *sptep; in kvm_set_pte_rmapp() local
1886 for_each_rmap_spte(rmap_head, &iter, sptep) { in kvm_set_pte_rmapp()
1888 sptep, *sptep, gfn, level); in kvm_set_pte_rmapp()
1893 pte_list_remove(rmap_head, sptep); in kvm_set_pte_rmapp()
1896 new_spte = *sptep & ~PT64_BASE_ADDR_MASK; in kvm_set_pte_rmapp()
1904 mmu_spte_clear_track_bits(sptep); in kvm_set_pte_rmapp()
1905 mmu_spte_set(sptep, new_spte); in kvm_set_pte_rmapp()
2057 u64 *sptep; in kvm_age_rmapp() local
2061 for_each_rmap_spte(rmap_head, &iter, sptep) in kvm_age_rmapp()
2062 young |= mmu_spte_age(sptep); in kvm_age_rmapp()
2072 u64 *sptep; in kvm_test_age_rmapp() local
2075 for_each_rmap_spte(rmap_head, &iter, sptep) in kvm_test_age_rmapp()
2076 if (is_accessed_spte(*sptep)) in kvm_test_age_rmapp()
2197 u64 *sptep; in kvm_mmu_mark_parents_unsync() local
2200 for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { in kvm_mmu_mark_parents_unsync()
2201 mark_unsync(sptep); in kvm_mmu_mark_parents_unsync()
2677 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; in shadow_walk_okay()
2695 __shadow_walk_next(iterator, *iterator->sptep); in shadow_walk_next()
2698 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, in link_shadow_page() argument
2713 mmu_spte_set(sptep, spte); in link_shadow_page()
2715 mmu_page_add_parent_pte(vcpu, sp, sptep); in link_shadow_page()
2718 mark_unsync(sptep); in link_shadow_page()
2721 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, in validate_direct_spte() argument
2724 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { in validate_direct_spte()
2734 child = page_header(*sptep & PT64_BASE_ADDR_MASK); in validate_direct_spte()
2738 drop_parent_pte(child, sptep); in validate_direct_spte()
2779 u64 *sptep; in kvm_mmu_unlink_parents() local
2782 while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) in kvm_mmu_unlink_parents()
2783 drop_parent_pte(sp, sptep); in kvm_mmu_unlink_parents()
3044 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, in set_spte() argument
3053 if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) in set_spte()
3056 sp = page_header(__pa(sptep)); in set_spte()
3121 if (!can_unsync && is_writable_pte(*sptep)) in set_spte()
3142 if (mmu_spte_update(sptep, spte)) in set_spte()
3148 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, in mmu_set_spte() argument
3159 *sptep, write_fault, gfn); in mmu_set_spte()
3161 if (is_shadow_present_pte(*sptep)) { in mmu_set_spte()
3167 !is_large_pte(*sptep)) { in mmu_set_spte()
3169 u64 pte = *sptep; in mmu_set_spte()
3172 drop_parent_pte(child, sptep); in mmu_set_spte()
3174 } else if (pfn != spte_to_pfn(*sptep)) { in mmu_set_spte()
3176 spte_to_pfn(*sptep), pfn); in mmu_set_spte()
3177 drop_spte(vcpu->kvm, sptep); in mmu_set_spte()
3183 set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, in mmu_set_spte()
3195 if (unlikely(is_mmio_spte(*sptep))) in mmu_set_spte()
3198 pgprintk("%s: setting spte %llx\n", __func__, *sptep); in mmu_set_spte()
3199 trace_kvm_mmu_set_spte(level, gfn, sptep); in mmu_set_spte()
3200 if (!was_rmapped && is_large_pte(*sptep)) in mmu_set_spte()
3203 if (is_shadow_present_pte(*sptep)) { in mmu_set_spte()
3205 rmap_count = rmap_add(vcpu, sptep, gfn); in mmu_set_spte()
3207 rmap_recycle(vcpu, sptep, gfn); in mmu_set_spte()
3255 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch() argument
3262 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); in __direct_pte_prefetch()
3266 if (is_shadow_present_pte(*spte) || spte == sptep) { in __direct_pte_prefetch()
3277 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) in direct_pte_prefetch() argument
3281 sp = page_header(__pa(sptep)); in direct_pte_prefetch()
3294 __direct_pte_prefetch(vcpu, sp, sptep); in direct_pte_prefetch()
3301 u64 spte = *it.sptep; in disallowed_hugepage_adjust()
3345 drop_large_spte(vcpu, it.sptep); in __direct_map()
3346 if (!is_shadow_present_pte(*it.sptep)) { in __direct_map()
3350 link_shadow_page(vcpu, it.sptep, sp); in __direct_map()
3356 ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, in __direct_map()
3359 direct_pte_prefetch(vcpu, it.sptep); in __direct_map()
3481 u64 *sptep, u64 old_spte, u64 new_spte) in fast_pf_fix_direct_spte() argument
3499 if (cmpxchg64(sptep, old_spte, new_spte) != old_spte) in fast_pf_fix_direct_spte()
3507 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in fast_pf_fix_direct_spte()
3556 sp = page_header(__pa(iterator.sptep)); in fast_page_fault()
3616 iterator.sptep, spte, in fast_page_fault()
3629 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, in fast_page_fault()
4037 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) in walk_shadow_page_get_mmio_spte() argument
4053 spte = mmu_spte_get_lockless(iterator.sptep); in walk_shadow_page_get_mmio_spte()
4077 *sptep = spte; in walk_shadow_page_get_mmio_spte()
4145 clear_sp_write_flooding_count(iterator.sptep); in shadow_page_table_clear_flood()
4447 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
4450 if (unlikely(is_mmio_spte(*sptep))) { in sync_mmio_spte()
4451 if (gfn != get_mmio_spte_gfn(*sptep)) { in sync_mmio_spte()
4452 mmu_spte_clear_no_track(sptep); in sync_mmio_spte()
4457 mark_mmio_spte(vcpu, sptep, gfn, access); in sync_mmio_spte()
5999 u64 *sptep; in kvm_mmu_zap_collapsible_spte() local
6006 for_each_rmap_spte(rmap_head, &iter, sptep) { in kvm_mmu_zap_collapsible_spte()
6007 sp = page_header(__pa(sptep)); in kvm_mmu_zap_collapsible_spte()
6008 pfn = spte_to_pfn(*sptep); in kvm_mmu_zap_collapsible_spte()
6020 pte_list_remove(rmap_head, sptep); in kvm_mmu_zap_collapsible_spte()
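
For orientation, below is a minimal user-space sketch of the store-selection pattern implied by the helpers in this listing (mmu_spte_set, __update_clear_spte_fast, __update_clear_spte_slow, mmu_spte_update_no_track). It is reconstructed from the signatures above, not copied from the kernel source: WRITE_ONCE is modeled with a relaxed C11 atomic store, xchg with atomic_exchange, and the present-bit and volatile-bits checks are placeholders introduced only for illustration.

/*
 * Minimal sketch (not the kernel source): how the listed SPTE store
 * helpers relate. Placeholder bit layout and checks; C11 atomics stand
 * in for WRITE_ONCE/xchg.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define SPTE_PRESENT 0x1ull                        /* placeholder "present" bit */

static bool is_shadow_present_pte(u64 spte) { return spte & SPTE_PRESENT; }
static bool spte_has_volatile_bits(u64 spte) { (void)spte; return false; } /* placeholder */

static void __set_spte(_Atomic u64 *sptep, u64 spte)
{
        atomic_store_explicit(sptep, spte, memory_order_relaxed);     /* ~WRITE_ONCE */
}

static void __update_clear_spte_fast(_Atomic u64 *sptep, u64 spte)
{
        atomic_store_explicit(sptep, spte, memory_order_relaxed);     /* ~WRITE_ONCE */
}

static u64 __update_clear_spte_slow(_Atomic u64 *sptep, u64 spte)
{
        return atomic_exchange(sptep, spte);                          /* ~xchg */
}

/* Like mmu_spte_set(): only valid when the SPTE is not yet present. */
static void mmu_spte_set(_Atomic u64 *sptep, u64 new_spte)
{
        assert(!is_shadow_present_pte(atomic_load_explicit(sptep, memory_order_relaxed)));
        __set_spte(sptep, new_spte);
}

/* Like mmu_spte_update_no_track(): pick the cheapest safe store. */
static u64 mmu_spte_update_no_track(_Atomic u64 *sptep, u64 new_spte)
{
        u64 old_spte = atomic_load_explicit(sptep, memory_order_relaxed);

        if (!is_shadow_present_pte(old_spte)) {
                mmu_spte_set(sptep, new_spte);                 /* first install */
                return old_spte;
        }
        if (!spte_has_volatile_bits(old_spte))
                __update_clear_spte_fast(sptep, new_spte);     /* plain store */
        else
                old_spte = __update_clear_spte_slow(sptep, new_spte); /* atomic exchange */
        return old_spte;
}

int main(void)
{
        _Atomic u64 spte = 0;

        mmu_spte_update_no_track(&spte, 0xabc000ull | SPTE_PRESENT);
        printf("spte = %#llx\n",
               (unsigned long long)atomic_load_explicit(&spte, memory_order_relaxed));
        return 0;
}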