/Linux-v5.4/Documentation/virt/kvm/
  locking.txt:
    36: spte.
    39: SPTE_MMU_WRITEABLE bit on the spte:
    45: On fast page fault path, we will use cmpxchg to atomically set the spte W
    46: bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, or
    59: spte is the shadow page table entry corresponding with gpte and
    60: spte = pfn1
    65: old_spte = *spte;
    67: spte = 0;
    73: spte = pfn1;
    75: if (cmpxchg(spte, old_spte, old_spte+W)
    [all …]
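The locking.txt lines above sketch KVM's fast page fault: restore the W bit with a single cmpxchg, so a concurrent zap (spte = 0) or remap (spte = pfn1) changes the spte and makes the exchange fail harmlessly. Below is a minimal userspace model of that lockless update; SPTE_W, SPTE_HOST_WRITEABLE and SPTE_MMU_WRITEABLE are illustrative bit positions, not KVM's real layout.

    /*
     * Model of the fast-page-fault update quoted above: set W with a
     * cmpxchg so any racing writer to the spte defeats the exchange
     * instead of being silently overwritten.  Bits are illustrative.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SPTE_W              (1ull << 1)  /* hardware-writable bit  */
    #define SPTE_HOST_WRITEABLE (1ull << 57) /* illustrative position  */
    #define SPTE_MMU_WRITEABLE  (1ull << 58) /* illustrative position  */

    static bool fast_make_writable(_Atomic uint64_t *sptep)
    {
        uint64_t old_spte = atomic_load(sptep);

        /* Only restore W if both software tracking bits are set. */
        if (!(old_spte & SPTE_HOST_WRITEABLE) ||
            !(old_spte & SPTE_MMU_WRITEABLE))
            return false;

        /* Mirrors "if (cmpxchg(spte, old_spte, old_spte+W))". */
        return atomic_compare_exchange_strong(sptep, &old_spte,
                                              old_spte | SPTE_W);
    }

    int main(void)
    {
        _Atomic uint64_t spte = SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE;
        printf("fast path %s, spte=%#llx\n",
               fast_make_writable(&spte) ? "won" : "lost",
               (unsigned long long)atomic_load(&spte));
        return 0;
    }

When the exchange fails, the real fast path re-reads the spte and retries a few times before falling back to the slow path under mmu_lock.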
  mmu.txt:
    42: spte shadow pte (referring to pfns)
    109: A nonleaf spte allows the hardware mmu to reach the leaf pages and
    112: A leaf spte corresponds to either one or two translations encoded into
    213: parent_ptes bit 0 is zero, only one spte points at this page and
    214: parent_ptes points at this single spte, otherwise, there exists multiple
    231: Only present on 32-bit hosts, where a 64-bit spte cannot be written
    295: - check for valid generation number in the spte (see "Fast invalidation of
    308: - walk the shadow page table to find the spte for the translation,
    310: - If this is an mmio request, cache the mmio info to the spte and set some
    311: reserved bit on the spte (see callers of kvm_mmu_set_mmio_spte_mask)
    [all …]
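Lines 213-214 describe a tagged-pointer trick: the low bit of parent_ptes says whether it points directly at the lone parent spte or at a descriptor chaining many parents. A sketch of that encoding, with a hypothetical parent_list struct standing in for the kernel's real list descriptor:

    /*
     * parent_ptes encoding: bit 0 clear -> pointer to the single
     * parent spte; bit 0 set -> pointer (with the tag stripped) to a
     * descriptor holding several.  Names here are illustrative.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct parent_list {                /* hypothetical descriptor */
        uint64_t *sptes[3];
        struct parent_list *next;
    };

    static int count_parents(uintptr_t parent_ptes)
    {
        if (!parent_ptes)
            return 0;
        if (!(parent_ptes & 1))         /* bit 0 zero: lone parent */
            return 1;

        /* bit 0 set: strip the tag and walk the descriptor chain */
        int n = 0;
        for (struct parent_list *d =
                 (struct parent_list *)(parent_ptes & ~(uintptr_t)1);
             d; d = d->next)
            for (int i = 0; i < 3; i++)
                if (d->sptes[i])
                    n++;
        return n;
    }

    int main(void)
    {
        uint64_t spte = 0;              /* aligned, so bit 0 is clear */
        printf("%d parent(s)\n", count_parents((uintptr_t)&spte));
        return 0;
    }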
/Linux-v5.4/arch/x86/kvm/
  mmu.c:
    240: #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ argument
    243: ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
    244: __shadow_walk_next(&(_walker), spte))
    305: static void mmu_spte_set(u64 *sptep, u64 spte);
    306: static bool is_executable_pte(u64 spte);
    352: static bool is_mmio_spte(u64 spte) in is_mmio_spte() argument
    354: return (spte & shadow_mmio_mask) == shadow_mmio_value; in is_mmio_spte()
    373: static inline bool spte_ad_enabled(u64 spte) in spte_ad_enabled() argument
    375: MMU_WARN_ON(is_mmio_spte(spte)); in spte_ad_enabled()
    376: return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK; in spte_ad_enabled()
    [all …]
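is_mmio_spte() at line 354 shows how KVM tags not-present sptes that should trap as MMIO: mask the spte and compare against a reserved-bit pattern. A self-contained model with made-up mask values (KVM computes the real shadow_mmio_mask/shadow_mmio_value at runtime; see the kvm_mmu_set_mmio_spte_mask callers noted in mmu.txt):

    /*
     * Model of the mask test quoted from mmu.c: an spte is an MMIO
     * marker when (spte & mask) == value.  Mask/value are illustrative.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static const uint64_t shadow_mmio_mask  = 0x7ull << 52; /* illustrative */
    static const uint64_t shadow_mmio_value = 0x3ull << 52; /* illustrative */

    static bool is_mmio_spte(uint64_t spte)
    {
        return (spte & shadow_mmio_mask) == shadow_mmio_value;
    }

    int main(void)
    {
        uint64_t spte = shadow_mmio_value | 0x1000; /* gfn bits elsewhere */
        printf("mmio? %d\n", is_mmio_spte(spte));
        return 0;
    }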
  mmutrace.h:
    308: TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
    309: TP_ARGS(spte, kvm_gen, spte_gen),
    314: __field(u64, spte)
    320: __entry->spte = spte;
    323: TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
    336: __field(u64, spte)
    347: __entry->spte = *sptep;
    350: __entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
    351: __entry->x = is_executable_pte(__entry->spte);
    352: __entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
    [all …]
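The tracepoint at lines 308-323 logs whether the generation number cached in an MMIO spte still matches the VM's current memslot generation; a stale generation means the cached MMIO info must not be trusted. A small model of that check, assuming the generation sits in spare spte bits at an illustrative position:

    /*
     * Model of the generation check behind the trace output above:
     * valid iff the generation stored in the spte equals kvm_gen.
     * GEN_SHIFT/GEN_MASK are illustrative, not KVM's real split.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GEN_SHIFT 3
    #define GEN_MASK  0x7ffull

    static unsigned get_mmio_spte_gen(uint64_t spte)
    {
        return (spte >> GEN_SHIFT) & GEN_MASK;
    }

    static bool check_mmio_spte(uint64_t spte, unsigned kvm_gen)
    {
        unsigned spte_gen = get_mmio_spte_gen(spte);

        printf("spte %llx kvm_gen %x spte-gen %x valid %d\n",
               (unsigned long long)spte, kvm_gen, spte_gen,
               kvm_gen == spte_gen);
        return kvm_gen == spte_gen;   /* stale: refault the slow way */
    }

    int main(void)
    {
        check_mmio_spte(5u << GEN_SHIFT, 5);  /* valid */
        check_mmio_spte(4u << GEN_SHIFT, 5);  /* stale */
        return 0;
    }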
  paging_tmpl.h:
    175: struct kvm_mmu_page *sp, u64 *spte, in FNAME()
    192: drop_spte(vcpu->kvm, spte); in FNAME()
    517: u64 *spte, pt_element_t gpte, bool no_dirty_log) in FNAME()
    523: if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) in FNAME()
    526: pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); in FNAME()
    540: mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn, in FNAME()
    548: u64 *spte, const void *pte) in FNAME()
    552: FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); in FNAME()
    583: u64 *spte; in FNAME() local
    595: spte = sp->spt + i; in FNAME()
    [all …]
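Every match above is tagged "in FNAME()" because paging_tmpl.h is a template: mmu.c includes it once per guest paging mode, and FNAME() pastes the mode into each function name, so one source file compiles to paging64_*, paging32_*, and so on. A stripped-down sketch of the convention (the real header also redefines pt_element_t and many other per-mode macros):

    /* Sketch of the FNAME() token-pasting convention. */
    #include <stdio.h>

    #define PTTYPE 64

    #if PTTYPE == 64
    #define FNAME(name) paging64_##name
    #elif PTTYPE == 32
    #define FNAME(name) paging32_##name
    #endif

    /* expands to paging64_page_fault */
    static void FNAME(page_fault)(void)
    {
        printf("walking %d-bit guest page tables\n", PTTYPE);
    }

    int main(void)
    {
        paging64_page_fault();
        return 0;
    }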
/Linux-v5.4/arch/s390/mm/
  pgtable.c:
    634: pte_t spte, tpte; in ptep_shadow_pte() local
    640: spte = *sptep; in ptep_shadow_pte()
    641: if (!(pte_val(spte) & _PAGE_INVALID) && in ptep_shadow_pte()
    642: !((pte_val(spte) & _PAGE_PROTECT) && in ptep_shadow_pte()
    646: pte_val(tpte) = (pte_val(spte) & PAGE_MASK) | in ptep_shadow_pte()
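ptep_shadow_pte() builds a shadow pte for a nested s390 guest: bail out if the source pte is invalid, or if it is write-protected while the shadow needs write access, then combine the source frame with the requested protection. A rough model with illustrative bit positions (s390's real _PAGE_* values differ):

    /*
     * Model of the ptep_shadow_pte() fragments above: validate the
     * source pte, then derive the shadow pte from the source frame
     * plus the stricter protection bits.  Bit layout is illustrative.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK      (~0xfffull)
    #define _PAGE_INVALID  (1ull << 0)
    #define _PAGE_PROTECT  (1ull << 1)

    static bool make_shadow_pte(uint64_t spte, uint64_t prot, uint64_t *tpte)
    {
        if (spte & _PAGE_INVALID)
            return false;                       /* nothing to shadow  */
        if ((spte & _PAGE_PROTECT) && !(prot & _PAGE_PROTECT))
            return false;                       /* need write, got RO */

        /* frame from the source pte, protection from the request */
        *tpte = (spte & PAGE_MASK) | (prot & ~PAGE_MASK);
        return true;
    }

    int main(void)
    {
        uint64_t tpte;
        if (make_shadow_pte(0x1000, _PAGE_PROTECT, &tpte))
            printf("shadow pte %#llx\n", (unsigned long long)tpte);
        return 0;
    }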
/Linux-v5.4/mm/
  hugetlb.c:
    4838: pte_t *spte = NULL; in huge_pmd_share() local
    4852: spte = huge_pte_offset(svma->vm_mm, saddr, in huge_pmd_share()
    4854: if (spte) { in huge_pmd_share()
    4855: get_page(virt_to_page(spte)); in huge_pmd_share()
    4861: if (!spte) in huge_pmd_share()
    4864: ptl = huge_pte_lock(hstate_vma(vma), mm, spte); in huge_pmd_share()
    4867: (pmd_t *)((unsigned long)spte & PAGE_MASK)); in huge_pmd_share()
    4870: put_page(virt_to_page(spte)); in huge_pmd_share()
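huge_pmd_share() lets vmas mapping the same file range share one pmd page of hugetlb pte entries; the get_page()/put_page() pair above pins the page backing the shared table before installing it and unpins it if another thread raced in first. A toy model of just that refcount discipline (the locking and the vma walk are elided; struct page here is a plain stand-in):

    /* Model of the pin-then-maybe-unpin pattern in huge_pmd_share(). */
    #include <stdatomic.h>
    #include <stdio.h>

    struct page { atomic_int refcount; };

    static void get_page(struct page *p) { atomic_fetch_add(&p->refcount, 1); }
    static void put_page(struct page *p) { atomic_fetch_sub(&p->refcount, 1); }

    int main(void)
    {
        struct page pte_page = { 1 };   /* owned by the first mapper  */
        struct page *installed = NULL;  /* stand-in for the pud entry */

        get_page(&pte_page);            /* pin before installing      */
        if (!installed)
            installed = &pte_page;      /* we won the race            */
        else
            put_page(&pte_page);        /* lost: drop the extra ref   */

        printf("refcount now %d\n", atomic_load(&pte_page.refcount));
        return 0;
    }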
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
  vmm.c:
    448: bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES; in nvkm_vmm_ref_hwpt() local
    451: if (spte != next) in nvkm_vmm_ref_hwpt()
    455: if (!spte) { in nvkm_vmm_ref_hwpt()
/Linux-v5.4/arch/x86/include/asm/
  kvm_host.h:
    395: u64 *spte, const void *pte);