
Searched refs:spte (Results 1 – 9 of 9) sorted by relevance

/Linux-v4.19/arch/x86/kvm/
mmu.c
203 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ argument
206 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
207 __shadow_walk_next(&(_walker), spte))
263 static void mmu_spte_set(u64 *sptep, u64 spte);
280 static inline bool spte_ad_enabled(u64 spte) in spte_ad_enabled() argument
282 MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); in spte_ad_enabled()
283 return !(spte & shadow_acc_track_value); in spte_ad_enabled()
286 static inline u64 spte_shadow_accessed_mask(u64 spte) in spte_shadow_accessed_mask() argument
288 MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); in spte_shadow_accessed_mask()
289 return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; in spte_shadow_accessed_mask()
[all …]
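Note on the mmu.c helpers above: spte_ad_enabled() treats an SPTE as having usable hardware Accessed/Dirty bits only when the access-tracking marker is clear, and spte_shadow_accessed_mask() hands back the Accessed mask only in that case. Below is a minimal self-contained sketch of that gating; the bit positions are placeholders for illustration, not the kernel's real mask configuration.

#include <stdbool.h>
#include <stdint.h>

/* Placeholder bit positions, for illustration only; the kernel derives the
 * real masks at init time from the CPU/EPT configuration. */
static const uint64_t acc_track_bit = 1ull << 62;
static const uint64_t accessed_bit  = 1ull << 8;

/* Mirrors the shape of spte_ad_enabled(): A/D bits count only when the
 * entry is not marked for software access tracking. */
static inline bool sketch_ad_enabled(uint64_t spte)
{
        return !(spte & acc_track_bit);
}

/* Mirrors spte_shadow_accessed_mask(): an access-tracked entry reports no
 * hardware Accessed mask at all. */
static inline uint64_t sketch_accessed_mask(uint64_t spte)
{
        return sketch_ad_enabled(spte) ? accessed_bit : 0;
}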
mmutrace.h
308 TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
309 TP_ARGS(spte, kvm_gen, spte_gen),
314 __field(u64, spte)
320 __entry->spte = spte;
323 TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
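The check_mmio_spte tracepoint above records an MMIO SPTE together with the VM-wide generation (kvm_gen) and the generation encoded in the SPTE itself (spte_gen); the "valid" field in the printk appears to be nothing more than whether those two generations still match. A one-line sketch of that comparison, with a made-up helper name:

#include <stdbool.h>

/* Hypothetical helper showing what the tracepoint's "valid" flag reports:
 * an MMIO SPTE stays usable only while the generation cached in it matches
 * the current VM-wide MMIO generation. */
static inline bool mmio_gen_still_valid(unsigned int kvm_gen,
                                        unsigned int spte_gen)
{
        return kvm_gen == spte_gen;
}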
paging_tmpl.h
158 struct kvm_mmu_page *sp, u64 *spte, in FNAME()
174 drop_spte(vcpu->kvm, spte); in FNAME()
499 u64 *spte, pt_element_t gpte, bool no_dirty_log) in FNAME()
505 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) in FNAME()
508 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); in FNAME()
522 mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn, in FNAME()
529 u64 *spte, const void *pte) in FNAME()
533 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); in FNAME()
564 u64 *spte; in FNAME() local
576 spte = sp->spt + i; in FNAME()
[all …]
/Linux-v4.19/Documentation/virtual/kvm/
locking.txt
38 spte.
41 SPTE_MMU_WRITEABLE bit on the spte:
47 On fast page fault path, we will use cmpxchg to atomically set the spte W
48 bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, or
61 spte is the shadow page table entry corresponding with gpte and
62 spte = pfn1
67 old_spte = *spte;
69 spte = 0;
75 spte = pfn1;
77 if (cmpxchg(spte, old_spte, old_spte+W)
[all …]
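The locking.txt excerpt is describing the lockless fast-page-fault pattern: snapshot the SPTE, then set the writable bit with a single cmpxchg, so that any concurrent change (the entry being zapped or remapped to a different pfn) makes the exchange fail and pushes the fault onto the slow, fully locked path. A self-contained userspace sketch of the same pattern using C11 atomics in place of the kernel's cmpxchg; the writable-bit position is a placeholder:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SPTE_W (1ull << 1)      /* placeholder for the hardware writable bit */

/* Same shape as the excerpt's pseudocode: snapshot the entry, then try to
 * publish old_spte | W with one compare-and-swap.  If anything else changed
 * the entry in between, the CAS fails and the caller must fall back to the
 * locked slow path. */
static bool fast_set_writable(_Atomic uint64_t *sptep)
{
        uint64_t old_spte = atomic_load(sptep);

        return atomic_compare_exchange_strong(sptep, &old_spte,
                                              old_spte | SPTE_W);
}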
mmu.txt
42 spte shadow pte (referring to pfns)
109 A nonleaf spte allows the hardware mmu to reach the leaf pages and
112 A leaf spte corresponds to either one or two translations encoded into
210 parent_ptes bit 0 is zero, only one spte points at this page and
211 parent_ptes points at this single spte, otherwise, there exists multiple
232 Only present on 32-bit hosts, where a 64-bit spte cannot be written
296 - check for valid generation number in the spte (see "Fast invalidation of
309 - walk the shadow page table to find the spte for the translation,
311 - If this is an mmio request, cache the mmio info to the spte and set some
312 reserved bit on the spte (see callers of kvm_mmu_set_mmio_spte_mask)
[all …]
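Lines 210-211 of mmu.txt describe a tagged-pointer encoding for parent_ptes: with bit 0 clear the value is the address of the one parent SPTE, with bit 0 set it points (after masking bit 0 off) at a descriptor listing several parents. A minimal userspace illustration of that encoding; the descriptor type here is a stand-in, not the kernel's pte_list_desc layout:

#include <stdint.h>

/* Stand-in for the kernel's parent-list descriptor. */
struct parent_desc {
        uint64_t *sptes[3];
        struct parent_desc *more;
};

/* bit 0 clear: parent_ptes is the address of the single parent spte. */
static inline int has_single_parent(unsigned long parent_ptes)
{
        return !(parent_ptes & 1ul);
}

static inline uint64_t *single_parent(unsigned long parent_ptes)
{
        return (uint64_t *)parent_ptes;
}

/* bit 0 set: mask it off to recover the descriptor pointer. */
static inline struct parent_desc *parent_list(unsigned long parent_ptes)
{
        return (struct parent_desc *)(parent_ptes & ~1ul);
}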
/Linux-v4.19/arch/s390/mm/
pgtable.c
632 pte_t spte, tpte; in ptep_shadow_pte() local
638 spte = *sptep; in ptep_shadow_pte()
639 if (!(pte_val(spte) & _PAGE_INVALID) && in ptep_shadow_pte()
640 !((pte_val(spte) & _PAGE_PROTECT) && in ptep_shadow_pte()
644 pte_val(tpte) = (pte_val(spte) & PAGE_MASK) | in ptep_shadow_pte()
/Linux-v4.19/mm/
hugetlb.c
4634 pte_t *spte = NULL; in huge_pmd_share() local
4648 spte = huge_pte_offset(svma->vm_mm, saddr, in huge_pmd_share()
4650 if (spte) { in huge_pmd_share()
4651 get_page(virt_to_page(spte)); in huge_pmd_share()
4657 if (!spte) in huge_pmd_share()
4660 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); in huge_pmd_share()
4663 (pmd_t *)((unsigned long)spte & PAGE_MASK)); in huge_pmd_share()
4666 put_page(virt_to_page(spte)); in huge_pmd_share()
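In the hugetlb.c hit above, spte is a pointer into a sibling mapping's already-populated PTE page: the sharing code pins that page (get_page(virt_to_page(spte)) at line 4651) and then rounds the pointer down to the page boundary before installing it, which is the "(unsigned long)spte & PAGE_MASK" at line 4663. A tiny standalone demonstration of that round-down step, with PAGE_SIZE hard-coded purely for illustration:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* The shared pte page is identified by the page-aligned base of whatever
 * entry pointer huge_pte_offset() returned, so that pointer is simply
 * masked down to the start of its page. */
static inline unsigned long pte_page_base(const void *spte)
{
        return (unsigned long)spte & PAGE_MASK;
}

int main(void)
{
        /* Pretend the returned entry sits 128 bytes into its pte page. */
        unsigned long entry = 0x7f00aa431000ul + 128;

        assert(pte_page_base((void *)entry) == 0x7f00aa431000ul);
        return 0;
}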
/Linux-v4.19/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
436 bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES; in nvkm_vmm_ref_hwpt() local
439 if (spte != next) in nvkm_vmm_ref_hwpt()
443 if (!spte) { in nvkm_vmm_ref_hwpt()
/Linux-v4.19/arch/x86/include/asm/
kvm_host.h
361 u64 *spte, const void *pte);