Home
last modified time | relevance | path

Searched refs:sptep (Results 1 – 10 of 10) sorted by relevance

/Linux-v6.6/arch/x86/kvm/mmu/
tdp_iter.h:17 static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep) in kvm_tdp_mmu_read_spte() argument
19 return READ_ONCE(*rcu_dereference(sptep)); in kvm_tdp_mmu_read_spte()
22 static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte) in kvm_tdp_mmu_write_spte_atomic() argument
24 return xchg(rcu_dereference(sptep), new_spte); in kvm_tdp_mmu_write_spte_atomic()
27 static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte) in __kvm_tdp_mmu_write_spte() argument
29 WRITE_ONCE(*rcu_dereference(sptep), new_spte); in __kvm_tdp_mmu_write_spte()
51 static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte, in kvm_tdp_mmu_write_spte() argument
55 return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte); in kvm_tdp_mmu_write_spte()
57 __kvm_tdp_mmu_write_spte(sptep, new_spte); in kvm_tdp_mmu_write_spte()
61 static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte, in tdp_mmu_clear_spte_bits() argument
[all …]
mmutrace.h:212 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
213 TP_ARGS(sptep, gfn, spte),
216 __field(void *, sptep)
223 __entry->sptep = sptep;
229 TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
257 u64 *sptep, u64 old_spte, int ret),
258 TP_ARGS(vcpu, fault, sptep, old_spte, ret),
264 __field(u64 *, sptep)
274 __entry->sptep = sptep;
276 __entry->new_spte = *sptep;
[all …]
mmu.c:158 u64 *sptep; member
177 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
184 static void mmu_spte_set(u64 *sptep, u64 spte);
288 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep) in kvm_flush_remote_tlbs_sptep() argument
290 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in kvm_flush_remote_tlbs_sptep()
291 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); in kvm_flush_remote_tlbs_sptep()
296 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
301 trace_mark_mmio_spte(sptep, gfn, spte); in mark_mmio_spte()
302 mmu_spte_set(sptep, spte); in mark_mmio_spte()
341 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
[all …]
tdp_iter.c:14 iter->sptep = iter->pt_path[iter->level - 1] + in tdp_iter_refresh_sptep()
16 iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep); in tdp_iter_refresh_sptep()
89 iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep); in try_step_down()
122 iter->sptep++; in try_step_side()
123 iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep); in try_step_side()
tdp_mmu.c:191 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, in tdp_mmu_init_sp() argument
200 sp->ptep = sptep; in tdp_mmu_init_sp()
212 parent_sp = sptep_to_sp(rcu_dereference(iter->sptep)); in tdp_mmu_init_child_sp()
217 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role); in tdp_mmu_init_child_sp()
332 tdp_ptep_t sptep = pt + i; in handle_removed_pt() local
346 old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE); in handle_removed_pt()
361 old_spte = kvm_tdp_mmu_read_spte(sptep); in handle_removed_pt()
393 old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, in handle_removed_pt()
532 u64 *sptep = rcu_dereference(iter->sptep); in tdp_mmu_set_spte_atomic() local
551 if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte)) in tdp_mmu_set_spte_atomic()
[all …]
paging_tmpl.h:585 u64 *sptep) in FNAME()
592 sp = sptep_to_sp(sptep); in FNAME()
605 return __direct_pte_prefetch(vcpu, sp, sptep); in FNAME()
607 i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1); in FNAME()
611 if (spte == sptep) in FNAME()
668 clear_sp_write_flooding_count(it.sptep); in FNAME()
674 sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn, in FNAME()
707 link_shadow_page(vcpu, it.sptep, sp); in FNAME()
729 disallowed_hugepage_adjust(fault, *it.sptep, it.level); in FNAME()
735 validate_direct_spte(vcpu, it.sptep, direct_access); in FNAME()
[all …]
spte.h:208 static inline int spte_index(u64 *sptep) in spte_index() argument
210 return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1); in spte_index()
235 static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) in sptep_to_sp() argument
237 return to_shadow_page(__pa(sptep)); in sptep_to_sp()
/Linux-v6.6/arch/s390/mm/
pgtable.c:688 pte_t *sptep, pte_t *tptep, pte_t pte) in ptep_shadow_pte() argument
696 spgste = pgste_get_lock(sptep); in ptep_shadow_pte()
697 spte = *sptep; in ptep_shadow_pte()
710 pgste_set_unlock(sptep, spgste); in ptep_shadow_pte()
gmap.c:2128 pte_t *sptep, *tptep; in gmap_shadow_page() local
2152 sptep = gmap_pte_op_walk(parent, paddr, &ptl); in gmap_shadow_page()
2153 if (sptep) { in gmap_shadow_page()
2159 gmap_pte_op_end(sptep, ptl); in gmap_shadow_page()
2163 rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte); in gmap_shadow_page()
2170 gmap_pte_op_end(sptep, ptl); in gmap_shadow_page()
/Linux-v6.6/arch/s390/include/asm/
pgtable.h:1287 pte_t *sptep, pte_t *tptep, pte_t pte);