Lines matching refs:spte

74 	u64 spte = generation_mmio_spte_mask(gen);  in make_mmio_spte()  local
80 spte |= shadow_mmio_value | access; in make_mmio_spte()
81 spte |= gpa | shadow_nonpresent_or_rsvd_mask; in make_mmio_spte()
82 spte |= (gpa & shadow_nonpresent_or_rsvd_mask) in make_mmio_spte()
85 return spte; in make_mmio_spte()
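
The hits above are from make_mmio_spte() in KVM's arch/x86/kvm/mmu/spte.c. The notable step is line 82: GPA bits that collide with shadow_nonpresent_or_rsvd_mask would be clobbered when the mask is OR-ed in, so a copy is parked SHADOW_NONPRESENT_OR_RSVD_MASK_LEN bits higher. A minimal user-space sketch of that packing; the mask values here are invented placeholders (the kernel computes the real ones at boot):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* Placeholder layout: 5 "reserved" bits starting at bit 52. */
    #define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5
    static const u64 shadow_nonpresent_or_rsvd_mask = 0x1full << 52;
    static const u64 shadow_mmio_value = 0x6ull;   /* invented marker value */

    static u64 sketch_make_mmio_spte(u64 gen_mask, u64 gpa, u64 access)
    {
        u64 spte = gen_mask;

        spte |= shadow_mmio_value | access;
        spte |= gpa | shadow_nonpresent_or_rsvd_mask;
        /* GPA bits clobbered by the reserved mask are parked just
         * above it, so the address can be recovered on a fault. */
        spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

        return spte;
    }

    int main(void)
    {
        u64 spte = sketch_make_mmio_spte(0, 0x1234000ull, 0x7);
        printf("mmio spte = %#llx\n", (unsigned long long)spte);
        return 0;
    }
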
114 bool spte_has_volatile_bits(u64 spte) in spte_has_volatile_bits() argument
122 if (!is_writable_pte(spte) && is_mmu_writable_spte(spte)) in spte_has_volatile_bits()
125 if (is_access_track_spte(spte)) in spte_has_volatile_bits()
128 if (spte_ad_enabled(spte)) { in spte_has_volatile_bits()
129 if (!(spte & shadow_accessed_mask) || in spte_has_volatile_bits()
130 (is_writable_pte(spte) && !(spte & shadow_dirty_mask))) in spte_has_volatile_bits()
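
spte_has_volatile_bits() decides whether hardware (or another vCPU) may still flip bits in the SPTE behind KVM's back, which in turn decides whether the SPTE must be read and updated atomically. A compilable sketch of the same decision tree; the masks and helper predicates are stand-ins, since real KVM configures them per platform:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t u64;

    /* Stand-in bit assignments; the real values vary per CPU/EPT setup. */
    #define PT_WRITABLE_MASK                   (1ull << 1)
    static const u64 shadow_mmu_writable_mask = 1ull << 57;
    static const u64 shadow_accessed_mask     = 1ull << 8;
    static const u64 shadow_dirty_mask        = 1ull << 9;
    static const u64 acc_track_flag           = 1ull << 62;
    static const u64 ad_disabled_flag         = 1ull << 60;

    static bool is_writable_pte(u64 s)      { return s & PT_WRITABLE_MASK; }
    static bool is_mmu_writable_spte(u64 s) { return s & shadow_mmu_writable_mask; }
    static bool is_access_track_spte(u64 s) { return s & acc_track_flag; }
    static bool spte_ad_enabled(u64 s)      { return !(s & ad_disabled_flag); }

    /* Volatile means a bit may change underneath us: the page can be
     * write-faulted back to writable, it is access-tracked, or the CPU
     * may still set the Accessed/Dirty bits on its own. */
    static bool sketch_spte_has_volatile_bits(u64 spte)
    {
        if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
            return true;

        if (is_access_track_spte(spte))
            return true;

        if (spte_ad_enabled(spte) &&
            (!(spte & shadow_accessed_mask) ||
             (is_writable_pte(spte) && !(spte & shadow_dirty_mask))))
            return true;

        return false;
    }

    int main(void)
    {
        /* Writable, A/D enabled, Accessed not yet set -> volatile. */
        return sketch_spte_has_volatile_bits(PT_WRITABLE_MASK) ? 0 : 1;
    }
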
144 u64 spte = SPTE_MMU_PRESENT_MASK; in make_spte() local
150 spte |= SPTE_TDP_AD_DISABLED_MASK; in make_spte()
152 spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK; in make_spte()
160 spte |= shadow_present_mask; in make_spte()
162 spte |= spte_shadow_accessed_mask(spte); in make_spte()
170 spte |= shadow_x_mask; in make_spte()
172 spte |= shadow_nx_mask; in make_spte()
175 spte |= shadow_user_mask; in make_spte()
178 spte |= PT_PAGE_SIZE_MASK; in make_spte()
181 spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn, in make_spte()
184 spte |= shadow_host_writable_mask; in make_spte()
189 spte |= shadow_me_value; in make_spte()
191 spte |= (u64)pfn << PAGE_SHIFT; in make_spte()
194 spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask; in make_spte()
216 spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); in make_spte()
221 spte |= spte_shadow_dirty_mask(spte); in make_spte()
225 spte = mark_spte_for_access_track(spte); in make_spte()
227 WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level), in make_spte()
228 "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level, in make_spte()
229 get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level)); in make_spte()
231 if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) { in make_spte()
237 *new_spte = spte; in make_spte()
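
make_spte() builds a leaf SPTE by OR-ing one feature mask at a time onto SPTE_MMU_PRESENT_MASK: A/D policy, present/NX/X/user bits, huge-page size, the memory type from kvm_x86_get_mt_mask, the PFN, and finally writability, which is stripped back out (line 216) when the page must stay write-protected. The overall shape, flattened into a hedged sketch with invented masks and without the vendor hooks:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t u64;

    /* All placeholder values, for illustration only. */
    #define PAGE_SHIFT                     12
    #define PT_WRITABLE_MASK               (1ull << 1)
    #define PT_PAGE_SIZE_MASK              (1ull << 7)
    static const u64 SPTE_MMU_PRESENT_MASK     = 1ull << 11;
    static const u64 shadow_present_mask       = 1ull << 0;
    static const u64 shadow_user_mask          = 1ull << 2;
    static const u64 shadow_nx_mask            = 1ull << 63;
    static const u64 shadow_x_mask             = 0;      /* NX-style paging */
    static const u64 shadow_host_writable_mask = 1ull << 58;
    static const u64 shadow_mmu_writable_mask  = 1ull << 57;

    static u64 sketch_make_spte(u64 pfn, bool user, bool exec, bool huge,
                                bool writable, bool wrprot)
    {
        u64 spte = SPTE_MMU_PRESENT_MASK | shadow_present_mask;

        spte |= exec ? shadow_x_mask : shadow_nx_mask;
        if (user)
            spte |= shadow_user_mask;
        if (huge)
            spte |= PT_PAGE_SIZE_MASK;

        spte |= shadow_host_writable_mask;
        spte |= pfn << PAGE_SHIFT;

        if (writable)
            spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

        /* Mirrors line 216: withdraw writability again when the page
         * must stay write-protected (e.g. for dirty logging). */
        if (wrprot)
            spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

        return spte;
    }

    int main(void)
    {
        return sketch_make_spte(0x1234, true, false, false, true, false) ? 0 : 1;
    }
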
241 static u64 make_spte_executable(u64 spte) in make_spte_executable() argument
243 bool is_access_track = is_access_track_spte(spte); in make_spte_executable()
246 spte = restore_acc_track_spte(spte); in make_spte_executable()
248 spte &= ~shadow_nx_mask; in make_spte_executable()
249 spte |= shadow_x_mask; in make_spte_executable()
252 spte = mark_spte_for_access_track(spte); in make_spte_executable()
254 return spte; in make_spte_executable()
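
make_spte_executable() has to respect access tracking: an access-tracked SPTE keeps its permission bits parked elsewhere, so the helper restores them first, swaps NX for X, and then re-marks the SPTE for tracking. A sketch of that restore/modify/re-mark pattern, with placeholder masks and stub helpers:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t u64;

    static const u64 shadow_nx_mask = 1ull << 63;  /* placeholder */
    static const u64 shadow_x_mask  = 1ull << 2;   /* placeholder */
    static const u64 acc_track_flag = 1ull << 62;  /* placeholder */

    static bool is_access_track_spte(u64 s) { return s & acc_track_flag; }
    /* Stubs standing in for the real restore/mark helpers. */
    static u64 restore_acc_track_spte(u64 s)     { return s & ~acc_track_flag; }
    static u64 mark_spte_for_access_track(u64 s) { return s | acc_track_flag; }

    static u64 sketch_make_spte_executable(u64 spte)
    {
        bool is_access_track = is_access_track_spte(spte);

        if (is_access_track)
            spte = restore_acc_track_spte(spte);

        spte &= ~shadow_nx_mask;  /* drop no-execute */
        spte |= shadow_x_mask;    /* grant execute  */

        if (is_access_track)
            spte = mark_spte_for_access_track(spte);

        return spte;
    }

    int main(void)
    {
        return sketch_make_spte_executable(shadow_nx_mask) ? 0 : 1;
    }
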
302 u64 spte = SPTE_MMU_PRESENT_MASK; in make_nonleaf_spte() local
304 spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK | in make_nonleaf_spte()
308 spte |= SPTE_TDP_AD_DISABLED_MASK; in make_nonleaf_spte()
310 spte |= shadow_accessed_mask; in make_nonleaf_spte()
312 return spte; in make_nonleaf_spte()
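
Non-leaf SPTEs point at a child page table rather than at guest memory, so make_nonleaf_spte() packs the child table's physical address together with fully permissive present/write/user/execute bits; intermediate entries never restrict access themselves. A user-space approximation (there is no __pa() outside the kernel, so the child's physical address is just a parameter here):

    #include <stdint.h>

    typedef uint64_t u64;

    /* Placeholder masks mirroring the OR-chain above. */
    #define PT_WRITABLE_MASK  (1ull << 1)
    static const u64 SPTE_MMU_PRESENT_MASK = 1ull << 11;
    static const u64 shadow_present_mask   = 1ull << 0;
    static const u64 shadow_user_mask      = 1ull << 2;
    static const u64 shadow_x_mask         = 1ull << 3;
    static const u64 shadow_accessed_mask  = 1ull << 8;
    static const u64 ad_disabled_flag      = 1ull << 60;  /* stand-in for SPTE_TDP_AD_DISABLED_MASK */

    /* child_pt_pa stands in for __pa(child_pt). */
    static u64 sketch_make_nonleaf_spte(u64 child_pt_pa, int ad_disabled)
    {
        u64 spte = SPTE_MMU_PRESENT_MASK;

        spte |= child_pt_pa | shadow_present_mask | PT_WRITABLE_MASK |
                shadow_user_mask | shadow_x_mask;

        if (ad_disabled)
            spte |= ad_disabled_flag;
        else
            spte |= shadow_accessed_mask;

        return spte;
    }

    int main(void)
    {
        return sketch_make_nonleaf_spte(0x5000ull, 0) ? 0 : 1;
    }
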
331 u64 mark_spte_for_access_track(u64 spte) in mark_spte_for_access_track() argument
333 if (spte_ad_enabled(spte)) in mark_spte_for_access_track()
334 return spte & ~shadow_accessed_mask; in mark_spte_for_access_track()
336 if (is_access_track_spte(spte)) in mark_spte_for_access_track()
337 return spte; in mark_spte_for_access_track()
339 check_spte_writable_invariants(spte); in mark_spte_for_access_track()
341 WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK << in mark_spte_for_access_track()
345 spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) << in mark_spte_for_access_track()
347 spte &= ~shadow_acc_track_mask; in mark_spte_for_access_track()
349 return spte; in mark_spte_for_access_track()
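
mark_spte_for_access_track() is the core of the access-tracking scheme: with A/D bits enabled it simply clears the Accessed bit (line 334); otherwise it saves a copy of the permission bits by shifting them into a scratch region of the SPTE and then clears the bits that make the entry usable, so the next guest access faults and can be counted. A sketch of the save-and-shift step plus its inverse, with an invented bit layout:

    #include <stdint.h>

    typedef uint64_t u64;

    /* Invented layout: permissions live in bits 0-2 and are parked
     * in bits 54-56 while the SPTE is access-tracked. */
    #define SHADOW_ACC_TRACK_SAVED_BITS_MASK   0x7ull
    #define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT  54
    static const u64 shadow_acc_track_mask = 0x7ull;

    static u64 sketch_mark_for_access_track(u64 spte)
    {
        /* Park a copy of the permission bits... */
        spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
                SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
        /* ...then strip them so the next guest access faults. */
        spte &= ~shadow_acc_track_mask;

        return spte;
    }

    static u64 sketch_restore_acc_track(u64 spte)
    {
        u64 saved = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) &
                    SHADOW_ACC_TRACK_SAVED_BITS_MASK;

        spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
                  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
        return spte | saved;
    }

    int main(void)
    {
        u64 s = sketch_mark_for_access_track(0x7);
        return sketch_restore_acc_track(s) == 0x7 ? 0 : 1;
    }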