Lines matching "first", "-" and "generation" in virt/kvm/pfncache.c (KVM's gfn-to-pfn cache), grouped by enclosing function; elided context is marked with "...".

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 */
gfn_to_pfn_cache_invalidate_start():

        spin_lock(&kvm->gpc_lock);
        list_for_each_entry(gpc, &kvm->gpc_list, list) {
                write_lock_irq(&gpc->lock);
                ...
                if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                    gpc->uhva >= start && gpc->uhva < end) {
                        gpc->valid = false;
                        ...
                        /* A vCPU in guest mode may be using the pfn: mark it for eviction. */
                        if (gpc->usage & KVM_GUEST_USES_PFN) {
                                ...
                                __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
                        }
                }
                write_unlock_irq(&gpc->lock);
        }
        spin_unlock(&kvm->gpc_lock);
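Everything above runs under spinlocks with IRQs disabled, so the expensive part, forcing vCPUs out of guest mode, is deferred: matching vCPUs are only marked in vcpu_bitmap here, and the real function later batches the eviction through a vCPU request mask once the locks are dropped. A minimal sketch of acting on such a bitmap afterwards; kick_marked_vcpus() is a hypothetical helper, and a plain kick stands in for the request-based eviction:

#include <linux/bitops.h>
#include <linux/kvm_host.h>

/* Hypothetical helper: act on the bitmap only after every lock is dropped. */
static void kick_marked_vcpus(struct kvm *kvm, unsigned long *vcpu_bitmap)
{
        unsigned int idx;

        for_each_set_bit(idx, vcpu_bitmap, KVM_MAX_VCPUS) {
                struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, idx);

                /* Sends an IPI if the vCPU is currently running in guest mode. */
                if (vcpu)
                        kvm_vcpu_kick(vcpu);
        }
}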
kvm_gfn_to_pfn_cache_check():

        if (!gpc->active)
                return false;
        ...
        /* Stale gpa, stale memslot generation, or a bad uhva: not usable. */
        if (gpc->gpa != gpa || gpc->generation != slots->generation ||
            kvm_is_error_hva(gpc->uhva))
                return false;

        if (!gpc->valid)
                return false;
mmu_notifier_retry_cache():

        /*
         * mn_active_invalidate_count is not protected by gpc->lock, but it is
         * guaranteed to be elevated before the mmu_notifier acquires gpc->lock
         * and isn't dropped until after mmu_invalidate_seq is updated.
         */
        if (kvm->mn_active_invalidate_count)
                return true;
        /*
         * Pairs with the smp_wmb() in mmu_notifier_invalidate_range_end(), so
         * that either the old (non-zero) value of mn_active_invalidate_count
         * or the new (incremented) value of mmu_invalidate_seq is observed.
         */
        smp_rmb();
        return kvm->mmu_invalidate_seq != mmu_seq;
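The two comments above describe a publish/observe protocol rather than a lock. A self-contained sketch of that pairing, with hypothetical names (active_invalidations and invalidate_seq standing in for mn_active_invalidate_count and mmu_invalidate_seq): the invalidator bumps the sequence before dropping the in-progress count, so a reader that sees the count at zero is guaranteed to see the new sequence and retry.

#include <linux/atomic.h>
#include <linux/types.h>

static unsigned long invalidate_seq;    /* stands in for mmu_invalidate_seq */
static int active_invalidations;        /* stands in for mn_active_invalidate_count */

/* Invalidator side. */
static void invalidate_range(void)
{
        WRITE_ONCE(active_invalidations, active_invalidations + 1);
        /* ... zap the stale translation ... */
        WRITE_ONCE(invalidate_seq, invalidate_seq + 1);
        smp_wmb();      /* publish the seq bump before dropping the count */
        WRITE_ONCE(active_invalidations, active_invalidations - 1);
}

/* Reader side, mirroring the shape of mmu_notifier_retry_cache(). */
static bool must_retry(unsigned long snapshot)
{
        if (READ_ONCE(active_invalidations))
                return true;
        smp_rmb();      /* read the count before the seq, pairing with smp_wmb() */
        return READ_ONCE(invalidate_seq) != snapshot;
}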
hva_to_pfn_retry():

        void *old_khva = gpc->khva - offset_in_page(gpc->khva);
        ...
        lockdep_assert_held(&gpc->refresh_lock);
        lockdep_assert_held_write(&gpc->lock);
        /*
         * Invalidate the cache prior to dropping gpc->lock; the gpa=>uhva
         * assets have already been updated, so a concurrent check() from a
         * different task would not necessarily fail the gpa/uhva/generation
         * checks.
         */
        gpc->valid = false;

        do {
                mmu_seq = kvm->mmu_invalidate_seq;
                smp_rmb();

                write_unlock_irq(&gpc->lock);
                ...
                new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
                ...
                /*
                 * Obtain a new kernel mapping if KVM itself will access the
                 * pfn.  Note, kmap() and memremap() can both sleep, so this
                 * too must be done outside of gpc->lock!
                 */
                if (gpc->usage & KVM_HOST_USES_PFN) {
                        if (new_pfn == gpc->pfn) {
                                /* Same pfn as the last attempt: reuse the old mapping. */
                                new_khva = old_khva;
                        }
                        ...
                }

                write_lock_irq(&gpc->lock);
                /* Other tasks must wait for _this_ refresh to complete. */
                WARN_ON_ONCE(gpc->valid);
        } while (mmu_notifier_retry_cache(kvm, mmu_seq));

        gpc->valid = true;
        gpc->pfn = new_pfn;
        gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);
        ...
        return 0;

out_error:
        write_lock_irq(&gpc->lock);
        ...
        return -EFAULT;
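The KVM_GUEST_USES_PFN path needs no kernel mapping, but when the host itself will touch the page a fresh mapping must be created, and the comment explains why that happens with gpc->lock dropped. A sketch of the mapping split it alludes to, assuming (as the kmap()/memremap() mention suggests) that struct-page-backed pfns are mapped with kmap() while pfns without a struct page, e.g. PCI BARs, go through memremap(); map_pfn_for_host() is a hypothetical helper:

#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kvm_host.h>

/* Hypothetical helper; both mapping calls can sleep, so no spinlocks held. */
static void *map_pfn_for_host(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return kmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
        /* No struct page: remap by physical address instead. */
        return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#else
        return NULL;
#endif
}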
kvm_gfn_to_pfn_cache_refresh():

        /* The cached mapping must fit within a single page. */
        if (page_offset + len > PAGE_SIZE)
                return -EINVAL;
        /*
         * Concurrent refreshes are not guaranteed to see the same gpa,
         * memslots generation, etc..., so they must be fully serialized.
         */
        mutex_lock(&gpc->refresh_lock);
        write_lock_irq(&gpc->lock);

        if (!gpc->active) {
                ret = -EINVAL;
                goto out_unlock;
        }

        old_pfn = gpc->pfn;
        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_uhva = gpc->uhva;

        /* If the userspace HVA is invalid, refresh that first. */
        if (gpc->gpa != gpa || gpc->generation != slots->generation ||
            kvm_is_error_hva(gpc->uhva)) {
                gfn_t gfn = gpa_to_gfn(gpa);

                gpc->gpa = gpa;
                gpc->generation = slots->generation;
                gpc->memslot = __gfn_to_memslot(slots, gfn);
                gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
                if (kvm_is_error_hva(gpc->uhva)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        if (!gpc->valid || old_uhva != gpc->uhva) {
                /* The uhva changed or the pfn is stale: redo the hva=>pfn lookup. */
                ret = hva_to_pfn_retry(kvm, gpc);
        } else {
                /*
                 * The mapping is still usable, but do update gpc->khva: the
                 * offset within the page may have changed.
                 */
                gpc->khva = old_khva + page_offset;
                ret = 0;
                goto out_unlock;
        }
out:
        /*
         * Invalidate the cache and purge the pfn/khva if the refresh failed.
         * Some/all of the uhva, gpa, and memslot generation info may still be
         * valid; leave it as is.
         */
        if (ret) {
                gpc->valid = false;
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->khva = NULL;
        }
        /* Detect a pfn change before dropping the lock! */
        unmap_old = (old_pfn != gpc->pfn);
out_unlock:
        write_unlock_irq(&gpc->lock);
        mutex_unlock(&gpc->refresh_lock);
        ...
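A consumer couples refresh() with the check() shown earlier: take the read side of gpc->lock, and whenever check() fails, drop the lock, refresh, and retry before touching the mapping. A hedged sketch of that loop, modeled on how KVM's Xen support code drives this API (the surrounding function and its error handling are hypothetical); gpc->khva may only be dereferenced while the read lock is held:

        unsigned long flags;

        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
                read_unlock_irqrestore(&gpc->lock, flags);

                if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE))
                        return;         /* mapping cannot be re-established */

                read_lock_irqsave(&gpc->lock, flags);
        }

        /* ... read or write guest memory through gpc->khva ... */

        read_unlock_irqrestore(&gpc->lock, flags);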
kvm_gfn_to_pfn_cache_unmap():

        mutex_lock(&gpc->refresh_lock);
        write_lock_irq(&gpc->lock);

        gpc->valid = false;

        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_pfn = gpc->pfn;

        /* The gpa=>uhva info survives; only the pfn/khva mapping is torn down. */
        gpc->khva = NULL;
        gpc->pfn = KVM_PFN_ERR_FAULT;

        write_unlock_irq(&gpc->lock);
        mutex_unlock(&gpc->refresh_lock);
        ...
kvm_gpc_init():

        rwlock_init(&gpc->lock);
        mutex_init(&gpc->refresh_lock);
kvm_gpc_activate():

        if (!gpc->active) {
                gpc->khva = NULL;
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->uhva = KVM_HVA_ERR_BAD;
                gpc->vcpu = vcpu;
                gpc->usage = usage;
                gpc->valid = false;

                spin_lock(&kvm->gpc_lock);
                list_add(&gpc->list, &kvm->gpc_list);
                spin_unlock(&kvm->gpc_lock);

                /*
                 * Activate the cache only after adding it to the list, so a
                 * concurrent refresh cannot establish a mapping before the
                 * cache is reachable by mmu_notifier events.
                 */
                write_lock_irq(&gpc->lock);
                gpc->active = true;
                write_unlock_irq(&gpc->lock);
        }
kvm_gpc_deactivate():

        if (gpc->active) {
                /*
                 * Deactivate the cache before removing it from the list: mmu
                 * notifier events must be stalled until all users go away, i.e.
                 * until gpc->lock is dropped and refresh is guaranteed to fail.
                 */
                write_lock_irq(&gpc->lock);
                gpc->active = false;
                write_unlock_irq(&gpc->lock);

                spin_lock(&kvm->gpc_lock);
                list_del(&gpc->list);
                spin_unlock(&kvm->gpc_lock);
                ...
        }
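Putting the pieces together, a plausible end-to-end lifecycle under the signatures suggested by the fragments above (gpa and the error handling are hypothetical, and it is assumed, as in this era of the code, that activation ends with an initial refresh):

        struct gfn_to_pfn_cache gpc;

        kvm_gpc_init(&gpc);

        /* Map one guest page for host-side access; no vCPU is attached. */
        if (kvm_gpc_activate(kvm, &gpc, NULL, KVM_HOST_USES_PFN, gpa, PAGE_SIZE))
                return;         /* hypothetical error handling */

        /* ... check()/refresh() and access gpc.khva as sketched above ... */

        kvm_gpc_deactivate(kvm, &gpc);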