Lines matching the identifier gpc (full-word match) in arch/x86/kvm/xen.c:
35 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_shared_info_init() local
45 kvm_gpc_deactivate(kvm, gpc); in kvm_xen_shared_info_init()
50 ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa, in kvm_xen_shared_info_init()
62 read_lock_irq(&gpc->lock); in kvm_xen_shared_info_init()
64 if (gpc->valid) in kvm_xen_shared_info_init()
67 read_unlock_irq(&gpc->lock); in kvm_xen_shared_info_init()
81 struct shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init()
88 struct compat_shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init()
104 read_unlock_irq(&gpc->lock); in kvm_xen_shared_info_init()
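The kvm_xen_shared_info_init() matches above show the lifecycle of the shared_info cache: any previous mapping is deactivated, the cache is activated at the GPA userspace supplied, and the page is then written through gpc->khva under the read lock once gpc->valid confirms the mapping is live. A minimal sketch of that shape, not the kernel's exact code (the "enable" flag stands in for the real function's invalid-GFN check, and the wallclock writes are elided):

static int shinfo_init_sketch(struct kvm *kvm, gpa_t gpa, bool enable)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	int ret;

	if (!enable) {
		/* Userspace turned the shared_info page off: just unmap it. */
		kvm_gpc_deactivate(kvm, gpc);
		return 0;
	}

	ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa,
			       PAGE_SIZE);
	if (ret)
		return ret;

	read_lock_irq(&gpc->lock);
	if (gpc->valid) {
		/*
		 * gpc->khva now maps the guest's shared_info page.  The real
		 * function fills in the wallclock fields here, casting khva
		 * to struct shared_info or struct compat_shared_info
		 * depending on kvm->arch.xen.long_mode.
		 */
	}
	read_unlock_irq(&gpc->lock);

	return ret;
}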
204 struct gfn_to_pfn_cache *gpc = &vx->runstate_cache; in kvm_xen_update_runstate_guest() local
220 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_update_runstate_guest()
221 while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, in kvm_xen_update_runstate_guest()
223 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_update_runstate_guest()
229 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len)) in kvm_xen_update_runstate_guest()
232 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_update_runstate_guest()
255 user_state = gpc->khva; in kvm_xen_update_runstate_guest()
258 user_times = gpc->khva + offsetof(struct vcpu_runstate_info, in kvm_xen_update_runstate_guest()
261 user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info, in kvm_xen_update_runstate_guest()
312 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_update_runstate_guest()
314 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); in kvm_xen_update_runstate_guest()
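Taken together, the kvm_xen_update_runstate_guest() matches form the canonical consumer-side loop for a gfn_to_pfn_cache: take the read lock with IRQs saved; while the check fails, drop the lock, refresh (which may sleep and fault the page back in), and retake the lock; then write through gpc->khva and mark the backing page dirty. Assembled from the lines above, with the runstate-specific copying elided (user_len is the guest-ABI-dependent size of the runstate area):

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, user_len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Refresh may sleep, so it must run with the lock dropped. */
		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/*
	 * gpc->khva is now a stable kernel mapping of the runstate area;
	 * the state and state_entry_time fields are updated through it here.
	 */

	read_unlock_irqrestore(&gpc->lock, flags);

	/* Tell dirty logging that the page was written via the cache. */
	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);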
343 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in kvm_xen_inject_pending_events() local
354 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_inject_pending_events()
355 while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, in kvm_xen_inject_pending_events()
357 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_inject_pending_events()
359 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, in kvm_xen_inject_pending_events()
363 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_inject_pending_events()
366 /* Now gpc->khva is a valid kernel address for the vcpu_info */ in kvm_xen_inject_pending_events()
368 struct vcpu_info *vi = gpc->khva; in kvm_xen_inject_pending_events()
380 struct compat_vcpu_info *vi = gpc->khva; in kvm_xen_inject_pending_events()
391 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_inject_pending_events()
397 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); in kvm_xen_inject_pending_events()
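kvm_xen_inject_pending_events() applies the same check/refresh loop to the per-vCPU vcpu_info cache so that locally gathered event-channel selector bits can be merged into the guest-visible structure, with separate casts for the native and compat layouts. A simplified, non-atomic sketch of the update step once the cache is valid and locked (the real code sets these bits with locked operations; evtchn_pending_sel here stands for the bits accumulated in vcpu->arch.xen):

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;

		vi->evtchn_pending_sel |= evtchn_pending_sel;
		vi->evtchn_upcall_pending = 1;
	} else {
		struct compat_vcpu_info *vi = gpc->khva;

		vi->evtchn_pending_sel |= (u32)evtchn_pending_sel;
		vi->evtchn_upcall_pending = 1;
	}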
402 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in __kvm_xen_has_interrupt() local
419 read_lock_irqsave(&gpc->lock, flags); in __kvm_xen_has_interrupt()
420 while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, in __kvm_xen_has_interrupt()
422 read_unlock_irqrestore(&gpc->lock, flags); in __kvm_xen_has_interrupt()
435 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, in __kvm_xen_has_interrupt()
443 read_lock_irqsave(&gpc->lock, flags); in __kvm_xen_has_interrupt()
446 rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending; in __kvm_xen_has_interrupt()
447 read_unlock_irqrestore(&gpc->lock, flags); in __kvm_xen_has_interrupt()
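__kvm_xen_has_interrupt() reuses the same pattern but ends in a single read: evtchn_upcall_pending is the leading byte of both the native and compat vcpu_info layouts, so one cast covers both ABIs and no compat branch is needed. A hedged sketch of just that read (the length passed to the check is assumed to be sizeof(struct vcpu_info)):

	u8 pending = 0;

	read_lock_irqsave(&gpc->lock, flags);
	if (kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
				       sizeof(struct vcpu_info))) {
		/* Same offset in both layouts, so a single cast suffices. */
		pending = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	}
	read_unlock_irqrestore(&gpc->lock, flags);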
969 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in wait_pending_event() local
975 read_lock_irqsave(&gpc->lock, flags); in wait_pending_event()
977 if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) in wait_pending_event()
982 struct shared_info *shinfo = gpc->khva; in wait_pending_event()
985 struct compat_shared_info *shinfo = gpc->khva; in wait_pending_event()
998 read_unlock_irqrestore(&gpc->lock, flags); in wait_pending_event()
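wait_pending_event() shows the read-only side of the shared_info cache: it only checks (no refresh here) and, while the lock is held, scans the guest-visible evtchn_pending bitmap for the ports being polled. A hedged sketch of the 64-bit branch, assuming a ports[]/nr_ports pair coming from the poll hypercall:

	bool pending = false;
	int i;

	read_lock_irqsave(&gpc->lock, flags);
	if (kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
		struct shared_info *shinfo = gpc->khva;

		for (i = 0; i < nr_ports; i++) {
			if (test_bit(ports[i],
				     (unsigned long *)shinfo->evtchn_pending)) {
				pending = true;
				break;
			}
		}
	}
	read_unlock_irqrestore(&gpc->lock, flags);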
1345 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn_fast() local
1373 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1374 if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) in kvm_xen_set_evtchn_fast()
1378 struct shared_info *shinfo = gpc->khva; in kvm_xen_set_evtchn_fast()
1383 struct compat_shared_info *shinfo = gpc->khva; in kvm_xen_set_evtchn_fast()
1404 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1405 gpc = &vcpu->arch.xen.vcpu_info_cache; in kvm_xen_set_evtchn_fast()
1407 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1408 if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) { in kvm_xen_set_evtchn_fast()
1419 struct vcpu_info *vcpu_info = gpc->khva; in kvm_xen_set_evtchn_fast()
1425 struct compat_vcpu_info *vcpu_info = gpc->khva; in kvm_xen_set_evtchn_fast()
1441 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
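kvm_xen_set_evtchn_fast() touches both caches in turn: first the global shared_info page to mark the port pending, then the target vCPU's vcpu_info to raise the selector and upcall flags. Because it can run in contexts that must not sleep, a failed check is never repaired here; the function backs out and leaves the refresh to a sleepable path. A heavily simplified sketch of that two-stage shape (compat layouts, masking and deferred delivery omitted; port, port_word_bit and the -EWOULDBLOCK convention are assumptions, not taken from the matches):

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
		read_unlock_irqrestore(&gpc->lock, flags);
		return -EWOULDBLOCK;	/* assumed: let a sleepable caller retry */
	}

	/* Stage 1: mark the port pending in the global shared_info page. */
	struct shared_info *shinfo = gpc->khva;
	set_bit(port, (unsigned long *)shinfo->evtchn_pending);
	read_unlock_irqrestore(&gpc->lock, flags);

	/* Stage 2: raise selector and upcall flag in the vCPU's vcpu_info. */
	gpc = &vcpu->arch.xen.vcpu_info_cache;
	read_lock_irqsave(&gpc->lock, flags);
	if (kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa,
				       sizeof(struct vcpu_info))) {
		struct vcpu_info *vcpu_info = gpc->khva;

		set_bit(port_word_bit,
			(unsigned long *)&vcpu_info->evtchn_pending_sel);
		vcpu_info->evtchn_upcall_pending = 1;
	}
	read_unlock_irqrestore(&gpc->lock, flags);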
1498 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn() local
1506 rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE); in kvm_xen_set_evtchn()
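kvm_xen_set_evtchn() is the sleepable slow path, so it is the one place in these matches that calls kvm_gfn_to_pfn_cache_refresh() on the shared_info cache directly, retrying the fast path afterwards. A hedged sketch of that retry loop, assuming the fast path reports a stale mapping as -EWOULDBLOCK (that error code and the xe event descriptor are assumptions, not shown in the matches) and with the locking around cache activation omitted:

	do {
		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		/* Sleepable context: re-map the shared_info page and retry. */
		rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
	} while (!rc);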