Lines Matching refs:gdev
47 static void vbg_guest_mappings_init(struct vbg_dev *gdev) in vbg_guest_mappings_init() argument
63 rc = vbg_req_perform(gdev, req); in vbg_guest_mappings_init()
82 gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER); in vbg_guest_mappings_init()
83 if (!gdev->guest_mappings_dummy_page) in vbg_guest_mappings_init()
87 pages[i] = gdev->guest_mappings_dummy_page; in vbg_guest_mappings_init()
105 rc = vbg_req_perform(gdev, req); in vbg_guest_mappings_init()
107 gdev->guest_mappings = guest_mappings[i]; in vbg_guest_mappings_init()
117 if (!gdev->guest_mappings) { in vbg_guest_mappings_init()
118 __free_page(gdev->guest_mappings_dummy_page); in vbg_guest_mappings_init()
119 gdev->guest_mappings_dummy_page = NULL; in vbg_guest_mappings_init()
132 static void vbg_guest_mappings_exit(struct vbg_dev *gdev) in vbg_guest_mappings_exit() argument
137 if (!gdev->guest_mappings) in vbg_guest_mappings_exit()
152 rc = vbg_req_perform(gdev, req); in vbg_guest_mappings_exit()
161 vunmap(gdev->guest_mappings); in vbg_guest_mappings_exit()
162 gdev->guest_mappings = NULL; in vbg_guest_mappings_exit()
164 __free_page(gdev->guest_mappings_dummy_page); in vbg_guest_mappings_exit()
165 gdev->guest_mappings_dummy_page = NULL; in vbg_guest_mappings_exit()
173 static int vbg_report_guest_info(struct vbg_dev *gdev) in vbg_report_guest_info() argument
212 rc = vbg_req_perform(gdev, req2); in vbg_report_guest_info()
214 rc = vbg_req_perform(gdev, req1); in vbg_report_guest_info()
216 rc = vbg_req_perform(gdev, req1); in vbg_report_guest_info()
218 rc = vbg_req_perform(gdev, req2); in vbg_report_guest_info()
237 static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) in vbg_report_driver_status() argument
254 rc = vbg_req_perform(gdev, req); in vbg_report_driver_status()
269 static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx) in vbg_balloon_inflate() argument
271 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req; in vbg_balloon_inflate()
295 rc = vbg_req_perform(gdev, req); in vbg_balloon_inflate()
302 gdev->mem_balloon.pages[chunk_idx] = pages; in vbg_balloon_inflate()
320 static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx) in vbg_balloon_deflate() argument
322 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req; in vbg_balloon_deflate()
323 struct page **pages = gdev->mem_balloon.pages[chunk_idx]; in vbg_balloon_deflate()
333 rc = vbg_req_perform(gdev, req); in vbg_balloon_deflate()
342 gdev->mem_balloon.pages[chunk_idx] = NULL; in vbg_balloon_deflate()
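
vbg_balloon_inflate()/vbg_balloon_deflate() manage the balloon one chunk at a time: inflate allocates a batch of pages, reports them to the host through the pre-allocated change request (line 295), and parks the page array in gdev->mem_balloon.pages[chunk_idx] (line 302); deflate issues the inverse request (line 333) and only then releases the pages and clears the slot (line 342). A simplified guest-side sketch, assuming a placeholder chunk size and stub hooks in place of the VMMDEV change request:

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    #define CHUNK_PAGES 512    /* placeholder; the real size is dictated by the device */

    /* Stand-ins for the balloon-change request; the driver records each
     * page's physical address into the request and calls vbg_req_perform(). */
    static int give_pages_to_host(struct page **pages, unsigned int n) { return 0; }
    static int take_pages_from_host(struct page **pages, unsigned int n) { return 0; }

    static struct page **chunk_inflate(void)
    {
        struct page **pages;
        unsigned int i;

        pages = kcalloc(CHUNK_PAGES, sizeof(*pages), GFP_KERNEL);
        if (!pages)
            return NULL;

        for (i = 0; i < CHUNK_PAGES; i++) {
            pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
            if (!pages[i])
                goto err_unwind;
        }

        if (give_pages_to_host(pages, CHUNK_PAGES) < 0)
            goto err_unwind;

        return pages;    /* kept so the chunk can be deflated later */

    err_unwind:
        while (i-- > 0)
            __free_page(pages[i]);
        kfree(pages);
        return NULL;
    }

    static void chunk_deflate(struct page **pages)
    {
        unsigned int i;

        /* If the host refuses, it still references the pages: keep them. */
        if (take_pages_from_host(pages, CHUNK_PAGES) < 0)
            return;

        for (i = 0; i < CHUNK_PAGES; i++)
            __free_page(pages[i]);
        kfree(pages);
    }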
353 struct vbg_dev *gdev = in vbg_balloon_work() local
355 struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req; in vbg_balloon_work()
364 rc = vbg_req_perform(gdev, req); in vbg_balloon_work()
374 if (!gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
375 gdev->mem_balloon.pages = in vbg_balloon_work()
376 devm_kcalloc(gdev->dev, req->phys_mem_chunks, in vbg_balloon_work()
378 if (!gdev->mem_balloon.pages) in vbg_balloon_work()
381 gdev->mem_balloon.max_chunks = req->phys_mem_chunks; in vbg_balloon_work()
385 if (chunks > gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
387 __func__, chunks, gdev->mem_balloon.max_chunks); in vbg_balloon_work()
391 if (chunks > gdev->mem_balloon.chunks) { in vbg_balloon_work()
393 for (i = gdev->mem_balloon.chunks; i < chunks; i++) { in vbg_balloon_work()
394 ret = vbg_balloon_inflate(gdev, i); in vbg_balloon_work()
398 gdev->mem_balloon.chunks++; in vbg_balloon_work()
402 for (i = gdev->mem_balloon.chunks; i-- > chunks;) { in vbg_balloon_work()
403 ret = vbg_balloon_deflate(gdev, i); in vbg_balloon_work()
407 gdev->mem_balloon.chunks--; in vbg_balloon_work()
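
vbg_balloon_work() reads the host-requested size, lazily allocates the pages[] chunk array on first use (lines 374-381), rejects targets beyond max_chunks (line 385), and then walks mem_balloon.chunks toward the target one chunk at a time, stopping at the first failure so the tracked count never overstates what the host actually holds. The convergence loop in isolation, as plain C with the inflate/deflate helpers passed in:

    static int balloon_resize(unsigned int *chunks, unsigned int target,
                              int (*inflate)(unsigned int idx),
                              int (*deflate)(unsigned int idx))
    {
        unsigned int i;
        int ret = 0;

        if (target > *chunks) {
            /* Grow: mirror of lines 391-398. */
            for (i = *chunks; i < target; i++) {
                ret = inflate(i);
                if (ret < 0)
                    break;
                (*chunks)++;
            }
        } else {
            /* Shrink: mirror of lines 402-407, highest chunk first. */
            for (i = *chunks; i-- > target;) {
                ret = deflate(i);
                if (ret < 0)
                    break;
                (*chunks)--;
            }
        }
        return ret;
    }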
417 struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer); in vbg_heartbeat_timer() local
419 vbg_req_perform(gdev, gdev->guest_heartbeat_req); in vbg_heartbeat_timer()
420 mod_timer(&gdev->heartbeat_timer, in vbg_heartbeat_timer()
421 msecs_to_jiffies(gdev->heartbeat_interval_ms)); in vbg_heartbeat_timer()
431 static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) in vbg_heartbeat_host_config() argument
443 rc = vbg_req_perform(gdev, req); in vbg_heartbeat_host_config()
445 gdev->heartbeat_interval_ms = req->interval_ns; in vbg_heartbeat_host_config()
456 static int vbg_heartbeat_init(struct vbg_dev *gdev) in vbg_heartbeat_init() argument
461 ret = vbg_heartbeat_host_config(gdev, false); in vbg_heartbeat_init()
465 ret = vbg_heartbeat_host_config(gdev, true); in vbg_heartbeat_init()
469 gdev->guest_heartbeat_req = vbg_req_alloc( in vbg_heartbeat_init()
470 sizeof(*gdev->guest_heartbeat_req), in vbg_heartbeat_init()
473 if (!gdev->guest_heartbeat_req) in vbg_heartbeat_init()
477 __func__, gdev->heartbeat_interval_ms); in vbg_heartbeat_init()
478 mod_timer(&gdev->heartbeat_timer, 0); in vbg_heartbeat_init()
487 static void vbg_heartbeat_exit(struct vbg_dev *gdev) in vbg_heartbeat_exit() argument
489 del_timer_sync(&gdev->heartbeat_timer); in vbg_heartbeat_exit()
490 vbg_heartbeat_host_config(gdev, false); in vbg_heartbeat_exit()
491 vbg_req_free(gdev->guest_heartbeat_req, in vbg_heartbeat_exit()
492 sizeof(*gdev->guest_heartbeat_req)); in vbg_heartbeat_exit()
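
The heartbeat references show the standard self-rearming kernel timer shape: timer_setup() wires the callback in vbg_core_init() (line 935), the callback recovers its containing structure with from_timer() and re-arms itself with mod_timer() (lines 417-421), vbg_heartbeat_init() obtains the interval from the host (line 445) and kicks the first run with mod_timer(..., 0) (line 478), and vbg_heartbeat_exit() stops it with del_timer_sync() so the callback cannot still be running (line 489). The same shape in a generic sketch:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct beat_ctx {
        struct timer_list timer;
        unsigned int interval_ms;
    };

    static void beat_fn(struct timer_list *t)
    {
        struct beat_ctx *ctx = from_timer(ctx, t, timer);

        /* ...periodic work goes here (the driver performs the
         * pre-allocated guest_heartbeat_req against the host)... */

        /* Re-arm for the next period. */
        mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(ctx->interval_ms));
    }

    static void beat_start(struct beat_ctx *ctx, unsigned int interval_ms)
    {
        ctx->interval_ms = interval_ms;
        timer_setup(&ctx->timer, beat_fn, 0);
        mod_timer(&ctx->timer, jiffies);    /* fire as soon as possible */
    }

    static void beat_stop(struct beat_ctx *ctx)
    {
        /* Waits for a running callback, so it cannot re-arm behind us. */
        del_timer_sync(&ctx->timer);
    }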
537 static int vbg_reset_host_event_filter(struct vbg_dev *gdev, in vbg_reset_host_event_filter() argument
550 rc = vbg_req_perform(gdev, req); in vbg_reset_host_event_filter()
574 static int vbg_set_session_event_filter(struct vbg_dev *gdev, in vbg_set_session_event_filter() argument
597 mutex_lock(&gdev->session_mutex); in vbg_set_session_event_filter()
609 vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous); in vbg_set_session_event_filter()
610 or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask; in vbg_set_session_event_filter()
612 if (gdev->event_filter_host == or_mask || !req) in vbg_set_session_event_filter()
615 gdev->event_filter_host = or_mask; in vbg_set_session_event_filter()
618 rc = vbg_req_perform(gdev, req); in vbg_set_session_event_filter()
623 gdev->event_filter_host = U32_MAX; in vbg_set_session_event_filter()
627 vbg_track_bit_usage(&gdev->event_filter_tracker, changed, in vbg_set_session_event_filter()
633 mutex_unlock(&gdev->session_mutex); in vbg_set_session_event_filter()
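
vbg_set_session_event_filter() funnels every per-session change through vbg_track_bit_usage() (line 609), sends a host request only when gdev->fixed_events | tracker.mask differs from the value last reported (lines 610-615), and on a failed request forces event_filter_host back to U32_MAX and rolls the tracker back (lines 623-627). The tracker itself is not shown in this listing; a plausible model is one reference count per bit across all sessions plus the resulting OR-mask, roughly:

    #include <stdint.h>

    struct bit_usage_tracker {
        uint32_t mask;                 /* bits wanted by at least one session */
        uint16_t per_bit_usage[32];    /* how many sessions want each bit */
    };

    /* "changed" holds the bits a session just flipped; "previous" says,
     * per bit, whether that session had the bit set before the flip. */
    static void track_bit_usage(struct bit_usage_tracker *t,
                                uint32_t changed, uint32_t previous)
    {
        unsigned int bit;

        for (bit = 0; bit < 32; bit++) {
            uint32_t m = (uint32_t)1 << bit;

            if (!(changed & m))
                continue;

            if (previous & m) {
                if (--t->per_bit_usage[bit] == 0)
                    t->mask &= ~m;     /* last user dropped the bit */
            } else {
                if (t->per_bit_usage[bit]++ == 0)
                    t->mask |= m;      /* first user set the bit */
            }
        }
    }

Under this model, the rollback at line 627 would simply re-run the tracker with the same changed bits but the direction reversed (by passing the already-updated session mask as "previous"), so the next successful call re-reports a correct combined filter.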
644 static int vbg_reset_host_capabilities(struct vbg_dev *gdev) in vbg_reset_host_capabilities() argument
656 rc = vbg_req_perform(gdev, req); in vbg_reset_host_capabilities()
672 static int vbg_set_host_capabilities(struct vbg_dev *gdev, in vbg_set_host_capabilities() argument
680 WARN_ON(!mutex_is_locked(&gdev->session_mutex)); in vbg_set_host_capabilities()
682 caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask; in vbg_set_host_capabilities()
684 if (gdev->guest_caps_host == caps) in vbg_set_host_capabilities()
692 gdev->guest_caps_host = U32_MAX; in vbg_set_host_capabilities()
698 rc = vbg_req_perform(gdev, req); in vbg_set_host_capabilities()
701 gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX; in vbg_set_host_capabilities()
720 static int vbg_acquire_session_capabilities(struct vbg_dev *gdev, in vbg_acquire_session_capabilities() argument
729 mutex_lock(&gdev->session_mutex); in vbg_acquire_session_capabilities()
731 if (gdev->set_guest_caps_tracker.mask & or_mask) { in vbg_acquire_session_capabilities()
743 spin_lock_irqsave(&gdev->event_spinlock, irqflags); in vbg_acquire_session_capabilities()
744 gdev->acquire_mode_guest_caps |= or_mask; in vbg_acquire_session_capabilities()
745 spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); in vbg_acquire_session_capabilities()
758 if (gdev->acquired_guest_caps & or_mask) { in vbg_acquire_session_capabilities()
763 gdev->acquired_guest_caps |= or_mask; in vbg_acquire_session_capabilities()
764 gdev->acquired_guest_caps &= ~not_mask; in vbg_acquire_session_capabilities()
766 spin_lock_irqsave(&gdev->event_spinlock, irqflags); in vbg_acquire_session_capabilities()
769 spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); in vbg_acquire_session_capabilities()
771 ret = vbg_set_host_capabilities(gdev, session, session_termination); in vbg_acquire_session_capabilities()
774 gdev->acquired_guest_caps &= ~or_mask; in vbg_acquire_session_capabilities()
775 gdev->acquired_guest_caps |= not_mask; in vbg_acquire_session_capabilities()
776 spin_lock_irqsave(&gdev->event_spinlock, irqflags); in vbg_acquire_session_capabilities()
779 spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); in vbg_acquire_session_capabilities()
792 spin_lock_irqsave(&gdev->event_spinlock, irqflags); in vbg_acquire_session_capabilities()
795 gdev->pending_events |= in vbg_acquire_session_capabilities()
798 if (gdev->pending_events) in vbg_acquire_session_capabilities()
801 spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); in vbg_acquire_session_capabilities()
804 wake_up(&gdev->event_wq); in vbg_acquire_session_capabilities()
808 mutex_unlock(&gdev->session_mutex); in vbg_acquire_session_capabilities()
825 static int vbg_set_session_capabilities(struct vbg_dev *gdev, in vbg_set_session_capabilities() argument
833 mutex_lock(&gdev->session_mutex); in vbg_set_session_capabilities()
835 if (gdev->acquire_mode_guest_caps & or_mask) { in vbg_set_session_capabilities()
852 vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous); in vbg_set_session_capabilities()
854 ret = vbg_set_host_capabilities(gdev, session, session_termination); in vbg_set_session_capabilities()
857 vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, in vbg_set_session_capabilities()
863 mutex_unlock(&gdev->session_mutex); in vbg_set_session_capabilities()
873 static int vbg_query_host_version(struct vbg_dev *gdev) in vbg_query_host_version() argument
883 rc = vbg_req_perform(gdev, req); in vbg_query_host_version()
890 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u", in vbg_query_host_version()
892 gdev->host_features = req->features; in vbg_query_host_version()
894 vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version, in vbg_query_host_version()
895 gdev->host_features); in vbg_query_host_version()
922 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events) in vbg_core_init() argument
926 gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM; in vbg_core_init()
927 gdev->event_filter_host = U32_MAX; /* forces a report */ in vbg_core_init()
928 gdev->guest_caps_host = U32_MAX; /* forces a report */ in vbg_core_init()
930 init_waitqueue_head(&gdev->event_wq); in vbg_core_init()
931 init_waitqueue_head(&gdev->hgcm_wq); in vbg_core_init()
932 spin_lock_init(&gdev->event_spinlock); in vbg_core_init()
933 mutex_init(&gdev->session_mutex); in vbg_core_init()
934 mutex_init(&gdev->cancel_req_mutex); in vbg_core_init()
935 timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0); in vbg_core_init()
936 INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work); in vbg_core_init()
938 gdev->mem_balloon.get_req = in vbg_core_init()
939 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req), in vbg_core_init()
942 gdev->mem_balloon.change_req = in vbg_core_init()
943 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req), in vbg_core_init()
946 gdev->cancel_req = in vbg_core_init()
947 vbg_req_alloc(sizeof(*(gdev->cancel_req)), in vbg_core_init()
950 gdev->ack_events_req = in vbg_core_init()
951 vbg_req_alloc(sizeof(*gdev->ack_events_req), in vbg_core_init()
954 gdev->mouse_status_req = in vbg_core_init()
955 vbg_req_alloc(sizeof(*gdev->mouse_status_req), in vbg_core_init()
959 if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req || in vbg_core_init()
960 !gdev->cancel_req || !gdev->ack_events_req || in vbg_core_init()
961 !gdev->mouse_status_req) in vbg_core_init()
964 ret = vbg_query_host_version(gdev); in vbg_core_init()
968 ret = vbg_report_guest_info(gdev); in vbg_core_init()
974 ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events); in vbg_core_init()
981 ret = vbg_reset_host_capabilities(gdev); in vbg_core_init()
988 ret = vbg_core_set_mouse_status(gdev, 0); in vbg_core_init()
995 vbg_guest_mappings_init(gdev); in vbg_core_init()
996 vbg_heartbeat_init(gdev); in vbg_core_init()
999 ret = vbg_report_driver_status(gdev, true); in vbg_core_init()
1006 vbg_req_free(gdev->mouse_status_req, in vbg_core_init()
1007 sizeof(*gdev->mouse_status_req)); in vbg_core_init()
1008 vbg_req_free(gdev->ack_events_req, in vbg_core_init()
1009 sizeof(*gdev->ack_events_req)); in vbg_core_init()
1010 vbg_req_free(gdev->cancel_req, in vbg_core_init()
1011 sizeof(*gdev->cancel_req)); in vbg_core_init()
1012 vbg_req_free(gdev->mem_balloon.change_req, in vbg_core_init()
1013 sizeof(*gdev->mem_balloon.change_req)); in vbg_core_init()
1014 vbg_req_free(gdev->mem_balloon.get_req, in vbg_core_init()
1015 sizeof(*gdev->mem_balloon.get_req)); in vbg_core_init()
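
vbg_core_init() allocates all five device requests up front (lines 938-955), validates them with a single combined check (lines 959-961), and funnels every later failure into one teardown sequence (lines 1006-1015) that vbg_core_exit() repeats at lines 1036-1045. That only works if the free helper, like kfree(), treats a never-allocated (NULL) request as a no-op. The same allocate-all/check-once/free-all shape in plain C, with placeholder sizes:

    #include <stdlib.h>

    struct reqs {
        void *get_req;
        void *change_req;
        void *cancel_req;
        void *ack_events_req;
        void *mouse_status_req;
    };

    /* Safe to call no matter how far allocation got: free(NULL) is a no-op. */
    static void reqs_free(struct reqs *r)
    {
        free(r->mouse_status_req);
        free(r->ack_events_req);
        free(r->cancel_req);
        free(r->change_req);
        free(r->get_req);
    }

    static int reqs_init(struct reqs *r)
    {
        r->get_req          = malloc(64);    /* sizes are placeholders */
        r->change_req       = malloc(64);
        r->cancel_req       = malloc(64);
        r->ack_events_req   = malloc(64);
        r->mouse_status_req = malloc(64);

        if (!r->get_req || !r->change_req || !r->cancel_req ||
            !r->ack_events_req || !r->mouse_status_req) {
            reqs_free(r);
            return -1;
        }
        return 0;
    }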
1026 void vbg_core_exit(struct vbg_dev *gdev) in vbg_core_exit() argument
1028 vbg_heartbeat_exit(gdev); in vbg_core_exit()
1029 vbg_guest_mappings_exit(gdev); in vbg_core_exit()
1032 vbg_reset_host_event_filter(gdev, 0); in vbg_core_exit()
1033 vbg_reset_host_capabilities(gdev); in vbg_core_exit()
1034 vbg_core_set_mouse_status(gdev, 0); in vbg_core_exit()
1036 vbg_req_free(gdev->mouse_status_req, in vbg_core_exit()
1037 sizeof(*gdev->mouse_status_req)); in vbg_core_exit()
1038 vbg_req_free(gdev->ack_events_req, in vbg_core_exit()
1039 sizeof(*gdev->ack_events_req)); in vbg_core_exit()
1040 vbg_req_free(gdev->cancel_req, in vbg_core_exit()
1041 sizeof(*gdev->cancel_req)); in vbg_core_exit()
1042 vbg_req_free(gdev->mem_balloon.change_req, in vbg_core_exit()
1043 sizeof(*gdev->mem_balloon.change_req)); in vbg_core_exit()
1044 vbg_req_free(gdev->mem_balloon.get_req, in vbg_core_exit()
1045 sizeof(*gdev->mem_balloon.get_req)); in vbg_core_exit()
1056 struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor) in vbg_core_open_session() argument
1064 session->gdev = gdev; in vbg_core_open_session()
1076 struct vbg_dev *gdev = session->gdev; in vbg_core_close_session() local
1079 vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true); in vbg_core_close_session()
1080 vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true); in vbg_core_close_session()
1081 vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true); in vbg_core_close_session()
1088 vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST, in vbg_core_close_session()
1138 static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev, in vbg_get_allowed_event_mask_for_session() argument
1141 u32 acquire_mode_caps = gdev->acquire_mode_guest_caps; in vbg_get_allowed_event_mask_for_session()
1156 static bool vbg_wait_event_cond(struct vbg_dev *gdev, in vbg_wait_event_cond() argument
1164 spin_lock_irqsave(&gdev->event_spinlock, flags); in vbg_wait_event_cond()
1166 events = gdev->pending_events & event_mask; in vbg_wait_event_cond()
1167 events &= vbg_get_allowed_event_mask_for_session(gdev, session); in vbg_wait_event_cond()
1170 spin_unlock_irqrestore(&gdev->event_spinlock, flags); in vbg_wait_event_cond()
1176 static u32 vbg_consume_events_locked(struct vbg_dev *gdev, in vbg_consume_events_locked() argument
1180 u32 events = gdev->pending_events & event_mask; in vbg_consume_events_locked()
1182 events &= vbg_get_allowed_event_mask_for_session(gdev, session); in vbg_consume_events_locked()
1183 gdev->pending_events &= ~events; in vbg_consume_events_locked()
1187 static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev, in vbg_ioctl_wait_for_events() argument
1208 gdev->event_wq, in vbg_ioctl_wait_for_events()
1209 vbg_wait_event_cond(gdev, session, event_mask), in vbg_ioctl_wait_for_events()
1212 spin_lock_irqsave(&gdev->event_spinlock, flags); in vbg_ioctl_wait_for_events()
1220 vbg_consume_events_locked(gdev, session, event_mask); in vbg_ioctl_wait_for_events()
1223 spin_unlock_irqrestore(&gdev->event_spinlock, flags); in vbg_ioctl_wait_for_events()
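
vbg_wait_event_cond(), vbg_consume_events_locked() and vbg_ioctl_wait_for_events() implement a pending-bitmask waitqueue: the ISR ORs new bits into pending_events under event_spinlock and calls wake_up() (lines 1815-1819), waiters sleep in a timed, interruptible wait on a condition helper that re-takes the spinlock (lines 1164-1170, 1208-1209), and a woken waiter claims its bits under the same lock (lines 1180-1183). A condensed two-sided sketch of that pattern, leaving out the per-session capability filtering the driver adds:

    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    static DEFINE_SPINLOCK(ev_lock);
    static DECLARE_WAIT_QUEUE_HEAD(ev_wq);
    static u32 pending_events;

    /* Producer side (the interrupt handler in the driver). */
    static void post_events(u32 events)
    {
        unsigned long flags;

        spin_lock_irqsave(&ev_lock, flags);
        pending_events |= events;
        spin_unlock_irqrestore(&ev_lock, flags);

        wake_up(&ev_wq);
    }

    /* Condition helper: takes the lock itself, because wait_event_*()
     * evaluates the condition with no lock held. */
    static bool events_pending(u32 event_mask)
    {
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&ev_lock, flags);
        ret = (pending_events & event_mask) != 0;
        spin_unlock_irqrestore(&ev_lock, flags);

        return ret;
    }

    /* Consumer side: sleep, then claim the bits under the lock. */
    static int wait_for_events(u32 event_mask, unsigned long timeout_jiffies,
                               u32 *out)
    {
        unsigned long flags;
        long ret;

        ret = wait_event_interruptible_timeout(ev_wq,
                                               events_pending(event_mask),
                                               timeout_jiffies);
        if (ret < 0)
            return ret;          /* interrupted by a signal */
        if (ret == 0)
            return -ETIMEDOUT;

        spin_lock_irqsave(&ev_lock, flags);
        *out = pending_events & event_mask;
        pending_events &= ~*out;
        spin_unlock_irqrestore(&ev_lock, flags);

        return 0;
    }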
1234 static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev, in vbg_ioctl_interrupt_all_wait_events() argument
1243 spin_lock_irqsave(&gdev->event_spinlock, flags); in vbg_ioctl_interrupt_all_wait_events()
1245 spin_unlock_irqrestore(&gdev->event_spinlock, flags); in vbg_ioctl_interrupt_all_wait_events()
1247 wake_up(&gdev->event_wq); in vbg_ioctl_interrupt_all_wait_events()
1259 static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, in vbg_req_allowed() argument
1350 static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev, in vbg_ioctl_vmmrequest() argument
1365 ret = vbg_req_allowed(gdev, session, data); in vbg_ioctl_vmmrequest()
1369 vbg_req_perform(gdev, data); in vbg_ioctl_vmmrequest()
1375 static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev, in vbg_ioctl_hgcm_connect() argument
1386 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_connect()
1393 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_connect()
1398 ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc, in vbg_ioctl_hgcm_connect()
1401 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_connect()
1409 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_connect()
1414 static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev, in vbg_ioctl_hgcm_disconnect() argument
1428 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_disconnect()
1435 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_disconnect()
1440 ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id, in vbg_ioctl_hgcm_disconnect()
1443 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_disconnect()
1448 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_disconnect()
1467 static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev, in vbg_ioctl_hgcm_call() argument
1522 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_call()
1526 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_call()
1534 ret = vbg_hgcm_call32(gdev, session->requestor, client_id, in vbg_ioctl_hgcm_call()
1539 ret = vbg_hgcm_call(gdev, session->requestor, client_id, in vbg_ioctl_hgcm_call()
1567 static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev, in vbg_ioctl_change_filter_mask() argument
1582 return vbg_set_session_event_filter(gdev, session, or_mask, not_mask, in vbg_ioctl_change_filter_mask()
1586 static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev, in vbg_ioctl_acquire_guest_capabilities() argument
1605 return vbg_acquire_session_capabilities(gdev, session, or_mask, in vbg_ioctl_acquire_guest_capabilities()
1609 static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, in vbg_ioctl_change_guest_capabilities() argument
1624 ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, in vbg_ioctl_change_guest_capabilities()
1630 caps->u.out.global_caps = gdev->guest_caps_host; in vbg_ioctl_change_guest_capabilities()
1635 static int vbg_ioctl_check_balloon(struct vbg_dev *gdev, in vbg_ioctl_check_balloon() argument
1641 balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; in vbg_ioctl_check_balloon()
1651 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, in vbg_ioctl_write_core_dump() argument
1666 dump->hdr.rc = vbg_req_perform(gdev, req); in vbg_ioctl_write_core_dump()
1682 struct vbg_dev *gdev = session->gdev; in vbg_core_ioctl() local
1699 return vbg_ioctl_vmmrequest(gdev, session, data); in vbg_core_ioctl()
1709 return vbg_ioctl_hgcm_connect(gdev, session, data); in vbg_core_ioctl()
1711 return vbg_ioctl_hgcm_disconnect(gdev, session, data); in vbg_core_ioctl()
1713 return vbg_ioctl_wait_for_events(gdev, session, data); in vbg_core_ioctl()
1715 return vbg_ioctl_interrupt_all_wait_events(gdev, session, data); in vbg_core_ioctl()
1717 return vbg_ioctl_change_filter_mask(gdev, session, data); in vbg_core_ioctl()
1719 return vbg_ioctl_acquire_guest_capabilities(gdev, session, data); in vbg_core_ioctl()
1721 return vbg_ioctl_change_guest_capabilities(gdev, session, data); in vbg_core_ioctl()
1723 return vbg_ioctl_check_balloon(gdev, data); in vbg_core_ioctl()
1725 return vbg_ioctl_write_core_dump(gdev, session, data); in vbg_core_ioctl()
1736 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); in vbg_core_ioctl()
1753 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features) in vbg_core_set_mouse_status() argument
1767 rc = vbg_req_perform(gdev, req); in vbg_core_set_mouse_status()
1778 struct vbg_dev *gdev = dev_id; in vbg_core_isr() local
1779 struct vmmdev_events *req = gdev->ack_events_req; in vbg_core_isr()
1785 if (!gdev->mmio->V.V1_04.have_events) in vbg_core_isr()
1791 rc = vbg_req_perform(gdev, req); in vbg_core_isr()
1805 wake_up(&gdev->hgcm_wq); in vbg_core_isr()
1810 schedule_work(&gdev->mem_balloon.work); in vbg_core_isr()
1815 spin_lock_irqsave(&gdev->event_spinlock, flags); in vbg_core_isr()
1816 gdev->pending_events |= events; in vbg_core_isr()
1817 spin_unlock_irqrestore(&gdev->event_spinlock, flags); in vbg_core_isr()
1819 wake_up(&gdev->event_wq); in vbg_core_isr()
1823 vbg_linux_mouse_event(gdev); in vbg_core_isr()