Lines matching refs: gdev
46 static void vbg_guest_mappings_init(struct vbg_dev *gdev) in vbg_guest_mappings_init() argument
62 rc = vbg_req_perform(gdev, req); in vbg_guest_mappings_init()
81 gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER); in vbg_guest_mappings_init()
82 if (!gdev->guest_mappings_dummy_page) in vbg_guest_mappings_init()
86 pages[i] = gdev->guest_mappings_dummy_page; in vbg_guest_mappings_init()
104 rc = vbg_req_perform(gdev, req); in vbg_guest_mappings_init()
106 gdev->guest_mappings = guest_mappings[i]; in vbg_guest_mappings_init()
116 if (!gdev->guest_mappings) { in vbg_guest_mappings_init()
117 __free_page(gdev->guest_mappings_dummy_page); in vbg_guest_mappings_init()
118 gdev->guest_mappings_dummy_page = NULL; in vbg_guest_mappings_init()
131 static void vbg_guest_mappings_exit(struct vbg_dev *gdev) in vbg_guest_mappings_exit() argument
136 if (!gdev->guest_mappings) in vbg_guest_mappings_exit()
151 rc = vbg_req_perform(gdev, req); in vbg_guest_mappings_exit()
160 vunmap(gdev->guest_mappings); in vbg_guest_mappings_exit()
161 gdev->guest_mappings = NULL; in vbg_guest_mappings_exit()
163 __free_page(gdev->guest_mappings_dummy_page); in vbg_guest_mappings_exit()
164 gdev->guest_mappings_dummy_page = NULL; in vbg_guest_mappings_exit()
172 static int vbg_report_guest_info(struct vbg_dev *gdev) in vbg_report_guest_info() argument
211 rc = vbg_req_perform(gdev, req2); in vbg_report_guest_info()
213 rc = vbg_req_perform(gdev, req1); in vbg_report_guest_info()
215 rc = vbg_req_perform(gdev, req1); in vbg_report_guest_info()
217 rc = vbg_req_perform(gdev, req2); in vbg_report_guest_info()
236 static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) in vbg_report_driver_status() argument
253 rc = vbg_req_perform(gdev, req); in vbg_report_driver_status()
268 static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx) in vbg_balloon_inflate() argument
270 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req; in vbg_balloon_inflate()
294 rc = vbg_req_perform(gdev, req); in vbg_balloon_inflate()
301 gdev->mem_balloon.pages[chunk_idx] = pages; in vbg_balloon_inflate()
319 static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx) in vbg_balloon_deflate() argument
321 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req; in vbg_balloon_deflate()
322 struct page **pages = gdev->mem_balloon.pages[chunk_idx]; in vbg_balloon_deflate()
332 rc = vbg_req_perform(gdev, req); in vbg_balloon_deflate()
341 gdev->mem_balloon.pages[chunk_idx] = NULL; in vbg_balloon_deflate()
352 struct vbg_dev *gdev = in vbg_balloon_work() local
354 struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req; in vbg_balloon_work()
363 rc = vbg_req_perform(gdev, req); in vbg_balloon_work()
373 if (!gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
374 gdev->mem_balloon.pages = in vbg_balloon_work()
375 devm_kcalloc(gdev->dev, req->phys_mem_chunks, in vbg_balloon_work()
377 if (!gdev->mem_balloon.pages) in vbg_balloon_work()
380 gdev->mem_balloon.max_chunks = req->phys_mem_chunks; in vbg_balloon_work()
384 if (chunks > gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
386 __func__, chunks, gdev->mem_balloon.max_chunks); in vbg_balloon_work()
390 if (chunks > gdev->mem_balloon.chunks) { in vbg_balloon_work()
392 for (i = gdev->mem_balloon.chunks; i < chunks; i++) { in vbg_balloon_work()
393 ret = vbg_balloon_inflate(gdev, i); in vbg_balloon_work()
397 gdev->mem_balloon.chunks++; in vbg_balloon_work()
401 for (i = gdev->mem_balloon.chunks; i-- > chunks;) { in vbg_balloon_work()
402 ret = vbg_balloon_deflate(gdev, i); in vbg_balloon_work()
406 gdev->mem_balloon.chunks--; in vbg_balloon_work()
416 struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer); in vbg_heartbeat_timer() local
418 vbg_req_perform(gdev, gdev->guest_heartbeat_req); in vbg_heartbeat_timer()
419 mod_timer(&gdev->heartbeat_timer, in vbg_heartbeat_timer()
420 msecs_to_jiffies(gdev->heartbeat_interval_ms)); in vbg_heartbeat_timer()
430 static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) in vbg_heartbeat_host_config() argument
442 rc = vbg_req_perform(gdev, req); in vbg_heartbeat_host_config()
444 gdev->heartbeat_interval_ms = req->interval_ns; in vbg_heartbeat_host_config()
455 static int vbg_heartbeat_init(struct vbg_dev *gdev) in vbg_heartbeat_init() argument
460 ret = vbg_heartbeat_host_config(gdev, false); in vbg_heartbeat_init()
464 ret = vbg_heartbeat_host_config(gdev, true); in vbg_heartbeat_init()
468 gdev->guest_heartbeat_req = vbg_req_alloc( in vbg_heartbeat_init()
469 sizeof(*gdev->guest_heartbeat_req), in vbg_heartbeat_init()
472 if (!gdev->guest_heartbeat_req) in vbg_heartbeat_init()
476 __func__, gdev->heartbeat_interval_ms); in vbg_heartbeat_init()
477 mod_timer(&gdev->heartbeat_timer, 0); in vbg_heartbeat_init()
486 static void vbg_heartbeat_exit(struct vbg_dev *gdev) in vbg_heartbeat_exit() argument
488 del_timer_sync(&gdev->heartbeat_timer); in vbg_heartbeat_exit()
489 vbg_heartbeat_host_config(gdev, false); in vbg_heartbeat_exit()
490 vbg_req_free(gdev->guest_heartbeat_req, in vbg_heartbeat_exit()
491 sizeof(*gdev->guest_heartbeat_req)); in vbg_heartbeat_exit()
536 static int vbg_reset_host_event_filter(struct vbg_dev *gdev, in vbg_reset_host_event_filter() argument
549 rc = vbg_req_perform(gdev, req); in vbg_reset_host_event_filter()
573 static int vbg_set_session_event_filter(struct vbg_dev *gdev, in vbg_set_session_event_filter() argument
596 mutex_lock(&gdev->session_mutex); in vbg_set_session_event_filter()
608 vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous); in vbg_set_session_event_filter()
609 or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask; in vbg_set_session_event_filter()
611 if (gdev->event_filter_host == or_mask || !req) in vbg_set_session_event_filter()
614 gdev->event_filter_host = or_mask; in vbg_set_session_event_filter()
617 rc = vbg_req_perform(gdev, req); in vbg_set_session_event_filter()
622 gdev->event_filter_host = U32_MAX; in vbg_set_session_event_filter()
626 vbg_track_bit_usage(&gdev->event_filter_tracker, changed, in vbg_set_session_event_filter()
632 mutex_unlock(&gdev->session_mutex); in vbg_set_session_event_filter()
643 static int vbg_reset_host_capabilities(struct vbg_dev *gdev) in vbg_reset_host_capabilities() argument
655 rc = vbg_req_perform(gdev, req); in vbg_reset_host_capabilities()
675 static int vbg_set_session_capabilities(struct vbg_dev *gdev, in vbg_set_session_capabilities() argument
698 mutex_lock(&gdev->session_mutex); in vbg_set_session_capabilities()
710 vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous); in vbg_set_session_capabilities()
711 or_mask = gdev->guest_caps_tracker.mask; in vbg_set_session_capabilities()
713 if (gdev->guest_caps_host == or_mask || !req) in vbg_set_session_capabilities()
716 gdev->guest_caps_host = or_mask; in vbg_set_session_capabilities()
719 rc = vbg_req_perform(gdev, req); in vbg_set_session_capabilities()
724 gdev->guest_caps_host = U32_MAX; in vbg_set_session_capabilities()
728 vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, in vbg_set_session_capabilities()
734 mutex_unlock(&gdev->session_mutex); in vbg_set_session_capabilities()
745 static int vbg_query_host_version(struct vbg_dev *gdev) in vbg_query_host_version() argument
755 rc = vbg_req_perform(gdev, req); in vbg_query_host_version()
762 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u", in vbg_query_host_version()
764 gdev->host_features = req->features; in vbg_query_host_version()
766 vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version, in vbg_query_host_version()
767 gdev->host_features); in vbg_query_host_version()
794 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events) in vbg_core_init() argument
798 gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM; in vbg_core_init()
799 gdev->event_filter_host = U32_MAX; /* forces a report */ in vbg_core_init()
800 gdev->guest_caps_host = U32_MAX; /* forces a report */ in vbg_core_init()
802 init_waitqueue_head(&gdev->event_wq); in vbg_core_init()
803 init_waitqueue_head(&gdev->hgcm_wq); in vbg_core_init()
804 spin_lock_init(&gdev->event_spinlock); in vbg_core_init()
805 mutex_init(&gdev->session_mutex); in vbg_core_init()
806 mutex_init(&gdev->cancel_req_mutex); in vbg_core_init()
807 timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0); in vbg_core_init()
808 INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work); in vbg_core_init()
810 gdev->mem_balloon.get_req = in vbg_core_init()
811 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req), in vbg_core_init()
814 gdev->mem_balloon.change_req = in vbg_core_init()
815 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req), in vbg_core_init()
818 gdev->cancel_req = in vbg_core_init()
819 vbg_req_alloc(sizeof(*(gdev->cancel_req)), in vbg_core_init()
822 gdev->ack_events_req = in vbg_core_init()
823 vbg_req_alloc(sizeof(*gdev->ack_events_req), in vbg_core_init()
826 gdev->mouse_status_req = in vbg_core_init()
827 vbg_req_alloc(sizeof(*gdev->mouse_status_req), in vbg_core_init()
831 if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req || in vbg_core_init()
832 !gdev->cancel_req || !gdev->ack_events_req || in vbg_core_init()
833 !gdev->mouse_status_req) in vbg_core_init()
836 ret = vbg_query_host_version(gdev); in vbg_core_init()
840 ret = vbg_report_guest_info(gdev); in vbg_core_init()
846 ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events); in vbg_core_init()
853 ret = vbg_reset_host_capabilities(gdev); in vbg_core_init()
860 ret = vbg_core_set_mouse_status(gdev, 0); in vbg_core_init()
867 vbg_guest_mappings_init(gdev); in vbg_core_init()
868 vbg_heartbeat_init(gdev); in vbg_core_init()
871 ret = vbg_report_driver_status(gdev, true); in vbg_core_init()
878 vbg_req_free(gdev->mouse_status_req, in vbg_core_init()
879 sizeof(*gdev->mouse_status_req)); in vbg_core_init()
880 vbg_req_free(gdev->ack_events_req, in vbg_core_init()
881 sizeof(*gdev->ack_events_req)); in vbg_core_init()
882 vbg_req_free(gdev->cancel_req, in vbg_core_init()
883 sizeof(*gdev->cancel_req)); in vbg_core_init()
884 vbg_req_free(gdev->mem_balloon.change_req, in vbg_core_init()
885 sizeof(*gdev->mem_balloon.change_req)); in vbg_core_init()
886 vbg_req_free(gdev->mem_balloon.get_req, in vbg_core_init()
887 sizeof(*gdev->mem_balloon.get_req)); in vbg_core_init()
898 void vbg_core_exit(struct vbg_dev *gdev) in vbg_core_exit() argument
900 vbg_heartbeat_exit(gdev); in vbg_core_exit()
901 vbg_guest_mappings_exit(gdev); in vbg_core_exit()
904 vbg_reset_host_event_filter(gdev, 0); in vbg_core_exit()
905 vbg_reset_host_capabilities(gdev); in vbg_core_exit()
906 vbg_core_set_mouse_status(gdev, 0); in vbg_core_exit()
908 vbg_req_free(gdev->mouse_status_req, in vbg_core_exit()
909 sizeof(*gdev->mouse_status_req)); in vbg_core_exit()
910 vbg_req_free(gdev->ack_events_req, in vbg_core_exit()
911 sizeof(*gdev->ack_events_req)); in vbg_core_exit()
912 vbg_req_free(gdev->cancel_req, in vbg_core_exit()
913 sizeof(*gdev->cancel_req)); in vbg_core_exit()
914 vbg_req_free(gdev->mem_balloon.change_req, in vbg_core_exit()
915 sizeof(*gdev->mem_balloon.change_req)); in vbg_core_exit()
916 vbg_req_free(gdev->mem_balloon.get_req, in vbg_core_exit()
917 sizeof(*gdev->mem_balloon.get_req)); in vbg_core_exit()
928 struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor) in vbg_core_open_session() argument
936 session->gdev = gdev; in vbg_core_open_session()
948 struct vbg_dev *gdev = session->gdev; in vbg_core_close_session() local
951 vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true); in vbg_core_close_session()
952 vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true); in vbg_core_close_session()
959 vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST, in vbg_core_close_session()
1008 static bool vbg_wait_event_cond(struct vbg_dev *gdev, in vbg_wait_event_cond() argument
1016 spin_lock_irqsave(&gdev->event_spinlock, flags); in vbg_wait_event_cond()
1018 events = gdev->pending_events & event_mask; in vbg_wait_event_cond()
1021 spin_unlock_irqrestore(&gdev->event_spinlock, flags); in vbg_wait_event_cond()
1027 static u32 vbg_consume_events_locked(struct vbg_dev *gdev, in vbg_consume_events_locked() argument
1031 u32 events = gdev->pending_events & event_mask; in vbg_consume_events_locked()
1033 gdev->pending_events &= ~events; in vbg_consume_events_locked()
1037 static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev, in vbg_ioctl_wait_for_events() argument
1058 gdev->event_wq, in vbg_ioctl_wait_for_events()
1059 vbg_wait_event_cond(gdev, session, event_mask), in vbg_ioctl_wait_for_events()
1062 spin_lock_irqsave(&gdev->event_spinlock, flags); in vbg_ioctl_wait_for_events()
1070 vbg_consume_events_locked(gdev, session, event_mask); in vbg_ioctl_wait_for_events()
1073 spin_unlock_irqrestore(&gdev->event_spinlock, flags); in vbg_ioctl_wait_for_events()
1084 static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev, in vbg_ioctl_interrupt_all_wait_events() argument
1093 spin_lock_irqsave(&gdev->event_spinlock, flags); in vbg_ioctl_interrupt_all_wait_events()
1095 spin_unlock_irqrestore(&gdev->event_spinlock, flags); in vbg_ioctl_interrupt_all_wait_events()
1097 wake_up(&gdev->event_wq); in vbg_ioctl_interrupt_all_wait_events()
1109 static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, in vbg_req_allowed() argument
1198 static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev, in vbg_ioctl_vmmrequest() argument
1213 ret = vbg_req_allowed(gdev, session, data); in vbg_ioctl_vmmrequest()
1217 vbg_req_perform(gdev, data); in vbg_ioctl_vmmrequest()
1223 static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev, in vbg_ioctl_hgcm_connect() argument
1234 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_connect()
1241 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_connect()
1246 ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc, in vbg_ioctl_hgcm_connect()
1249 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_connect()
1257 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_connect()
1262 static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev, in vbg_ioctl_hgcm_disconnect() argument
1276 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_disconnect()
1283 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_disconnect()
1288 ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id, in vbg_ioctl_hgcm_disconnect()
1291 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_disconnect()
1296 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_disconnect()
1315 static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev, in vbg_ioctl_hgcm_call() argument
1370 mutex_lock(&gdev->session_mutex); in vbg_ioctl_hgcm_call()
1374 mutex_unlock(&gdev->session_mutex); in vbg_ioctl_hgcm_call()
1382 ret = vbg_hgcm_call32(gdev, session->requestor, client_id, in vbg_ioctl_hgcm_call()
1387 ret = vbg_hgcm_call(gdev, session->requestor, client_id, in vbg_ioctl_hgcm_call()
1415 static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev, in vbg_ioctl_change_filter_mask() argument
1430 return vbg_set_session_event_filter(gdev, session, or_mask, not_mask, in vbg_ioctl_change_filter_mask()
1434 static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, in vbg_ioctl_change_guest_capabilities() argument
1449 ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, in vbg_ioctl_change_guest_capabilities()
1455 caps->u.out.global_caps = gdev->guest_caps_host; in vbg_ioctl_change_guest_capabilities()
1460 static int vbg_ioctl_check_balloon(struct vbg_dev *gdev, in vbg_ioctl_check_balloon() argument
1466 balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; in vbg_ioctl_check_balloon()
1476 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, in vbg_ioctl_write_core_dump() argument
1491 dump->hdr.rc = vbg_req_perform(gdev, req); in vbg_ioctl_write_core_dump()
1507 struct vbg_dev *gdev = session->gdev; in vbg_core_ioctl() local
1523 return vbg_ioctl_vmmrequest(gdev, session, data); in vbg_core_ioctl()
1533 return vbg_ioctl_hgcm_connect(gdev, session, data); in vbg_core_ioctl()
1535 return vbg_ioctl_hgcm_disconnect(gdev, session, data); in vbg_core_ioctl()
1537 return vbg_ioctl_wait_for_events(gdev, session, data); in vbg_core_ioctl()
1539 return vbg_ioctl_interrupt_all_wait_events(gdev, session, data); in vbg_core_ioctl()
1541 return vbg_ioctl_change_filter_mask(gdev, session, data); in vbg_core_ioctl()
1543 return vbg_ioctl_change_guest_capabilities(gdev, session, data); in vbg_core_ioctl()
1545 return vbg_ioctl_check_balloon(gdev, data); in vbg_core_ioctl()
1547 return vbg_ioctl_write_core_dump(gdev, session, data); in vbg_core_ioctl()
1558 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); in vbg_core_ioctl()
1574 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features) in vbg_core_set_mouse_status() argument
1588 rc = vbg_req_perform(gdev, req); in vbg_core_set_mouse_status()
1599 struct vbg_dev *gdev = dev_id; in vbg_core_isr() local
1600 struct vmmdev_events *req = gdev->ack_events_req; in vbg_core_isr()
1606 if (!gdev->mmio->V.V1_04.have_events) in vbg_core_isr()
1612 rc = vbg_req_perform(gdev, req); in vbg_core_isr()
1626 wake_up(&gdev->hgcm_wq); in vbg_core_isr()
1631 schedule_work(&gdev->mem_balloon.work); in vbg_core_isr()
1636 spin_lock_irqsave(&gdev->event_spinlock, flags); in vbg_core_isr()
1637 gdev->pending_events |= events; in vbg_core_isr()
1638 spin_unlock_irqrestore(&gdev->event_spinlock, flags); in vbg_core_isr()
1640 wake_up(&gdev->event_wq); in vbg_core_isr()
1644 vbg_linux_mouse_event(gdev); in vbg_core_isr()
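
The tail of the listing (vbg_core_isr together with vbg_wait_event_cond and vbg_consume_events_locked) shows the driver's event hand-off: the interrupt handler ORs freshly acknowledged host events into gdev->pending_events under event_spinlock and wakes event_wq, and each waiter later consumes only the bits covered by its own event_mask. The following is a minimal userspace sketch of just that mask arithmetic; the names (pending, post_events, consume_events) are hypothetical stand-ins for the driver's fields and helpers, and the locking and wait-queue mechanics are deliberately omitted.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for gdev->pending_events (hypothetical name). */
    static uint32_t pending;

    /* ISR side: OR newly acknowledged events into the pending set
     * (models gdev->pending_events |= events in vbg_core_isr()). */
    static void post_events(uint32_t events)
    {
    	pending |= events;
    }

    /* Waiter side: take only the bits this waiter asked for and clear
     * them, leaving the remaining events for other waiters
     * (models vbg_consume_events_locked()). */
    static uint32_t consume_events(uint32_t event_mask)
    {
    	uint32_t events = pending & event_mask;

    	pending &= ~events;
    	return events;
    }

    int main(void)
    {
    	post_events(0x5);                                   /* events 0 and 2 arrive */
    	printf("waiter A got %#x\n", consume_events(0x1));  /* -> 0x1 */
    	printf("waiter B got %#x\n", consume_events(0x6));  /* -> 0x4 */
    	printf("still pending %#x\n", pending);             /* -> 0 */
    	return 0;
    }

In the real driver both sides run with gdev->event_spinlock held and the waiter sleeps on gdev->event_wq via wait_event_interruptible_timeout; the sketch keeps only the set-and-clear bookkeeping that the referenced lines perform.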