1 /* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2 /*
3 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
4 *
5 * Copyright (C) 2007-2016 Oracle Corporation
6 */
7
8 #include <linux/device.h>
9 #include <linux/mm.h>
10 #include <linux/sched.h>
11 #include <linux/sizes.h>
12 #include <linux/slab.h>
13 #include <linux/vbox_err.h>
14 #include <linux/vbox_utils.h>
15 #include <linux/vmalloc.h>
16 #include "vboxguest_core.h"
17 #include "vboxguest_version.h"
18
19 /* Get the pointer to the first HGCM parameter. */
20 #define VBG_IOCTL_HGCM_CALL_PARMS(a) \
21 ((struct vmmdev_hgcm_function_parameter *)( \
22 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
23 /* Get the pointer to the first HGCM parameter in a 32-bit request. */
24 #define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
25 ((struct vmmdev_hgcm_function_parameter32 *)( \
26 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
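/*
 * Illustrative sketch only: the parameters are laid out directly after the
 * fixed struct vbg_ioctl_hgcm_call header in the same buffer, i.e.
 * [header][parm 0][parm 1]...[parm N-1], so a caller holding such a buffer
 * could walk them like this (handle_parm is a hypothetical helper):
 *
 *	struct vmmdev_hgcm_function_parameter *parm =
 *		VBG_IOCTL_HGCM_CALL_PARMS(call);
 *	u32 i;
 *
 *	for (i = 0; i < call->parm_count; i++)
 *		handle_parm(&parm[i]);
 */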
27
28 #define GUEST_MAPPINGS_TRIES 5
29
30 #define VBG_KERNEL_REQUEST \
31 (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
32 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
33
34 /**
35 * Reserves memory in which the VMM can relocate any guest mappings
36 * that are floating around.
37 *
38 * This operation is a little bit tricky since the VMM might not accept
39 * just any address because of address clashes between the three contexts
40 * it operates in, so we try several times.
41 *
42 * Failure to reserve the guest mappings is ignored.
43 *
44 * @gdev: The Guest extension device.
45 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
47 {
48 struct vmmdev_hypervisorinfo *req;
49 void *guest_mappings[GUEST_MAPPINGS_TRIES];
50 struct page **pages = NULL;
51 u32 size, hypervisor_size;
52 int i, rc;
53
54 /* Query the required space. */
55 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
56 VBG_KERNEL_REQUEST);
57 if (!req)
58 return;
59
60 req->hypervisor_start = 0;
61 req->hypervisor_size = 0;
62 rc = vbg_req_perform(gdev, req);
63 if (rc < 0)
64 goto out;
65
66 /*
67 * The VMM will report back if there is nothing it wants to map, like
68 * for instance in VT-x and AMD-V mode.
69 */
70 if (req->hypervisor_size == 0)
71 goto out;
72
73 hypervisor_size = req->hypervisor_size;
/* Add 4 MiB so that we can align the vmap to a 4 MiB boundary, as the host requires. */
75 size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
76
77 pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
78 if (!pages)
79 goto out;
80
81 gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
82 if (!gdev->guest_mappings_dummy_page)
83 goto out;
84
85 for (i = 0; i < (size >> PAGE_SHIFT); i++)
86 pages[i] = gdev->guest_mappings_dummy_page;
87
88 /*
89 * Try several times, the VMM might not accept some addresses because
90 * of address clashes between the three contexts.
91 */
92 for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
93 guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
94 VM_MAP, PAGE_KERNEL_RO);
95 if (!guest_mappings[i])
96 break;
97
98 req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
99 req->header.rc = VERR_INTERNAL_ERROR;
100 req->hypervisor_size = hypervisor_size;
101 req->hypervisor_start =
102 (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
103
104 rc = vbg_req_perform(gdev, req);
105 if (rc >= 0) {
106 gdev->guest_mappings = guest_mappings[i];
107 break;
108 }
109 }
110
/* Free vmaps from failed attempts. */
112 while (--i >= 0)
113 vunmap(guest_mappings[i]);
114
/* On failure, free the dummy page backing the vmap. */
116 if (!gdev->guest_mappings) {
117 __free_page(gdev->guest_mappings_dummy_page);
118 gdev->guest_mappings_dummy_page = NULL;
119 }
120
121 out:
122 vbg_req_free(req, sizeof(*req));
123 kfree(pages);
124 }
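/*
 * Alignment note (illustrative figures): vmap() only guarantees page
 * alignment, so the function above over-allocates by SZ_4M and then reports
 * a 4 MiB aligned address inside the mapping to the host, e.g.:
 *
 *	void *map   = guest_mappings[i];     // e.g. 0xffffc90000123000
 *	void *start = PTR_ALIGN(map, SZ_4M); // ->   0xffffc90000400000
 *
 * The aligned address always lies within the mapping because the extra
 * 4 MiB covers the worst-case misalignment.
 */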
125
126 /**
127 * Undo what vbg_guest_mappings_init did.
128 *
129 * @gdev: The Guest extension device.
130 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
132 {
133 struct vmmdev_hypervisorinfo *req;
134 int rc;
135
136 if (!gdev->guest_mappings)
137 return;
138
139 /*
140 * Tell the host that we're going to free the memory we reserved for
 * it, then free it up. (Leak the memory if anything goes wrong here.)
142 */
143 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
144 VBG_KERNEL_REQUEST);
145 if (!req)
146 return;
147
148 req->hypervisor_start = 0;
149 req->hypervisor_size = 0;
150
151 rc = vbg_req_perform(gdev, req);
152
153 vbg_req_free(req, sizeof(*req));
154
155 if (rc < 0) {
156 vbg_err("%s error: %d\n", __func__, rc);
157 return;
158 }
159
160 vunmap(gdev->guest_mappings);
161 gdev->guest_mappings = NULL;
162
163 __free_page(gdev->guest_mappings_dummy_page);
164 gdev->guest_mappings_dummy_page = NULL;
165 }
166
167 /**
168 * Report the guest information to the host.
169 * Return: 0 or negative errno value.
170 * @gdev: The Guest extension device.
171 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
173 {
174 /*
175 * Allocate and fill in the two guest info reports.
176 */
177 struct vmmdev_guest_info *req1 = NULL;
178 struct vmmdev_guest_info2 *req2 = NULL;
179 int rc, ret = -ENOMEM;
180
181 req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
182 VBG_KERNEL_REQUEST);
183 req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
184 VBG_KERNEL_REQUEST);
185 if (!req1 || !req2)
186 goto out_free;
187
188 req1->interface_version = VMMDEV_VERSION;
189 req1->os_type = VMMDEV_OSTYPE_LINUX26;
190 #if __BITS_PER_LONG == 64
191 req1->os_type |= VMMDEV_OSTYPE_X64;
192 #endif
193
194 req2->additions_major = VBG_VERSION_MAJOR;
195 req2->additions_minor = VBG_VERSION_MINOR;
196 req2->additions_build = VBG_VERSION_BUILD;
197 req2->additions_revision = VBG_SVN_REV;
198 req2->additions_features =
199 VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
200 strlcpy(req2->name, VBG_VERSION_STRING,
201 sizeof(req2->name));
202
203 /*
204 * There are two protocols here:
205 * 1. INFO2 + INFO1. Supported by >=3.2.51.
206 * 2. INFO1 and optionally INFO2. The old protocol.
207 *
 * We try protocol 1 first (INFO2, then INFO1). It will fail with VERR_NOT_SUPPORTED
209 * if not supported by the VMMDev (message ordering requirement).
210 */
211 rc = vbg_req_perform(gdev, req2);
212 if (rc >= 0) {
213 rc = vbg_req_perform(gdev, req1);
214 } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
215 rc = vbg_req_perform(gdev, req1);
216 if (rc >= 0) {
217 rc = vbg_req_perform(gdev, req2);
218 if (rc == VERR_NOT_IMPLEMENTED)
219 rc = VINF_SUCCESS;
220 }
221 }
222 ret = vbg_status_code_to_errno(rc);
223
224 out_free:
225 vbg_req_free(req2, sizeof(*req2));
226 vbg_req_free(req1, sizeof(*req1));
227 return ret;
228 }
229
230 /**
231 * Report the guest driver status to the host.
232 * Return: 0 or negative errno value.
233 * @gdev: The Guest extension device.
234 * @active: Flag whether the driver is now active or not.
235 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
237 {
238 struct vmmdev_guest_status *req;
239 int rc;
240
241 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
242 VBG_KERNEL_REQUEST);
243 if (!req)
244 return -ENOMEM;
245
246 req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
247 if (active)
248 req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
249 else
250 req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
251 req->flags = 0;
252
253 rc = vbg_req_perform(gdev, req);
254 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
255 rc = VINF_SUCCESS;
256
257 vbg_req_free(req, sizeof(*req));
258
259 return vbg_status_code_to_errno(rc);
260 }
261
262 /**
263 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
264 * Return: 0 or negative errno value.
265 * @gdev: The Guest extension device.
266 * @chunk_idx: Index of the chunk.
267 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
269 {
270 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
271 struct page **pages;
272 int i, rc, ret;
273
274 pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
275 sizeof(*pages),
276 GFP_KERNEL | __GFP_NOWARN);
277 if (!pages)
278 return -ENOMEM;
279
280 req->header.size = sizeof(*req);
281 req->inflate = true;
282 req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
283
284 for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
285 pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
286 if (!pages[i]) {
287 ret = -ENOMEM;
288 goto out_error;
289 }
290
291 req->phys_page[i] = page_to_phys(pages[i]);
292 }
293
294 rc = vbg_req_perform(gdev, req);
295 if (rc < 0) {
296 vbg_err("%s error, rc: %d\n", __func__, rc);
297 ret = vbg_status_code_to_errno(rc);
298 goto out_error;
299 }
300
301 gdev->mem_balloon.pages[chunk_idx] = pages;
302
303 return 0;
304
305 out_error:
306 while (--i >= 0)
307 __free_page(pages[i]);
308 kfree(pages);
309
310 return ret;
311 }
312
313 /**
314 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
315 * Return: 0 or negative errno value.
316 * @gdev: The Guest extension device.
317 * @chunk_idx: Index of the chunk.
318 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
320 {
321 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
322 struct page **pages = gdev->mem_balloon.pages[chunk_idx];
323 int i, rc;
324
325 req->header.size = sizeof(*req);
326 req->inflate = false;
327 req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
328
329 for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
330 req->phys_page[i] = page_to_phys(pages[i]);
331
332 rc = vbg_req_perform(gdev, req);
333 if (rc < 0) {
334 vbg_err("%s error, rc: %d\n", __func__, rc);
335 return vbg_status_code_to_errno(rc);
336 }
337
338 for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
339 __free_page(pages[i]);
340 kfree(pages);
341 gdev->mem_balloon.pages[chunk_idx] = NULL;
342
343 return 0;
344 }
345
346 /**
347 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
348 * the host wants the balloon to be and adjust accordingly.
349 */
static void vbg_balloon_work(struct work_struct *work)
351 {
352 struct vbg_dev *gdev =
353 container_of(work, struct vbg_dev, mem_balloon.work);
354 struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
355 u32 i, chunks;
356 int rc, ret;
357
358 /*
359 * Setting this bit means that we request the value from the host and
360 * change the guest memory balloon according to the returned value.
361 */
362 req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
363 rc = vbg_req_perform(gdev, req);
364 if (rc < 0) {
vbg_err("%s error, rc: %d\n", __func__, rc);
366 return;
367 }
368
369 /*
 * The host always returns the same maximum number of chunks, so
371 * we do this once.
372 */
373 if (!gdev->mem_balloon.max_chunks) {
374 gdev->mem_balloon.pages =
375 devm_kcalloc(gdev->dev, req->phys_mem_chunks,
376 sizeof(struct page **), GFP_KERNEL);
377 if (!gdev->mem_balloon.pages)
378 return;
379
380 gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
381 }
382
383 chunks = req->balloon_chunks;
384 if (chunks > gdev->mem_balloon.max_chunks) {
385 vbg_err("%s: illegal balloon size %u (max=%u)\n",
386 __func__, chunks, gdev->mem_balloon.max_chunks);
387 return;
388 }
389
390 if (chunks > gdev->mem_balloon.chunks) {
391 /* inflate */
392 for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
393 ret = vbg_balloon_inflate(gdev, i);
394 if (ret < 0)
395 return;
396
397 gdev->mem_balloon.chunks++;
398 }
399 } else {
400 /* deflate */
401 for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
402 ret = vbg_balloon_deflate(gdev, i);
403 if (ret < 0)
404 return;
405
406 gdev->mem_balloon.chunks--;
407 }
408 }
409 }
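/*
 * Worked example (illustrative): if gdev->mem_balloon.chunks == 2 and the
 * host requests balloon_chunks == 5, the inflate loop above runs for
 * i = 2, 3, 4; if the host later requests 1, the deflate loop runs for
 * i = 4, 3, 2, 1. When a single inflate/deflate fails the loop stops with
 * "chunks" reflecting the last successful step, so a following
 * VMMDEV_EVENT_BALLOON_CHANGE_REQUEST can pick up where it left off.
 */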
410
411 /**
412 * Callback for heartbeat timer.
413 */
static void vbg_heartbeat_timer(struct timer_list *t)
415 {
416 struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
417
418 vbg_req_perform(gdev, gdev->guest_heartbeat_req);
mod_timer(&gdev->heartbeat_timer,
	  jiffies + msecs_to_jiffies(gdev->heartbeat_interval_ms));
421 }
422
423 /**
 * Configure the host to check the guest's heartbeat
 * and get the heartbeat interval from the host.
426 * Return: 0 or negative errno value.
427 * @gdev: The Guest extension device.
428 * @enabled: Set true to enable guest heartbeat checks on host.
429 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
431 {
432 struct vmmdev_heartbeat *req;
433 int rc;
434
435 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
436 VBG_KERNEL_REQUEST);
437 if (!req)
438 return -ENOMEM;
439
440 req->enabled = enabled;
441 req->interval_ns = 0;
442 rc = vbg_req_perform(gdev, req);
443 do_div(req->interval_ns, 1000000); /* ns -> ms */
444 gdev->heartbeat_interval_ms = req->interval_ns;
445 vbg_req_free(req, sizeof(*req));
446
447 return vbg_status_code_to_errno(rc);
448 }
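/*
 * Note on the ns -> ms conversion above: do_div() divides its 64-bit first
 * argument in place (usable on 32-bit kernels, where a plain 64-bit "/" is
 * not) and returns the remainder, which is ignored here. It is roughly
 * equivalent to:
 *
 *	gdev->heartbeat_interval_ms = req->interval_ns / 1000000;
 */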
449
450 /**
451 * Initializes the heartbeat timer. This feature may be disabled by the host.
452 * Return: 0 or negative errno value.
453 * @gdev: The Guest extension device.
454 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
456 {
457 int ret;
458
459 /* Make sure that heartbeat checking is disabled if we fail. */
460 ret = vbg_heartbeat_host_config(gdev, false);
461 if (ret < 0)
462 return ret;
463
464 ret = vbg_heartbeat_host_config(gdev, true);
465 if (ret < 0)
466 return ret;
467
468 gdev->guest_heartbeat_req = vbg_req_alloc(
469 sizeof(*gdev->guest_heartbeat_req),
470 VMMDEVREQ_GUEST_HEARTBEAT,
471 VBG_KERNEL_REQUEST);
472 if (!gdev->guest_heartbeat_req)
473 return -ENOMEM;
474
475 vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
476 __func__, gdev->heartbeat_interval_ms);
477 mod_timer(&gdev->heartbeat_timer, 0);
478
479 return 0;
480 }
481
482 /**
 * Clean up heartbeat code: stop the heartbeat timer and disable host heartbeat checking.
484 * @gdev: The Guest extension device.
485 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
487 {
488 del_timer_sync(&gdev->heartbeat_timer);
489 vbg_heartbeat_host_config(gdev, false);
490 vbg_req_free(gdev->guest_heartbeat_req,
491 sizeof(*gdev->guest_heartbeat_req));
492 }
493
494 /**
495 * Applies a change to the bit usage tracker.
496 * Return: true if the mask changed, false if not.
497 * @tracker: The bit usage tracker.
498 * @changed: The bits to change.
499 * @previous: The previous value of the bits.
500 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
502 u32 changed, u32 previous)
503 {
504 bool global_change = false;
505
506 while (changed) {
507 u32 bit = ffs(changed) - 1;
508 u32 bitmask = BIT(bit);
509
510 if (bitmask & previous) {
511 tracker->per_bit_usage[bit] -= 1;
512 if (tracker->per_bit_usage[bit] == 0) {
513 global_change = true;
514 tracker->mask &= ~bitmask;
515 }
516 } else {
517 tracker->per_bit_usage[bit] += 1;
518 if (tracker->per_bit_usage[bit] == 1) {
519 global_change = true;
520 tracker->mask |= bitmask;
521 }
522 }
523
524 changed &= ~bitmask;
525 }
526
527 return global_change;
528 }
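/*
 * Worked example (illustrative): the tracker is a per-bit reference count.
 * If session A and session B both enable bit 3 and A later drops it:
 *
 *	vbg_track_bit_usage(&t, BIT(3), 0);      // A sets:   0 -> 1, mask gains
 *	                                         //           BIT(3), returns true
 *	vbg_track_bit_usage(&t, BIT(3), 0);      // B sets:   1 -> 2, returns false
 *	vbg_track_bit_usage(&t, BIT(3), BIT(3)); // A clears: 2 -> 1, returns false
 *
 * Only the 0 -> 1 and 1 -> 0 transitions change tracker->mask, which is
 * what gets reported to the host.
 */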
529
530 /**
 * Init and termination worker for resetting the event filter mask on the host.
532 * Return: 0 or negative errno value.
533 * @gdev: The Guest extension device.
534 * @fixed_events: Fixed events (init time).
535 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
537 u32 fixed_events)
538 {
539 struct vmmdev_mask *req;
540 int rc;
541
542 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
543 VBG_KERNEL_REQUEST);
544 if (!req)
545 return -ENOMEM;
546
547 req->not_mask = U32_MAX & ~fixed_events;
548 req->or_mask = fixed_events;
549 rc = vbg_req_perform(gdev, req);
550 if (rc < 0)
551 vbg_err("%s error, rc: %d\n", __func__, rc);
552
553 vbg_req_free(req, sizeof(*req));
554 return vbg_status_code_to_errno(rc);
555 }
556
557 /**
558 * Changes the event filter mask for the given session.
559 *
560 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
562 *
563 * Return: 0 or negative errno value.
564 * @gdev: The Guest extension device.
565 * @session: The session.
566 * @or_mask: The events to add.
567 * @not_mask: The events to remove.
568 * @session_termination: Set if we're called by the session cleanup code.
569 * This tweaks the error handling so we perform
570 * proper session cleanup even if the host
571 * misbehaves.
572 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
574 struct vbg_session *session,
575 u32 or_mask, u32 not_mask,
576 bool session_termination)
577 {
578 struct vmmdev_mask *req;
579 u32 changed, previous;
580 int rc, ret = 0;
581
582 /*
 * Allocate a request buffer before taking the mutex. When the
 * session is being terminated the requestor is the kernel,
585 * as we're cleaning up.
586 */
587 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
588 session_termination ? VBG_KERNEL_REQUEST :
589 session->requestor);
590 if (!req) {
591 if (!session_termination)
592 return -ENOMEM;
593 /* Ignore allocation failure, we must do session cleanup. */
594 }
595
596 mutex_lock(&gdev->session_mutex);
597
598 /* Apply the changes to the session mask. */
599 previous = session->event_filter;
600 session->event_filter |= or_mask;
601 session->event_filter &= ~not_mask;
602
603 /* If anything actually changed, update the global usage counters. */
604 changed = previous ^ session->event_filter;
605 if (!changed)
606 goto out;
607
608 vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
609 or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
610
611 if (gdev->event_filter_host == or_mask || !req)
612 goto out;
613
614 gdev->event_filter_host = or_mask;
615 req->or_mask = or_mask;
616 req->not_mask = ~or_mask;
617 rc = vbg_req_perform(gdev, req);
618 if (rc < 0) {
619 ret = vbg_status_code_to_errno(rc);
620
621 /* Failed, roll back (unless it's session termination time). */
622 gdev->event_filter_host = U32_MAX;
623 if (session_termination)
624 goto out;
625
626 vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
627 session->event_filter);
628 session->event_filter = previous;
629 }
630
631 out:
632 mutex_unlock(&gdev->session_mutex);
633 vbg_req_free(req, sizeof(*req));
634
635 return ret;
636 }
637
638 /**
 * Init and termination worker for setting the guest capabilities to zero on the host.
640 * Return: 0 or negative errno value.
641 * @gdev: The Guest extension device.
642 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
644 {
645 struct vmmdev_mask *req;
646 int rc;
647
648 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
649 VBG_KERNEL_REQUEST);
650 if (!req)
651 return -ENOMEM;
652
653 req->not_mask = U32_MAX;
654 req->or_mask = 0;
655 rc = vbg_req_perform(gdev, req);
656 if (rc < 0)
657 vbg_err("%s error, rc: %d\n", __func__, rc);
658
659 vbg_req_free(req, sizeof(*req));
660 return vbg_status_code_to_errno(rc);
661 }
662
663 /**
 * Sets the guest capabilities for a session. Takes the session mutex.
665 * Return: 0 or negative errno value.
666 * @gdev: The Guest extension device.
667 * @session: The session.
668 * @or_mask: The capabilities to add.
669 * @not_mask: The capabilities to remove.
670 * @session_termination: Set if we're called by the session cleanup code.
671 * This tweaks the error handling so we perform
672 * proper session cleanup even if the host
673 * misbehaves.
674 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
676 struct vbg_session *session,
677 u32 or_mask, u32 not_mask,
678 bool session_termination)
679 {
680 struct vmmdev_mask *req;
681 u32 changed, previous;
682 int rc, ret = 0;
683
684 /*
 * Allocate a request buffer before taking the mutex. When the
 * session is being terminated the requestor is the kernel,
687 * as we're cleaning up.
688 */
689 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
690 session_termination ? VBG_KERNEL_REQUEST :
691 session->requestor);
692 if (!req) {
693 if (!session_termination)
694 return -ENOMEM;
695 /* Ignore allocation failure, we must do session cleanup. */
696 }
697
698 mutex_lock(&gdev->session_mutex);
699
700 /* Apply the changes to the session mask. */
701 previous = session->guest_caps;
702 session->guest_caps |= or_mask;
703 session->guest_caps &= ~not_mask;
704
705 /* If anything actually changed, update the global usage counters. */
706 changed = previous ^ session->guest_caps;
707 if (!changed)
708 goto out;
709
710 vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
711 or_mask = gdev->guest_caps_tracker.mask;
712
713 if (gdev->guest_caps_host == or_mask || !req)
714 goto out;
715
716 gdev->guest_caps_host = or_mask;
717 req->or_mask = or_mask;
718 req->not_mask = ~or_mask;
719 rc = vbg_req_perform(gdev, req);
720 if (rc < 0) {
721 ret = vbg_status_code_to_errno(rc);
722
723 /* Failed, roll back (unless it's session termination time). */
724 gdev->guest_caps_host = U32_MAX;
725 if (session_termination)
726 goto out;
727
728 vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
729 session->guest_caps);
730 session->guest_caps = previous;
731 }
732
733 out:
734 mutex_unlock(&gdev->session_mutex);
735 vbg_req_free(req, sizeof(*req));
736
737 return ret;
738 }
739
740 /**
 * vbg_query_host_version gets the host feature mask and version information.
742 * Return: 0 or negative errno value.
743 * @gdev: The Guest extension device.
744 */
static int vbg_query_host_version(struct vbg_dev *gdev)
746 {
747 struct vmmdev_host_version *req;
748 int rc, ret;
749
750 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
751 VBG_KERNEL_REQUEST);
752 if (!req)
753 return -ENOMEM;
754
755 rc = vbg_req_perform(gdev, req);
756 ret = vbg_status_code_to_errno(rc);
757 if (ret) {
758 vbg_err("%s error: %d\n", __func__, rc);
759 goto out;
760 }
761
762 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
763 req->major, req->minor, req->build, req->revision);
764 gdev->host_features = req->features;
765
766 vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
767 gdev->host_features);
768
769 if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
770 vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
771 ret = -ENODEV;
772 }
773
774 out:
775 vbg_req_free(req, sizeof(*req));
776 return ret;
777 }
778
779 /**
780 * Initializes the VBoxGuest device extension when the
781 * device driver is loaded.
782 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
785 * mapping the MMIO memory (if present). Upon successful return
786 * the native code should set up the interrupt handler.
787 *
788 * Return: 0 or negative errno value.
789 *
790 * @gdev: The Guest extension device.
791 * @fixed_events: Events that will be enabled upon init and no client
792 * will ever be allowed to mask.
793 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
795 {
796 int ret = -ENOMEM;
797
798 gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
799 gdev->event_filter_host = U32_MAX; /* forces a report */
800 gdev->guest_caps_host = U32_MAX; /* forces a report */
801
802 init_waitqueue_head(&gdev->event_wq);
803 init_waitqueue_head(&gdev->hgcm_wq);
804 spin_lock_init(&gdev->event_spinlock);
805 mutex_init(&gdev->session_mutex);
806 mutex_init(&gdev->cancel_req_mutex);
807 timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
808 INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
809
810 gdev->mem_balloon.get_req =
811 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
812 VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
813 VBG_KERNEL_REQUEST);
814 gdev->mem_balloon.change_req =
815 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
816 VMMDEVREQ_CHANGE_MEMBALLOON,
817 VBG_KERNEL_REQUEST);
818 gdev->cancel_req =
819 vbg_req_alloc(sizeof(*(gdev->cancel_req)),
820 VMMDEVREQ_HGCM_CANCEL2,
821 VBG_KERNEL_REQUEST);
822 gdev->ack_events_req =
823 vbg_req_alloc(sizeof(*gdev->ack_events_req),
824 VMMDEVREQ_ACKNOWLEDGE_EVENTS,
825 VBG_KERNEL_REQUEST);
826 gdev->mouse_status_req =
827 vbg_req_alloc(sizeof(*gdev->mouse_status_req),
828 VMMDEVREQ_GET_MOUSE_STATUS,
829 VBG_KERNEL_REQUEST);
830
831 if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
832 !gdev->cancel_req || !gdev->ack_events_req ||
833 !gdev->mouse_status_req)
834 goto err_free_reqs;
835
836 ret = vbg_query_host_version(gdev);
837 if (ret)
838 goto err_free_reqs;
839
840 ret = vbg_report_guest_info(gdev);
841 if (ret) {
842 vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
843 goto err_free_reqs;
844 }
845
846 ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
847 if (ret) {
848 vbg_err("vboxguest: Error setting fixed event filter: %d\n",
849 ret);
850 goto err_free_reqs;
851 }
852
853 ret = vbg_reset_host_capabilities(gdev);
854 if (ret) {
855 vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
856 ret);
857 goto err_free_reqs;
858 }
859
860 ret = vbg_core_set_mouse_status(gdev, 0);
861 if (ret) {
862 vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
863 goto err_free_reqs;
864 }
865
866 /* These may fail without requiring the driver init to fail. */
867 vbg_guest_mappings_init(gdev);
868 vbg_heartbeat_init(gdev);
869
870 /* All Done! */
871 ret = vbg_report_driver_status(gdev, true);
872 if (ret < 0)
873 vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
874
875 return 0;
876
877 err_free_reqs:
878 vbg_req_free(gdev->mouse_status_req,
879 sizeof(*gdev->mouse_status_req));
880 vbg_req_free(gdev->ack_events_req,
881 sizeof(*gdev->ack_events_req));
882 vbg_req_free(gdev->cancel_req,
883 sizeof(*gdev->cancel_req));
884 vbg_req_free(gdev->mem_balloon.change_req,
885 sizeof(*gdev->mem_balloon.change_req));
886 vbg_req_free(gdev->mem_balloon.get_req,
887 sizeof(*gdev->mem_balloon.get_req));
888 return ret;
889 }
890
891 /**
 * Call this on exit to clean up vboxguest-core managed resources.
 *
 * The native code should call this before the driver is unloaded,
895 * but don't call this on shutdown.
896 * @gdev: The Guest extension device.
897 */
void vbg_core_exit(struct vbg_dev *gdev)
899 {
900 vbg_heartbeat_exit(gdev);
901 vbg_guest_mappings_exit(gdev);
902
903 /* Clear the host flags (mouse status etc). */
904 vbg_reset_host_event_filter(gdev, 0);
905 vbg_reset_host_capabilities(gdev);
906 vbg_core_set_mouse_status(gdev, 0);
907
908 vbg_req_free(gdev->mouse_status_req,
909 sizeof(*gdev->mouse_status_req));
910 vbg_req_free(gdev->ack_events_req,
911 sizeof(*gdev->ack_events_req));
912 vbg_req_free(gdev->cancel_req,
913 sizeof(*gdev->cancel_req));
914 vbg_req_free(gdev->mem_balloon.change_req,
915 sizeof(*gdev->mem_balloon.change_req));
916 vbg_req_free(gdev->mem_balloon.get_req,
917 sizeof(*gdev->mem_balloon.get_req));
918 }
919
920 /**
921 * Creates a VBoxGuest user session.
922 *
923 * vboxguest_linux.c calls this when userspace opens the char-device.
924 * Return: A pointer to the new session or an ERR_PTR on error.
925 * @gdev: The Guest extension device.
926 * @requestor: VMMDEV_REQUESTOR_* flags
927 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
929 {
930 struct vbg_session *session;
931
932 session = kzalloc(sizeof(*session), GFP_KERNEL);
933 if (!session)
934 return ERR_PTR(-ENOMEM);
935
936 session->gdev = gdev;
937 session->requestor = requestor;
938
939 return session;
940 }
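/*
 * Illustrative sketch (not the actual vboxguest_linux.c code) of how a
 * char-device open handler might use this; the way gdev is looked up and
 * the requestor flags are placeholders here:
 *
 *	static int vbg_misc_open(struct inode *inode, struct file *filp)
 *	{
 *		struct vbg_dev *gdev = ...;	// from the miscdevice
 *		u32 requestor = ...;		// VMMDEV_REQUESTOR_* flags
 *		struct vbg_session *session;
 *
 *		session = vbg_core_open_session(gdev, requestor);
 *		if (IS_ERR(session))
 *			return PTR_ERR(session);
 *
 *		filp->private_data = session;
 *		return 0;
 *	}
 *
 * The matching release handler would call vbg_core_close_session() on
 * filp->private_data.
 */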
941
942 /**
943 * Closes a VBoxGuest session.
944 * @session: The session to close (and free).
945 */
void vbg_core_close_session(struct vbg_session *session)
947 {
948 struct vbg_dev *gdev = session->gdev;
949 int i, rc;
950
951 vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
952 vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
953
954 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
955 if (!session->hgcm_client_ids[i])
956 continue;
957
958 /* requestor is kernel here, as we're cleaning up. */
959 vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
960 session->hgcm_client_ids[i], &rc);
961 }
962
963 kfree(session);
964 }
965
static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
967 size_t out_size)
968 {
969 if (hdr->size_in != (sizeof(*hdr) + in_size) ||
970 hdr->size_out != (sizeof(*hdr) + out_size))
971 return -EINVAL;
972
973 return 0;
974 }
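/*
 * Illustrative example: for an ioctl struct with an 8-byte input part and a
 * 16-byte output part, vbg_ioctl_chk() only accepts the request when
 * userspace filled in exactly:
 *
 *	hdr.size_in  == sizeof(struct vbg_ioctl_hdr) + 8
 *	hdr.size_out == sizeof(struct vbg_ioctl_hdr) + 16
 *
 * The fixed-size ioctl handlers below pass sizeof(x->u.in) / sizeof(x->u.out)
 * for these two arguments.
 */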
975
static int vbg_ioctl_driver_version_info(
977 struct vbg_ioctl_driver_version_info *info)
978 {
979 const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
980 u16 min_maj_version, req_maj_version;
981
982 if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
983 return -EINVAL;
984
985 req_maj_version = info->u.in.req_version >> 16;
986 min_maj_version = info->u.in.min_version >> 16;
987
988 if (info->u.in.min_version > info->u.in.req_version ||
989 min_maj_version != req_maj_version)
990 return -EINVAL;
991
992 if (info->u.in.min_version <= VBG_IOC_VERSION &&
993 min_maj_version == vbg_maj_version) {
994 info->u.out.session_version = VBG_IOC_VERSION;
995 } else {
996 info->u.out.session_version = U32_MAX;
997 info->hdr.rc = VERR_VERSION_MISMATCH;
998 }
999
1000 info->u.out.driver_version = VBG_IOC_VERSION;
1001 info->u.out.driver_revision = 0;
1002 info->u.out.reserved1 = 0;
1003 info->u.out.reserved2 = 0;
1004
1005 return 0;
1006 }
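/*
 * Version encoding note (illustrative values): the major version lives in the
 * upper 16 bits of these u32 fields. Assuming a hypothetical VBG_IOC_VERSION
 * of 0x00010000 (major 1, minor 0):
 *
 *	min_version = req_version = 0x00010000 -> session_version = VBG_IOC_VERSION
 *	min_version = req_version = 0x00020000 -> session_version = U32_MAX,
 *	                                          hdr.rc = VERR_VERSION_MISMATCH
 *
 * A major-version mismatch is reported through the output fields rather than
 * as a hard ioctl error.
 */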
1007
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
1009 struct vbg_session *session,
1010 u32 event_mask)
1011 {
1012 unsigned long flags;
1013 bool wakeup;
1014 u32 events;
1015
1016 spin_lock_irqsave(&gdev->event_spinlock, flags);
1017
1018 events = gdev->pending_events & event_mask;
1019 wakeup = events || session->cancel_waiters;
1020
1021 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1022
1023 return wakeup;
1024 }
1025
/* Must be called with the event_spinlock held. */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
1028 struct vbg_session *session,
1029 u32 event_mask)
1030 {
1031 u32 events = gdev->pending_events & event_mask;
1032
1033 gdev->pending_events &= ~events;
1034 return events;
1035 }
1036
static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
1038 struct vbg_session *session,
1039 struct vbg_ioctl_wait_for_events *wait)
1040 {
1041 u32 timeout_ms = wait->u.in.timeout_ms;
1042 u32 event_mask = wait->u.in.events;
1043 unsigned long flags;
1044 long timeout;
1045 int ret = 0;
1046
1047 if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
1048 return -EINVAL;
1049
1050 if (timeout_ms == U32_MAX)
1051 timeout = MAX_SCHEDULE_TIMEOUT;
1052 else
1053 timeout = msecs_to_jiffies(timeout_ms);
1054
1055 wait->u.out.events = 0;
1056 do {
1057 timeout = wait_event_interruptible_timeout(
1058 gdev->event_wq,
1059 vbg_wait_event_cond(gdev, session, event_mask),
1060 timeout);
1061
1062 spin_lock_irqsave(&gdev->event_spinlock, flags);
1063
1064 if (timeout < 0 || session->cancel_waiters) {
1065 ret = -EINTR;
1066 } else if (timeout == 0) {
1067 ret = -ETIMEDOUT;
1068 } else {
1069 wait->u.out.events =
1070 vbg_consume_events_locked(gdev, session, event_mask);
1071 }
1072
1073 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1074
1075 /*
1076 * Someone else may have consumed the event(s) first, in
1077 * which case we go back to waiting.
1078 */
1079 } while (ret == 0 && wait->u.out.events == 0);
1080
1081 return ret;
1082 }
1083
static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
1085 struct vbg_session *session,
1086 struct vbg_ioctl_hdr *hdr)
1087 {
1088 unsigned long flags;
1089
1090 if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
1091 return -EINVAL;
1092
1093 spin_lock_irqsave(&gdev->event_spinlock, flags);
1094 session->cancel_waiters = true;
1095 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1096
1097 wake_up(&gdev->event_wq);
1098
1099 return 0;
1100 }
1101
1102 /**
1103 * Checks if the VMM request is allowed in the context of the given session.
1104 * Return: 0 or negative errno value.
1105 * @gdev: The Guest extension device.
1106 * @session: The calling session.
1107 * @req: The request.
1108 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
1110 const struct vmmdev_request_header *req)
1111 {
1112 const struct vmmdev_guest_status *guest_status;
1113 bool trusted_apps_only;
1114
1115 switch (req->request_type) {
/* Trusted user apps only. */
1117 case VMMDEVREQ_QUERY_CREDENTIALS:
1118 case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
1119 case VMMDEVREQ_REGISTER_SHARED_MODULE:
1120 case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
1121 case VMMDEVREQ_WRITE_COREDUMP:
1122 case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
1123 case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
1124 case VMMDEVREQ_CHECK_SHARED_MODULES:
1125 case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
1126 case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
1127 case VMMDEVREQ_REPORT_GUEST_STATS:
1128 case VMMDEVREQ_REPORT_GUEST_USER_STATE:
1129 case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
1130 trusted_apps_only = true;
1131 break;
1132
1133 /* Anyone. */
1134 case VMMDEVREQ_GET_MOUSE_STATUS:
1135 case VMMDEVREQ_SET_MOUSE_STATUS:
1136 case VMMDEVREQ_SET_POINTER_SHAPE:
1137 case VMMDEVREQ_GET_HOST_VERSION:
1138 case VMMDEVREQ_IDLE:
1139 case VMMDEVREQ_GET_HOST_TIME:
1140 case VMMDEVREQ_SET_POWER_STATUS:
1141 case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
1142 case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
1143 case VMMDEVREQ_REPORT_GUEST_STATUS:
1144 case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
1145 case VMMDEVREQ_VIDEMODE_SUPPORTED:
1146 case VMMDEVREQ_GET_HEIGHT_REDUCTION:
1147 case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
1148 case VMMDEVREQ_VIDEMODE_SUPPORTED2:
1149 case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
1150 case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
1151 case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
1152 case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
1153 case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
1154 case VMMDEVREQ_GET_VRDPCHANGE_REQ:
1155 case VMMDEVREQ_LOG_STRING:
1156 case VMMDEVREQ_GET_SESSION_ID:
1157 trusted_apps_only = false;
1158 break;
1159
1160 /* Depends on the request parameters... */
1161 case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
1162 guest_status = (const struct vmmdev_guest_status *)req;
1163 switch (guest_status->facility) {
1164 case VBOXGUEST_FACILITY_TYPE_ALL:
1165 case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
1166 vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
1167 guest_status->facility);
1168 return -EPERM;
1169 case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
1170 trusted_apps_only = true;
1171 break;
1172 case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
1173 case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
1174 case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
1175 default:
1176 trusted_apps_only = false;
1177 break;
1178 }
1179 break;
1180
1181 /* Anything else is not allowed. */
1182 default:
1183 vbg_err("Denying userspace vmm call type %#08x\n",
1184 req->request_type);
1185 return -EPERM;
1186 }
1187
1188 if (trusted_apps_only &&
1189 (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
1190 vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
1191 req->request_type);
1192 return -EPERM;
1193 }
1194
1195 return 0;
1196 }
1197
static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
1199 struct vbg_session *session, void *data)
1200 {
1201 struct vbg_ioctl_hdr *hdr = data;
1202 int ret;
1203
1204 if (hdr->size_in != hdr->size_out)
1205 return -EINVAL;
1206
1207 if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
1208 return -E2BIG;
1209
1210 if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
1211 return -EINVAL;
1212
1213 ret = vbg_req_allowed(gdev, session, data);
1214 if (ret < 0)
1215 return ret;
1216
1217 vbg_req_perform(gdev, data);
1218 WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
1219
1220 return 0;
1221 }
1222
static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
1224 struct vbg_session *session,
1225 struct vbg_ioctl_hgcm_connect *conn)
1226 {
1227 u32 client_id;
1228 int i, ret;
1229
1230 if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
1231 return -EINVAL;
1232
1233 /* Find a free place in the sessions clients array and claim it */
1234 mutex_lock(&gdev->session_mutex);
1235 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1236 if (!session->hgcm_client_ids[i]) {
1237 session->hgcm_client_ids[i] = U32_MAX;
1238 break;
1239 }
1240 }
1241 mutex_unlock(&gdev->session_mutex);
1242
1243 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1244 return -EMFILE;
1245
1246 ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
1247 &client_id, &conn->hdr.rc);
1248
1249 mutex_lock(&gdev->session_mutex);
1250 if (ret == 0 && conn->hdr.rc >= 0) {
1251 conn->u.out.client_id = client_id;
1252 session->hgcm_client_ids[i] = client_id;
1253 } else {
1254 conn->u.out.client_id = 0;
1255 session->hgcm_client_ids[i] = 0;
1256 }
1257 mutex_unlock(&gdev->session_mutex);
1258
1259 return ret;
1260 }
1261
static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1263 struct vbg_session *session,
1264 struct vbg_ioctl_hgcm_disconnect *disconn)
1265 {
1266 u32 client_id;
1267 int i, ret;
1268
1269 if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
1270 return -EINVAL;
1271
1272 client_id = disconn->u.in.client_id;
1273 if (client_id == 0 || client_id == U32_MAX)
1274 return -EINVAL;
1275
1276 mutex_lock(&gdev->session_mutex);
1277 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1278 if (session->hgcm_client_ids[i] == client_id) {
1279 session->hgcm_client_ids[i] = U32_MAX;
1280 break;
1281 }
1282 }
1283 mutex_unlock(&gdev->session_mutex);
1284
1285 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1286 return -EINVAL;
1287
1288 ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
1289 &disconn->hdr.rc);
1290
1291 mutex_lock(&gdev->session_mutex);
1292 if (ret == 0 && disconn->hdr.rc >= 0)
1293 session->hgcm_client_ids[i] = 0;
1294 else
1295 session->hgcm_client_ids[i] = client_id;
1296 mutex_unlock(&gdev->session_mutex);
1297
1298 return ret;
1299 }
1300
static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
1302 {
1303 switch (type) {
1304 case VMMDEV_HGCM_PARM_TYPE_32BIT:
1305 case VMMDEV_HGCM_PARM_TYPE_64BIT:
1306 case VMMDEV_HGCM_PARM_TYPE_LINADDR:
1307 case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
1308 case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
1309 return true;
1310 default:
1311 return false;
1312 }
1313 }
1314
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1316 struct vbg_session *session, bool f32bit,
1317 struct vbg_ioctl_hgcm_call *call)
1318 {
1319 size_t actual_size;
1320 u32 client_id;
1321 int i, ret;
1322
1323 if (call->hdr.size_in < sizeof(*call))
1324 return -EINVAL;
1325
1326 if (call->hdr.size_in != call->hdr.size_out)
1327 return -EINVAL;
1328
1329 if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
1330 return -E2BIG;
1331
1332 client_id = call->client_id;
1333 if (client_id == 0 || client_id == U32_MAX)
1334 return -EINVAL;
1335
1336 actual_size = sizeof(*call);
1337 if (f32bit)
1338 actual_size += call->parm_count *
1339 sizeof(struct vmmdev_hgcm_function_parameter32);
1340 else
1341 actual_size += call->parm_count *
1342 sizeof(struct vmmdev_hgcm_function_parameter);
1343 if (call->hdr.size_in < actual_size) {
1344 vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
1345 call->hdr.size_in, actual_size);
1346 return -EINVAL;
1347 }
1348 call->hdr.size_out = actual_size;
1349
1350 /* Validate parameter types */
1351 if (f32bit) {
1352 struct vmmdev_hgcm_function_parameter32 *parm =
1353 VBG_IOCTL_HGCM_CALL_PARMS32(call);
1354
1355 for (i = 0; i < call->parm_count; i++)
1356 if (!vbg_param_valid(parm[i].type))
1357 return -EINVAL;
1358 } else {
1359 struct vmmdev_hgcm_function_parameter *parm =
1360 VBG_IOCTL_HGCM_CALL_PARMS(call);
1361
1362 for (i = 0; i < call->parm_count; i++)
1363 if (!vbg_param_valid(parm[i].type))
1364 return -EINVAL;
1365 }
1366
1367 /*
1368 * Validate the client id.
1369 */
1370 mutex_lock(&gdev->session_mutex);
1371 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
1372 if (session->hgcm_client_ids[i] == client_id)
1373 break;
1374 mutex_unlock(&gdev->session_mutex);
1375 if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
1376 vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
1377 client_id);
1378 return -EINVAL;
1379 }
1380
1381 if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
1382 ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
1383 call->function, call->timeout_ms,
1384 VBG_IOCTL_HGCM_CALL_PARMS32(call),
1385 call->parm_count, &call->hdr.rc);
1386 else
1387 ret = vbg_hgcm_call(gdev, session->requestor, client_id,
1388 call->function, call->timeout_ms,
1389 VBG_IOCTL_HGCM_CALL_PARMS(call),
1390 call->parm_count, &call->hdr.rc);
1391
1392 if (ret == -E2BIG) {
1393 /* E2BIG needs to be reported through the hdr.rc field. */
1394 call->hdr.rc = VERR_OUT_OF_RANGE;
1395 ret = 0;
1396 }
1397
1398 if (ret && ret != -EINTR && ret != -ETIMEDOUT)
1399 vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
1400
1401 return ret;
1402 }
1403
static int vbg_ioctl_log(struct vbg_ioctl_log *log)
1405 {
1406 if (log->hdr.size_out != sizeof(log->hdr))
1407 return -EINVAL;
1408
1409 vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
1410 log->u.in.msg);
1411
1412 return 0;
1413 }
1414
static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
1416 struct vbg_session *session,
1417 struct vbg_ioctl_change_filter *filter)
1418 {
1419 u32 or_mask, not_mask;
1420
1421 if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
1422 return -EINVAL;
1423
1424 or_mask = filter->u.in.or_mask;
1425 not_mask = filter->u.in.not_mask;
1426
1427 if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1428 return -EINVAL;
1429
1430 return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
1431 false);
1432 }
1433
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
1435 struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
1436 {
1437 u32 or_mask, not_mask;
1438 int ret;
1439
1440 if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
1441 return -EINVAL;
1442
1443 or_mask = caps->u.in.or_mask;
1444 not_mask = caps->u.in.not_mask;
1445
1446 if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1447 return -EINVAL;
1448
1449 ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
1450 false);
1451 if (ret)
1452 return ret;
1453
1454 caps->u.out.session_caps = session->guest_caps;
1455 caps->u.out.global_caps = gdev->guest_caps_host;
1456
1457 return 0;
1458 }
1459
static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
1461 struct vbg_ioctl_check_balloon *balloon_info)
1462 {
1463 if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
1464 return -EINVAL;
1465
1466 balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
1467 /*
1468 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
1469 * events entirely in the kernel, see vbg_core_isr().
1470 */
1471 balloon_info->u.out.handle_in_r3 = false;
1472
1473 return 0;
1474 }
1475
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1477 struct vbg_session *session,
1478 struct vbg_ioctl_write_coredump *dump)
1479 {
1480 struct vmmdev_write_core_dump *req;
1481
1482 if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
1483 return -EINVAL;
1484
1485 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
1486 session->requestor);
1487 if (!req)
1488 return -ENOMEM;
1489
1490 req->flags = dump->u.in.flags;
1491 dump->hdr.rc = vbg_req_perform(gdev, req);
1492
1493 vbg_req_free(req, sizeof(*req));
1494 return 0;
1495 }
1496
1497 /**
1498 * Common IOCtl for user to kernel communication.
1499 * Return: 0 or negative errno value.
1500 * @session: The client session.
1501 * @req: The requested function.
1502 * @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
1503 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1505 {
1506 unsigned int req_no_size = req & ~IOCSIZE_MASK;
1507 struct vbg_dev *gdev = session->gdev;
1508 struct vbg_ioctl_hdr *hdr = data;
1509 bool f32bit = false;
1510
1511 hdr->rc = VINF_SUCCESS;
1512 if (!hdr->size_out)
1513 hdr->size_out = hdr->size_in;
1514
1515 /*
1516 * hdr->version and hdr->size_in / hdr->size_out minimum size are
1517 * already checked by vbg_misc_device_ioctl().
1518 */
1519
1520 /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
1521 if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
1522 req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
1523 return vbg_ioctl_vmmrequest(gdev, session, data);
1524
1525 if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
1526 return -EINVAL;
1527
1528 /* Fixed size requests. */
1529 switch (req) {
1530 case VBG_IOCTL_DRIVER_VERSION_INFO:
1531 return vbg_ioctl_driver_version_info(data);
1532 case VBG_IOCTL_HGCM_CONNECT:
1533 return vbg_ioctl_hgcm_connect(gdev, session, data);
1534 case VBG_IOCTL_HGCM_DISCONNECT:
1535 return vbg_ioctl_hgcm_disconnect(gdev, session, data);
1536 case VBG_IOCTL_WAIT_FOR_EVENTS:
1537 return vbg_ioctl_wait_for_events(gdev, session, data);
1538 case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
1539 return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
1540 case VBG_IOCTL_CHANGE_FILTER_MASK:
1541 return vbg_ioctl_change_filter_mask(gdev, session, data);
1542 case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
1543 return vbg_ioctl_change_guest_capabilities(gdev, session, data);
1544 case VBG_IOCTL_CHECK_BALLOON:
1545 return vbg_ioctl_check_balloon(gdev, data);
1546 case VBG_IOCTL_WRITE_CORE_DUMP:
1547 return vbg_ioctl_write_core_dump(gdev, session, data);
1548 }
1549
1550 /* Variable sized requests. */
1551 switch (req_no_size) {
1552 #ifdef CONFIG_COMPAT
1553 case VBG_IOCTL_HGCM_CALL_32(0):
1554 f32bit = true;
1555 #endif
1556 /* Fall through */
1557 case VBG_IOCTL_HGCM_CALL(0):
1558 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
1559 case VBG_IOCTL_LOG(0):
1560 return vbg_ioctl_log(data);
1561 }
1562
1563 vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
1564 return -ENOTTY;
1565 }
1566
1567 /**
1568 * Report guest supported mouse-features to the host.
1569 *
1570 * Return: 0 or negative errno value.
1571 * @gdev: The Guest extension device.
1572 * @features: The set of features to report to the host.
1573 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1575 {
1576 struct vmmdev_mouse_status *req;
1577 int rc;
1578
1579 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
1580 VBG_KERNEL_REQUEST);
1581 if (!req)
1582 return -ENOMEM;
1583
1584 req->mouse_features = features;
1585 req->pointer_pos_x = 0;
1586 req->pointer_pos_y = 0;
1587
1588 rc = vbg_req_perform(gdev, req);
1589 if (rc < 0)
1590 vbg_err("%s error, rc: %d\n", __func__, rc);
1591
1592 vbg_req_free(req, sizeof(*req));
1593 return vbg_status_code_to_errno(rc);
1594 }
1595
1596 /** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
1598 {
1599 struct vbg_dev *gdev = dev_id;
1600 struct vmmdev_events *req = gdev->ack_events_req;
1601 bool mouse_position_changed = false;
1602 unsigned long flags;
1603 u32 events = 0;
1604 int rc;
1605
1606 if (!gdev->mmio->V.V1_04.have_events)
1607 return IRQ_NONE;
1608
/* Get and acknowledge events. */
1610 req->header.rc = VERR_INTERNAL_ERROR;
1611 req->events = 0;
1612 rc = vbg_req_perform(gdev, req);
1613 if (rc < 0) {
1614 vbg_err("Error performing events req, rc: %d\n", rc);
1615 return IRQ_NONE;
1616 }
1617
1618 events = req->events;
1619
1620 if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
1621 mouse_position_changed = true;
1622 events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1623 }
1624
1625 if (events & VMMDEV_EVENT_HGCM) {
1626 wake_up(&gdev->hgcm_wq);
1627 events &= ~VMMDEV_EVENT_HGCM;
1628 }
1629
1630 if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
1631 schedule_work(&gdev->mem_balloon.work);
1632 events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1633 }
1634
1635 if (events) {
1636 spin_lock_irqsave(&gdev->event_spinlock, flags);
1637 gdev->pending_events |= events;
1638 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1639
1640 wake_up(&gdev->event_wq);
1641 }
1642
1643 if (mouse_position_changed)
1644 vbg_linux_mouse_event(gdev);
1645
1646 return IRQ_HANDLED;
1647 }
1648