// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"
/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
 * VMCI page files in the VMX and support VM to VM communication, and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *   --------------  NEW  -------------
 *   |                                 |
 *  \_/                               \_/
 *   CREATED_NO_MEM <----------------> CREATED_MEM
 *   |    |                            |
 *   |    o-----------------------o    |
 *   |                            |    |
 *  \_/                          \_/  \_/
 *   ATTACHED_NO_MEM <---------------> ATTACHED_MEM
 *   |    |                            |
 *   |    o----------------------o     |
 *   |                           |     |
 *  \_/                         \_/   \_/
 *   SHUTDOWN_NO_MEM <---------------> SHUTDOWN_MEM
 *   |                                 |
 *   |                                 |
 *   -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX, that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
 *       a later point in time. This state can be distinguished from the one
 *       above by the context ID of the creator. A host side is not allowed to
 *       attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *   paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair was already in the
 *     VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style
 *     VMX will bring the queue pair into this state. Once
 *     vmci_qp_broker_set_page_store is called to register the user memory,
 *     the VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
 * in which case the queue pair will transition from the *_NO_MEM state at that
 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
 * since the peer may have either attached or detached in the meantime. The
 * values are laid out such that ++ on a state will move from a *_NO_MEM to a
 * *_MEM state, and vice versa.
 */
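
/*
 * As an illustration of the diagram above, a typical lifecycle for a
 * host-created queue pair with a new-style VMX peer would be:
 *
 *   NEW -> CREATED_NO_MEM  (host creates; no backing memory yet)
 *       -> ATTACHED_MEM    (guest attaches and supplies the page store)
 *       -> SHUTDOWN_*      (one side detaches)
 *       -> gone            (the other side detaches)
 */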

/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	u64 produce_q_size;
	u64 consume_q_size;
	u32 peer;
	u32 flags;
	u32 priv_flags;
	bool guest_endpoint;
	unsigned int blocked;
	unsigned int generation;
	wait_queue_head_t event;
};

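/*
 * Note the ordering below: each *_NO_MEM value immediately precedes its
 * *_MEM counterpart, which is what allows the ++/-- transitions between
 * the NO_MEM and MEM variants described in the comment at the top of
 * this file.
 */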
enum qp_broker_state {
	VMCIQPB_NEW,
	VMCIQPB_CREATED_NO_MEM,
	VMCIQPB_CREATED_MEM,
	VMCIQPB_ATTACHED_NO_MEM,
	VMCIQPB_ATTACHED_MEM,
	VMCIQPB_SHUTDOWN_NO_MEM,
	VMCIQPB_SHUTDOWN_MEM,
	VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)

/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
	struct list_head list_item;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u32 ref_count;
};

struct qp_broker_entry {
	struct vmci_resource resource;
	struct qp_entry qp;
	u32 create_id;
	u32 attach_id;
	enum qp_broker_state state;
	bool require_trusted_attach;
	bool created_by_trusted;
	bool vmci_page_files;	/* Created by VMX using VMCI page files */
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;
	struct vmci_queue_header saved_consume_q;
	vmci_event_release_cb wakeup_cb;
	void *client_data;
	void *local_mem;	/* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
	struct vmci_resource resource;
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;
	void *consume_q;
	struct ppn_set ppn_set;
};

struct qp_list {
	struct list_head head;
	struct mutex mutex;	/* Protect queue list. */
};

static struct qp_list qp_broker_list = {
	.head = LIST_HEAD_INIT(qp_broker_list.head),
	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
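/*
 * Total number of pages backing a queue pair entry: the data pages of
 * both queues plus one header page for each queue (the "+ 2").
 */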
#define QPE_NUM_PAGES(_QPE) ((u32) \
			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))


/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		u64 i;

		/* Given size does not include header, so add in a page here. */
		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
					  queue->kernel_if->u.g.vas[i],
					  queue->kernel_if->u.g.pas[i]);
		}

		vfree(queue);
	}
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
	u64 i;
	struct vmci_queue *queue;
	size_t pas_size;
	size_t vas_size;
	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
	u64 num_pages;

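	/*
	 * Guard against overflow: one extra page is needed for the queue
	 * header, and the per-page metadata arrays below must also fit in
	 * a size_t-sized allocation.
	 */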
	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages >
		 (SIZE_MAX - queue_size) /
		 (sizeof(*queue->kernel_if->u.g.pas) +
		  sizeof(*queue->kernel_if->u.g.vas)))
		return NULL;

	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
	queue_size += pas_size + vas_size;

	queue = vmalloc(queue_size);
	if (!queue)
		return NULL;

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
	queue->kernel_if->mutex = NULL;
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
	queue->kernel_if->u.g.vas =
		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
	queue->kernel_if->host = false;

	for (i = 0; i < num_pages; i++) {
		queue->kernel_if->u.g.vas[i] =
			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
					   &queue->kernel_if->u.g.pas[i],
					   GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Size excl. the header. */
			qp_free_queue(queue, i * PAGE_SIZE);
			return NULL;
		}
	}

	/* Queue header is the first page. */
	queue->q_header = queue->kernel_if->u.g.vas[0];

	return queue;
}

/*
 * Copies from a given buffer or iovector to a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
				   u64 queue_offset,
				   struct iov_iter *from,
				   size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host) {
			va = kmap(kernel_if->u.h.page[page_index]);
		} else {
			/* Skip the header page. */
			va = kernel_if->u.g.vas[page_index + 1];
		}

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up from this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
					 from)) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Copies to a given buffer or iovector from a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_from_queue_iter(struct iov_iter *to,
				     const struct vmci_queue *queue,
				     u64 queue_offset, size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;
		int err;

		if (kernel_if->host) {
			va = kmap(kernel_if->u.h.page[page_index]);
		} else {
			/* Skip the header page. */
			va = kernel_if->u.g.vas[page_index + 1];
		}

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
		if (err != to_copy) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates two lists of PPNs --- one for the pages in the produce queue,
 * and the other for the pages in the consume queue. Initializes the lists
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
			    u64 num_produce_pages,
			    void *cons_q,
			    u64 num_consume_pages, struct ppn_set *ppn_set)
{
	u64 *produce_ppns;
	u64 *consume_ppns;
	struct vmci_queue *produce_q = prod_q;
	struct vmci_queue *consume_q = cons_q;
	u64 i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return VMCI_ERROR_INVALID_ARGS;

	if (ppn_set->initialized)
		return VMCI_ERROR_ALREADY_EXISTS;

	produce_ppns =
	    kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
			  GFP_KERNEL);
	if (!produce_ppns)
		return VMCI_ERROR_NO_MEM;

	consume_ppns =
	    kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
			  GFP_KERNEL);
	if (!consume_ppns) {
		kfree(produce_ppns);
		return VMCI_ERROR_NO_MEM;
	}

	for (i = 0; i < num_produce_pages; i++)
		produce_ppns[i] =
			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	for (i = 0; i < num_consume_pages; i++)
		consume_ppns[i] =
			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return VMCI_SUCCESS;
}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		kfree(ppn_set->produce_ppns);
		kfree(ppn_set->consume_ppns);
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
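	/*
	 * Each PPN is transferred as a 64-bit value if the device supports
	 * 64-bit PPNs, and is truncated to 32 bits otherwise.
	 */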
	if (vmci_use_ppn64()) {
		memcpy(call_buf, ppn_set->produce_ppns,
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns));
		memcpy(call_buf +
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns),
		       ppn_set->consume_ppns,
		       ppn_set->num_consume_pages *
		       sizeof(*ppn_set->consume_ppns));
	} else {
		int i;
		u32 *ppns = (u32 *) call_buf;

		for (i = 0; i < ppn_set->num_produce_pages; i++)
			ppns[i] = (u32) ppn_set->produce_ppns[i];

		ppns = &ppns[ppn_set->num_produce_pages];

		for (i = 0; i < ppn_set->num_consume_pages; i++)
			ppns[i] = (u32) ppn_set->consume_ppns[i];
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface. This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
	struct vmci_queue *queue;
	size_t queue_page_size;
	u64 num_pages;
	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages > (SIZE_MAX - queue_size) /
		 sizeof(*queue->kernel_if->u.h.page))
		return NULL;

	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
	if (queue) {
		queue->q_header = NULL;
		queue->saved_header = NULL;
		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
		queue->kernel_if->host = true;
		queue->kernel_if->mutex = NULL;
		queue->kernel_if->num_pages = num_pages;
		queue->kernel_if->u.h.header_page =
		    (struct page **)((u8 *)queue + queue_size);
		queue->kernel_if->u.h.page =
			&queue->kernel_if->u.h.header_page[1];
	}

	return queue;
}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
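	/*
	 * The page pointer array was allocated together with the queue
	 * structure in qp_host_alloc_queue(), so a single kfree() releases
	 * everything.
	 */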
	kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues. This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue. Of course, it's only any good if the mutexes
 * are actually acquired. Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	/*
	 * Only the host queue has shared state - the guest queues do not
	 * need to synchronize access using a queue mutex.
	 */

	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		mutex_init(produce_q->kernel_if->mutex);
	}
}

/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = NULL;
		consume_q->kernel_if->mutex = NULL;
	}
}

/*
 * Acquire the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_unlock(queue->kernel_if->mutex);
}

/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
			     u64 num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		put_page(pages[i]);
		pages[i] = NULL;
	}
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
				   u64 consume_uva,
				   struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	int retval;
	int err = VMCI_SUCCESS;

	retval = get_user_pages_fast((uintptr_t) produce_uva,
				     produce_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     produce_q->kernel_if->u.h.header_page);
	if (retval < (int)produce_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
			 retval);
		if (retval > 0)
			qp_release_pages(produce_q->kernel_if->u.h.header_page,
					 retval, false);
		err = VMCI_ERROR_NO_MEM;
		goto out;
	}

	retval = get_user_pages_fast((uintptr_t) consume_uva,
				     consume_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     consume_q->kernel_if->u.h.header_page);
	if (retval < (int)consume_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
			 retval);
		if (retval > 0)
			qp_release_pages(consume_q->kernel_if->u.h.header_page,
					 retval, false);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 produce_q->kernel_if->num_pages, false);
		err = VMCI_ERROR_NO_MEM;
	}

 out:
	return err;
}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
					struct vmci_queue *produce_q,
					struct vmci_queue *consume_q)
{
	u64 produce_uva;
	u64 consume_uva;

	/*
	 * The new-style and old-style mappings differ only in that we
	 * either get a single UVA or two, so we split the single UVA
	 * range at the appropriate spot.
	 */
	produce_uva = page_store->pages;
	consume_uva = page_store->pages +
	    produce_q->kernel_if->num_pages * PAGE_SIZE;
	return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
				       consume_q);
}

/*
 * Releases and removes the references to user pages stored in the attach
 * struct. Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
					   struct vmci_queue *consume_q)
{
	qp_release_pages(produce_q->kernel_if->u.h.header_page,
			 produce_q->kernel_if->num_pages, true);
	memset(produce_q->kernel_if->u.h.header_page, 0,
	       sizeof(*produce_q->kernel_if->u.h.header_page) *
	       produce_q->kernel_if->num_pages);
	qp_release_pages(consume_q->kernel_if->u.h.header_page,
			 consume_q->kernel_if->num_pages, true);
	memset(consume_q->kernel_if->u.h.header_page, 0,
	       sizeof(*consume_q->kernel_if->u.h.header_page) *
	       consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel. Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
			      struct vmci_queue *consume_q)
{
	int result;

	if (!produce_q->q_header || !consume_q->q_header) {
		struct page *headers[2];

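		/*
		 * The two headers are mapped together or not at all; if
		 * only one of them is mapped here, the queue pair is in
		 * an inconsistent state.
		 */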
		if (produce_q->q_header != consume_q->q_header)
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;

		if (produce_q->kernel_if->u.h.header_page == NULL ||
		    *produce_q->kernel_if->u.h.header_page == NULL)
			return VMCI_ERROR_UNAVAILABLE;

		headers[0] = *produce_q->kernel_if->u.h.header_page;
		headers[1] = *consume_q->kernel_if->u.h.header_page;

		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
		if (produce_q->q_header != NULL) {
			consume_q->q_header =
			    (struct vmci_queue_header *)((u8 *)
							 produce_q->q_header +
							 PAGE_SIZE);
			result = VMCI_SUCCESS;
		} else {
			pr_warn("vmap failed\n");
			result = VMCI_ERROR_NO_MEM;
		}
	} else {
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
				struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
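	/*
	 * Both headers live in the single two-page region created by
	 * qp_host_map_queues(), so vunmap() the lower of the two
	 * addresses, which is the start of that mapping.
	 */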
	if (produce_q->q_header) {
		if (produce_q->q_header < consume_q->q_header)
			vunmap(produce_q->q_header);
		else
			vunmap(consume_q->q_header);

		produce_q->q_header = NULL;
		consume_q->q_header = NULL;
	}

	return VMCI_SUCCESS;
}

/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
				     struct vmci_handle handle)
{
	struct qp_entry *entry;

	if (vmci_handle_is_invalid(handle))
		return NULL;

	list_for_each_entry(entry, &qp_list->head, list_item) {
		if (vmci_handle_is_equal(entry->handle, handle))
			return entry;
	}

	return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

	entry = qp ? container_of(qp, struct qp_guest_endpoint, qp) : NULL;
	return entry;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
	struct qp_broker_entry *entry;
	struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

	entry = qp ? container_of(qp, struct qp_broker_entry, qp) : NULL;
	return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
	u32 context_id = vmci_get_context_id();
	struct vmci_event_qp ev;

	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event =
	    attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.peer_id = context_id;
	ev.payload.handle = handle;

	return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles. Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u64 produce_size,
			 u64 consume_size,
			 void *produce_q,
			 void *consume_q)
{
	int result;
	struct qp_guest_endpoint *entry;
	/* One page each for the queue headers. */
	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

	if (vmci_handle_is_invalid(handle)) {
		u32 context_id = vmci_get_context_id();

		handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry) {
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
		INIT_LIST_HEAD(&entry->qp.list_item);

		/* Add resource obj */
		result = vmci_resource_add(&entry->resource,
					   VMCI_RESOURCE_TYPE_QPAIR_GUEST,
					   handle);
		entry->qp.handle = vmci_resource_handle(&entry->resource);
		if ((result != VMCI_SUCCESS) ||
		    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
			pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
				handle.context, handle.resource, result);
			kfree(entry);
			entry = NULL;
		}
	}
	return entry;
}

/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
	qp_free_ppn_set(&entry->ppn_set);
	qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
	qp_free_queue(entry->produce_q, entry->qp.produce_size);
	qp_free_queue(entry->consume_q, entry->qp.consume_size);
	/* Unlink from resource hash table and free callback */
	vmci_resource_remove(&entry->resource);

	kfree(entry);
}

/*
 * Helper to make a queue_pairAlloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_qp_alloc_msg *alloc_msg;
	size_t msg_size;
	size_t ppn_size;
	int result;

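	/*
	 * num_ppns includes one PPN for each queue header, so fewer than
	 * three PPNs means the queue pair has no data pages at all.
	 */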
	if (!entry || entry->num_ppns <= 2)
		return VMCI_ERROR_INVALID_ARGS;

	ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
	msg_size = sizeof(*alloc_msg) +
	    (size_t) entry->num_ppns * ppn_size;
	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!alloc_msg)
		return VMCI_ERROR_NO_MEM;

	alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;

	result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
				     &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram(&alloc_msg->hdr);

	kfree(alloc_msg);

	return result;
}

/*
 * Helper to make a queue_pairDetach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
	struct vmci_qp_detach_msg detach_msg;

	detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return vmci_send_datagram(&detach_msg.hdr);
}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
	if (entry)
		list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
				 struct qp_entry *entry)
{
	if (entry)
		list_del(&entry->list_item);
}

/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
	int result;
	struct qp_guest_endpoint *entry;
	u32 ref_count = ~0;	/* To avoid compiler warning below */

	mutex_lock(&qp_guest_endpoints.mutex);

	entry = qp_guest_handle_to_entry(handle);
	if (!entry) {
		mutex_unlock(&qp_guest_endpoints.mutex);
		return VMCI_ERROR_NOT_FOUND;
	}

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = qp_notify_peer_local(false, handle);
			/*
			 * We can fail to notify a local queuepair
			 * because we can't allocate. We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
		}
	} else {
		result = qp_detatch_hypercall(handle);
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair.
			 * That other queuepair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet. It will get cleaned
			 * up by VMCIqueue_pair_Exit() if necessary
			 * (assuming we are going away, otherwise why
			 * did this fail?).
			 */

			mutex_unlock(&qp_guest_endpoints.mutex);
			return result;
		}
	}

	/*
	 * If we get here, we either failed to notify a local queuepair
	 * (which is non-fatal) or we succeeded in all cases. Release the
	 * entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	if (entry)
		ref_count = entry->qp.ref_count;

	mutex_unlock(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);

	return result;
}

/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
			       struct vmci_queue **produce_q,
			       u64 produce_size,
			       struct vmci_queue **consume_q,
			       u64 consume_size,
			       u32 peer,
			       u32 flags,
			       u32 priv_flags)
{
	const u64 num_produce_pages =
	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
	const u64 num_consume_pages =
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
	void *my_produce_q = NULL;
	void *my_consume_q = NULL;
	int result;
	struct qp_guest_endpoint *queue_pair_entry = NULL;

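	/* Guest endpoints may not specify any privilege flags. */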
	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return VMCI_ERROR_NO_ACCESS;

	mutex_lock(&qp_guest_endpoints.mutex);

	queue_pair_entry = qp_guest_handle_to_entry(*handle);
	if (queue_pair_entry) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				pr_devel("Error attempting to attach more than once\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size !=
			    produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				pr_devel("Error mismatched queue pair in local attach\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach. We swap the consume and
			 * produce queues for the attacher and deliver
			 * an attach event.
			 */
			result = qp_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;

			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}

		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = qp_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		pr_warn("Error allocating pages for produce queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = qp_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		pr_warn("Error allocating pages for consume queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
						    produce_size, consume_size,
						    my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		pr_warn("Error allocating memory in %s\n", __func__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
				  num_consume_pages,
				  &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		pr_warn("qp_alloc_ppn_set failed\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		u32 context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we
		 * do for regular ones. The handle's context must
		 * match the creator or attacher context id (here they
		 * are both the current context id) and the
		 * attach-only flag cannot exist during create. We
		 * also ensure specified peer is this context or an
		 * invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		     queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = qp_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			pr_warn("qp_alloc_hypercall result = %d\n", result);
			goto error;
		}
	}

	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
			    (struct vmci_queue *)my_consume_q);

	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create. For non-local queue pairs, the
	 * hypervisor initializes the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_q_header_init((*produce_q)->q_header, *handle);
		vmci_q_header_init((*consume_q)->q_header, *handle);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);

	return VMCI_SUCCESS;

 error:
	mutex_unlock(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		qp_free_queue(my_produce_q, produce_size);
		qp_free_queue(my_consume_q, consume_size);
	}
	return result;

 error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	mutex_unlock(&qp_guest_endpoints.mutex);
	return result;
}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en, that would use a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data, struct qp_broker_entry **ent)
{
	struct qp_broker_entry *entry = NULL;
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;
	u64 guest_produce_size;
	u64 guest_consume_size;

	/* Do not create if the caller asked not to. */
	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
		return VMCI_ERROR_NOT_FOUND;

	/*
	 * Creator's context ID should match handle's context ID or the creator
	 * must allow the context in handle's context ID as the "peer".
	 */
	if (handle.context != context_id && handle.context != peer)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * Creator's context ID for local queue pairs should match the
	 * peer, if a peer is specified.
	 */
	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
		return VMCI_ERROR_NO_ACCESS;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return VMCI_ERROR_NO_MEM;

	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so a creating host side endpoint should swap
		 * produce and consume values -- unless it is a local queue
		 * pair, in which case no swapping is necessary, since the local
		 * attacher will swap queues.
		 */

		guest_produce_size = consume_size;
		guest_consume_size = produce_size;
	} else {
		guest_produce_size = produce_size;
		guest_consume_size = consume_size;
	}

	entry->qp.handle = handle;
	entry->qp.peer = peer;
	entry->qp.flags = flags;
	entry->qp.produce_size = guest_produce_size;
	entry->qp.consume_size = guest_consume_size;
	entry->qp.ref_count = 1;
	entry->create_id = context_id;
	entry->attach_id = VMCI_INVALID_ID;
	entry->state = VMCIQPB_NEW;
	entry->require_trusted_attach =
	    !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
	entry->created_by_trusted =
	    !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
	entry->vmci_page_files = false;
	entry->wakeup_cb = wakeup_cb;
	entry->client_data = client_data;
	entry->produce_q = qp_host_alloc_queue(guest_produce_size);
	if (entry->produce_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}
	entry->consume_q = qp_host_alloc_queue(guest_consume_size);
	if (entry->consume_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	qp_init_queue_mutex(entry->produce_q, entry->consume_q);

	INIT_LIST_HEAD(&entry->qp.list_item);

	if (is_local) {
		u8 *tmp;

		entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
					   PAGE_SIZE, GFP_KERNEL);
		if (entry->local_mem == NULL) {
			result = VMCI_ERROR_NO_MEM;
			goto error;
		}
		entry->state = VMCIQPB_CREATED_MEM;
		entry->produce_q->q_header = entry->local_mem;
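		/*
		 * The consume queue header starts after the produce queue
		 * header page and the produce queue data pages.
		 */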
		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
		    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
	} else if (page_store) {
		/*
		 * The VMX already initialized the queue pair headers, so no
		 * need for the kernel side to do that.
		 */
		result = qp_host_register_user_memory(page_store,
						      entry->produce_q,
						      entry->consume_q);
		if (result < VMCI_SUCCESS)
			goto error;

		entry->state = VMCIQPB_CREATED_MEM;
	} else {
		/*
		 * A create without a page_store may be either a host
		 * side create (in which case we are waiting for the
		 * guest side to supply the memory) or an old style
		 * queue pair create (in which case we will expect a
		 * set page store call as the next step).
		 */
		entry->state = VMCIQPB_CREATED_NO_MEM;
	}

	qp_list_add_entry(&qp_broker_list, &entry->qp);
	if (ent != NULL)
		*ent = entry;

	/* Add to resource obj */
	result = vmci_resource_add(&entry->resource,
				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
				   handle);
	if (result != VMCI_SUCCESS) {
		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
			handle.context, handle.resource, result);
		goto error;
	}

	entry->qp.handle = vmci_resource_handle(&entry->resource);
	if (is_local) {
		vmci_q_header_init(entry->produce_q->q_header,
				   entry->qp.handle);
		vmci_q_header_init(entry->consume_q->q_header,
				   entry->qp.handle);
	}

	vmci_ctx_qp_create(context, entry->qp.handle);

	return VMCI_SUCCESS;

 error:
	if (entry != NULL) {
		qp_host_free_queue(entry->produce_q, guest_produce_size);
		qp_host_free_queue(entry->consume_q, guest_consume_size);
		kfree(entry);
	}

	return result;
}

/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about an attach/detach event by the
 * given VM. Returns the payload size of the datagram enqueued on
 * success, error code otherwise.
 */
static int qp_notify_peer(bool attach,
			  struct vmci_handle handle,
			  u32 my_id,
			  u32 peer_id)
{
	int rv;
	struct vmci_event_qp ev;

	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
	    peer_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on the
	 * number of pending events from the hypervisor to a given VM;
	 * otherwise a rogue VM could do an arbitrary number of attach
	 * and detach operations causing memory pressure in the host
	 * kernel.
	 */

	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event = attach ?
	    VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.handle = handle;
	ev.payload.peer_id = my_id;

	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
				    &ev.msg.hdr, false);
	if (rv < VMCI_SUCCESS)
		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
			attach ? "ATTACH" : "DETACH", peer_id);

	return rv;
}

/*
 * The second endpoint issuing a queue pair allocation will attach to
 * the queue pair registered with the queue pair broker.
 *
 * If the attacher is a guest, it will associate a VMX virtual address
 * range with the queue pair as specified by the page_store. At this
 * point, the already attached host endpoint may start using the queue
 * pair, and an attach event is sent to it. For compatibility with
 * older VMX'en, that used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later
 * using vmci_qp_broker_set_page_store. In that case, a page_store of
 * NULL should be used, and the attach event will be generated once
 * the actual page store has been set.
 *
 * If the attacher is the host, a page_store of NULL should be used as
 * well, since the page store information is already set by the guest.
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data,
			    struct qp_broker_entry **ent)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_CREATED_MEM)
		return VMCI_ERROR_UNAVAILABLE;

	if (is_local) {
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
		    context_id != entry->create_id) {
			return VMCI_ERROR_INVALID_ARGS;
		}
	} else if (context_id == entry->create_id ||
		   context_id == entry->attach_id) {
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (VMCI_CONTEXT_IS_VM(context_id) &&
	    VMCI_CONTEXT_IS_VM(entry->create_id))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * If we are attaching from a restricted context then the queuepair
	 * must have been created by a trusted endpoint.
	 */
	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !entry->created_by_trusted)
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If we are attaching to a queuepair that was created by a restricted
	 * context then we must be trusted.
	 */
	if (entry->require_trusted_attach &&
	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
	 * control check is not performed.
	 */
	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
		return VMCI_ERROR_NO_ACCESS;

	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
		/*
		 * Do not attach if the caller doesn't support Host Queue Pairs
		 * and a host created this queue pair.
		 */
		if (!vmci_ctx_supports_host_qp(context))
			return VMCI_ERROR_INVALID_RESOURCE;

	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
		struct vmci_ctx *create_context;
		bool supports_host_qp;

		/*
		 * Do not attach a host to a user created queue pair if that
		 * user doesn't support host queue pair end points.
		 */
		create_context = vmci_ctx_get(entry->create_id);
		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
		vmci_ctx_put(create_context);

		if (!supports_host_qp)
			return VMCI_ERROR_INVALID_RESOURCE;
	}

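	/*
	 * Creator and attacher must agree on all flags except the
	 * asymmetric-queue flags, where VMCI_QP_ASYMM on the creator side
	 * pairs with VMCI_QP_ASYMM_PEER on the attacher side.
	 */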
	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so an attaching guest should match the values
		 * stored in the entry.
		 */
		if (entry->qp.produce_size != produce_size ||
		    entry->qp.consume_size != consume_size) {
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
		}
	} else if (entry->qp.produce_size != consume_size ||
		   entry->qp.consume_size != produce_size) {
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * If a guest attaches to a queue pair, it will supply
		 * the backing memory. If this is a pre NOVMVM vmx,
		 * the backing memory will be supplied by calling
		 * vmci_qp_broker_set_page_store() following the
		 * return of the vmci_qp_broker_alloc() call. If it is
		 * a vmx of version NOVMVM or later, the page store
		 * must be supplied as part of the
		 * vmci_qp_broker_alloc call. Under all circumstances,
		 * the initially created queue pair must not have any
		 * memory associated with it already.
		 */
		if (entry->state != VMCIQPB_CREATED_NO_MEM)
			return VMCI_ERROR_INVALID_ARGS;

		if (page_store != NULL) {
			/*
			 * Patch up host state to point to guest
			 * supplied memory. The VMX already
			 * initialized the queue pair headers, so no
			 * need for the kernel side to do that.
			 */
			result = qp_host_register_user_memory(page_store,
							      entry->produce_q,
							      entry->consume_q);
			if (result < VMCI_SUCCESS)
				return result;

			entry->state = VMCIQPB_ATTACHED_MEM;
		} else {
			entry->state = VMCIQPB_ATTACHED_NO_MEM;
		}
	} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
		/*
		 * The host side is attempting to attach to a queue
		 * pair that doesn't have any memory associated with
		 * it. This must be a pre NOVMVM vmx that hasn't set
		 * the page store information yet, or a quiesced VM.
		 */
		return VMCI_ERROR_UNAVAILABLE;
	} else {
		/* The host side has successfully attached to a queue pair. */
		entry->state = VMCIQPB_ATTACHED_MEM;
	}

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
		    qp_notify_peer(true, entry->qp.handle, context_id,
				   entry->create_id);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
	}

	entry->attach_id = context_id;
	entry->qp.ref_count++;
	if (wakeup_cb) {
		entry->wakeup_cb = wakeup_cb;
		entry->client_data = client_data;
	}

	/*
	 * When attaching to local queue pairs, the context already has
	 * an entry tracking the queue pair, so don't add another one.
	 */
	if (!is_local)
		vmci_ctx_qp_create(context, entry->qp.handle);

	if (ent != NULL)
		*ent = entry;

	return VMCI_SUCCESS;
}
1684
1685 /*
1686  * qp_broker_alloc is used when setting up queue pair endpoints
1687 * on the host.
1688 */
1689 static int qp_broker_alloc(struct vmci_handle handle,
1690 u32 peer,
1691 u32 flags,
1692 u32 priv_flags,
1693 u64 produce_size,
1694 u64 consume_size,
1695 struct vmci_qp_page_store *page_store,
1696 struct vmci_ctx *context,
1697 vmci_event_release_cb wakeup_cb,
1698 void *client_data,
1699 struct qp_broker_entry **ent,
1700 bool *swap)
1701 {
1702 const u32 context_id = vmci_ctx_get_id(context);
1703 bool create;
1704 struct qp_broker_entry *entry = NULL;
1705 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1706 int result;
1707
1708 if (vmci_handle_is_invalid(handle) ||
1709 (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
1710 !(produce_size || consume_size) ||
1711 !context || context_id == VMCI_INVALID_ID ||
1712 handle.context == VMCI_INVALID_ID) {
1713 return VMCI_ERROR_INVALID_ARGS;
1714 }
1715
1716 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
1717 return VMCI_ERROR_INVALID_ARGS;
1718
1719 /*
1720 * In the initial argument check, we ensure that non-vmkernel hosts
1721 * are not allowed to create local queue pairs.
1722 */
1723
1724 mutex_lock(&qp_broker_list.mutex);
1725
1726 if (!is_local && vmci_ctx_qp_exists(context, handle)) {
1727 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
1728 context_id, handle.context, handle.resource);
1729 mutex_unlock(&qp_broker_list.mutex);
1730 return VMCI_ERROR_ALREADY_EXISTS;
1731 }
1732
1733 if (handle.resource != VMCI_INVALID_ID)
1734 entry = qp_broker_handle_to_entry(handle);
1735
1736 if (!entry) {
1737 create = true;
1738 result =
1739 qp_broker_create(handle, peer, flags, priv_flags,
1740 produce_size, consume_size, page_store,
1741 context, wakeup_cb, client_data, ent);
1742 } else {
1743 create = false;
1744 result =
1745 qp_broker_attach(entry, peer, flags, priv_flags,
1746 produce_size, consume_size, page_store,
1747 context, wakeup_cb, client_data, ent);
1748 }
1749
1750 mutex_unlock(&qp_broker_list.mutex);
1751
1752 if (swap)
1753 *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
1754 !(create && is_local);
1755
1756 return result;
1757 }
1758
1759 /*
1760  * This function implements the host kernel API for allocating a
1761  * queue pair.
1762 */
1763 static int qp_alloc_host_work(struct vmci_handle *handle,
1764 struct vmci_queue **produce_q,
1765 u64 produce_size,
1766 struct vmci_queue **consume_q,
1767 u64 consume_size,
1768 u32 peer,
1769 u32 flags,
1770 u32 priv_flags,
1771 vmci_event_release_cb wakeup_cb,
1772 void *client_data)
1773 {
1774 struct vmci_handle new_handle;
1775 struct vmci_ctx *context;
1776 struct qp_broker_entry *entry;
1777 int result;
1778 bool swap;
1779
1780 if (vmci_handle_is_invalid(*handle)) {
1781 new_handle = vmci_make_handle(
1782 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
1783 } else
1784 new_handle = *handle;
1785
1786 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1787 entry = NULL;
1788 result =
1789 qp_broker_alloc(new_handle, peer, flags, priv_flags,
1790 produce_size, consume_size, NULL, context,
1791 wakeup_cb, client_data, &entry, &swap);
1792 if (result == VMCI_SUCCESS) {
1793 if (swap) {
1794 /*
1795 * If this is a local queue pair, the attacher
1796 * will swap around produce and consume
1797 * queues.
1798 */
1799
1800 *produce_q = entry->consume_q;
1801 *consume_q = entry->produce_q;
1802 } else {
1803 *produce_q = entry->produce_q;
1804 *consume_q = entry->consume_q;
1805 }
1806
1807 *handle = vmci_resource_handle(&entry->resource);
1808 } else {
1809 *handle = VMCI_INVALID_HANDLE;
1810 pr_devel("queue pair broker failed to alloc (result=%d)\n",
1811 result);
1812 }
1813 vmci_ctx_put(context);
1814 return result;
1815 }
1816
1817 /*
1818 * Allocates a VMCI queue_pair. Only checks validity of input
1819  * arguments. The real work is done in the host- or
1820  * guest-specific function.
1821 */
1822 int vmci_qp_alloc(struct vmci_handle *handle,
1823 struct vmci_queue **produce_q,
1824 u64 produce_size,
1825 struct vmci_queue **consume_q,
1826 u64 consume_size,
1827 u32 peer,
1828 u32 flags,
1829 u32 priv_flags,
1830 bool guest_endpoint,
1831 vmci_event_release_cb wakeup_cb,
1832 void *client_data)
1833 {
1834 if (!handle || !produce_q || !consume_q ||
1835 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
1836 return VMCI_ERROR_INVALID_ARGS;
1837
1838 if (guest_endpoint) {
1839 return qp_alloc_guest_work(handle, produce_q,
1840 produce_size, consume_q,
1841 consume_size, peer,
1842 flags, priv_flags);
1843 } else {
1844 return qp_alloc_host_work(handle, produce_q,
1845 produce_size, consume_q,
1846 consume_size, peer, flags,
1847 priv_flags, wakeup_cb, client_data);
1848 }
1849 }
1850
1851 /*
1852 * This function implements the host kernel API for detaching from
1853 * a queue pair.
1854 */
1855 static int qp_detatch_host_work(struct vmci_handle handle)
1856 {
1857 int result;
1858 struct vmci_ctx *context;
1859
1860 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1861
1862 result = vmci_qp_broker_detach(handle, context);
1863
1864 vmci_ctx_put(context);
1865 return result;
1866 }
1867
1868 /*
1869 * Detaches from a VMCI queue_pair. Only checks validity of input argument.
1870  * Real work is done in the host- or guest-specific function.
1871 */
1872 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1873 {
1874 if (vmci_handle_is_invalid(handle))
1875 return VMCI_ERROR_INVALID_ARGS;
1876
1877 if (guest_endpoint)
1878 return qp_detatch_guest_work(handle);
1879 else
1880 return qp_detatch_host_work(handle);
1881 }
1882
1883 /*
1884 * Returns the entry from the head of the list. Assumes that the list is
1885 * locked.
1886 */
1887 static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
1888 {
1889 if (!list_empty(&qp_list->head)) {
1890 struct qp_entry *entry =
1891 list_first_entry(&qp_list->head, struct qp_entry,
1892 list_item);
1893 return entry;
1894 }
1895
1896 return NULL;
1897 }
1898
1899 void vmci_qp_broker_exit(void)
1900 {
1901 struct qp_entry *entry;
1902 struct qp_broker_entry *be;
1903
1904 mutex_lock(&qp_broker_list.mutex);
1905
1906 while ((entry = qp_list_get_head(&qp_broker_list))) {
1907 be = (struct qp_broker_entry *)entry;
1908
1909 qp_list_remove_entry(&qp_broker_list, entry);
1910 kfree(be);
1911 }
1912
1913 mutex_unlock(&qp_broker_list.mutex);
1914 }
1915
1916 /*
1917 * Requests that a queue pair be allocated with the VMCI queue
1918 * pair broker. Allocates a queue pair entry if one does not
1919 * exist. Attaches to one if it exists, and retrieves the page
1920  * files backing that queue_pair. The queue pair broker lock is
1921  * taken internally.
1922 */
1923 int vmci_qp_broker_alloc(struct vmci_handle handle,
1924 u32 peer,
1925 u32 flags,
1926 u32 priv_flags,
1927 u64 produce_size,
1928 u64 consume_size,
1929 struct vmci_qp_page_store *page_store,
1930 struct vmci_ctx *context)
1931 {
1932 return qp_broker_alloc(handle, peer, flags, priv_flags,
1933 produce_size, consume_size,
1934 page_store, context, NULL, NULL, NULL, NULL);
1935 }
1936
1937 /*
1938 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
1939 * step to add the UVAs of the VMX mapping of the queue pair. This function
1940 * provides backwards compatibility with such VMX'en, and takes care of
1941 * registering the page store for a queue pair previously allocated by the
1942  * VMX during create or attach. This function will move the queue pair state
1943  * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM, or from
1944  * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
1945 * attached state with memory, the queue pair is ready to be used by the
1946 * host peer, and an attached event will be generated.
1947 *
1948  * The queue pair broker lock is taken internally.
1949 *
1950 * This function is only used by the hosted platform, since there is no
1951 * issue with backwards compatibility for vmkernel.
1952 */
1953 int vmci_qp_broker_set_page_store(struct vmci_handle handle,
1954 u64 produce_uva,
1955 u64 consume_uva,
1956 struct vmci_ctx *context)
1957 {
1958 struct qp_broker_entry *entry;
1959 int result;
1960 const u32 context_id = vmci_ctx_get_id(context);
1961
1962 if (vmci_handle_is_invalid(handle) || !context ||
1963 context_id == VMCI_INVALID_ID)
1964 return VMCI_ERROR_INVALID_ARGS;
1965
1966 /*
1967 * We only support guest to host queue pairs, so the VMX must
1968 * supply UVAs for the mapped page files.
1969 */
1970
1971 if (produce_uva == 0 || consume_uva == 0)
1972 return VMCI_ERROR_INVALID_ARGS;
1973
1974 mutex_lock(&qp_broker_list.mutex);
1975
1976 if (!vmci_ctx_qp_exists(context, handle)) {
1977 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
1978 context_id, handle.context, handle.resource);
1979 result = VMCI_ERROR_NOT_FOUND;
1980 goto out;
1981 }
1982
1983 entry = qp_broker_handle_to_entry(handle);
1984 if (!entry) {
1985 result = VMCI_ERROR_NOT_FOUND;
1986 goto out;
1987 }
1988
1989 /*
1990 * If I'm the owner then I can set the page store.
1991 *
1992 * Or, if a host created the queue_pair and I'm the attached peer
1993 * then I can set the page store.
1994 */
1995 if (entry->create_id != context_id &&
1996 (entry->create_id != VMCI_HOST_CONTEXT_ID ||
1997 entry->attach_id != context_id)) {
1998 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
1999 goto out;
2000 }
2001
2002 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
2003 entry->state != VMCIQPB_ATTACHED_NO_MEM) {
2004 result = VMCI_ERROR_UNAVAILABLE;
2005 goto out;
2006 }
2007
2008 result = qp_host_get_user_memory(produce_uva, consume_uva,
2009 entry->produce_q, entry->consume_q);
2010 if (result < VMCI_SUCCESS)
2011 goto out;
2012
2013 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2014 if (result < VMCI_SUCCESS) {
2015 qp_host_unregister_user_memory(entry->produce_q,
2016 entry->consume_q);
2017 goto out;
2018 }
2019
2020 if (entry->state == VMCIQPB_CREATED_NO_MEM)
2021 entry->state = VMCIQPB_CREATED_MEM;
2022 else
2023 entry->state = VMCIQPB_ATTACHED_MEM;
2024
2025 entry->vmci_page_files = true;
2026
2027 if (entry->state == VMCIQPB_ATTACHED_MEM) {
2028 result =
2029 qp_notify_peer(true, handle, context_id, entry->create_id);
2030 if (result < VMCI_SUCCESS) {
2031 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
2032 entry->create_id, entry->qp.handle.context,
2033 entry->qp.handle.resource);
2034 }
2035 }
2036
2037 result = VMCI_SUCCESS;
2038 out:
2039 mutex_unlock(&qp_broker_list.mutex);
2040 return result;
2041 }
2042
2043 /*
2044 * Resets saved queue headers for the given QP broker
2045 * entry. Should be used when guest memory becomes available
2046 * again, or the guest detaches.
2047 */
2048 static void qp_reset_saved_headers(struct qp_broker_entry *entry)
2049 {
2050 entry->produce_q->saved_header = NULL;
2051 entry->consume_q->saved_header = NULL;
2052 }
2053
2054 /*
2055 * The main entry point for detaching from a queue pair registered with the
2056 * queue pair broker. If more than one endpoint is attached to the queue
2057  * pair, the first endpoint to detach will mainly decrement a reference
2058  * count and generate a notification to its peer. The last endpoint will
2059  * clean up the queue pair state registered with the broker.
2060 *
2061 * When a guest endpoint detaches, it will unmap and unregister the guest
2062 * memory backing the queue pair. If the host is still attached, it will
2063 * no longer be able to access the queue pair content.
2064 *
2065 * If the queue pair is already in a state where there is no memory
2066 * registered for the queue pair (any *_NO_MEM state), it will transition to
2067  * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
2068 * endpoint is the first of two endpoints to detach. If the host endpoint is
2069 * the first out of two to detach, the queue pair will move to the
2070 * VMCIQPB_SHUTDOWN_MEM state.
2071 */
2072 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
2073 {
2074 struct qp_broker_entry *entry;
2075 const u32 context_id = vmci_ctx_get_id(context);
2076 u32 peer_id;
2077 bool is_local = false;
2078 int result;
2079
2080 if (vmci_handle_is_invalid(handle) || !context ||
2081 context_id == VMCI_INVALID_ID) {
2082 return VMCI_ERROR_INVALID_ARGS;
2083 }
2084
2085 mutex_lock(&qp_broker_list.mutex);
2086
2087 if (!vmci_ctx_qp_exists(context, handle)) {
2088 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2089 context_id, handle.context, handle.resource);
2090 result = VMCI_ERROR_NOT_FOUND;
2091 goto out;
2092 }
2093
2094 entry = qp_broker_handle_to_entry(handle);
2095 if (!entry) {
2096 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
2097 context_id, handle.context, handle.resource);
2098 result = VMCI_ERROR_NOT_FOUND;
2099 goto out;
2100 }
2101
2102 if (context_id != entry->create_id && context_id != entry->attach_id) {
2103 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2104 goto out;
2105 }
2106
2107 if (context_id == entry->create_id) {
2108 peer_id = entry->attach_id;
2109 entry->create_id = VMCI_INVALID_ID;
2110 } else {
2111 peer_id = entry->create_id;
2112 entry->attach_id = VMCI_INVALID_ID;
2113 }
2114 entry->qp.ref_count--;
2115
2116 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2117
2118 if (context_id != VMCI_HOST_CONTEXT_ID) {
2119 bool headers_mapped;
2120
2121 /*
2122 		 * Pre-NOVMVM VMX'en may detach from a queue pair
2123 * before setting the page store, and in that case
2124 * there is no user memory to detach from. Also, more
2125 * recent VMX'en may detach from a queue pair in the
2126 * quiesced state.
2127 */
2128
2129 qp_acquire_queue_mutex(entry->produce_q);
2130 headers_mapped = entry->produce_q->q_header ||
2131 entry->consume_q->q_header;
2132 if (QPBROKERSTATE_HAS_MEM(entry)) {
2133 result =
2134 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
2135 entry->produce_q,
2136 entry->consume_q);
2137 if (result < VMCI_SUCCESS)
2138 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2139 handle.context, handle.resource,
2140 result);
2141
2142 qp_host_unregister_user_memory(entry->produce_q,
2143 entry->consume_q);
2144
2145 }
2146
2147 if (!headers_mapped)
2148 qp_reset_saved_headers(entry);
2149
2150 qp_release_queue_mutex(entry->produce_q);
2151
2152 if (!headers_mapped && entry->wakeup_cb)
2153 entry->wakeup_cb(entry->client_data);
2154
2155 } else {
2156 if (entry->wakeup_cb) {
2157 entry->wakeup_cb = NULL;
2158 entry->client_data = NULL;
2159 }
2160 }
2161
2162 if (entry->qp.ref_count == 0) {
2163 qp_list_remove_entry(&qp_broker_list, &entry->qp);
2164
2165 if (is_local)
2166 kfree(entry->local_mem);
2167
2168 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
2169 qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
2170 qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
2171 /* Unlink from resource hash table and free callback */
2172 vmci_resource_remove(&entry->resource);
2173
2174 kfree(entry);
2175
2176 vmci_ctx_qp_destroy(context, handle);
2177 } else {
2178 qp_notify_peer(false, handle, context_id, peer_id);
2179 if (context_id == VMCI_HOST_CONTEXT_ID &&
2180 QPBROKERSTATE_HAS_MEM(entry)) {
2181 entry->state = VMCIQPB_SHUTDOWN_MEM;
2182 } else {
2183 entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
2184 }
2185
2186 if (!is_local)
2187 vmci_ctx_qp_destroy(context, handle);
2188
2189 }
2190 result = VMCI_SUCCESS;
2191 out:
2192 mutex_unlock(&qp_broker_list.mutex);
2193 return result;
2194 }
2195
2196 /*
2197 * Establishes the necessary mappings for a queue pair given a
2198 * reference to the queue pair guest memory. This is usually
2199 * called when a guest is unquiesced and the VMX is allowed to
2200 * map guest memory once again.
2201 */
2202 int vmci_qp_broker_map(struct vmci_handle handle,
2203 struct vmci_ctx *context,
2204 u64 guest_mem)
2205 {
2206 struct qp_broker_entry *entry;
2207 const u32 context_id = vmci_ctx_get_id(context);
2208 int result;
2209
2210 if (vmci_handle_is_invalid(handle) || !context ||
2211 context_id == VMCI_INVALID_ID)
2212 return VMCI_ERROR_INVALID_ARGS;
2213
2214 mutex_lock(&qp_broker_list.mutex);
2215
2216 if (!vmci_ctx_qp_exists(context, handle)) {
2217 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2218 context_id, handle.context, handle.resource);
2219 result = VMCI_ERROR_NOT_FOUND;
2220 goto out;
2221 }
2222
2223 entry = qp_broker_handle_to_entry(handle);
2224 if (!entry) {
2225 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2226 context_id, handle.context, handle.resource);
2227 result = VMCI_ERROR_NOT_FOUND;
2228 goto out;
2229 }
2230
2231 if (context_id != entry->create_id && context_id != entry->attach_id) {
2232 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2233 goto out;
2234 }
2235
2236 result = VMCI_SUCCESS;
2237
2238 if (context_id != VMCI_HOST_CONTEXT_ID) {
2239 struct vmci_qp_page_store page_store;
2240
2241 page_store.pages = guest_mem;
2242 page_store.len = QPE_NUM_PAGES(entry->qp);
2243
2244 qp_acquire_queue_mutex(entry->produce_q);
2245 qp_reset_saved_headers(entry);
2246 result =
2247 qp_host_register_user_memory(&page_store,
2248 entry->produce_q,
2249 entry->consume_q);
2250 qp_release_queue_mutex(entry->produce_q);
2251 if (result == VMCI_SUCCESS) {
2252 /* Move state from *_NO_MEM to *_MEM */
2253
2254 entry->state++;
2255
2256 if (entry->wakeup_cb)
2257 entry->wakeup_cb(entry->client_data);
2258 }
2259 }
2260
2261 out:
2262 mutex_unlock(&qp_broker_list.mutex);
2263 return result;
2264 }
2265
2266 /*
2267 * Saves a snapshot of the queue headers for the given QP broker
2268 * entry. Should be used when guest memory is unmapped.
2269 * Results:
2270 * VMCI_SUCCESS on success, appropriate error code if guest memory
2271  * can't be accessed.
2272 */
2273 static int qp_save_headers(struct qp_broker_entry *entry)
2274 {
2275 int result;
2276
2277 if (entry->produce_q->saved_header != NULL &&
2278 entry->consume_q->saved_header != NULL) {
2279 /*
2280 * If the headers have already been saved, we don't need to do
2281 * it again, and we don't want to map in the headers
2282 * unnecessarily.
2283 */
2284
2285 return VMCI_SUCCESS;
2286 }
2287
2288 if (NULL == entry->produce_q->q_header ||
2289 NULL == entry->consume_q->q_header) {
2290 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2291 if (result < VMCI_SUCCESS)
2292 return result;
2293 }
2294
2295 memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2296 sizeof(entry->saved_produce_q));
2297 entry->produce_q->saved_header = &entry->saved_produce_q;
2298 memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2299 sizeof(entry->saved_consume_q));
2300 entry->consume_q->saved_header = &entry->saved_consume_q;
2301
2302 return VMCI_SUCCESS;
2303 }
2304
2305 /*
2306 * Removes all references to the guest memory of a given queue pair, and
2307  * moves the queue pair from state *_MEM to *_NO_MEM. It is usually
2308  * called when a VM is being quiesced and access to guest memory should
2309  * be avoided.
2310 */
2311 int vmci_qp_broker_unmap(struct vmci_handle handle,
2312 struct vmci_ctx *context,
2313 u32 gid)
2314 {
2315 struct qp_broker_entry *entry;
2316 const u32 context_id = vmci_ctx_get_id(context);
2317 int result;
2318
2319 if (vmci_handle_is_invalid(handle) || !context ||
2320 context_id == VMCI_INVALID_ID)
2321 return VMCI_ERROR_INVALID_ARGS;
2322
2323 mutex_lock(&qp_broker_list.mutex);
2324
2325 if (!vmci_ctx_qp_exists(context, handle)) {
2326 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2327 context_id, handle.context, handle.resource);
2328 result = VMCI_ERROR_NOT_FOUND;
2329 goto out;
2330 }
2331
2332 entry = qp_broker_handle_to_entry(handle);
2333 if (!entry) {
2334 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2335 context_id, handle.context, handle.resource);
2336 result = VMCI_ERROR_NOT_FOUND;
2337 goto out;
2338 }
2339
2340 if (context_id != entry->create_id && context_id != entry->attach_id) {
2341 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2342 goto out;
2343 }
2344
2345 if (context_id != VMCI_HOST_CONTEXT_ID) {
2346 qp_acquire_queue_mutex(entry->produce_q);
2347 result = qp_save_headers(entry);
2348 if (result < VMCI_SUCCESS)
2349 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2350 handle.context, handle.resource, result);
2351
2352 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
2353
2354 /*
2355 * On hosted, when we unmap queue pairs, the VMX will also
2356 * unmap the guest memory, so we invalidate the previously
2357 * registered memory. If the queue pair is mapped again at a
2358 * later point in time, we will need to reregister the user
2359 * memory with a possibly new user VA.
2360 */
2361 qp_host_unregister_user_memory(entry->produce_q,
2362 entry->consume_q);
2363
2364 /*
2365 * Move state from *_MEM to *_NO_MEM.
2366 */
2367 entry->state--;
2368
2369 qp_release_queue_mutex(entry->produce_q);
2370 }
2371
2372 result = VMCI_SUCCESS;
2373
2374 out:
2375 mutex_unlock(&qp_broker_list.mutex);
2376 return result;
2377 }
2378
2379 /*
2380 * Destroys all guest queue pair endpoints. If active guest queue
2381  * pairs still exist, hypercalls will be made to attempt to detach from
2382  * them. Any failure to detach is silently
2383 * ignored.
2384 */
2385 void vmci_qp_guest_endpoints_exit(void)
2386 {
2387 struct qp_entry *entry;
2388 struct qp_guest_endpoint *ep;
2389
2390 mutex_lock(&qp_guest_endpoints.mutex);
2391
2392 while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
2393 ep = (struct qp_guest_endpoint *)entry;
2394
2395 /* Don't make a hypercall for local queue_pairs. */
2396 if (!(entry->flags & VMCI_QPFLAG_LOCAL))
2397 qp_detatch_hypercall(entry->handle);
2398
2399 /* We cannot fail the exit, so let's reset ref_count. */
2400 entry->ref_count = 0;
2401 qp_list_remove_entry(&qp_guest_endpoints, entry);
2402
2403 qp_guest_endpoint_destroy(ep);
2404 }
2405
2406 mutex_unlock(&qp_guest_endpoints.mutex);
2407 }
2408
2409 /*
2410 * Helper routine that will lock the queue pair before subsequent
2411 * operations.
2412 * Note: Non-blocking on the host side is currently only implemented in ESX.
2413  * Since non-blocking isn't yet implemented on the host personality, we
2414  * have no reason to acquire a spin lock; to avoid an unnecessary lock,
2415  * we only acquire the mutex, since we are allowed to block.
2416 */
2417 static void qp_lock(const struct vmci_qp *qpair)
2418 {
2419 qp_acquire_queue_mutex(qpair->produce_q);
2420 }
2421
2422 /*
2423 * Helper routine that unlocks the queue pair after calling
2424 * qp_lock.
2425 */
2426 static void qp_unlock(const struct vmci_qp *qpair)
2427 {
2428 qp_release_queue_mutex(qpair->produce_q);
2429 }
2430
2431 /*
2432  * The queue headers may not be mapped at all times. If a queue is
2433  * currently not mapped, an attempt is made to map it.
2434 */
2435 static int qp_map_queue_headers(struct vmci_queue *produce_q,
2436 struct vmci_queue *consume_q)
2437 {
2438 int result;
2439
2440 if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
2441 result = qp_host_map_queues(produce_q, consume_q);
2442 if (result < VMCI_SUCCESS)
2443 return (produce_q->saved_header &&
2444 consume_q->saved_header) ?
2445 VMCI_ERROR_QUEUEPAIR_NOT_READY :
2446 VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2447 }
2448
2449 return VMCI_SUCCESS;
2450 }
2451
2452 /*
2453 * Helper routine that will retrieve the produce and consume
2454 * headers of a given queue pair. If the guest memory of the
2455 * queue pair is currently not available, the saved queue headers
2456  * will be returned, if available.
2457 */
2458 static int qp_get_queue_headers(const struct vmci_qp *qpair,
2459 struct vmci_queue_header **produce_q_header,
2460 struct vmci_queue_header **consume_q_header)
2461 {
2462 int result;
2463
2464 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
2465 if (result == VMCI_SUCCESS) {
2466 *produce_q_header = qpair->produce_q->q_header;
2467 *consume_q_header = qpair->consume_q->q_header;
2468 } else if (qpair->produce_q->saved_header &&
2469 qpair->consume_q->saved_header) {
2470 *produce_q_header = qpair->produce_q->saved_header;
2471 *consume_q_header = qpair->consume_q->saved_header;
2472 result = VMCI_SUCCESS;
2473 }
2474
2475 return result;
2476 }
2477
2478 /*
2479 * Callback from VMCI queue pair broker indicating that a queue
2480  * pair that was previously not ready is now either ready or
2481  * gone forever.
2482 */
2483 static int qp_wakeup_cb(void *client_data)
2484 {
2485 struct vmci_qp *qpair = (struct vmci_qp *)client_data;
2486
2487 qp_lock(qpair);
2488 while (qpair->blocked > 0) {
2489 qpair->blocked--;
2490 qpair->generation++;
2491 wake_up(&qpair->event);
2492 }
2493 qp_unlock(qpair);
2494
2495 return VMCI_SUCCESS;
2496 }
2497
2498 /*
2499 * Makes the calling thread wait for the queue pair to become
2500  * ready for host-side access. Returns true when the thread is
2501  * woken up after a queue pair state change, false otherwise.
2502 */
2503 static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2504 {
2505 unsigned int generation;
2506
2507 qpair->blocked++;
2508 generation = qpair->generation;
2509 qp_unlock(qpair);
2510 wait_event(qpair->event, generation != qpair->generation);
2511 qp_lock(qpair);
2512
2513 return true;
2514 }
2515
2516 /*
2517 * Enqueues a given buffer to the produce queue using the provided
2518 * function. As many bytes as possible (space available in the queue)
2519 * are enqueued. Assumes the queue->mutex has been acquired. Returns
2520 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
2521 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
2522 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
2523  * an error occurred when accessing the buffer,
2524 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
2525 * available. Otherwise, the number of bytes written to the queue is
2526 * returned. Updates the tail pointer of the produce queue.
2527 */
2528 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2529 struct vmci_queue *consume_q,
2530 const u64 produce_q_size,
2531 struct iov_iter *from)
2532 {
2533 s64 free_space;
2534 u64 tail;
2535 size_t buf_size = iov_iter_count(from);
2536 size_t written;
2537 ssize_t result;
2538
2539 result = qp_map_queue_headers(produce_q, consume_q);
2540 if (unlikely(result != VMCI_SUCCESS))
2541 return result;
2542
2543 free_space = vmci_q_header_free_space(produce_q->q_header,
2544 consume_q->q_header,
2545 produce_q_size);
2546 if (free_space == 0)
2547 return VMCI_ERROR_QUEUEPAIR_NOSPACE;
2548
2549 if (free_space < VMCI_SUCCESS)
2550 return (ssize_t) free_space;
2551
2552 written = (size_t) (free_space > buf_size ? buf_size : free_space);
2553 tail = vmci_q_header_producer_tail(produce_q->q_header);
2554 if (likely(tail + written < produce_q_size)) {
2555 result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
2556 } else {
2557 /* Tail pointer wraps around. */
2558
2559 const size_t tmp = (size_t) (produce_q_size - tail);
2560
2561 result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
2562 if (result >= VMCI_SUCCESS)
2563 result = qp_memcpy_to_queue_iter(produce_q, 0, from,
2564 written - tmp);
2565 }
2566
2567 if (result < VMCI_SUCCESS)
2568 return result;
2569
2570 vmci_q_header_add_producer_tail(produce_q->q_header, written,
2571 produce_q_size);
2572 return written;
2573 }
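
/*
 * Worked example of the wrap-around case above (illustrative numbers,
 * not part of the driver): with produce_q_size = 1024, tail = 1000 and
 * written = 100, the first qp_memcpy_to_queue_iter() copies
 * tmp = 1024 - 1000 = 24 bytes at offset 1000, the second copies the
 * remaining 76 bytes at offset 0, and the producer tail then advances
 * to (1000 + 100) % 1024 = 76.
 */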
2574
2575 /*
2576 * Dequeues data (if available) from the given consume queue. Writes data
2577 * to the user provided buffer using the provided function.
2578 * Assumes the queue->mutex has been acquired.
2579 * Results:
2580 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
2581 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
2582 * (as defined by the queue size).
2583  * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
2584 * Otherwise the number of bytes dequeued is returned.
2585 * Side effects:
2586 * Updates the head pointer of the consume queue.
2587 */
2588 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2589 struct vmci_queue *consume_q,
2590 const u64 consume_q_size,
2591 struct iov_iter *to,
2592 bool update_consumer)
2593 {
2594 size_t buf_size = iov_iter_count(to);
2595 s64 buf_ready;
2596 u64 head;
2597 size_t read;
2598 ssize_t result;
2599
2600 result = qp_map_queue_headers(produce_q, consume_q);
2601 if (unlikely(result != VMCI_SUCCESS))
2602 return result;
2603
2604 buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
2605 produce_q->q_header,
2606 consume_q_size);
2607 if (buf_ready == 0)
2608 return VMCI_ERROR_QUEUEPAIR_NODATA;
2609
2610 if (buf_ready < VMCI_SUCCESS)
2611 return (ssize_t) buf_ready;
2612
2613 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
2614 head = vmci_q_header_consumer_head(produce_q->q_header);
2615 if (likely(head + read < consume_q_size)) {
2616 result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
2617 } else {
2618 /* Head pointer wraps around. */
2619
2620 const size_t tmp = (size_t) (consume_q_size - head);
2621
2622 result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
2623 if (result >= VMCI_SUCCESS)
2624 result = qp_memcpy_from_queue_iter(to, consume_q, 0,
2625 read - tmp);
2626
2627 }
2628
2629 if (result < VMCI_SUCCESS)
2630 return result;
2631
2632 if (update_consumer)
2633 vmci_q_header_add_consumer_head(produce_q->q_header,
2634 read, consume_q_size);
2635
2636 return read;
2637 }
2638
2639 /*
2640 * vmci_qpair_alloc() - Allocates a queue pair.
2641 * @qpair: Pointer for the new vmci_qp struct.
2642 * @handle: Handle to track the resource.
2643 * @produce_qsize: Desired size of the producer queue.
2644 * @consume_qsize: Desired size of the consumer queue.
2645 * @peer: ContextID of the peer.
2646 * @flags: VMCI flags.
2647  * @priv_flags: VMCI privilege flags.
2648 *
2649 * This is the client interface for allocating the memory for a
2650 * vmci_qp structure and then attaching to the underlying
2651 * queue. If an error occurs allocating the memory for the
2652  * vmci_qp structure, no attempt is made to attach. If an
2653 * error occurs attaching, then the structure is freed.
2654 */
2655 int vmci_qpair_alloc(struct vmci_qp **qpair,
2656 struct vmci_handle *handle,
2657 u64 produce_qsize,
2658 u64 consume_qsize,
2659 u32 peer,
2660 u32 flags,
2661 u32 priv_flags)
2662 {
2663 struct vmci_qp *my_qpair;
2664 int retval;
2665 struct vmci_handle src = VMCI_INVALID_HANDLE;
2666 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
2667 enum vmci_route route;
2668 vmci_event_release_cb wakeup_cb;
2669 void *client_data;
2670
2671 /*
2672 * Restrict the size of a queuepair. The device already
2673 * enforces a limit on the total amount of memory that can be
2674 * allocated to queuepairs for a guest. However, we try to
2675 * allocate this memory before we make the queuepair
2676 * allocation hypercall. On Linux, we allocate each page
2677 * separately, which means rather than fail, the guest will
2678 * thrash while it tries to allocate, and will become
2679 * increasingly unresponsive to the point where it appears to
2680 * be hung. So we place a limit on the size of an individual
2681 * queuepair here, and leave the device to enforce the
2682 * restriction on total queuepair memory. (Note that this
2683 * doesn't prevent all cases; a user with only this much
2684 * physical memory could still get into trouble.) The error
2685 * used by the device is NO_RESOURCES, so use that here too.
2686 */
2687
2688 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
2689 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
2690 return VMCI_ERROR_NO_RESOURCES;
2691
2692 retval = vmci_route(&src, &dst, false, &route);
2693 if (retval < VMCI_SUCCESS)
2694 route = vmci_guest_code_active() ?
2695 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
2696
2697 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
2698 pr_devel("NONBLOCK OR PINNED set");
2699 return VMCI_ERROR_INVALID_ARGS;
2700 }
2701
2702 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
2703 if (!my_qpair)
2704 return VMCI_ERROR_NO_MEM;
2705
2706 my_qpair->produce_q_size = produce_qsize;
2707 my_qpair->consume_q_size = consume_qsize;
2708 my_qpair->peer = peer;
2709 my_qpair->flags = flags;
2710 my_qpair->priv_flags = priv_flags;
2711
2712 wakeup_cb = NULL;
2713 client_data = NULL;
2714
2715 if (VMCI_ROUTE_AS_HOST == route) {
2716 my_qpair->guest_endpoint = false;
2717 if (!(flags & VMCI_QPFLAG_LOCAL)) {
2718 my_qpair->blocked = 0;
2719 my_qpair->generation = 0;
2720 init_waitqueue_head(&my_qpair->event);
2721 wakeup_cb = qp_wakeup_cb;
2722 client_data = (void *)my_qpair;
2723 }
2724 } else {
2725 my_qpair->guest_endpoint = true;
2726 }
2727
2728 retval = vmci_qp_alloc(handle,
2729 &my_qpair->produce_q,
2730 my_qpair->produce_q_size,
2731 &my_qpair->consume_q,
2732 my_qpair->consume_q_size,
2733 my_qpair->peer,
2734 my_qpair->flags,
2735 my_qpair->priv_flags,
2736 my_qpair->guest_endpoint,
2737 wakeup_cb, client_data);
2738
2739 if (retval < VMCI_SUCCESS) {
2740 kfree(my_qpair);
2741 return retval;
2742 }
2743
2744 *qpair = my_qpair;
2745 my_qpair->handle = *handle;
2746
2747 return retval;
2748 }
2749 EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
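
/*
 * A minimal usage sketch for vmci_qpair_alloc() and its counterpart
 * vmci_qpair_detach() (below), e.g. from a guest kernel module. The
 * peer, queue sizes and flow are hypothetical; a real client would
 * typically subscribe to VMCI_EVENT_QP_PEER_ATTACH before exchanging
 * data:
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	int rv;
 *
 *	rv = vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
 *			      VMCI_HOST_CONTEXT_ID, 0,
 *			      VMCI_NO_PRIVILEGE_FLAGS);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;
 *	...
 *	vmci_qpair_detach(&qpair);
 *
 * vmci_qpair_detach() also frees the vmci_qp struct.
 */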
2750
2751 /*
2752  * vmci_qpair_detach() - Detaches the client from a queue pair.
2753 * @qpair: Reference of a pointer to the qpair struct.
2754 *
2755 * This is the client interface for detaching from a VMCIQPair.
2756 * Note that this routine will free the memory allocated for the
2757 * vmci_qp structure too.
2758 */
2759 int vmci_qpair_detach(struct vmci_qp **qpair)
2760 {
2761 int result;
2762 struct vmci_qp *old_qpair;
2763
2764 if (!qpair || !(*qpair))
2765 return VMCI_ERROR_INVALID_ARGS;
2766
2767 old_qpair = *qpair;
2768 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
2769
2770 /*
2771 * The guest can fail to detach for a number of reasons, and
2772  * if it does so, it will clean up the entry (if there is one).
2773  * The host can fail too, but it won't clean up the entry
2774  * immediately; it will do that later when the context is
2775 * freed. Either way, we need to release the qpair struct
2776 * here; there isn't much the caller can do, and we don't want
2777 * to leak.
2778 */
2779
2780 memset(old_qpair, 0, sizeof(*old_qpair));
2781 old_qpair->handle = VMCI_INVALID_HANDLE;
2782 old_qpair->peer = VMCI_INVALID_ID;
2783 kfree(old_qpair);
2784 *qpair = NULL;
2785
2786 return result;
2787 }
2788 EXPORT_SYMBOL_GPL(vmci_qpair_detach);
2789
2790 /*
2791 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
2792 * @qpair: Pointer to the queue pair struct.
2793 * @producer_tail: Reference used for storing producer tail index.
2794 * @consumer_head: Reference used for storing the consumer head index.
2795 *
2796 * This is the client interface for getting the current indexes of the
2797  * QPair from the point of view of the caller as the producer.
2798 */
2799 int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
2800 u64 *producer_tail,
2801 u64 *consumer_head)
2802 {
2803 struct vmci_queue_header *produce_q_header;
2804 struct vmci_queue_header *consume_q_header;
2805 int result;
2806
2807 if (!qpair)
2808 return VMCI_ERROR_INVALID_ARGS;
2809
2810 qp_lock(qpair);
2811 result =
2812 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2813 if (result == VMCI_SUCCESS)
2814 vmci_q_header_get_pointers(produce_q_header, consume_q_header,
2815 producer_tail, consumer_head);
2816 qp_unlock(qpair);
2817
2818 if (result == VMCI_SUCCESS &&
2819 ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
2820 (consumer_head && *consumer_head >= qpair->produce_q_size)))
2821 return VMCI_ERROR_INVALID_SIZE;
2822
2823 return result;
2824 }
2825 EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
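
/*
 * For reference, the producer's free space follows from the two indexes
 * returned above. This sketch mirrors vmci_q_header_free_space(), which
 * sacrifices one byte so that tail == head unambiguously means "empty";
 * produce_qsize is the size passed to vmci_qpair_alloc():
 *
 *	u64 tail, head, free_space;
 *
 *	vmci_qpair_get_produce_indexes(qpair, &tail, &head);
 *	if (tail >= head)
 *		free_space = produce_qsize - (tail - head) - 1;
 *	else
 *		free_space = head - tail - 1;
 */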
2826
2827 /*
2828 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
2829 * @qpair: Pointer to the queue pair struct.
2830 * @consumer_tail: Reference used for storing consumer tail index.
2831 * @producer_head: Reference used for storing the producer head index.
2832 *
2833 * This is the client interface for getting the current indexes of the
2834  * QPair from the point of view of the caller as the consumer.
2835 */
2836 int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
2837 u64 *consumer_tail,
2838 u64 *producer_head)
2839 {
2840 struct vmci_queue_header *produce_q_header;
2841 struct vmci_queue_header *consume_q_header;
2842 int result;
2843
2844 if (!qpair)
2845 return VMCI_ERROR_INVALID_ARGS;
2846
2847 qp_lock(qpair);
2848 result =
2849 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2850 if (result == VMCI_SUCCESS)
2851 vmci_q_header_get_pointers(consume_q_header, produce_q_header,
2852 consumer_tail, producer_head);
2853 qp_unlock(qpair);
2854
2855 if (result == VMCI_SUCCESS &&
2856 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
2857 (producer_head && *producer_head >= qpair->consume_q_size)))
2858 return VMCI_ERROR_INVALID_SIZE;
2859
2860 return result;
2861 }
2862 EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
2863
2864 /*
2865 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
2866 * @qpair: Pointer to the queue pair struct.
2867 *
2868 * This is the client interface for getting the amount of free
2869  * space in the QPair from the point of view of the caller as
2870  * the producer, which is the common case. Returns < 0 on error,
2871  * else the number of available bytes into which data can be enqueued.
2872 */
2873 s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
2874 {
2875 struct vmci_queue_header *produce_q_header;
2876 struct vmci_queue_header *consume_q_header;
2877 s64 result;
2878
2879 if (!qpair)
2880 return VMCI_ERROR_INVALID_ARGS;
2881
2882 qp_lock(qpair);
2883 result =
2884 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2885 if (result == VMCI_SUCCESS)
2886 result = vmci_q_header_free_space(produce_q_header,
2887 consume_q_header,
2888 qpair->produce_q_size);
2889 else
2890 result = 0;
2891
2892 qp_unlock(qpair);
2893
2894 return result;
2895 }
2896 EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
2897
2898 /*
2899 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
2900 * @qpair: Pointer to the queue pair struct.
2901 *
2902 * This is the client interface for getting the amount of free
2903  * space in the QPair from the point of view of the caller as
2904  * the consumer, which is not the common case. Returns < 0 on error,
2905  * else the number of available bytes into which data can be enqueued.
2906 */
2907 s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
2908 {
2909 struct vmci_queue_header *produce_q_header;
2910 struct vmci_queue_header *consume_q_header;
2911 s64 result;
2912
2913 if (!qpair)
2914 return VMCI_ERROR_INVALID_ARGS;
2915
2916 qp_lock(qpair);
2917 result =
2918 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2919 if (result == VMCI_SUCCESS)
2920 result = vmci_q_header_free_space(consume_q_header,
2921 produce_q_header,
2922 qpair->consume_q_size);
2923 else
2924 result = 0;
2925
2926 qp_unlock(qpair);
2927
2928 return result;
2929 }
2930 EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
2931
2932 /*
2933 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
2934 * producer queue.
2935 * @qpair: Pointer to the queue pair struct.
2936 *
2937 * This is the client interface for getting the amount of
2938  * enqueued data in the QPair from the point of view of the
2939  * caller as the producer, which is not the common case. Returns < 0
2940  * on error, else the number of bytes that may be read.
2941 */
2942 s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
2943 {
2944 struct vmci_queue_header *produce_q_header;
2945 struct vmci_queue_header *consume_q_header;
2946 s64 result;
2947
2948 if (!qpair)
2949 return VMCI_ERROR_INVALID_ARGS;
2950
2951 qp_lock(qpair);
2952 result =
2953 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2954 if (result == VMCI_SUCCESS)
2955 result = vmci_q_header_buf_ready(produce_q_header,
2956 consume_q_header,
2957 qpair->produce_q_size);
2958 else
2959 result = 0;
2960
2961 qp_unlock(qpair);
2962
2963 return result;
2964 }
2965 EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
2966
2967 /*
2968 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
2969 * consumer queue.
2970 * @qpair: Pointer to the queue pair struct.
2971 *
2972 * This is the client interface for getting the amount of
2973  * enqueued data in the QPair from the point of view of the
2974  * caller as the consumer, which is the normal case. Returns < 0
2975  * on error, else the number of bytes that may be read.
2976 */
2977 s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
2978 {
2979 struct vmci_queue_header *produce_q_header;
2980 struct vmci_queue_header *consume_q_header;
2981 s64 result;
2982
2983 if (!qpair)
2984 return VMCI_ERROR_INVALID_ARGS;
2985
2986 qp_lock(qpair);
2987 result =
2988 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2989 if (result == VMCI_SUCCESS)
2990 result = vmci_q_header_buf_ready(consume_q_header,
2991 produce_q_header,
2992 qpair->consume_q_size);
2993 else
2994 result = 0;
2995
2996 qp_unlock(qpair);
2997
2998 return result;
2999 }
3000 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
3001
3002 /*
3003 * vmci_qpair_enqueue() - Throw data on the queue.
3004 * @qpair: Pointer to the queue pair struct.
3005 * @buf: Pointer to buffer containing data
3006 * @buf_size: Length of buffer.
3007 * @buf_type: Buffer type (Unused).
3008 *
3009 * This is the client interface for enqueueing data into the queue.
3010 * Returns number of bytes enqueued or < 0 on error.
3011 */
3012 ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3013 const void *buf,
3014 size_t buf_size,
3015 int buf_type)
3016 {
3017 ssize_t result;
3018 struct iov_iter from;
3019 struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
3020
3021 if (!qpair || !buf)
3022 return VMCI_ERROR_INVALID_ARGS;
3023
3024 iov_iter_kvec(&from, WRITE, &v, 1, buf_size);
3025
3026 qp_lock(qpair);
3027
3028 do {
3029 result = qp_enqueue_locked(qpair->produce_q,
3030 qpair->consume_q,
3031 qpair->produce_q_size,
3032 &from);
3033
3034 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3035 !qp_wait_for_ready_queue(qpair))
3036 result = VMCI_ERROR_WOULD_BLOCK;
3037
3038 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3039
3040 qp_unlock(qpair);
3041
3042 return result;
3043 }
3044 EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
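
/*
 * Usage sketch (names hypothetical): vmci_qpair_enqueue() may perform a
 * short write, since only as many bytes as currently fit are enqueued:
 *
 *	ssize_t sent = vmci_qpair_enqueue(qpair, buf, len, 0);
 *
 *	if (sent == VMCI_ERROR_QUEUEPAIR_NOSPACE)
 *		return -EAGAIN;
 *	if (sent < VMCI_SUCCESS)
 *		return sent;
 *
 * A return value smaller than len is a short write; the remaining
 * len - sent bytes must be resubmitted once the peer has drained the
 * queue.
 */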
3045
3046 /*
3047 * vmci_qpair_dequeue() - Get data from the queue.
3048 * @qpair: Pointer to the queue pair struct.
3049 * @buf: Pointer to buffer for the data
3050 * @buf_size: Length of buffer.
3051 * @buf_type: Buffer type (Unused).
3052 *
3053 * This is the client interface for dequeueing data from the queue.
3054 * Returns number of bytes dequeued or < 0 on error.
3055 */
3056 ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
3057 void *buf,
3058 size_t buf_size,
3059 int buf_type)
3060 {
3061 ssize_t result;
3062 struct iov_iter to;
3063 struct kvec v = {.iov_base = buf, .iov_len = buf_size};
3064
3065 if (!qpair || !buf)
3066 return VMCI_ERROR_INVALID_ARGS;
3067
3068 iov_iter_kvec(&to, READ, &v, 1, buf_size);
3069
3070 qp_lock(qpair);
3071
3072 do {
3073 result = qp_dequeue_locked(qpair->produce_q,
3074 qpair->consume_q,
3075 qpair->consume_q_size,
3076 &to, true);
3077
3078 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3079 !qp_wait_for_ready_queue(qpair))
3080 result = VMCI_ERROR_WOULD_BLOCK;
3081
3082 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3083
3084 qp_unlock(qpair);
3085
3086 return result;
3087 }
3088 EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
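
/*
 * Matching receive sketch (names hypothetical); a return of
 * VMCI_ERROR_QUEUEPAIR_NODATA simply means the queue is currently
 * empty:
 *
 *	u8 buf[256];
 *	ssize_t got = vmci_qpair_dequeue(qpair, buf, sizeof(buf), 0);
 *
 *	if (got == VMCI_ERROR_QUEUEPAIR_NODATA)
 *		return 0;
 *	if (got < VMCI_SUCCESS)
 *		return got;
 *
 * On success, 'got' bytes have been copied into buf and the consumer
 * head has advanced past them; use vmci_qpair_peek() to read without
 * consuming.
 */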
3089
3090 /*
3091 * vmci_qpair_peek() - Peek at the data in the queue.
3092 * @qpair: Pointer to the queue pair struct.
3093 * @buf: Pointer to buffer for the data
3094 * @buf_size: Length of buffer.
3095 * @buf_type: Buffer type (Unused on Linux).
3096 *
3097 * This is the client interface for peeking into a queue. (I.e.,
3098 * copy data from the queue without updating the head pointer.)
3099  * Returns number of bytes peeked or < 0 on error.
3100 */
3101 ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
3102 void *buf,
3103 size_t buf_size,
3104 int buf_type)
3105 {
3106 struct iov_iter to;
3107 struct kvec v = {.iov_base = buf, .iov_len = buf_size};
3108 ssize_t result;
3109
3110 if (!qpair || !buf)
3111 return VMCI_ERROR_INVALID_ARGS;
3112
3113 iov_iter_kvec(&to, READ, &v, 1, buf_size);
3114
3115 qp_lock(qpair);
3116
3117 do {
3118 result = qp_dequeue_locked(qpair->produce_q,
3119 qpair->consume_q,
3120 qpair->consume_q_size,
3121 &to, false);
3122
3123 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3124 !qp_wait_for_ready_queue(qpair))
3125 result = VMCI_ERROR_WOULD_BLOCK;
3126
3127 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3128
3129 qp_unlock(qpair);
3130
3131 return result;
3132 }
3133 EXPORT_SYMBOL_GPL(vmci_qpair_peek);
3134
3135 /*
3136 * vmci_qpair_enquev() - Throw data on the queue using iov.
3137 * @qpair: Pointer to the queue pair struct.
3138  * @msg: Pointer to a struct msghdr whose iov contains the data.
3139  * @iov_size: Total length of the iov data.
3140 * @buf_type: Buffer type (Unused).
3141 *
3142 * This is the client interface for enqueueing data into the queue.
3143 * This function uses IO vectors to handle the work. Returns number
3144 * of bytes enqueued or < 0 on error.
3145 */
3146 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
3147 struct msghdr *msg,
3148 size_t iov_size,
3149 int buf_type)
3150 {
3151 ssize_t result;
3152
3153 if (!qpair)
3154 return VMCI_ERROR_INVALID_ARGS;
3155
3156 qp_lock(qpair);
3157
3158 do {
3159 result = qp_enqueue_locked(qpair->produce_q,
3160 qpair->consume_q,
3161 qpair->produce_q_size,
3162 &msg->msg_iter);
3163
3164 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3165 !qp_wait_for_ready_queue(qpair))
3166 result = VMCI_ERROR_WOULD_BLOCK;
3167
3168 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3169
3170 qp_unlock(qpair);
3171
3172 return result;
3173 }
3174 EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
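
/*
 * Sketch of building the msghdr consumed above from two kernel buffers,
 * mirroring the iov_iter_kvec() setup that vmci_qpair_enqueue() uses
 * internally; everything except the VMCI and iov_iter APIs is
 * hypothetical:
 *
 *	struct kvec vec[2] = {
 *		{ .iov_base = hdr,     .iov_len = hdr_len },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	struct msghdr msg = { };
 *	ssize_t sent;
 *
 *	iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, hdr_len + payload_len);
 *	sent = vmci_qpair_enquev(qpair, &msg, hdr_len + payload_len, 0);
 */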
3175
3176 /*
3177 * vmci_qpair_dequev() - Get data from the queue using iov.
3178 * @qpair: Pointer to the queue pair struct.
3179  * @msg: Pointer to a struct msghdr whose iov receives the data.
3180  * @iov_size: Total length of the iov buffer.
3181 * @buf_type: Buffer type (Unused).
3182 *
3183 * This is the client interface for dequeueing data from the queue.
3184 * This function uses IO vectors to handle the work. Returns number
3185 * of bytes dequeued or < 0 on error.
3186 */
3187 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
3188 struct msghdr *msg,
3189 size_t iov_size,
3190 int buf_type)
3191 {
3192 ssize_t result;
3193
3194 if (!qpair)
3195 return VMCI_ERROR_INVALID_ARGS;
3196
3197 qp_lock(qpair);
3198
3199 do {
3200 result = qp_dequeue_locked(qpair->produce_q,
3201 qpair->consume_q,
3202 qpair->consume_q_size,
3203 &msg->msg_iter, true);
3204
3205 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3206 !qp_wait_for_ready_queue(qpair))
3207 result = VMCI_ERROR_WOULD_BLOCK;
3208
3209 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3210
3211 qp_unlock(qpair);
3212
3213 return result;
3214 }
3215 EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
3216
3217 /*
3218 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
3219 * @qpair: Pointer to the queue pair struct.
3220  * @msg: Pointer to a struct msghdr whose iov receives the data.
3221  * @iov_size: Total length of the iov buffer.
3222 * @buf_type: Buffer type (Unused on Linux).
3223 *
3224 * This is the client interface for peeking into a queue. (I.e.,
3225 * copy data from the queue without updating the head pointer.)
3226 * This function uses IO vectors to handle the work. Returns number
3227 * of bytes peeked or < 0 on error.
3228 */
3229 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
3230 struct msghdr *msg,
3231 size_t iov_size,
3232 int buf_type)
3233 {
3234 ssize_t result;
3235
3236 if (!qpair)
3237 return VMCI_ERROR_INVALID_ARGS;
3238
3239 qp_lock(qpair);
3240
3241 do {
3242 result = qp_dequeue_locked(qpair->produce_q,
3243 qpair->consume_q,
3244 qpair->consume_q_size,
3245 &msg->msg_iter, false);
3246
3247 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3248 !qp_wait_for_ready_queue(qpair))
3249 result = VMCI_ERROR_WOULD_BLOCK;
3250
3251 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3252
3253 qp_unlock(qpair);
3254 return result;
3255 }
3256 EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
3257