Lines Matching refs: consume_q

148 struct vmci_queue *consume_q; member
204 struct vmci_queue *consume_q; member
217 void *consume_q; member
433 struct vmci_queue *consume_q = cons_q; in qp_alloc_ppn_set() local
436 if (!produce_q || !num_produce_pages || !consume_q || in qp_alloc_ppn_set()
463 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; in qp_alloc_ppn_set()
574 struct vmci_queue *consume_q) in qp_init_queue_mutex() argument
583 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; in qp_init_queue_mutex()
592 struct vmci_queue *consume_q) in qp_cleanup_queue_mutex() argument
596 consume_q->kernel_if->mutex = NULL; in qp_cleanup_queue_mutex()
648 struct vmci_queue *consume_q) in qp_host_get_user_memory() argument
667 consume_q->kernel_if->num_pages, in qp_host_get_user_memory()
669 consume_q->kernel_if->u.h.header_page); in qp_host_get_user_memory()
670 if (retval < (int)consume_q->kernel_if->num_pages) { in qp_host_get_user_memory()
673 qp_release_pages(consume_q->kernel_if->u.h.header_page, in qp_host_get_user_memory()
691 struct vmci_queue *consume_q) in qp_host_register_user_memory() argument
705 consume_q); in qp_host_register_user_memory()
714 struct vmci_queue *consume_q) in qp_host_unregister_user_memory() argument
721 qp_release_pages(consume_q->kernel_if->u.h.header_page, in qp_host_unregister_user_memory()
722 consume_q->kernel_if->num_pages, true); in qp_host_unregister_user_memory()
723 memset(consume_q->kernel_if->u.h.header_page, 0, in qp_host_unregister_user_memory()
724 sizeof(*consume_q->kernel_if->u.h.header_page) * in qp_host_unregister_user_memory()
725 consume_q->kernel_if->num_pages); in qp_host_unregister_user_memory()
737 struct vmci_queue *consume_q) in qp_host_map_queues() argument
741 if (!produce_q->q_header || !consume_q->q_header) { in qp_host_map_queues()
744 if (produce_q->q_header != consume_q->q_header) in qp_host_map_queues()
752 headers[1] = *consume_q->kernel_if->u.h.header_page; in qp_host_map_queues()
756 consume_q->q_header = in qp_host_map_queues()
778 struct vmci_queue *consume_q) in qp_host_unmap_queues() argument
781 if (produce_q->q_header < consume_q->q_header) in qp_host_unmap_queues()
784 vunmap(consume_q->q_header); in qp_host_unmap_queues()
787 consume_q->q_header = NULL; in qp_host_unmap_queues()
876 void *consume_q) in qp_guest_endpoint_create() argument
899 entry->consume_q = consume_q; in qp_guest_endpoint_create()
924 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); in qp_guest_endpoint_destroy()
926 qp_free_queue(entry->consume_q, entry->qp.consume_size); in qp_guest_endpoint_destroy()
1088 struct vmci_queue **consume_q, in qp_alloc_guest_work() argument
1137 my_produce_q = queue_pair_entry->consume_q; in qp_alloc_guest_work()
1222 *consume_q = (struct vmci_queue *)my_consume_q; in qp_alloc_guest_work()
1232 vmci_q_header_init((*consume_q)->q_header, *handle); in qp_alloc_guest_work()
1354 entry->consume_q = qp_host_alloc_queue(guest_consume_size); in qp_broker_create()
1355 if (entry->consume_q == NULL) { in qp_broker_create()
1360 qp_init_queue_mutex(entry->produce_q, entry->consume_q); in qp_broker_create()
1377 entry->consume_q->q_header = (struct vmci_queue_header *)tmp; in qp_broker_create()
1385 entry->consume_q); in qp_broker_create()
1419 vmci_q_header_init(entry->consume_q->q_header, in qp_broker_create()
1430 qp_host_free_queue(entry->consume_q, guest_consume_size); in qp_broker_create()
1631 entry->consume_q); in qp_broker_attach()
1764 struct vmci_queue **consume_q, in qp_alloc_host_work() argument
1798 *produce_q = entry->consume_q; in qp_alloc_host_work()
1799 *consume_q = entry->produce_q; in qp_alloc_host_work()
1802 *consume_q = entry->consume_q; in qp_alloc_host_work()
1823 struct vmci_queue **consume_q, in vmci_qp_alloc() argument
1832 if (!handle || !produce_q || !consume_q || in vmci_qp_alloc()
1838 produce_size, consume_q, in vmci_qp_alloc()
1843 produce_size, consume_q, in vmci_qp_alloc()
2007 entry->produce_q, entry->consume_q); in vmci_qp_broker_set_page_store()
2011 result = qp_host_map_queues(entry->produce_q, entry->consume_q); in vmci_qp_broker_set_page_store()
2014 entry->consume_q); in vmci_qp_broker_set_page_store()
2049 entry->consume_q->saved_header = NULL; in qp_reset_saved_headers()
2129 entry->consume_q->q_header; in vmci_qp_broker_detach()
2134 entry->consume_q); in vmci_qp_broker_detach()
2141 entry->consume_q); in vmci_qp_broker_detach()
2166 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); in vmci_qp_broker_detach()
2168 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); in vmci_qp_broker_detach()
2247 entry->consume_q); in vmci_qp_broker_map()
2276 entry->consume_q->saved_header != NULL) { in qp_save_headers()
2287 NULL == entry->consume_q->q_header) { in qp_save_headers()
2288 result = qp_host_map_queues(entry->produce_q, entry->consume_q); in qp_save_headers()
2296 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, in qp_save_headers()
2298 entry->consume_q->saved_header = &entry->saved_consume_q; in qp_save_headers()
2350 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); in vmci_qp_broker_unmap()
2360 entry->consume_q); in vmci_qp_broker_unmap()
2434 struct vmci_queue *consume_q) in qp_map_queue_headers() argument
2438 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { in qp_map_queue_headers()
2439 result = qp_host_map_queues(produce_q, consume_q); in qp_map_queue_headers()
2442 consume_q->saved_header) ? in qp_map_queue_headers()
2462 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); in qp_get_queue_headers()
2465 *consume_q_header = qpair->consume_q->q_header; in qp_get_queue_headers()
2467 qpair->consume_q->saved_header) { in qp_get_queue_headers()
2469 *consume_q_header = qpair->consume_q->saved_header; in qp_get_queue_headers()
2527 struct vmci_queue *consume_q, in qp_enqueue_locked() argument
2537 result = qp_map_queue_headers(produce_q, consume_q); in qp_enqueue_locked()
2542 consume_q->q_header, in qp_enqueue_locked()
2587 struct vmci_queue *consume_q, in qp_dequeue_locked() argument
2598 result = qp_map_queue_headers(produce_q, consume_q); in qp_dequeue_locked()
2602 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, in qp_dequeue_locked()
2614 result = qp_memcpy_from_queue_iter(to, consume_q, head, read); in qp_dequeue_locked()
2620 result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp); in qp_dequeue_locked()
2622 result = qp_memcpy_from_queue_iter(to, consume_q, 0, in qp_dequeue_locked()
2729 &my_qpair->consume_q, in vmci_qpair_alloc()
3028 qpair->consume_q, in vmci_qpair_enqueue()
3072 qpair->consume_q, in vmci_qpair_dequeue()
3117 qpair->consume_q, in vmci_qpair_peek()
3158 qpair->consume_q, in vmci_qpair_enquev()
3199 qpair->consume_q, in vmci_qpair_dequev()
3241 qpair->consume_q, in vmci_qpair_peekv()
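
The tail of the listing shows the exported vmci_qpair_* wrappers passing qpair->consume_q into the locked enqueue/dequeue helpers, and qp_alloc_host_work (lines 1798-1799 above) shows that one side's produce queue is the other side's consume queue. Below is a minimal, hedged sketch of how a kernel client (for example, a vSock-style transport) might drive those wrappers and, indirectly, the consume_q member. The queue sizes, peer context ID, and error handling are illustrative assumptions, not taken from the file referenced above.

```c
/*
 * Illustrative sketch only: exercises the public vmci_qpair_* API.
 * Queue sizes, peer_cid and the "ping" payload are made-up values.
 */
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

static int example_qpair_roundtrip(u32 peer_cid)
{
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	struct vmci_qp *qpair = NULL;
	char msg[] = "ping";
	char buf[sizeof(msg)];
	ssize_t written, read;
	int result;

	/* Allocates both queues; the consume queue backs qpair->consume_q. */
	result = vmci_qpair_alloc(&qpair, &handle,
				  4096 /* produce size */,
				  4096 /* consume size */,
				  peer_cid, 0 /* flags */,
				  VMCI_NO_PRIVILEGE_FLAGS);
	if (result < VMCI_SUCCESS)
		return result;

	/* Writes into our produce queue, i.e. the peer's consume queue. */
	written = vmci_qpair_enqueue(qpair, msg, sizeof(msg), 0);
	if (written < 0) {
		result = (int)written;
		goto out;
	}

	/* Reads whatever the peer placed into our consume queue. */
	read = vmci_qpair_dequeue(qpair, buf, sizeof(buf), 0);
	if (read >= 0)
		pr_info("example_qpair_roundtrip: received %zd bytes\n", read);

out:
	vmci_qpair_detach(&qpair);
	return result;
}
```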