Lines Matching refs: consume_q

148 struct vmci_queue *consume_q; member
204 struct vmci_queue *consume_q; member
217 void *consume_q; member
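
The three "member" hits above come from the queue-pair bookkeeping structures in drivers/misc/vmw_vmci/vmci_queue_pair.c. An abridged sketch of how produce_q and consume_q are paired in them (field sets and ordering are assumptions, not a verbatim copy):

/* Sketch only; fields abridged. */
struct vmci_qp {			/* the qpair handed back to clients */
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	/* ... sizes, flags, wait queue ... */
};

struct qp_broker_entry {		/* host-side bookkeeping for one pair */
	struct qp_entry qp;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;	/* see qp_save_headers() below */
	struct vmci_queue_header saved_consume_q;
	/* ... */
};

struct qp_guest_endpoint {		/* guest-side endpoint */
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;		/* opaque queue memory allocated by the guest */
	void *consume_q;		/* matches the "void *consume_q" member hit */
	/* ... */
};
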
435 struct vmci_queue *consume_q = cons_q; in qp_alloc_ppn_set() local
438 if (!produce_q || !num_produce_pages || !consume_q || in qp_alloc_ppn_set()
465 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; in qp_alloc_ppn_set()
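
Lines 435-465 collect the physical page numbers (PPNs) of the guest queues so they can be handed to the hypervisor. A minimal sketch of the gathering step implied by line 465; qp_fill_consume_ppns is a hypothetical helper, and the array/count names are assumptions:

/* Sketch: convert the consume queue's per-page physical addresses to PPNs.
 * consume_ppns and num_consume_pages are assumed to come from the caller. */
static void qp_fill_consume_ppns(struct vmci_queue *consume_q,
				 u64 num_consume_pages, u64 *consume_ppns)
{
	u64 i;

	for (i = 0; i < num_consume_pages; i++)
		consume_ppns[i] = consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
}
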
579 struct vmci_queue *consume_q) in qp_init_queue_mutex() argument
588 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; in qp_init_queue_mutex()
597 struct vmci_queue *consume_q) in qp_cleanup_queue_mutex() argument
601 consume_q->kernel_if->mutex = NULL; in qp_cleanup_queue_mutex()
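
Lines 579-601 show that a queue pair shares a single lock: the produce queue owns the storage (__mutex) and the consume queue only points at it, so locking either queue serializes both. A sketch of that pattern, assuming the mutex is only needed for host-side queues:

/* Sketch of the shared-mutex pattern at lines 588 and 601. */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		mutex_init(produce_q->kernel_if->mutex);
	}
}

static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = NULL;
		consume_q->kernel_if->mutex = NULL;
	}
}
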
653 struct vmci_queue *consume_q) in qp_host_get_user_memory() argument
673 consume_q->kernel_if->num_pages, in qp_host_get_user_memory()
675 consume_q->kernel_if->u.h.header_page); in qp_host_get_user_memory()
676 if (retval < (int)consume_q->kernel_if->num_pages) { in qp_host_get_user_memory()
680 qp_release_pages(consume_q->kernel_if->u.h.header_page, in qp_host_get_user_memory()
698 struct vmci_queue *consume_q) in qp_host_register_user_memory() argument
712 consume_q); in qp_host_register_user_memory()
721 struct vmci_queue *consume_q) in qp_host_unregister_user_memory() argument
728 qp_release_pages(consume_q->kernel_if->u.h.header_page, in qp_host_unregister_user_memory()
729 consume_q->kernel_if->num_pages, true); in qp_host_unregister_user_memory()
730 memset(consume_q->kernel_if->u.h.header_page, 0, in qp_host_unregister_user_memory()
731 sizeof(*consume_q->kernel_if->u.h.header_page) * in qp_host_unregister_user_memory()
732 consume_q->kernel_if->num_pages); in qp_host_unregister_user_memory()
744 struct vmci_queue *consume_q) in qp_host_map_queues() argument
748 if (!produce_q->q_header || !consume_q->q_header) { in qp_host_map_queues()
751 if (produce_q->q_header != consume_q->q_header) in qp_host_map_queues()
759 headers[1] = *consume_q->kernel_if->u.h.header_page; in qp_host_map_queues()
763 consume_q->q_header = in qp_host_map_queues()
785 struct vmci_queue *consume_q) in qp_host_unmap_queues() argument
788 if (produce_q->q_header < consume_q->q_header) in qp_host_unmap_queues()
791 vunmap(consume_q->q_header); in qp_host_unmap_queues()
794 consume_q->q_header = NULL; in qp_host_unmap_queues()
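
Lines 744-794 map and unmap the two queue headers in the host kernel. The mapping side vmap()s the produce and consume header pages back to back, so consume_q->q_header ends up one page after produce_q->q_header; the unmap side vunmap()s whichever pointer is lower (they share one mapping) and clears both. A hedged sketch of the mapping path, with some error checks omitted:

/* Sketch of qp_host_map_queues() as suggested by lines 748-763. */
static int qp_host_map_queues(struct vmci_queue *produce_q,
			      struct vmci_queue *consume_q)
{
	if (!produce_q->q_header || !consume_q->q_header) {
		struct page *headers[2];

		if (produce_q->q_header != consume_q->q_header)
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;

		headers[0] = *produce_q->kernel_if->u.h.header_page;
		headers[1] = *consume_q->kernel_if->u.h.header_page;

		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
		if (!produce_q->q_header)
			return VMCI_ERROR_NO_MEM;

		/* The consume header is the second mapped page. */
		consume_q->q_header = (struct vmci_queue_header *)
			((u8 *)produce_q->q_header + PAGE_SIZE);
	}

	return VMCI_SUCCESS;
}
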
883 void *consume_q) in qp_guest_endpoint_create() argument
906 entry->consume_q = consume_q; in qp_guest_endpoint_create()
931 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); in qp_guest_endpoint_destroy()
933 qp_free_queue(entry->consume_q, entry->qp.consume_size); in qp_guest_endpoint_destroy()
1095 struct vmci_queue **consume_q, in qp_alloc_guest_work() argument
1144 my_produce_q = queue_pair_entry->consume_q; in qp_alloc_guest_work()
1229 *consume_q = (struct vmci_queue *)my_consume_q; in qp_alloc_guest_work()
1239 vmci_q_header_init((*consume_q)->q_header, *handle); in qp_alloc_guest_work()
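
Line 1144 is the notable hit in qp_alloc_guest_work(): when the second endpoint of a local (loopback) pair attaches, the queues are handed out swapped, so what the creator consumes is what the attacher produces. A sketch fragment of that branch; the control flow is simplified and the condition is assumed to be the VMCI_QPFLAG_LOCAL attach case:

/* Sketch fragment of the local-attach swap around line 1144. */
if (queue_pair_entry &&
    (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL)) {
	my_produce_q = queue_pair_entry->consume_q;
	my_consume_q = queue_pair_entry->produce_q;
} else {
	my_produce_q = qp_alloc_queue(produce_size, flags);
	my_consume_q = qp_alloc_queue(consume_size, flags);
}
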
1361 entry->consume_q = qp_host_alloc_queue(guest_consume_size); in qp_broker_create()
1362 if (entry->consume_q == NULL) { in qp_broker_create()
1367 qp_init_queue_mutex(entry->produce_q, entry->consume_q); in qp_broker_create()
1384 entry->consume_q->q_header = (struct vmci_queue_header *)tmp; in qp_broker_create()
1392 entry->consume_q); in qp_broker_create()
1426 vmci_q_header_init(entry->consume_q->q_header, in qp_broker_create()
1437 qp_host_free_queue(entry->consume_q, guest_consume_size); in qp_broker_create()
1638 entry->consume_q); in qp_broker_attach()
1771 struct vmci_queue **consume_q, in qp_alloc_host_work() argument
1805 *produce_q = entry->consume_q; in qp_alloc_host_work()
1806 *consume_q = entry->produce_q; in qp_alloc_host_work()
1809 *consume_q = entry->consume_q; in qp_alloc_host_work()
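
Lines 1805-1809 show the same swap on the host side: one branch returns the broker entry's queues crossed over, the other returns them as stored. The exact branch condition is not visible in the hits, so the sketch below uses a hypothetical flag, attached_to_peer_created_pair, to stand in for it:

/* Sketch fragment of the view returned by qp_alloc_host_work() (lines 1805-1809).
 * attached_to_peer_created_pair is a placeholder for the real condition. */
if (attached_to_peer_created_pair) {
	/* The attacher's produce queue is the creator's consume queue. */
	*produce_q = entry->consume_q;
	*consume_q = entry->produce_q;
} else {
	*produce_q = entry->produce_q;
	*consume_q = entry->consume_q;
}
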
1830 struct vmci_queue **consume_q, in vmci_qp_alloc() argument
1839 if (!handle || !produce_q || !consume_q || in vmci_qp_alloc()
1845 produce_size, consume_q, in vmci_qp_alloc()
1850 produce_size, consume_q, in vmci_qp_alloc()
2017 entry->produce_q, entry->consume_q); in vmci_qp_broker_set_page_store()
2021 result = qp_host_map_queues(entry->produce_q, entry->consume_q); in vmci_qp_broker_set_page_store()
2024 entry->consume_q); in vmci_qp_broker_set_page_store()
2059 entry->consume_q->saved_header = NULL; in qp_reset_saved_headers()
2139 entry->consume_q->q_header; in vmci_qp_broker_detach()
2144 entry->consume_q); in vmci_qp_broker_detach()
2151 entry->consume_q); in vmci_qp_broker_detach()
2176 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); in vmci_qp_broker_detach()
2178 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); in vmci_qp_broker_detach()
2258 entry->consume_q); in vmci_qp_broker_map()
2287 entry->consume_q->saved_header != NULL) { in qp_save_headers()
2298 NULL == entry->consume_q->q_header) { in qp_save_headers()
2299 result = qp_host_map_queues(entry->produce_q, entry->consume_q); in qp_save_headers()
2307 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, in qp_save_headers()
2309 entry->consume_q->saved_header = &entry->saved_consume_q; in qp_save_headers()
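
Lines 2287-2309 snapshot both queue headers into the broker entry before the backing memory is torn down, so later reads can still see the last head/tail values via saved_header. A sketch of qp_save_headers() consistent with those hits (comments and error codes abridged):

/* Sketch based on lines 2287-2309. */
static int qp_save_headers(struct qp_broker_entry *entry)
{
	int result;

	if (entry->produce_q->saved_header != NULL &&
	    entry->consume_q->saved_header != NULL)
		return VMCI_SUCCESS;	/* already saved */

	if (entry->produce_q->q_header == NULL ||
	    entry->consume_q->q_header == NULL) {
		result = qp_host_map_queues(entry->produce_q, entry->consume_q);
		if (result < VMCI_SUCCESS)
			return result;
	}

	memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
	       sizeof(entry->saved_produce_q));
	entry->produce_q->saved_header = &entry->saved_produce_q;
	memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
	       sizeof(entry->saved_consume_q));
	entry->consume_q->saved_header = &entry->saved_consume_q;

	return VMCI_SUCCESS;
}
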
2362 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); in vmci_qp_broker_unmap()
2372 entry->consume_q); in vmci_qp_broker_unmap()
2446 struct vmci_queue *consume_q) in qp_map_queue_headers() argument
2450 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { in qp_map_queue_headers()
2451 result = qp_host_map_queues(produce_q, consume_q); in qp_map_queue_headers()
2454 consume_q->saved_header) ? in qp_map_queue_headers()
2474 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); in qp_get_queue_headers()
2477 *consume_q_header = qpair->consume_q->q_header; in qp_get_queue_headers()
2479 qpair->consume_q->saved_header) { in qp_get_queue_headers()
2481 *consume_q_header = qpair->consume_q->saved_header; in qp_get_queue_headers()
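
Lines 2446-2481 decide which headers a read or write should use: the live q_header pointers when the shared memory is mapped, otherwise the saved_header snapshots taken above. A sketch of qp_get_queue_headers() consistent with those hits:

/* Sketch of the header selection around lines 2474-2481. */
static int qp_get_queue_headers(const struct vmci_qp *qpair,
				struct vmci_queue_header **produce_q_header,
				struct vmci_queue_header **consume_q_header)
{
	int result;

	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
	if (result == VMCI_SUCCESS) {
		*produce_q_header = qpair->produce_q->q_header;
		*consume_q_header = qpair->consume_q->q_header;
	} else if (qpair->produce_q->saved_header &&
		   qpair->consume_q->saved_header) {
		/* Shared memory is gone; fall back to the snapshots. */
		*produce_q_header = qpair->produce_q->saved_header;
		*consume_q_header = qpair->consume_q->saved_header;
		result = VMCI_SUCCESS;
	}

	return result;
}
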
2539 struct vmci_queue *consume_q, in qp_enqueue_locked() argument
2549 result = qp_map_queue_headers(produce_q, consume_q); in qp_enqueue_locked()
2554 consume_q->q_header, in qp_enqueue_locked()
2599 struct vmci_queue *consume_q, in qp_dequeue_locked() argument
2610 result = qp_map_queue_headers(produce_q, consume_q); in qp_dequeue_locked()
2614 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, in qp_dequeue_locked()
2626 result = qp_memcpy_from_queue_iter(to, consume_q, head, read); in qp_dequeue_locked()
2632 result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp); in qp_dequeue_locked()
2634 result = qp_memcpy_from_queue_iter(to, consume_q, 0, in qp_dequeue_locked()
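
Lines 2599-2634 read from the circular consume queue; when the requested length runs past the end of the buffer the copy is split into two pieces. A sketch fragment of that wrap-around handling (surrounding declarations omitted; head is the current read offset, buf_ready the bytes available):

/* Sketch fragment of the two-part copy around lines 2626-2634. */
read = (size_t)(buf_ready > buf_size ? buf_size : buf_ready);
head = vmci_q_header_consumer_head(produce_q->q_header);
if (likely(head + read < consume_q_size)) {
	result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
} else {
	/* Wraps: copy the tail of the buffer, then the rest from offset 0. */
	const size_t tmp = (size_t)(consume_q_size - head);

	result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
	if (result >= VMCI_SUCCESS)
		result = qp_memcpy_from_queue_iter(to, consume_q, 0,
						   read - tmp);
}
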
2740 &my_qpair->consume_q, in vmci_qpair_alloc()
3039 qpair->consume_q, in vmci_qpair_enqueue()
3083 qpair->consume_q, in vmci_qpair_dequeue()
3128 qpair->consume_q, in vmci_qpair_peek()
3169 qpair->consume_q, in vmci_qpair_enquev()
3210 qpair->consume_q, in vmci_qpair_dequev()
3252 qpair->consume_q, in vmci_qpair_peekv()
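
The remaining hits (lines 2740-3252) are the public vmci_qpair_* entry points, which simply pass qpair->produce_q and qpair->consume_q into the locked helpers above. A minimal, hedged usage sketch of that API from a kernel client; the function name and the 4 KiB queue sizes are placeholders, and the dequeue may legitimately return VMCI_ERROR_QUEUEPAIR_NODATA if the peer has not produced anything yet:

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

/* Sketch only: allocate a queue pair, send one buffer, poll for incoming data. */
static int example_use_qpair(u32 peer_context_id)
{
	struct vmci_qp *qpair;
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	char buf[64] = "hello";
	ssize_t result;

	result = vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
				  peer_context_id, 0, VMCI_NO_PRIVILEGE_FLAGS);
	if (result < VMCI_SUCCESS)
		return (int)result;

	/* Send one buffer toward the peer. */
	result = vmci_qpair_enqueue(qpair, buf, sizeof(buf), 0);

	/* Read whatever the peer has produced so far (may be "no data"). */
	if (result >= VMCI_SUCCESS)
		result = vmci_qpair_dequeue(qpair, buf, sizeof(buf), 0);

	vmci_qpair_detach(&qpair);
	return result < VMCI_SUCCESS ? (int)result : 0;
}
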