Lines matching refs: produce_q
147 struct vmci_queue *produce_q; member
203 struct vmci_queue *produce_q; member
216 void *produce_q; member
432 struct vmci_queue *produce_q = prod_q; in qp_alloc_ppn_set() local
436 if (!produce_q || !num_produce_pages || !consume_q || in qp_alloc_ppn_set()
459 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; in qp_alloc_ppn_set()
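The qp_alloc_ppn_set() hits above show the guest side turning the physical addresses of its queue pages into page frame numbers by shifting by PAGE_SHIFT. A minimal userspace sketch of that conversion, with made-up addresses and a hard-coded PAGE_SHIFT standing in for the kernel constant:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assume 4 KiB pages; stand-in for the kernel constant */

int main(void)
{
        /* Fake physical addresses of queue pages (the kernel reads these
         * from kernel_if->u.g.pas[]). */
        uint64_t pas[] = { 0x1000, 0x5000, 0x9000 };
        uint64_t ppns[3];

        for (int i = 0; i < 3; i++) {
                ppns[i] = pas[i] >> PAGE_SHIFT;   /* physical address -> PPN */
                printf("page %d: pa=0x%llx ppn=%llu\n", i,
                       (unsigned long long)pas[i],
                       (unsigned long long)ppns[i]);
        }
        return 0;
}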
573 static void qp_init_queue_mutex(struct vmci_queue *produce_q, in qp_init_queue_mutex() argument
581 if (produce_q->kernel_if->host) { in qp_init_queue_mutex()
582 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; in qp_init_queue_mutex()
583 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; in qp_init_queue_mutex()
584 mutex_init(produce_q->kernel_if->mutex); in qp_init_queue_mutex()
591 static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q, in qp_cleanup_queue_mutex() argument
594 if (produce_q->kernel_if->host) { in qp_cleanup_queue_mutex()
595 produce_q->kernel_if->mutex = NULL; in qp_cleanup_queue_mutex()
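qp_init_queue_mutex() and qp_cleanup_queue_mutex() show that, on the host side, both queues of a pair share the single mutex embedded in the produce queue's kernel_if, so locking either queue serializes the whole pair. A minimal pthread-based sketch of that pattern, with simplified stand-in structs rather than the kernel's vmci_queue types:

#include <pthread.h>
#include <stddef.h>

/* Simplified stand-in for the relevant part of the queue's kernel_if. */
struct queue_kernel_if {
        int host;                  /* only host-side queues need the mutex */
        pthread_mutex_t __mutex;   /* storage lives in the produce queue */
        pthread_mutex_t *mutex;    /* both queues point at the same mutex */
};

void init_queue_mutex(struct queue_kernel_if *produce_q,
                      struct queue_kernel_if *consume_q)
{
        if (produce_q->host) {
                produce_q->mutex = &produce_q->__mutex;
                consume_q->mutex = &produce_q->__mutex;   /* shared */
                pthread_mutex_init(produce_q->mutex, NULL);
        }
}

void cleanup_queue_mutex(struct queue_kernel_if *produce_q,
                         struct queue_kernel_if *consume_q)
{
        if (produce_q->host) {
                pthread_mutex_destroy(&produce_q->__mutex);
                produce_q->mutex = NULL;
                consume_q->mutex = NULL;
        }
}

int main(void)
{
        struct queue_kernel_if prod = { .host = 1 }, cons = { .host = 1 };

        init_queue_mutex(&prod, &cons);
        pthread_mutex_lock(cons.mutex);     /* locks the pair's shared mutex */
        pthread_mutex_unlock(cons.mutex);
        cleanup_queue_mutex(&prod, &cons);
        return 0;
}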
647 struct vmci_queue *produce_q, in qp_host_get_user_memory() argument
654 produce_q->kernel_if->num_pages, in qp_host_get_user_memory()
656 produce_q->kernel_if->u.h.header_page); in qp_host_get_user_memory()
657 if (retval < (int)produce_q->kernel_if->num_pages) { in qp_host_get_user_memory()
660 qp_release_pages(produce_q->kernel_if->u.h.header_page, in qp_host_get_user_memory()
675 qp_release_pages(produce_q->kernel_if->u.h.header_page, in qp_host_get_user_memory()
676 produce_q->kernel_if->num_pages, false); in qp_host_get_user_memory()
690 struct vmci_queue *produce_q, in qp_host_register_user_memory() argument
703 produce_q->kernel_if->num_pages * PAGE_SIZE; in qp_host_register_user_memory()
704 return qp_host_get_user_memory(produce_uva, consume_uva, produce_q, in qp_host_register_user_memory()
713 static void qp_host_unregister_user_memory(struct vmci_queue *produce_q, in qp_host_unregister_user_memory() argument
716 qp_release_pages(produce_q->kernel_if->u.h.header_page, in qp_host_unregister_user_memory()
717 produce_q->kernel_if->num_pages, true); in qp_host_unregister_user_memory()
718 memset(produce_q->kernel_if->u.h.header_page, 0, in qp_host_unregister_user_memory()
719 sizeof(*produce_q->kernel_if->u.h.header_page) * in qp_host_unregister_user_memory()
720 produce_q->kernel_if->num_pages); in qp_host_unregister_user_memory()
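qp_host_get_user_memory() pins the user pages backing the produce queue and, if fewer pages than requested could be pinned, releases the partial set and fails; qp_host_unregister_user_memory() later releases and clears the page array. A minimal sketch of that pin-or-roll-back pattern, with hypothetical pin_page()/release_pages() helpers standing in for the kernel's page-pinning calls:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ERR_NO_MEM (-1)

static char fake_pages[4][64];   /* stand-ins for pinned pages */

/* Hypothetical helper: pretend to pin one page, failing after two
 * to demonstrate the roll-back path. */
static bool pin_page(size_t i, void **page)
{
        if (i >= 2)
                return false;
        *page = fake_pages[i];
        return true;
}

static void release_pages(void **pages, size_t n)
{
        for (size_t i = 0; i < n; i++)
                pages[i] = NULL;    /* real code would unpin each page */
}

static int get_queue_pages(void **pages, size_t num_pages)
{
        size_t pinned = 0;

        while (pinned < num_pages && pin_page(pinned, &pages[pinned]))
                pinned++;

        if (pinned < num_pages) {   /* partial pin: release and fail */
                printf("only pinned %zu of %zu pages\n", pinned, num_pages);
                release_pages(pages, pinned);
                return ERR_NO_MEM;
        }
        return 0;
}

int main(void)
{
        void *pages[4];

        return get_queue_pages(pages, 4) == 0 ? 0 : 1;
}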
736 static int qp_host_map_queues(struct vmci_queue *produce_q, in qp_host_map_queues() argument
741 if (!produce_q->q_header || !consume_q->q_header) { in qp_host_map_queues()
744 if (produce_q->q_header != consume_q->q_header) in qp_host_map_queues()
747 if (produce_q->kernel_if->u.h.header_page == NULL || in qp_host_map_queues()
748 *produce_q->kernel_if->u.h.header_page == NULL) in qp_host_map_queues()
751 headers[0] = *produce_q->kernel_if->u.h.header_page; in qp_host_map_queues()
754 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL); in qp_host_map_queues()
755 if (produce_q->q_header != NULL) { in qp_host_map_queues()
758 produce_q->q_header + in qp_host_map_queues()
777 struct vmci_queue *produce_q, in qp_host_unmap_queues() argument
780 if (produce_q->q_header) { in qp_host_unmap_queues()
781 if (produce_q->q_header < consume_q->q_header) in qp_host_unmap_queues()
782 vunmap(produce_q->q_header); in qp_host_unmap_queues()
786 produce_q->q_header = NULL; in qp_host_unmap_queues()
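qp_host_map_queues() maps the produce and consume header pages back to back into one contiguous kernel mapping, so the consume header ends up one page above the produce header, and qp_host_unmap_queues() tears the mapping down starting from the lower of the two addresses. A minimal userspace sketch of that layout, with mmap()/munmap() standing in for the kernel's vmap()/vunmap():

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);

        /* One contiguous mapping covering both header pages; mmap() stands
         * in for the kernel's vmap() of the two pinned header pages. */
        void *headers = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (headers == MAP_FAILED)
                return 1;

        void *produce_header = headers;                   /* page 0 */
        void *consume_header = (char *)headers + page;    /* page 1 */

        printf("produce header at %p, consume header at %p\n",
               produce_header, consume_header);

        /* Tear down from the lower of the two addresses, mirroring the
         * single vunmap() in qp_host_unmap_queues(). */
        void *low = (char *)produce_header < (char *)consume_header ?
                    produce_header : consume_header;
        munmap(low, 2 * page);
        return 0;
}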
875 void *produce_q, in qp_guest_endpoint_create() argument
898 entry->produce_q = produce_q; in qp_guest_endpoint_create()
924 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); in qp_guest_endpoint_destroy()
925 qp_free_queue(entry->produce_q, entry->qp.produce_size); in qp_guest_endpoint_destroy()
1086 struct vmci_queue **produce_q, in qp_alloc_guest_work() argument
1138 my_consume_q = queue_pair_entry->produce_q; in qp_alloc_guest_work()
1221 *produce_q = (struct vmci_queue *)my_produce_q; in qp_alloc_guest_work()
1231 vmci_q_header_init((*produce_q)->q_header, *handle); in qp_alloc_guest_work()
1349 entry->produce_q = qp_host_alloc_queue(guest_produce_size); in qp_broker_create()
1350 if (entry->produce_q == NULL) { in qp_broker_create()
1360 qp_init_queue_mutex(entry->produce_q, entry->consume_q); in qp_broker_create()
1374 entry->produce_q->q_header = entry->local_mem; in qp_broker_create()
1384 entry->produce_q, in qp_broker_create()
1417 vmci_q_header_init(entry->produce_q->q_header, in qp_broker_create()
1429 qp_host_free_queue(entry->produce_q, guest_produce_size); in qp_broker_create()
1630 entry->produce_q, in qp_broker_attach()
1762 struct vmci_queue **produce_q, in qp_alloc_host_work() argument
1798 *produce_q = entry->consume_q; in qp_alloc_host_work()
1799 *consume_q = entry->produce_q; in qp_alloc_host_work()
1801 *produce_q = entry->produce_q; in qp_alloc_host_work()
1821 struct vmci_queue **produce_q, in vmci_qp_alloc() argument
1832 if (!handle || !produce_q || !consume_q || in vmci_qp_alloc()
1837 return qp_alloc_guest_work(handle, produce_q, in vmci_qp_alloc()
1842 return qp_alloc_host_work(handle, produce_q, in vmci_qp_alloc()
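qp_alloc_host_work() hands the queues out crosswise when a peer attaches to an existing pair: the attacher's produce queue is the creator's consume queue and vice versa. A minimal sketch of that swap, using simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct queue { const char *name; };

/* Simplified stand-in for the broker's queue pair entry. */
struct qp_entry {
        struct queue produce_q;
        struct queue consume_q;
};

/* Hand the queues out: the attaching side produces into what the creator
 * consumes from, so the pointers are swapped for the attacher. */
static void hand_out_queues(struct qp_entry *entry, bool is_attacher,
                            struct queue **produce_q, struct queue **consume_q)
{
        if (is_attacher) {
                *produce_q = &entry->consume_q;
                *consume_q = &entry->produce_q;
        } else {
                *produce_q = &entry->produce_q;
                *consume_q = &entry->consume_q;
        }
}

int main(void)
{
        struct qp_entry e = { { "creator-produce" }, { "creator-consume" } };
        struct queue *p, *c;

        hand_out_queues(&e, true, &p, &c);
        printf("attacher produces into %s, consumes from %s\n",
               p->name, c->name);
        return 0;
}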
2007 entry->produce_q, entry->consume_q); in vmci_qp_broker_set_page_store()
2011 result = qp_host_map_queues(entry->produce_q, entry->consume_q); in vmci_qp_broker_set_page_store()
2013 qp_host_unregister_user_memory(entry->produce_q, in vmci_qp_broker_set_page_store()
2048 entry->produce_q->saved_header = NULL; in qp_reset_saved_headers()
2127 qp_acquire_queue_mutex(entry->produce_q); in vmci_qp_broker_detach()
2128 headers_mapped = entry->produce_q->q_header || in vmci_qp_broker_detach()
2133 entry->produce_q, in vmci_qp_broker_detach()
2140 qp_host_unregister_user_memory(entry->produce_q, in vmci_qp_broker_detach()
2148 qp_release_queue_mutex(entry->produce_q); in vmci_qp_broker_detach()
2166 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); in vmci_qp_broker_detach()
2167 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); in vmci_qp_broker_detach()
2242 qp_acquire_queue_mutex(entry->produce_q); in vmci_qp_broker_map()
2246 entry->produce_q, in vmci_qp_broker_map()
2248 qp_release_queue_mutex(entry->produce_q); in vmci_qp_broker_map()
2275 if (entry->produce_q->saved_header != NULL && in qp_save_headers()
2286 if (NULL == entry->produce_q->q_header || in qp_save_headers()
2288 result = qp_host_map_queues(entry->produce_q, entry->consume_q); in qp_save_headers()
2293 memcpy(&entry->saved_produce_q, entry->produce_q->q_header, in qp_save_headers()
2295 entry->produce_q->saved_header = &entry->saved_produce_q; in qp_save_headers()
2344 qp_acquire_queue_mutex(entry->produce_q); in vmci_qp_broker_unmap()
2350 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); in vmci_qp_broker_unmap()
2359 qp_host_unregister_user_memory(entry->produce_q, in vmci_qp_broker_unmap()
2367 qp_release_queue_mutex(entry->produce_q); in vmci_qp_broker_unmap()
2417 qp_acquire_queue_mutex(qpair->produce_q); in qp_lock()
2426 qp_release_queue_mutex(qpair->produce_q); in qp_unlock()
2433 static int qp_map_queue_headers(struct vmci_queue *produce_q, in qp_map_queue_headers() argument
2438 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { in qp_map_queue_headers()
2439 result = qp_host_map_queues(produce_q, consume_q); in qp_map_queue_headers()
2441 return (produce_q->saved_header && in qp_map_queue_headers()
2462 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); in qp_get_queue_headers()
2464 *produce_q_header = qpair->produce_q->q_header; in qp_get_queue_headers()
2466 } else if (qpair->produce_q->saved_header && in qp_get_queue_headers()
2468 *produce_q_header = qpair->produce_q->saved_header; in qp_get_queue_headers()
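qp_map_queue_headers() and qp_get_queue_headers() prefer the live, mapped q_header but fall back to the snapshot kept in saved_header when the backing memory has been unmapped. A minimal sketch of that fallback, with a simplified stand-in layout for the queue and header structs:

#include <stdio.h>

struct q_header { unsigned long long producer_tail, consumer_head; };

/* Simplified stand-in for a queue with a live header and a snapshot. */
struct queue {
        struct q_header *q_header;       /* live mapping, NULL when unmapped */
        struct q_header *saved_header;   /* snapshot taken before unmapping */
        struct q_header saved_copy;      /* storage behind saved_header */
};

static struct q_header *get_header(struct queue *q)
{
        if (q->q_header)
                return q->q_header;      /* mapped: use the live header */
        return q->saved_header;          /* unmapped: may still be NULL */
}

int main(void)
{
        struct queue q = { 0 };

        /* Pretend the header was copied aside before the memory went away. */
        q.saved_copy.producer_tail = 42;
        q.saved_header = &q.saved_copy;

        struct q_header *h = get_header(&q);
        if (h)
                printf("producer tail = %llu\n", h->producer_tail);
        return 0;
}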
2526 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, in qp_enqueue_locked() argument
2537 result = qp_map_queue_headers(produce_q, consume_q); in qp_enqueue_locked()
2541 free_space = vmci_q_header_free_space(produce_q->q_header, in qp_enqueue_locked()
2551 tail = vmci_q_header_producer_tail(produce_q->q_header); in qp_enqueue_locked()
2553 result = qp_memcpy_to_queue_iter(produce_q, tail, from, written); in qp_enqueue_locked()
2559 result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp); in qp_enqueue_locked()
2561 result = qp_memcpy_to_queue_iter(produce_q, 0, from, in qp_enqueue_locked()
2568 vmci_q_header_add_producer_tail(produce_q->q_header, written, in qp_enqueue_locked()
2586 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, in qp_dequeue_locked() argument
2598 result = qp_map_queue_headers(produce_q, consume_q); in qp_dequeue_locked()
2603 produce_q->q_header, in qp_dequeue_locked()
2612 head = vmci_q_header_consumer_head(produce_q->q_header); in qp_dequeue_locked()
2631 vmci_q_header_add_consumer_head(produce_q->q_header, in qp_dequeue_locked()
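qp_enqueue_locked() checks the free space, then copies at the producer tail; when the write would run past the end of the ring it is split into two copies, one up to the end and one from offset 0, before the tail is advanced modulo the queue size. A minimal sketch of that wrap-around copy (free-space checking omitted), with made-up sizes and buffers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define Q_SIZE 8   /* made-up queue size for illustration */

/* Write len bytes at the producer tail, splitting the copy when it would
 * run past the end of the ring; assumes free space was already checked. */
static void ring_write(uint8_t *ring, uint64_t *tail,
                       const uint8_t *src, size_t len)
{
        size_t until_end = Q_SIZE - (size_t)*tail;

        if (len <= until_end) {
                memcpy(ring + *tail, src, len);
        } else {
                memcpy(ring + *tail, src, until_end);           /* tail..end */
                memcpy(ring, src + until_end, len - until_end); /* wrap to 0 */
        }
        *tail = (*tail + len) % Q_SIZE;   /* advance tail modulo queue size */
}

int main(void)
{
        uint8_t ring[Q_SIZE] = { 0 };
        uint64_t tail = 6;
        const uint8_t payload[] = { 1, 2, 3, 4 };

        ring_write(ring, &tail, payload, sizeof(payload));
        printf("new tail = %llu\n", (unsigned long long)tail);
        return 0;
}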
2727 &my_qpair->produce_q, in vmci_qpair_alloc()
3027 result = qp_enqueue_locked(qpair->produce_q, in vmci_qpair_enqueue()
3071 result = qp_dequeue_locked(qpair->produce_q, in vmci_qpair_dequeue()
3116 result = qp_dequeue_locked(qpair->produce_q, in vmci_qpair_peek()
3157 result = qp_enqueue_locked(qpair->produce_q, in vmci_qpair_enquev()
3198 result = qp_dequeue_locked(qpair->produce_q, in vmci_qpair_dequev()
3240 result = qp_dequeue_locked(qpair->produce_q, in vmci_qpair_peekv()