Lines matching refs:vsock (identifier cross-reference into the vhost vsock driver, drivers/vhost/vsock.c)
55 struct vhost_vsock *vsock; in __vhost_vsock_get() local
57 list_for_each_entry(vsock, &vhost_vsock_list, list) { in __vhost_vsock_get()
58 u32 other_cid = vsock->guest_cid; in __vhost_vsock_get()
65 return vsock; in __vhost_vsock_get()
74 struct vhost_vsock *vsock; in vhost_vsock_get() local
77 vsock = __vhost_vsock_get(guest_cid); in vhost_vsock_get()
80 return vsock; in vhost_vsock_get()
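
The two helpers above resolve a guest CID to its struct vhost_vsock by walking the global vhost_vsock_list; by kernel convention the double-underscore variant is the raw walk, expected to run with the list already protected, and vhost_vsock_get() is the locked wrapper. A minimal userspace sketch of that lookup, with a plain singly linked list standing in for list_head/list_for_each_entry() and locking omitted (all names here are illustrative, not the kernel API):

    #include <stddef.h>
    #include <stdint.h>

    struct vsock_model {
        uint32_t guest_cid;
        struct vsock_model *next;
    };

    static struct vsock_model *vsock_list;  /* models vhost_vsock_list */

    /* Models __vhost_vsock_get(): return the instance owning guest_cid. */
    static struct vsock_model *vsock_lookup(uint32_t guest_cid)
    {
        struct vsock_model *vsock;

        for (vsock = vsock_list; vsock; vsock = vsock->next)
            if (vsock->guest_cid == guest_cid)
                return vsock;
        return NULL;
    }
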
84 vhost_transport_do_send_pkt(struct vhost_vsock *vsock, in vhost_transport_do_send_pkt() argument
87 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_do_send_pkt()
97 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
107 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
108 if (list_empty(&vsock->send_pkt_list)) { in vhost_transport_do_send_pkt()
109 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
110 vhost_enable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
114 pkt = list_first_entry(&vsock->send_pkt_list, in vhost_transport_do_send_pkt()
117 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
122 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
123 list_add(&pkt->list, &vsock->send_pkt_list); in vhost_transport_do_send_pkt()
124 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
129 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
130 list_add(&pkt->list, &vsock->send_pkt_list); in vhost_transport_do_send_pkt()
131 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
136 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_transport_do_send_pkt()
137 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
172 val = atomic_dec_return(&vsock->queued_replies); in vhost_transport_do_send_pkt()
187 vhost_signal(&vsock->dev, vq); in vhost_transport_do_send_pkt()
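
vhost_transport_do_send_pkt() above follows the canonical vhost worker loop: disable guest notifications, pop packets off send_pkt_list one at a time under a bottom-half spinlock, push them into the virtqueue, and re-enable notifications only once the list runs dry (lines 108-110); a packet that cannot be delivered is pushed back onto the list (lines 122-124). A simplified userspace model of that dequeue/notify shape, with a pthread mutex standing in for spin_lock_bh() and printf() for the virtqueue copy-out (pkt_model and notify_enabled are stand-ins, not the kernel API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pkt_model {
        int id;
        struct pkt_model *next;
    };

    static pthread_mutex_t send_pkt_list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct pkt_model *send_pkt_list;  /* pending packets */
    static bool notify_enabled;              /* models vhost_enable_notify() state */

    static void do_send_pkt(void)
    {
        for (;;) {
            struct pkt_model *pkt;

            pthread_mutex_lock(&send_pkt_list_lock);
            if (!send_pkt_list) {
                /* Nothing left: re-arm guest notification and stop,
                 * mirroring the list_empty() branch at lines 108-110. */
                pthread_mutex_unlock(&send_pkt_list_lock);
                notify_enabled = true;
                break;
            }
            pkt = send_pkt_list;
            send_pkt_list = pkt->next;   /* pop the head */
            pthread_mutex_unlock(&send_pkt_list_lock);

            printf("sent pkt %d\n", pkt->id);  /* stands in for the vq copy */
            free(pkt);
        }
    }
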
199 struct vhost_vsock *vsock; in vhost_transport_send_pkt_work() local
201 vsock = container_of(work, struct vhost_vsock, send_pkt_work); in vhost_transport_send_pkt_work()
202 vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt_work()
204 vhost_transport_do_send_pkt(vsock, vq); in vhost_transport_send_pkt_work()
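
The work handler recovers its device with container_of(), the standard trick for going from a pointer to an embedded member back to the enclosing structure. A self-contained illustration, with the macro written out since userspace has no <linux/kernel.h> (the model types are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_model { int pending; };

    struct vsock_model {
        int guest_cid;
        struct work_model send_pkt_work;  /* embedded, as in struct vhost_vsock */
    };

    static void send_pkt_work_fn(struct work_model *work)
    {
        /* Recover the containing object from the member pointer. */
        struct vsock_model *vsock =
            container_of(work, struct vsock_model, send_pkt_work);
        printf("work for guest_cid %d\n", vsock->guest_cid);
    }

    int main(void)
    {
        struct vsock_model v = { .guest_cid = 3 };
        send_pkt_work_fn(&v.send_pkt_work);
        return 0;
    }
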
210 struct vhost_vsock *vsock; in vhost_transport_send_pkt() local
214 vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); in vhost_transport_send_pkt()
215 if (!vsock) { in vhost_transport_send_pkt()
221 atomic_inc(&vsock->queued_replies); in vhost_transport_send_pkt()
223 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_send_pkt()
224 list_add_tail(&pkt->list, &vsock->send_pkt_list); in vhost_transport_send_pkt()
225 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_send_pkt()
227 vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); in vhost_transport_send_pkt()
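
vhost_transport_send_pkt() is the producer half of that queue: look up the destination by dst_cid, bump queued_replies if the packet is a reply, append at the tail under the same lock, and kick the worker with vhost_work_queue(). A hedged sketch of the enqueue-then-kick shape, with a condition variable playing the role of the work queue (again illustrative names, not the vhost API):

    #include <pthread.h>
    #include <stddef.h>

    struct pkt { struct pkt *next; };

    static pthread_mutex_t send_pkt_list_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t worker_kick = PTHREAD_COND_INITIALIZER;
    static struct pkt *list_head, *list_tail;

    /* Models vhost_transport_send_pkt(): tail-append under the lock,
     * then wake the worker (vhost_work_queue() in the driver). */
    static void send_pkt(struct pkt *p)
    {
        p->next = NULL;
        pthread_mutex_lock(&send_pkt_list_lock);
        if (list_tail)
            list_tail->next = p;
        else
            list_head = p;
        list_tail = p;
        pthread_mutex_unlock(&send_pkt_list_lock);
        pthread_cond_signal(&worker_kick);
    }
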
234 struct vhost_vsock *vsock; in vhost_transport_cancel_pkt() local
240 vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); in vhost_transport_cancel_pkt()
241 if (!vsock) in vhost_transport_cancel_pkt()
244 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_cancel_pkt()
245 list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { in vhost_transport_cancel_pkt()
250 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_cancel_pkt()
260 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_cancel_pkt()
263 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); in vhost_transport_cancel_pkt()
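
vhost_transport_cancel_pkt() uses list_for_each_entry_safe() because it unlinks entries while iterating, then subtracts the number of cancelled replies from queued_replies in one atomic_sub_return(). The same safe-removal idiom in portable C, using a pointer-to-pointer walk with the lock omitted for brevity (matching on an owner field models the per-socket filter in the full source):

    #include <stdlib.h>

    struct pkt {
        void *owner;            /* models the packet's owning socket */
        int is_reply;
        struct pkt *next;
    };

    /* Remove every packet belonging to 'owner'; return how many removed
     * entries were replies, like the cnt fed to atomic_sub_return() above. */
    static int cancel_pkt(struct pkt **head, void *owner)
    {
        struct pkt **pp = head;
        int cnt = 0;

        while (*pp) {
            struct pkt *p = *pp;

            if (p->owner == owner) {
                *pp = p->next;  /* unlink before freeing */
                cnt += p->is_reply;
                free(p);
            } else {
                pp = &p->next;
            }
        }
        return cnt;
    }
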
331 static bool vhost_vsock_more_replies(struct vhost_vsock *vsock) in vhost_vsock_more_replies() argument
333 struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_more_replies()
337 val = atomic_read(&vsock->queued_replies); in vhost_vsock_more_replies()
346 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, in vhost_vsock_handle_tx_kick() local
358 vhost_disable_notify(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
362 if (!vhost_vsock_more_replies(vsock)) { in vhost_vsock_handle_tx_kick()
376 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_vsock_handle_tx_kick()
377 vhost_disable_notify(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
395 if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) in vhost_vsock_handle_tx_kick()
406 vhost_signal(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
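
vhost_vsock_more_replies() is the back-pressure valve: the TX kick handler stops reading guest requests once queued_replies would exceed what the TX ring can absorb, and resumes when replies drain. A minimal model of the check with C11 atomics in place of atomic_t (the constant stands in for vq->num, which the excerpt does not show):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TX_VQ_NUM 256                /* stands in for vq->num */

    static atomic_int queued_replies;

    /* Models vhost_vsock_more_replies(): is there room for another reply? */
    static bool more_replies_allowed(void)
    {
        return atomic_load(&queued_replies) < TX_VQ_NUM;
    }
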
416 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, in vhost_vsock_handle_rx_kick() local
419 vhost_transport_do_send_pkt(vsock, vq); in vhost_vsock_handle_rx_kick()
422 static int vhost_vsock_start(struct vhost_vsock *vsock) in vhost_vsock_start() argument
428 mutex_lock(&vsock->dev.mutex); in vhost_vsock_start()
430 ret = vhost_dev_check_owner(&vsock->dev); in vhost_vsock_start()
434 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
435 vq = &vsock->vqs[i]; in vhost_vsock_start()
445 vq->private_data = vsock; in vhost_vsock_start()
454 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_start()
461 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
462 vq = &vsock->vqs[i]; in vhost_vsock_start()
469 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_start()
473 static int vhost_vsock_stop(struct vhost_vsock *vsock) in vhost_vsock_stop() argument
478 mutex_lock(&vsock->dev.mutex); in vhost_vsock_stop()
480 ret = vhost_dev_check_owner(&vsock->dev); in vhost_vsock_stop()
484 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_stop()
485 struct vhost_virtqueue *vq = &vsock->vqs[i]; in vhost_vsock_stop()
493 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_stop()
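
vhost_vsock_start() and vhost_vsock_stop() follow the usual vhost lifecycle shape: take the device mutex, check that the caller owns the device, then set or clear every virtqueue's private_data pointer, which is what the handlers test to decide whether the vq is live; start also unwinds the already-enabled vqs if one fails mid-loop (lines 461-462). A hedged outline of that shape, minus the per-vq mutexes and the rollback path (-EPERM is only a plausible stand-in for the owner-check failure):

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    #define NVQS 2

    struct vq_model { void *private_data; };

    struct dev_model {
        pthread_mutex_t mutex;
        struct vq_model vqs[NVQS];
        int owner_ok;                    /* models vhost_dev_check_owner() */
    };

    static int dev_start(struct dev_model *d)
    {
        int i, ret = 0;

        pthread_mutex_lock(&d->mutex);
        if (!d->owner_ok) {
            ret = -EPERM;
            goto out;
        }
        for (i = 0; i < NVQS; i++)
            d->vqs[i].private_data = d;  /* marks the vq as live */
    out:
        pthread_mutex_unlock(&d->mutex);
        return ret;
    }

    static int dev_stop(struct dev_model *d)
    {
        int i;

        pthread_mutex_lock(&d->mutex);
        for (i = 0; i < NVQS; i++)
            d->vqs[i].private_data = NULL;  /* vq no longer serviced */
        pthread_mutex_unlock(&d->mutex);
        return 0;
    }
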
497 static void vhost_vsock_free(struct vhost_vsock *vsock) in vhost_vsock_free() argument
499 kvfree(vsock); in vhost_vsock_free()
505 struct vhost_vsock *vsock; in vhost_vsock_dev_open() local
511 vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL); in vhost_vsock_dev_open()
512 if (!vsock) in vhost_vsock_dev_open()
515 vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL); in vhost_vsock_dev_open()
521 vsock->guest_cid = 0; /* no CID assigned yet */ in vhost_vsock_dev_open()
523 atomic_set(&vsock->queued_replies, 0); in vhost_vsock_dev_open()
525 vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_dev_open()
526 vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX]; in vhost_vsock_dev_open()
527 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; in vhost_vsock_dev_open()
528 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; in vhost_vsock_dev_open()
530 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs)); in vhost_vsock_dev_open()
532 file->private_data = vsock; in vhost_vsock_dev_open()
533 spin_lock_init(&vsock->send_pkt_list_lock); in vhost_vsock_dev_open()
534 INIT_LIST_HEAD(&vsock->send_pkt_list); in vhost_vsock_dev_open()
535 vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work); in vhost_vsock_dev_open()
538 list_add_tail(&vsock->list, &vhost_vsock_list); in vhost_vsock_dev_open()
543 vhost_vsock_free(vsock); in vhost_vsock_dev_open()
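
The open path allocates the device with kvmalloc(GFP_KERNEL | __GFP_RETRY_MAYFAIL), so the allocation may fall back to vmalloc rather than fail under fragmentation (which is also why vhost_vsock_free() is kvfree()), then initializes the vq kick handlers, the reply counter, and the send list with its lock before publishing the instance on vhost_vsock_list. The init order in miniature, with malloc() standing in for kvmalloc() and the list publish and error unwinding elided:

    #include <pthread.h>
    #include <stdlib.h>

    struct pkt { struct pkt *next; };

    struct vsock_model {
        unsigned long long guest_cid;
        pthread_mutex_t send_pkt_list_lock;
        struct pkt *send_pkt_list;
    };

    static struct vsock_model *dev_open(void)
    {
        struct vsock_model *vsock = malloc(sizeof(*vsock));

        if (!vsock)
            return NULL;
        vsock->guest_cid = 0;            /* no CID assigned yet (line 521) */
        vsock->send_pkt_list = NULL;
        pthread_mutex_init(&vsock->send_pkt_list_lock, NULL);
        /* the driver then does vhost_dev_init(), vhost_work_init(),
         * and list_add_tail() onto vhost_vsock_list */
        return vsock;
    }
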
547 static void vhost_vsock_flush(struct vhost_vsock *vsock) in vhost_vsock_flush() argument
551 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) in vhost_vsock_flush()
552 if (vsock->vqs[i].handle_kick) in vhost_vsock_flush()
553 vhost_poll_flush(&vsock->vqs[i].poll); in vhost_vsock_flush()
554 vhost_work_flush(&vsock->dev, &vsock->send_pkt_work); in vhost_vsock_flush()
577 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_dev_release() local
580 list_del(&vsock->list); in vhost_vsock_dev_release()
587 vhost_vsock_stop(vsock); in vhost_vsock_dev_release()
588 vhost_vsock_flush(vsock); in vhost_vsock_dev_release()
589 vhost_dev_stop(&vsock->dev); in vhost_vsock_dev_release()
591 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_vsock_dev_release()
592 while (!list_empty(&vsock->send_pkt_list)) { in vhost_vsock_dev_release()
595 pkt = list_first_entry(&vsock->send_pkt_list, in vhost_vsock_dev_release()
600 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_vsock_dev_release()
602 vhost_dev_cleanup(&vsock->dev); in vhost_vsock_dev_release()
603 kfree(vsock->dev.vqs); in vhost_vsock_dev_release()
604 vhost_vsock_free(vsock); in vhost_vsock_dev_release()
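
Release is the mirror of open, in strict order: unlink from the global list so no new packets can be queued, stop and flush the vqs, stop the vhost device, then drain anything still parked on send_pkt_list before freeing the vqs array and the device itself. The drain loop at lines 591-600, transcribed into the same userspace model (free() in place of the driver's packet free):

    #include <pthread.h>
    #include <stdlib.h>

    struct pkt { struct pkt *next; };

    struct vsock_model {
        pthread_mutex_t send_pkt_list_lock;
        struct pkt *send_pkt_list;
    };

    /* Models the while (!list_empty(...)) drain in vhost_vsock_dev_release(). */
    static void drain_send_list(struct vsock_model *vsock)
    {
        pthread_mutex_lock(&vsock->send_pkt_list_lock);
        while (vsock->send_pkt_list) {
            struct pkt *pkt = vsock->send_pkt_list;

            vsock->send_pkt_list = pkt->next;  /* list_del() equivalent */
            free(pkt);
        }
        pthread_mutex_unlock(&vsock->send_pkt_list_lock);
    }
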
608 static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) in vhost_vsock_set_cid() argument
624 if (other && other != vsock) { in vhost_vsock_set_cid()
628 vsock->guest_cid = guest_cid; in vhost_vsock_set_cid()
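
vhost_vsock_set_cid() enforces CID uniqueness: if the lookup finds a different instance already holding the requested CID, the assignment is refused (the full function returns -EADDRINUSE there; the excerpt only shows the test). Building on the lookup sketch earlier:

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    struct vsock_model { uint64_t guest_cid; struct vsock_model *next; };

    static struct vsock_model *vsock_list;

    static struct vsock_model *vsock_lookup(uint64_t cid)
    {
        struct vsock_model *v;

        for (v = vsock_list; v; v = v->next)
            if (v->guest_cid == cid)
                return v;
        return NULL;
    }

    /* Models vhost_vsock_set_cid(): reject a CID owned by another instance. */
    static int set_cid(struct vsock_model *vsock, uint64_t guest_cid)
    {
        struct vsock_model *other = vsock_lookup(guest_cid);

        if (other && other != vsock)
            return -EADDRINUSE;  /* CID already claimed elsewhere */
        vsock->guest_cid = guest_cid;
        return 0;
    }
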
634 static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features) in vhost_vsock_set_features() argument
642 mutex_lock(&vsock->dev.mutex); in vhost_vsock_set_features()
644 !vhost_log_access_ok(&vsock->dev)) { in vhost_vsock_set_features()
645 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_set_features()
649 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_set_features()
650 vq = &vsock->vqs[i]; in vhost_vsock_set_features()
655 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_set_features()
662 struct vhost_vsock *vsock = f->private_data; in vhost_vsock_dev_ioctl() local
673 return vhost_vsock_set_cid(vsock, guest_cid); in vhost_vsock_dev_ioctl()
678 return vhost_vsock_start(vsock); in vhost_vsock_dev_ioctl()
680 return vhost_vsock_stop(vsock); in vhost_vsock_dev_ioctl()
689 return vhost_vsock_set_features(vsock, features); in vhost_vsock_dev_ioctl()
691 mutex_lock(&vsock->dev.mutex); in vhost_vsock_dev_ioctl()
692 r = vhost_dev_ioctl(&vsock->dev, ioctl, argp); in vhost_vsock_dev_ioctl()
694 r = vhost_vring_ioctl(&vsock->dev, ioctl, argp); in vhost_vsock_dev_ioctl()
696 vhost_vsock_flush(vsock); in vhost_vsock_dev_ioctl()
697 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_dev_ioctl()
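
vhost_vsock_set_features() follows the same mutex-guarded pattern, propagating the negotiated feature bits to each vq, and vhost_vsock_dev_ioctl() is the front door that ties it all together: vsock-specific commands are handled in the switch, and anything else falls through to the generic vhost_dev_ioctl(), with vhost_vring_ioctl() tried when that returns -ENOIOCTLCMD (line 694). A compact model of the two-stage dispatch (ENOIOCTLCMD is kernel-internal, so it is defined locally; the command names are illustrative):

    #include <errno.h>

    #ifndef ENOIOCTLCMD
    #define ENOIOCTLCMD 515              /* kernel-internal "not my ioctl" code */
    #endif

    enum { CMD_SET_CID = 1, CMD_START, CMD_STOP, CMD_GENERIC };

    static int dev_level_ioctl(int cmd)  /* models vhost_dev_ioctl() */
    {
        return cmd == CMD_GENERIC ? 0 : -ENOIOCTLCMD;
    }

    static int vring_level_ioctl(int cmd)  /* models vhost_vring_ioctl() */
    {
        (void)cmd;
        return 0;
    }

    static int dev_ioctl(int cmd)
    {
        switch (cmd) {
        case CMD_SET_CID:
        case CMD_START:
        case CMD_STOP:
            return 0;                    /* handled by the vsock driver itself */
        default: {
            int r = dev_level_ioctl(cmd);

            if (r == -ENOIOCTLCMD)
                r = vring_level_ioctl(cmd);
            /* in the driver: else vhost_vsock_flush(vsock) */
            return r;
        }
        }
    }
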