Lines Matching refs:vsock (drivers/vhost/vsock.c)
73 struct vhost_vsock *vsock; in vhost_vsock_get() local
75 hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) { in vhost_vsock_get()
76 u32 other_cid = vsock->guest_cid; in vhost_vsock_get()
83 return vsock; in vhost_vsock_get()
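
The lines above (73-83) trace the per-CID lookup: each vhost_vsock instance sits in vhost_vsock_hash keyed by its guest_cid, and vhost_vsock_get() walks only the matching bucket with hash_for_each_possible_rcu(). Below is a minimal userspace sketch of that bucket-walk access pattern, assuming a plain singly linked chain and bit-mask hashing instead of the kernel's RCU-protected hlist; every name in it is illustrative, not taken from the driver.

#include <stdint.h>
#include <stddef.h>

#define CID_HASH_BITS 8
#define CID_HASH_SIZE (1u << CID_HASH_BITS)

struct vsock_entry {                     /* stand-in for struct vhost_vsock */
	struct vsock_entry *next;        /* bucket chain */
	uint32_t guest_cid;
};

static struct vsock_entry *cid_hash[CID_HASH_SIZE];

/* Walk only the bucket that guest_cid hashes to, the same access pattern as
 * hash_for_each_possible_rcu(vhost_vsock_hash, ...) in the matched lines. */
static struct vsock_entry *vsock_lookup(uint32_t guest_cid)
{
	struct vsock_entry *e;

	for (e = cid_hash[guest_cid & (CID_HASH_SIZE - 1)]; e; e = e->next)
		if (e->guest_cid == guest_cid)
			return e;
	return NULL;
}
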
91 vhost_transport_do_send_pkt(struct vhost_vsock *vsock, in vhost_transport_do_send_pkt() argument
94 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_do_send_pkt()
108 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
119 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
120 if (list_empty(&vsock->send_pkt_list)) { in vhost_transport_do_send_pkt()
121 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
122 vhost_enable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
126 pkt = list_first_entry(&vsock->send_pkt_list, in vhost_transport_do_send_pkt()
129 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
134 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
135 list_add(&pkt->list, &vsock->send_pkt_list); in vhost_transport_do_send_pkt()
136 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
141 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
142 list_add(&pkt->list, &vsock->send_pkt_list); in vhost_transport_do_send_pkt()
143 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
148 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_transport_do_send_pkt()
149 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
240 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
241 list_add(&pkt->list, &vsock->send_pkt_list); in vhost_transport_do_send_pkt()
242 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
247 val = atomic_dec_return(&vsock->queued_replies); in vhost_transport_do_send_pkt()
260 vhost_signal(&vsock->dev, vq); in vhost_transport_do_send_pkt()
272 struct vhost_vsock *vsock; in vhost_transport_send_pkt_work() local
274 vsock = container_of(work, struct vhost_vsock, send_pkt_work); in vhost_transport_send_pkt_work()
275 vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt_work()
277 vhost_transport_do_send_pkt(vsock, vq); in vhost_transport_send_pkt_work()
283 struct vhost_vsock *vsock; in vhost_transport_send_pkt() local
289 vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); in vhost_transport_send_pkt()
290 if (!vsock) { in vhost_transport_send_pkt()
297 atomic_inc(&vsock->queued_replies); in vhost_transport_send_pkt()
299 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_send_pkt()
300 list_add_tail(&pkt->list, &vsock->send_pkt_list); in vhost_transport_send_pkt()
301 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_send_pkt()
303 vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); in vhost_transport_send_pkt()
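
Lines 91-303 above trace both halves of the host-to-guest send path: vhost_transport_send_pkt() appends to the tail of send_pkt_list under send_pkt_list_lock and queues send_pkt_work, while vhost_transport_do_send_pkt() pops from the head, drops the lock before filling guest RX buffers, and pushes the packet back onto the head when no buffer is available (lines 134-136). The following is a hedged userspace model of that producer/consumer discipline, with a pthread mutex in place of the bottom-half spinlock and stubs for the vhost-specific steps; all names are illustrative, and the queued_replies accounting is left to the sketch after line 422.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct pkt {
	struct pkt *next;
};

static pthread_mutex_t send_pkt_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pkt *send_pkt_list;        /* head = oldest packet */
static struct pkt *send_pkt_tail;

static void queue_send_pkt_work(void)    /* stand-in for vhost_work_queue() */
{
}

/* Stub for copying one packet into a guest RX descriptor; returning false
 * models the "no buffer available" case. */
static bool deliver_one(struct pkt *p)
{
	(void)p;
	return true;
}

/* Producer side, cf. vhost_transport_send_pkt(): append at the tail, then
 * kick the worker that drains the list. */
static void send_pkt(struct pkt *p)
{
	pthread_mutex_lock(&send_pkt_list_lock);
	p->next = NULL;
	if (send_pkt_tail)
		send_pkt_tail->next = p;
	else
		send_pkt_list = p;
	send_pkt_tail = p;
	pthread_mutex_unlock(&send_pkt_list_lock);

	queue_send_pkt_work();
}

/* Consumer side, cf. vhost_transport_do_send_pkt(): pop the head under the
 * lock, deliver without holding it, requeue at the head on failure. */
static void do_send_pkts(void)
{
	for (;;) {
		struct pkt *p;

		pthread_mutex_lock(&send_pkt_list_lock);
		p = send_pkt_list;
		if (!p) {
			pthread_mutex_unlock(&send_pkt_list_lock);
			break;           /* empty: re-enable guest notifications */
		}
		send_pkt_list = p->next;
		if (!send_pkt_list)
			send_pkt_tail = NULL;
		pthread_mutex_unlock(&send_pkt_list_lock);

		if (!deliver_one(p)) {
			pthread_mutex_lock(&send_pkt_list_lock);
			p->next = send_pkt_list;
			send_pkt_list = p;
			if (!send_pkt_tail)
				send_pkt_tail = p;
			pthread_mutex_unlock(&send_pkt_list_lock);
			break;
		}
	}
}

Dropping the list lock before the copy matters in the real driver: filling guest buffers can fault on guest memory, which must not happen under a bottom-half spinlock.
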
312 struct vhost_vsock *vsock; in vhost_transport_cancel_pkt() local
321 vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); in vhost_transport_cancel_pkt()
322 if (!vsock) in vhost_transport_cancel_pkt()
325 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_cancel_pkt()
326 list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { in vhost_transport_cancel_pkt()
331 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_cancel_pkt()
341 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_cancel_pkt()
344 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); in vhost_transport_cancel_pkt()
416 static bool vhost_vsock_more_replies(struct vhost_vsock *vsock) in vhost_vsock_more_replies() argument
418 struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_more_replies()
422 val = atomic_read(&vsock->queued_replies); in vhost_vsock_more_replies()
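
Lines 240-247, 297-344 and 416-422 all touch the queued_replies counter that throttles host-generated replies against the size of the guest's TX ring: queuing a reply packet increments it (line 297), delivering or cancelling replies decrements it (lines 247 and 344), and vhost_vsock_more_replies() stops reading new guest packets once the count reaches the ring size. A small sketch of that accounting follows, assuming a C11 atomic counter and an invented constant standing in for vq->num.

#include <stdatomic.h>
#include <stdbool.h>

#define TX_RING_SIZE 256                 /* illustrative stand-in for vq->num */

static atomic_int queued_replies;

/* A reply was queued for the guest (cf. atomic_inc at line 297). */
static void reply_queued(void)
{
	atomic_fetch_add(&queued_replies, 1);
}

/* Replies were delivered or cancelled (cf. lines 247 and 344). */
static void replies_done(int cnt)
{
	atomic_fetch_sub(&queued_replies, cnt);
}

/* cf. vhost_vsock_more_replies(): keep consuming guest TX packets only while
 * there is room to queue the replies they may generate. */
static bool more_replies_allowed(void)
{
	return atomic_load(&queued_replies) < TX_RING_SIZE;
}
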
479 struct vhost_vsock *vsock; in vhost_transport_seqpacket_allow() local
483 vsock = vhost_vsock_get(remote_cid); in vhost_transport_seqpacket_allow()
485 if (vsock) in vhost_transport_seqpacket_allow()
486 seqpacket_allow = vsock->seqpacket_allow; in vhost_transport_seqpacket_allow()
497 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, in vhost_vsock_handle_tx_kick() local
512 vhost_disable_notify(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
516 if (!vhost_vsock_more_replies(vsock)) { in vhost_vsock_handle_tx_kick()
530 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_vsock_handle_tx_kick()
531 vhost_disable_notify(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
549 if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid && in vhost_vsock_handle_tx_kick()
564 vhost_signal(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
574 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, in vhost_vsock_handle_rx_kick() local
577 vhost_transport_do_send_pkt(vsock, vq); in vhost_vsock_handle_rx_kick()
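
The vhost_disable_notify()/vhost_enable_notify() pairs at lines 108, 148-149, 512 and 530-531 are the usual virtqueue polling dance: notifications stay off while the worker drains the ring, and when the ring looks empty they are re-enabled and the ring is re-checked to close the race with a packet queued after the last check. A compressed userspace sketch of that shape, with the vhost primitives reduced to stubs (all names are illustrative):

#include <stdbool.h>

static void guest_notify_disable(void)   /* cf. vhost_disable_notify() */
{
}

/* Models vhost_enable_notify(): re-arm guest notifications and report whether
 * new work raced in while they were off.  The stub reports "no". */
static bool guest_notify_enable(void)
{
	return false;
}

/* Models popping and handling one descriptor; the stub reports an empty ring. */
static bool pop_and_handle_one(void)
{
	return false;
}

/* The loop shape shared by vhost_vsock_handle_tx_kick() and
 * vhost_transport_do_send_pkt(). */
static void handle_kick(void)
{
	guest_notify_disable();
	for (;;) {
		if (pop_and_handle_one())
			continue;
		if (guest_notify_enable()) {
			guest_notify_disable();
			continue;        /* work slipped in, keep polling */
		}
		break;                   /* truly idle, wait for the next kick */
	}
}
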
580 static int vhost_vsock_start(struct vhost_vsock *vsock) in vhost_vsock_start() argument
586 mutex_lock(&vsock->dev.mutex); in vhost_vsock_start()
588 ret = vhost_dev_check_owner(&vsock->dev); in vhost_vsock_start()
592 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
593 vq = &vsock->vqs[i]; in vhost_vsock_start()
603 vhost_vq_set_backend(vq, vsock); in vhost_vsock_start()
615 vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); in vhost_vsock_start()
617 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_start()
624 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
625 vq = &vsock->vqs[i]; in vhost_vsock_start()
632 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_start()
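
Lines 580-632 trace vhost_vsock_start(): under dev.mutex it validates and attaches each virtqueue backend, kicks send_pkt_work for packets queued before the device was started, and on any failure detaches every queue again before returning. A stripped-down sketch of that attach-or-unwind shape, with the locking and vhost calls reduced to stubs; names are illustrative.

#include <stdbool.h>
#include <stddef.h>

#define NUM_VQS 2                        /* TX and RX, as in the listing */

static void *vq_backend[NUM_VQS];

/* Stub for the per-queue validation done before attaching. */
static bool vq_access_ok(int i)
{
	(void)i;
	return true;
}

/* cf. lines 592-632: attach the device as the backend of each virtqueue and,
 * if any queue fails validation, detach every queue before returning. */
static int device_start(void *dev)
{
	int i;

	for (i = 0; i < NUM_VQS; i++) {
		if (!vq_access_ok(i))
			goto err;
		vq_backend[i] = dev;
	}
	return 0;

err:
	for (i = 0; i < NUM_VQS; i++)
		vq_backend[i] = NULL;
	return -1;
}
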
636 static int vhost_vsock_stop(struct vhost_vsock *vsock) in vhost_vsock_stop() argument
641 mutex_lock(&vsock->dev.mutex); in vhost_vsock_stop()
643 ret = vhost_dev_check_owner(&vsock->dev); in vhost_vsock_stop()
647 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_stop()
648 struct vhost_virtqueue *vq = &vsock->vqs[i]; in vhost_vsock_stop()
656 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_stop()
660 static void vhost_vsock_free(struct vhost_vsock *vsock) in vhost_vsock_free() argument
662 kvfree(vsock); in vhost_vsock_free()
668 struct vhost_vsock *vsock; in vhost_vsock_dev_open() local
674 vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL); in vhost_vsock_dev_open()
675 if (!vsock) in vhost_vsock_dev_open()
678 vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL); in vhost_vsock_dev_open()
684 vsock->guest_cid = 0; /* no CID assigned yet */ in vhost_vsock_dev_open()
686 atomic_set(&vsock->queued_replies, 0); in vhost_vsock_dev_open()
688 vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_dev_open()
689 vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX]; in vhost_vsock_dev_open()
690 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; in vhost_vsock_dev_open()
691 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; in vhost_vsock_dev_open()
693 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), in vhost_vsock_dev_open()
697 file->private_data = vsock; in vhost_vsock_dev_open()
698 spin_lock_init(&vsock->send_pkt_list_lock); in vhost_vsock_dev_open()
699 INIT_LIST_HEAD(&vsock->send_pkt_list); in vhost_vsock_dev_open()
700 vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work); in vhost_vsock_dev_open()
704 vhost_vsock_free(vsock); in vhost_vsock_dev_open()
708 static void vhost_vsock_flush(struct vhost_vsock *vsock) in vhost_vsock_flush() argument
712 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) in vhost_vsock_flush()
713 if (vsock->vqs[i].handle_kick) in vhost_vsock_flush()
714 vhost_poll_flush(&vsock->vqs[i].poll); in vhost_vsock_flush()
715 vhost_work_dev_flush(&vsock->dev); in vhost_vsock_flush()
746 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_dev_release() local
749 if (vsock->guest_cid) in vhost_vsock_dev_release()
750 hash_del_rcu(&vsock->hash); in vhost_vsock_dev_release()
760 vhost_vsock_stop(vsock); in vhost_vsock_dev_release()
761 vhost_vsock_flush(vsock); in vhost_vsock_dev_release()
762 vhost_dev_stop(&vsock->dev); in vhost_vsock_dev_release()
764 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_vsock_dev_release()
765 while (!list_empty(&vsock->send_pkt_list)) { in vhost_vsock_dev_release()
768 pkt = list_first_entry(&vsock->send_pkt_list, in vhost_vsock_dev_release()
773 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_vsock_dev_release()
775 vhost_dev_cleanup(&vsock->dev); in vhost_vsock_dev_release()
776 kfree(vsock->dev.vqs); in vhost_vsock_dev_release()
777 vhost_vsock_free(vsock); in vhost_vsock_dev_release()
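
Lines 746-777 show the release order: unhash the device so packets can no longer be routed to it, stop and flush the virtqueues, stop the vhost device, drain whatever is still sitting on send_pkt_list, then clean up and free. A sketch of just the final drain step, under the same userspace assumptions as the earlier send-path model (the lock is held across the whole drain, as in the matched lines):

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

struct pkt {
	struct pkt *next;
};

static pthread_mutex_t send_pkt_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pkt *send_pkt_list;

/* cf. lines 764-773: with the workers stopped and flushed, free every packet
 * still queued before the device itself is freed. */
static void drain_send_pkt_list(void)
{
	pthread_mutex_lock(&send_pkt_list_lock);
	while (send_pkt_list) {
		struct pkt *p = send_pkt_list;

		send_pkt_list = p->next;
		free(p);
	}
	pthread_mutex_unlock(&send_pkt_list_lock);
}
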
781 static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) in vhost_vsock_set_cid() argument
803 if (other && other != vsock) { in vhost_vsock_set_cid()
808 if (vsock->guest_cid) in vhost_vsock_set_cid()
809 hash_del_rcu(&vsock->hash); in vhost_vsock_set_cid()
811 vsock->guest_cid = guest_cid; in vhost_vsock_set_cid()
812 hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid); in vhost_vsock_set_cid()
818 static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features) in vhost_vsock_set_features() argument
826 mutex_lock(&vsock->dev.mutex); in vhost_vsock_set_features()
828 !vhost_log_access_ok(&vsock->dev)) { in vhost_vsock_set_features()
833 if (vhost_init_device_iotlb(&vsock->dev, true)) in vhost_vsock_set_features()
838 vsock->seqpacket_allow = true; in vhost_vsock_set_features()
840 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_set_features()
841 vq = &vsock->vqs[i]; in vhost_vsock_set_features()
846 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_set_features()
850 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_set_features()
857 struct vhost_vsock *vsock = f->private_data; in vhost_vsock_dev_ioctl() local
868 return vhost_vsock_set_cid(vsock, guest_cid); in vhost_vsock_dev_ioctl()
873 return vhost_vsock_start(vsock); in vhost_vsock_dev_ioctl()
875 return vhost_vsock_stop(vsock); in vhost_vsock_dev_ioctl()
884 return vhost_vsock_set_features(vsock, features); in vhost_vsock_dev_ioctl()
895 vhost_set_backend_features(&vsock->dev, features); in vhost_vsock_dev_ioctl()
898 mutex_lock(&vsock->dev.mutex); in vhost_vsock_dev_ioctl()
899 r = vhost_dev_ioctl(&vsock->dev, ioctl, argp); in vhost_vsock_dev_ioctl()
901 r = vhost_vring_ioctl(&vsock->dev, ioctl, argp); in vhost_vsock_dev_ioctl()
903 vhost_vsock_flush(vsock); in vhost_vsock_dev_ioctl()
904 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_dev_ioctl()
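
vhost_vsock_dev_ioctl() is the entry point user space uses to drive everything above: the CID ioctl lands in vhost_vsock_set_cid() (line 868), the running ioctl in vhost_vsock_start()/vhost_vsock_stop() (lines 873-875), and the remaining requests fall through to the generic vhost device and vring handlers (lines 899-901). Below is a minimal sketch of how a VMM might issue that sequence through the uapi ioctls VHOST_SET_OWNER, VHOST_VSOCK_SET_GUEST_CID and VHOST_VSOCK_SET_RUNNING (the constant names come from linux/vhost.h, not from the matched lines); in practice SET_RUNNING fails until memory regions and vrings have been configured through the generic vhost ioctls, which are elided here, so errors are only reported.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
	__u64 guest_cid = 3;             /* arbitrary example CID (must be > 2) */
	int running = 1;
	int fd = open("/dev/vhost-vsock", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-vsock");
		return 1;
	}

	/* Become the owner of this vhost device before any other ioctl. */
	if (ioctl(fd, VHOST_SET_OWNER) < 0)
		perror("VHOST_SET_OWNER");

	/* Handled by vhost_vsock_set_cid(), cf. line 868 above. */
	if (ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid) < 0)
		perror("VHOST_VSOCK_SET_GUEST_CID");

	/* A real VMM would set memory regions and vring addresses here via the
	 * generic vhost ioctls before starting the device. */

	/* Handled by vhost_vsock_start()/vhost_vsock_stop(), cf. lines 873-875. */
	if (ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running) < 0)
		perror("VHOST_VSOCK_SET_RUNNING");

	close(fd);
	return 0;
}
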
912 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_chr_read_iter() local
913 struct vhost_dev *dev = &vsock->dev; in vhost_vsock_chr_read_iter()
923 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_chr_write_iter() local
924 struct vhost_dev *dev = &vsock->dev; in vhost_vsock_chr_write_iter()
931 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_chr_poll() local
932 struct vhost_dev *dev = &vsock->dev; in vhost_vsock_chr_poll()