Lines Matching refs:vsock

Ident-search hits for vsock; each entry shows the file line number, the matching source line, and the enclosing function (the trailing local/argument tags come from the cross-reference tool). The function names identify the file as the vhost vsock transport, drivers/vhost/vsock.c in the kernel tree.
66 struct vhost_vsock *vsock; in vhost_vsock_get() local
68 hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) { in vhost_vsock_get()
69 u32 other_cid = vsock->guest_cid; in vhost_vsock_get()
76 return vsock; in vhost_vsock_get()
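The hits above show the hash-table walk but elide the CID comparison itself. A minimal reconstruction of the lookup, where the skip/match branches and the RCU locking requirement are assumptions filled in around the matched lines:

/* Sketch: callers are assumed to hold rcu_read_lock(); the two if-branches
 * are reconstructed, not shown in the hits above. */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
                u32 other_cid = vsock->guest_cid;

                /* Skip instances that have no CID assigned yet. */
                if (other_cid == 0)
                        continue;

                if (other_cid == guest_cid)
                        return vsock;
        }

        return NULL;
}

Hashing on the CID keeps the lookup O(1) even with many guests, and the RCU walk lets the hot send path avoid taking a global lock.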
84 vhost_transport_do_send_pkt(struct vhost_vsock *vsock, in vhost_transport_do_send_pkt() argument
87 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_do_send_pkt()
98 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
108 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
109 if (list_empty(&vsock->send_pkt_list)) { in vhost_transport_do_send_pkt()
110 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
111 vhost_enable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
115 pkt = list_first_entry(&vsock->send_pkt_list, in vhost_transport_do_send_pkt()
118 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
123 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
124 list_add(&pkt->list, &vsock->send_pkt_list); in vhost_transport_do_send_pkt()
125 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
130 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
131 list_add(&pkt->list, &vsock->send_pkt_list); in vhost_transport_do_send_pkt()
132 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
137 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_transport_do_send_pkt()
138 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
199 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
200 list_add(&pkt->list, &vsock->send_pkt_list); in vhost_transport_do_send_pkt()
201 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_do_send_pkt()
206 val = atomic_dec_return(&vsock->queued_replies); in vhost_transport_do_send_pkt()
219 vhost_signal(&vsock->dev, vq); in vhost_transport_do_send_pkt()
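Read together, lines 84-219 are the worker loop that drains send_pkt_list into the guest-visible vring: pop a packet under the spinlock, try to copy it into a descriptor, and put it back at the head of the list if the ring is full (the enable/disable pair at lines 137-138 closes the race where the guest posts buffers just as notifications are re-armed). Line 206 credits queued_replies back once a reply has actually gone out. A condensed sketch of that shape, where copy_pkt_to_guest() is a hypothetical stand-in for the real vhost_get_vq_desc()/copy_to_iter() sequence:

/* Condensed sketch; copy_pkt_to_guest() is an assumed helper, and the
 * out-of-descriptors notify re-check is folded into it. */
static void vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                                        struct vhost_virtqueue *vq)
{
        bool added = false;

        vhost_disable_notify(&vsock->dev, vq);
        for (;;) {
                struct virtio_vsock_pkt *pkt;

                spin_lock_bh(&vsock->send_pkt_list_lock);
                if (list_empty(&vsock->send_pkt_list)) {
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        /* Nothing left: let the guest kick us again. */
                        vhost_enable_notify(&vsock->dev, vq);
                        break;
                }
                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                spin_unlock_bh(&vsock->send_pkt_list_lock);

                if (!copy_pkt_to_guest(vsock, vq, pkt)) {
                        /* Ring full: requeue at the head, retry next kick. */
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        break;
                }

                if (pkt->reply)
                        atomic_dec_return(&vsock->queued_replies);
                virtio_transport_free_pkt(pkt);
                added = true;
        }
        if (added)
                vhost_signal(&vsock->dev, vq);
}

In the elided code the atomic_dec_return() value is also used to re-kick a TX queue that had been throttled by the reply counter (see more_replies below).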
231 struct vhost_vsock *vsock; in vhost_transport_send_pkt_work() local
233 vsock = container_of(work, struct vhost_vsock, send_pkt_work); in vhost_transport_send_pkt_work()
234 vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt_work()
236 vhost_transport_do_send_pkt(vsock, vq); in vhost_transport_send_pkt_work()
242 struct vhost_vsock *vsock; in vhost_transport_send_pkt() local
248 vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); in vhost_transport_send_pkt()
249 if (!vsock) { in vhost_transport_send_pkt()
256 atomic_inc(&vsock->queued_replies); in vhost_transport_send_pkt()
258 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_send_pkt()
259 list_add_tail(&pkt->list, &vsock->send_pkt_list); in vhost_transport_send_pkt()
260 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_send_pkt()
262 vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); in vhost_transport_send_pkt()
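The enqueue side never touches the vring directly; it just appends and kicks the worker. A sketch of the whole function, where the rcu_read_lock()/unlock() bracketing and the drop-on-miss branch are assumptions filled in around the matches:

/* Sketch; RCU bracketing and error returns are reconstructed. */
static int vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
        struct vhost_vsock *vsock;
        int len = pkt->len;

        rcu_read_lock();

        /* Find the vhost_vsock instance serving the destination CID. */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
                rcu_read_unlock();
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }

        /* Replies count against the throttling budget (line 256). */
        if (pkt->reply)
                atomic_inc(&vsock->queued_replies);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_add_tail(&pkt->list, &vsock->send_pkt_list);
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        /* Defer the vring copy to send_pkt_work (lines 231-236). */
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

        rcu_read_unlock();
        return len;
}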
271 struct vhost_vsock *vsock; in vhost_transport_cancel_pkt() local
280 vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); in vhost_transport_cancel_pkt()
281 if (!vsock) in vhost_transport_cancel_pkt()
284 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_transport_cancel_pkt()
285 list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { in vhost_transport_cancel_pkt()
290 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_transport_cancel_pkt()
300 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_cancel_pkt()
303 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); in vhost_transport_cancel_pkt()
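Cancellation scans the pending list for packets owned by the socket, unlinks and frees them, then credits queued_replies back; if the TX queue had been throttled, lines 300-303 re-kick it. A sketch where the pkt->vsk ownership test and the local freeme list are assumptions around the matched lines:

/* Sketch; the ownership field and free list are reconstructed. */
static int vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
        struct vhost_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0;
        LIST_HEAD(freeme);

        rcu_read_lock();
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock) {
                rcu_read_unlock();
                return -ENODEV;
        }

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
                if (pkt->vsk != vsk)
                        continue;
                list_move(&pkt->list, &freeme);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        /* Free outside the spinlock; count cancelled replies. */
        list_for_each_entry_safe(pkt, n, &freeme, list) {
                if (pkt->reply)
                        cnt++;
                list_del(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }

        if (cnt) {
                struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
                int new_cnt;

                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
                /* Re-enable TX polling if we crossed back below vq->num. */
                if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
                        vhost_poll_queue(&tx_vq->poll);
        }

        rcu_read_unlock();
        return 0;
}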
376 static bool vhost_vsock_more_replies(struct vhost_vsock *vsock) in vhost_vsock_more_replies() argument
378 struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_more_replies()
382 val = atomic_read(&vsock->queued_replies); in vhost_vsock_more_replies()
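Line 382 reads the reply counter; the comparison it feeds is not in the hits, but the throttling math at lines 300-303 implies it is checked against the TX vring size. A minimal reconstruction under that assumption:

/* Sketch: throttle guest TX once a vring's worth of replies is queued.
 * The barrier and return expression are reconstructed. */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
        int val;

        smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
        val = atomic_read(&vsock->queued_replies);

        return val < vq->num;
}

Bounding queued replies by vq->num prevents a guest that never collects RX buffers from pinning unbounded host memory in send_pkt_list.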
391 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, in vhost_vsock_handle_tx_kick() local
403 vhost_disable_notify(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
407 if (!vhost_vsock_more_replies(vsock)) { in vhost_vsock_handle_tx_kick()
421 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_vsock_handle_tx_kick()
422 vhost_disable_notify(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
440 if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) in vhost_vsock_handle_tx_kick()
453 vhost_signal(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
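The TX kick handler is the guest-to-host direction: pull descriptors, build packets, and hand them to the core virtio transport. A condensed sketch of the loop body, where vhost_vsock_alloc_pkt() is a hypothetical stand-in for the descriptor parsing the listing elides:

/* Condensed sketch of the handler body after the private_data check. */
bool added = false;

vhost_disable_notify(&vsock->dev, vq);
for (;;) {
        struct virtio_vsock_pkt *pkt;
        unsigned int out, in;
        int head;

        if (!vhost_vsock_more_replies(vsock)) {
                /* Throttled: stop until queued replies drain. */
                break;
        }

        head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                 &out, &in, NULL, NULL);
        if (head < 0)
                break;
        if (head == vq->num) {
                /* Out of buffers: re-arm notify, re-check for a race
                 * (the pattern at lines 421-422). */
                if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                        vhost_disable_notify(&vsock->dev, vq);
                        continue;
                }
                break;
        }

        pkt = vhost_vsock_alloc_pkt(vq, out, in);   /* assumed helper */

        /* Only accept packets whose source matches this guest's CID
         * (line 440); anything spoofed is dropped. */
        if (pkt && le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
                virtio_transport_recv_pkt(pkt);
        else if (pkt)
                virtio_transport_free_pkt(pkt);

        vhost_add_used(vq, head, 0);
        added = true;
}
if (added)
        vhost_signal(&vsock->dev, vq);

The src_cid check at line 440 is the security boundary: it stops one guest from injecting packets that appear to come from another CID.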
463 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, in vhost_vsock_handle_rx_kick() local
466 vhost_transport_do_send_pkt(vsock, vq); in vhost_vsock_handle_rx_kick()
469 static int vhost_vsock_start(struct vhost_vsock *vsock) in vhost_vsock_start() argument
475 mutex_lock(&vsock->dev.mutex); in vhost_vsock_start()
477 ret = vhost_dev_check_owner(&vsock->dev); in vhost_vsock_start()
481 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
482 vq = &vsock->vqs[i]; in vhost_vsock_start()
492 vq->private_data = vsock; in vhost_vsock_start()
501 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_start()
508 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
509 vq = &vsock->vqs[i]; in vhost_vsock_start()
516 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_start()
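Start publishes vsock through each vq's private_data (line 492), which is what the kick handlers test to decide whether the device is live; on any failure the loop at lines 508-509 rolls that back. A sketch of the body, where the per-vq access check and the vhost_vq_init_access() step are assumptions:

/* Sketch of the start/rollback shape inside vhost_vsock_start(). */
mutex_lock(&vsock->dev.mutex);

ret = vhost_dev_check_owner(&vsock->dev);
if (ret)
        goto err;

for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
        vq = &vsock->vqs[i];

        mutex_lock(&vq->mutex);
        if (!vhost_vq_access_ok(vq)) {
                ret = -EFAULT;
                mutex_unlock(&vq->mutex);
                goto err_vq;
        }
        if (!vq->private_data) {
                vq->private_data = vsock;       /* marks the vq live */
                ret = vhost_vq_init_access(vq);
                if (ret) {
                        vq->private_data = NULL;
                        mutex_unlock(&vq->mutex);
                        goto err_vq;
                }
        }
        mutex_unlock(&vq->mutex);
}

mutex_unlock(&vsock->dev.mutex);
return 0;

err_vq:
/* Roll back: clear private_data on every vq enabled so far. */
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
        vq = &vsock->vqs[i];
        mutex_lock(&vq->mutex);
        vq->private_data = NULL;
        mutex_unlock(&vq->mutex);
}
err:
mutex_unlock(&vsock->dev.mutex);
return ret;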
520 static int vhost_vsock_stop(struct vhost_vsock *vsock) in vhost_vsock_stop() argument
525 mutex_lock(&vsock->dev.mutex); in vhost_vsock_stop()
527 ret = vhost_dev_check_owner(&vsock->dev); in vhost_vsock_stop()
531 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_stop()
532 struct vhost_virtqueue *vq = &vsock->vqs[i]; in vhost_vsock_stop()
540 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_stop()
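Stop is the rollback loop on its own, guarded by the same owner check; a brief sketch of the loop body the hits imply:

/* Sketch: handlers that observe private_data == NULL simply bail out,
 * so clearing it under the vq mutex quiesces the device. */
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
        struct vhost_virtqueue *vq = &vsock->vqs[i];

        mutex_lock(&vq->mutex);
        vq->private_data = NULL;
        mutex_unlock(&vq->mutex);
}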
544 static void vhost_vsock_free(struct vhost_vsock *vsock) in vhost_vsock_free() argument
546 kvfree(vsock); in vhost_vsock_free()
552 struct vhost_vsock *vsock; in vhost_vsock_dev_open() local
558 vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL); in vhost_vsock_dev_open()
559 if (!vsock) in vhost_vsock_dev_open()
562 vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL); in vhost_vsock_dev_open()
568 vsock->guest_cid = 0; /* no CID assigned yet */ in vhost_vsock_dev_open()
570 atomic_set(&vsock->queued_replies, 0); in vhost_vsock_dev_open()
572 vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_dev_open()
573 vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX]; in vhost_vsock_dev_open()
574 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; in vhost_vsock_dev_open()
575 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; in vhost_vsock_dev_open()
577 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), in vhost_vsock_dev_open()
581 file->private_data = vsock; in vhost_vsock_dev_open()
582 spin_lock_init(&vsock->send_pkt_list_lock); in vhost_vsock_dev_open()
583 INIT_LIST_HEAD(&vsock->send_pkt_list); in vhost_vsock_dev_open()
584 vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work); in vhost_vsock_dev_open()
588 vhost_vsock_free(vsock); in vhost_vsock_dev_open()
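Open allocates the device with kvmalloc (line 558) so a large struct vhost_vsock can fall back to vmalloc rather than fail on fragmented memory, then wires up the two kick handlers before registering with the vhost core. A sketch of the body; the trailing vhost_dev_init() arguments vary across kernel versions and are assumptions here:

/* Sketch of vhost_vsock_dev_open(); weight constants are assumed. */
vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!vsock)
        return -ENOMEM;

vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
        ret = -ENOMEM;
        goto out;
}

vsock->guest_cid = 0;           /* no CID assigned yet */
atomic_set(&vsock->queued_replies, 0);

vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV,
               VHOST_VSOCK_PKT_WEIGHT, VHOST_VSOCK_WEIGHT);

file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);
INIT_LIST_HEAD(&vsock->send_pkt_list);
vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
return 0;

out:
vhost_vsock_free(vsock);
return ret;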
592 static void vhost_vsock_flush(struct vhost_vsock *vsock) in vhost_vsock_flush() argument
596 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) in vhost_vsock_flush()
597 if (vsock->vqs[i].handle_kick) in vhost_vsock_flush()
598 vhost_poll_flush(&vsock->vqs[i].poll); in vhost_vsock_flush()
599 vhost_work_flush(&vsock->dev, &vsock->send_pkt_work); in vhost_vsock_flush()
630 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_dev_release() local
633 if (vsock->guest_cid) in vhost_vsock_dev_release()
634 hash_del_rcu(&vsock->hash); in vhost_vsock_dev_release()
644 vhost_vsock_stop(vsock); in vhost_vsock_dev_release()
645 vhost_vsock_flush(vsock); in vhost_vsock_dev_release()
646 vhost_dev_stop(&vsock->dev); in vhost_vsock_dev_release()
648 spin_lock_bh(&vsock->send_pkt_list_lock); in vhost_vsock_dev_release()
649 while (!list_empty(&vsock->send_pkt_list)) { in vhost_vsock_dev_release()
652 pkt = list_first_entry(&vsock->send_pkt_list, in vhost_vsock_dev_release()
657 spin_unlock_bh(&vsock->send_pkt_list_lock); in vhost_vsock_dev_release()
659 vhost_dev_cleanup(&vsock->dev); in vhost_vsock_dev_release()
660 kfree(vsock->dev.vqs); in vhost_vsock_dev_release()
661 vhost_vsock_free(vsock); in vhost_vsock_dev_release()
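The release ordering matters: unhash first so vhost_vsock_get() stops returning this instance, wait out RCU readers, then stop and flush before draining and freeing. A sketch under the assumption that a global vhost_vsock_mutex guards the hash, a synchronize_rcu() pairs with the lookup, and a socket-reset walk (elided here) closes live connections:

/* Sketch of the teardown order in vhost_vsock_dev_release(). */
mutex_lock(&vhost_vsock_mutex);
if (vsock->guest_cid)
        hash_del_rcu(&vsock->hash);
mutex_unlock(&vhost_vsock_mutex);

/* Wait for in-flight vhost_vsock_get() readers before tearing down. */
synchronize_rcu();

vhost_vsock_stop(vsock);
vhost_vsock_flush(vsock);
vhost_dev_stop(&vsock->dev);

/* Drop anything still queued for a guest that no longer exists. */
spin_lock_bh(&vsock->send_pkt_list_lock);
while (!list_empty(&vsock->send_pkt_list)) {
        struct virtio_vsock_pkt *pkt;

        pkt = list_first_entry(&vsock->send_pkt_list,
                               struct virtio_vsock_pkt, list);
        list_del_init(&pkt->list);
        virtio_transport_free_pkt(pkt);
}
spin_unlock_bh(&vsock->send_pkt_list_lock);

vhost_dev_cleanup(&vsock->dev);
kfree(vsock->dev.vqs);
vhost_vsock_free(vsock);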
665 static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) in vhost_vsock_set_cid() argument
681 if (other && other != vsock) { in vhost_vsock_set_cid()
686 if (vsock->guest_cid) in vhost_vsock_set_cid()
687 hash_del_rcu(&vsock->hash); in vhost_vsock_set_cid()
689 vsock->guest_cid = guest_cid; in vhost_vsock_set_cid()
690 hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid); in vhost_vsock_set_cid()
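Setting the CID is a rehash: reject reserved or oversized values, refuse a CID another instance already owns (line 681), drop any old binding, and insert the new one. A sketch where the reserved-CID checks and the global-mutex locking are assumptions:

/* Sketch; validity checks and locking are reconstructed. */
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
        struct vhost_vsock *other;

        /* Refuse reserved CIDs and values that don't fit in 32 bits. */
        if (guest_cid <= VMADDR_CID_HOST || guest_cid > U32_MAX)
                return -EINVAL;

        mutex_lock(&vhost_vsock_mutex);
        other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                mutex_unlock(&vhost_vsock_mutex);
                return -EADDRINUSE;
        }

        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);     /* drop old binding */

        vsock->guest_cid = guest_cid;
        hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
        mutex_unlock(&vhost_vsock_mutex);

        return 0;
}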
696 static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features) in vhost_vsock_set_features() argument
704 mutex_lock(&vsock->dev.mutex); in vhost_vsock_set_features()
706 !vhost_log_access_ok(&vsock->dev)) { in vhost_vsock_set_features()
707 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_set_features()
711 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_set_features()
712 vq = &vsock->vqs[i]; in vhost_vsock_set_features()
717 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_set_features()
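Feature negotiation writes the acked feature bits into every vq under its own mutex, after rejecting unknown bits and (per lines 704-707) refusing VHOST_F_LOG_ALL when the log region isn't accessible. A sketch where the VHOST_VSOCK_FEATURES mask name and the early rejection are assumptions:

/* Sketch of the set_features body. */
if (features & ~VHOST_VSOCK_FEATURES)
        return -EOPNOTSUPP;

mutex_lock(&vsock->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
    !vhost_log_access_ok(&vsock->dev)) {
        mutex_unlock(&vsock->dev.mutex);
        return -EFAULT;
}

for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
        vq = &vsock->vqs[i];
        mutex_lock(&vq->mutex);
        vq->acked_features = features;  /* takes effect on next vq access */
        mutex_unlock(&vq->mutex);
}
mutex_unlock(&vsock->dev.mutex);
return 0;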
724 struct vhost_vsock *vsock = f->private_data; in vhost_vsock_dev_ioctl() local
735 return vhost_vsock_set_cid(vsock, guest_cid); in vhost_vsock_dev_ioctl()
740 return vhost_vsock_start(vsock); in vhost_vsock_dev_ioctl()
742 return vhost_vsock_stop(vsock); in vhost_vsock_dev_ioctl()
751 return vhost_vsock_set_features(vsock, features); in vhost_vsock_dev_ioctl()
753 mutex_lock(&vsock->dev.mutex); in vhost_vsock_dev_ioctl()
754 r = vhost_dev_ioctl(&vsock->dev, ioctl, argp); in vhost_vsock_dev_ioctl()
756 r = vhost_vring_ioctl(&vsock->dev, ioctl, argp); in vhost_vsock_dev_ioctl()
758 vhost_vsock_flush(vsock); in vhost_vsock_dev_ioctl()
759 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_dev_ioctl()
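The ioctl handler dispatches the vsock-specific commands itself and forwards everything else to the generic vhost layer (lines 753-759), flushing pending work when a generic ioctl succeeded. A condensed sketch; the copy_from_user() plumbing and the elided VHOST_GET_FEATURES case are filled in as assumptions:

/* Condensed dispatch sketch for vhost_vsock_dev_ioctl(). */
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                                  unsigned long arg)
{
        struct vhost_vsock *vsock = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 guest_cid, features;
        int start, r;

        switch (ioctl) {
        case VHOST_VSOCK_SET_GUEST_CID:
                if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
                        return -EFAULT;
                return vhost_vsock_set_cid(vsock, guest_cid);
        case VHOST_VSOCK_SET_RUNNING:
                if (copy_from_user(&start, argp, sizeof(start)))
                        return -EFAULT;
                if (start)
                        return vhost_vsock_start(vsock);
                return vhost_vsock_stop(vsock);
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
        default:
                /* Everything else is generic vhost handling. */
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
                else
                        vhost_vsock_flush(vsock);
                mutex_unlock(&vsock->dev.mutex);
                return r;
        }
}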