Cross-reference listing: lines matching references to the identifier `vs` (format: original line number, source excerpt, enclosing function)
239 static void vhost_scsi_init_inflight(struct vhost_scsi *vs, in vhost_scsi_init_inflight() argument
247 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
252 idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
254 old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
257 vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
258 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
367 struct vhost_scsi *vs = cmd->tvc_vhost; in vhost_scsi_complete_cmd() local
369 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); in vhost_scsi_complete_cmd()
371 vhost_work_queue(&vs->dev, &vs->vs_completion_work); in vhost_scsi_complete_cmd()
400 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_free_evt() argument
402 vs->vs_events_nr--; in vhost_scsi_free_evt()
407 vhost_scsi_allocate_evt(struct vhost_scsi *vs, in vhost_scsi_allocate_evt() argument
410 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
413 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { in vhost_scsi_allocate_evt()
414 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
421 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
427 vs->vs_events_nr++; in vhost_scsi_allocate_evt()
447 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_do_evt_work() argument
449 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
456 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
461 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_do_evt_work()
466 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
470 if (vhost_enable_notify(&vs->dev, vq)) in vhost_scsi_do_evt_work()
472 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
479 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
483 if (vs->vs_events_missed) { in vhost_scsi_do_evt_work()
485 vs->vs_events_missed = false; in vhost_scsi_do_evt_work()
491 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_do_evt_work()
498 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_evt_work() local
500 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
505 llnode = llist_del_all(&vs->vs_event_list); in vhost_scsi_evt_work()
507 vhost_scsi_do_evt_work(vs, evt); in vhost_scsi_evt_work()
508 vhost_scsi_free_evt(vs, evt); in vhost_scsi_evt_work()
520 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_complete_cmd_work() local
531 llnode = llist_del_all(&vs->vs_completion_list); in vhost_scsi_complete_cmd_work()
554 vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
565 vhost_signal(&vs->dev, &vs->vqs[vq].vq); in vhost_scsi_complete_cmd_work()
794 vhost_scsi_send_bad_target(struct vhost_scsi *vs, in vhost_scsi_send_bad_target() argument
807 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_send_bad_target()
813 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq, in vhost_scsi_get_desc() argument
831 if (unlikely(vhost_enable_notify(&vs->dev, vq))) { in vhost_scsi_get_desc()
832 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_get_desc()
911 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) in vhost_scsi_handle_vq() argument
939 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_handle_vq()
942 ret = vhost_scsi_get_desc(vs, vq, &vc); in vhost_scsi_handle_vq()
1076 cmd->tvc_vhost = vs; in vhost_scsi_handle_vq()
1120 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); in vhost_scsi_handle_vq()
1127 vhost_scsi_send_tmf_reject(struct vhost_scsi *vs, in vhost_scsi_send_tmf_reject() argument
1143 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); in vhost_scsi_send_tmf_reject()
1149 vhost_scsi_send_an_resp(struct vhost_scsi *vs, in vhost_scsi_send_an_resp() argument
1165 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); in vhost_scsi_send_an_resp()
1171 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) in vhost_scsi_ctl_handle_vq() argument
1192 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_ctl_handle_vq()
1195 ret = vhost_scsi_get_desc(vs, vq, &vc); in vhost_scsi_ctl_handle_vq()
1259 vhost_scsi_send_tmf_reject(vs, vq, &vc); in vhost_scsi_ctl_handle_vq()
1261 vhost_scsi_send_an_resp(vs, vq, &vc); in vhost_scsi_ctl_handle_vq()
1272 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); in vhost_scsi_ctl_handle_vq()
1282 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_ctl_handle_kick() local
1285 vhost_scsi_ctl_handle_vq(vs, vq); in vhost_scsi_ctl_handle_kick()
1289 vhost_scsi_send_evt(struct vhost_scsi *vs, in vhost_scsi_send_evt() argument
1297 evt = vhost_scsi_allocate_evt(vs, event, reason); in vhost_scsi_send_evt()
1314 llist_add(&evt->list, &vs->vs_event_list); in vhost_scsi_send_evt()
1315 vhost_work_queue(&vs->dev, &vs->vs_event_work); in vhost_scsi_send_evt()
1322 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_evt_handle_kick() local
1328 if (vs->vs_events_missed) in vhost_scsi_evt_handle_kick()
1329 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); in vhost_scsi_evt_handle_kick()
1338 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_handle_kick() local
1340 vhost_scsi_handle_vq(vs, vq); in vhost_scsi_handle_kick()
1343 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) in vhost_scsi_flush_vq() argument
1345 vhost_poll_flush(&vs->vqs[index].vq.poll); in vhost_scsi_flush_vq()
1349 static void vhost_scsi_flush(struct vhost_scsi *vs) in vhost_scsi_flush() argument
1355 vhost_scsi_init_inflight(vs, old_inflight); in vhost_scsi_flush()
1367 vhost_scsi_flush_vq(vs, i); in vhost_scsi_flush()
1368 vhost_work_flush(&vs->dev, &vs->vs_completion_work); in vhost_scsi_flush()
1369 vhost_work_flush(&vs->dev, &vs->vs_event_work); in vhost_scsi_flush()
1384 vhost_scsi_set_endpoint(struct vhost_scsi *vs, in vhost_scsi_set_endpoint() argument
1396 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
1399 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_set_endpoint()
1401 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_set_endpoint()
1413 if (vs->vs_tpg) in vhost_scsi_set_endpoint()
1414 memcpy(vs_tpg, vs->vs_tpg, len); in vhost_scsi_set_endpoint()
1429 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { in vhost_scsi_set_endpoint()
1450 tpg->vhost_scsi = vs; in vhost_scsi_set_endpoint()
1458 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, in vhost_scsi_set_endpoint()
1459 sizeof(vs->vs_vhost_wwpn)); in vhost_scsi_set_endpoint()
1461 vq = &vs->vqs[i].vq; in vhost_scsi_set_endpoint()
1476 vhost_scsi_flush(vs); in vhost_scsi_set_endpoint()
1477 kfree(vs->vs_tpg); in vhost_scsi_set_endpoint()
1478 vs->vs_tpg = vs_tpg; in vhost_scsi_set_endpoint()
1481 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
1487 vhost_scsi_clear_endpoint(struct vhost_scsi *vs, in vhost_scsi_clear_endpoint() argument
1499 mutex_lock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1501 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_clear_endpoint()
1502 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_clear_endpoint()
1508 if (!vs->vs_tpg) { in vhost_scsi_clear_endpoint()
1515 tpg = vs->vs_tpg[target]; in vhost_scsi_clear_endpoint()
1536 vs->vs_tpg[target] = NULL; in vhost_scsi_clear_endpoint()
1548 vq = &vs->vqs[i].vq; in vhost_scsi_clear_endpoint()
1558 vhost_scsi_flush(vs); in vhost_scsi_clear_endpoint()
1559 kfree(vs->vs_tpg); in vhost_scsi_clear_endpoint()
1560 vs->vs_tpg = NULL; in vhost_scsi_clear_endpoint()
1561 WARN_ON(vs->vs_events_nr); in vhost_scsi_clear_endpoint()
1562 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1569 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1574 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) in vhost_scsi_set_features() argument
1582 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_features()
1584 !vhost_log_access_ok(&vs->dev)) { in vhost_scsi_set_features()
1585 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
1590 vq = &vs->vqs[i].vq; in vhost_scsi_set_features()
1595 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
1601 struct vhost_scsi *vs; in vhost_scsi_open() local
1605 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); in vhost_scsi_open()
1606 if (!vs) { in vhost_scsi_open()
1607 vs = vzalloc(sizeof(*vs)); in vhost_scsi_open()
1608 if (!vs) in vhost_scsi_open()
1616 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); in vhost_scsi_open()
1617 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); in vhost_scsi_open()
1619 vs->vs_events_nr = 0; in vhost_scsi_open()
1620 vs->vs_events_missed = false; in vhost_scsi_open()
1622 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; in vhost_scsi_open()
1623 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_open()
1624 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; in vhost_scsi_open()
1625 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; in vhost_scsi_open()
1627 vqs[i] = &vs->vqs[i].vq; in vhost_scsi_open()
1628 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; in vhost_scsi_open()
1630 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV, in vhost_scsi_open()
1633 vhost_scsi_init_inflight(vs, NULL); in vhost_scsi_open()
1635 f->private_data = vs; in vhost_scsi_open()
1639 kvfree(vs); in vhost_scsi_open()
1646 struct vhost_scsi *vs = f->private_data; in vhost_scsi_release() local
1649 mutex_lock(&vs->dev.mutex); in vhost_scsi_release()
1650 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); in vhost_scsi_release()
1651 mutex_unlock(&vs->dev.mutex); in vhost_scsi_release()
1652 vhost_scsi_clear_endpoint(vs, &t); in vhost_scsi_release()
1653 vhost_dev_stop(&vs->dev); in vhost_scsi_release()
1654 vhost_dev_cleanup(&vs->dev); in vhost_scsi_release()
1656 vhost_scsi_flush(vs); in vhost_scsi_release()
1657 kfree(vs->dev.vqs); in vhost_scsi_release()
1658 kvfree(vs); in vhost_scsi_release()
1667 struct vhost_scsi *vs = f->private_data; in vhost_scsi_ioctl() local
1675 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_ioctl()
1684 return vhost_scsi_set_endpoint(vs, &backend); in vhost_scsi_ioctl()
1691 return vhost_scsi_clear_endpoint(vs, &backend); in vhost_scsi_ioctl()
1700 vs->vs_events_missed = events_missed; in vhost_scsi_ioctl()
1705 events_missed = vs->vs_events_missed; in vhost_scsi_ioctl()
1718 return vhost_scsi_set_features(vs, features); in vhost_scsi_ioctl()
1720 mutex_lock(&vs->dev.mutex); in vhost_scsi_ioctl()
1721 r = vhost_dev_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1724 r = vhost_vring_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1725 mutex_unlock(&vs->dev.mutex); in vhost_scsi_ioctl()
1786 struct vhost_scsi *vs = tpg->vhost_scsi; in vhost_scsi_do_plug() local
1790 if (!vs) in vhost_scsi_do_plug()
1793 mutex_lock(&vs->dev.mutex); in vhost_scsi_do_plug()
1800 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_plug()
1803 vhost_scsi_send_evt(vs, tpg, lun, in vhost_scsi_do_plug()
1806 mutex_unlock(&vs->dev.mutex); in vhost_scsi_do_plug()