Lines Matching +full:virtio +full:- +full:device
1 // SPDX-License-Identifier: GPL-2.0
3 * Virtio Transport driver for Arm System Control and Management Interface
6 * Copyright (C) 2020-2021 OpenSynergy.
13 * The scmi-virtio transport implements a driver for the virtio SCMI device.
15 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
16 * channel (virtio eventq, P2A channel). Each channel is implemented through a
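The comment above describes the channel layout; each channel presumably maps onto a fixed virtqueue index. A minimal sketch of that mapping, assuming the cmdq sits at index 0 and the eventq at index 1 (the index values and macro names are assumptions, not visible in these matches):

/* Sketch: assumed virtqueue index layout for the two channels. */
#define VIRTIO_SCMI_VQ_TX	0	/* cmdq,   A2P channel */
#define VIRTIO_SCMI_VQ_RX	1	/* eventq, P2A channel */
#define VIRTIO_SCMI_VQ_MAX_CNT	2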
22 #include <linux/virtio.h>
36 * struct scmi_vio_channel - Transport channel information
62 * struct scmi_vio_msg - Transport PDU information
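The two kernel-doc lines above name structures whose members can be inferred from how the matches below use them (vioch->vqueue, ->cinfo, ->free_list, ->is_rx, ->ready, ->max_msg, ->lock, ->ready_lock; msg->request, ->input, ->list, ->rx_len). A sketch reconstructed on that basis; member types and any fields not exercised by these lines are assumptions:

/* Sketch of the two structures, reconstructed from field usage in the matches below. */
struct scmi_vio_channel {
	struct virtqueue	*vqueue;	/* cmdq or eventq backing this channel */
	struct scmi_chan_info	*cinfo;
	struct list_head	free_list;	/* idle scmi_vio_msg PDUs (Tx channel) */
	bool			is_rx;
	bool			ready;
	unsigned int		max_msg;	/* reported via virtio_get_max_msg() */
	spinlock_t		lock;		/* protects vqueue and free_list */
	spinlock_t		ready_lock;	/* protects the ready flag */
};

struct scmi_vio_msg {
	void			*request;	/* outgoing PDU buffer */
	void			*input;		/* incoming PDU buffer */
	struct list_head	list;
	unsigned int		rx_len;		/* length returned by the device */
};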
76 /* Only one SCMI VirtIO device can possibly exist */
91 sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); in scmi_vio_feed_vq_rx()
93 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_feed_vq_rx()
95 rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); in scmi_vio_feed_vq_rx()
97 dev_err_once(vioch->cinfo->dev, in scmi_vio_feed_vq_rx()
100 virtqueue_kick(vioch->vqueue); in scmi_vio_feed_vq_rx()
102 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_feed_vq_rx()
110 if (vioch->is_rx) { in scmi_finalize_message()
114 spin_lock(&vioch->lock); in scmi_finalize_message()
115 list_add(&msg->list, &vioch->free_list); in scmi_finalize_message()
116 spin_unlock(&vioch->lock); in scmi_finalize_message()
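Taken together, these matches outline how a consumed PDU is recycled: an Rx buffer goes straight back onto the eventq, while a Tx buffer returns to the free list. A sketch of that shape; the Rx branch body is assumed to call scmi_vio_feed_vq_rx() from above and is not itself among the matches:

/* Sketch: recycle a completed message (Rx branch assumed). */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx) {
		scmi_vio_feed_vq_rx(vioch, msg);	/* hand the buffer back to the eventq */
	} else {
		/* Tx path: IRQs assumed already disabled by the completion callback */
		spin_lock(&vioch->lock);
		list_add(&msg->list, &vioch->free_list);
		spin_unlock(&vioch->lock);
	}
}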
128 if (WARN_ON_ONCE(!vqueue->vdev->priv)) in scmi_vio_complete_cb()
130 vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; in scmi_vio_complete_cb()
133 spin_lock_irqsave(&vioch->ready_lock, ready_flags); in scmi_vio_complete_cb()
135 if (!vioch->ready) { in scmi_vio_complete_cb()
142 spin_lock(&vioch->lock); in scmi_vio_complete_cb()
153 spin_unlock(&vioch->lock); in scmi_vio_complete_cb()
156 msg->rx_len = length; in scmi_vio_complete_cb()
157 scmi_rx_callback(vioch->cinfo, in scmi_vio_complete_cb()
158 msg_read_header(msg->input), msg); in scmi_vio_complete_cb()
164 * Release ready_lock and re-enable IRQs between loop iterations in scmi_vio_complete_cb()
166 * flag vioch->ready to false even in between processing of in scmi_vio_complete_cb()
170 spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); in scmi_vio_complete_cb()
174 spin_unlock(&vioch->lock); in scmi_vio_complete_cb()
176 spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); in scmi_vio_complete_cb()
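The comment at lines 164-166 is easier to follow with the loop skeleton in view: each iteration takes ready_lock, bails out if the channel has been torn down, pops one buffer from the virtqueue under the channel lock, dispatches it, and only then drops ready_lock (re-enabling IRQs) so teardown can flip the flag between iterations. A sketch of that control flow; the virtqueue_disable_cb()/virtqueue_enable_cb() re-arming done by the real callback is omitted and the details are assumed:

/* Sketch of the completion loop implied by the matches above (details assumed). */
for (;;) {
	spin_lock_irqsave(&vioch->ready_lock, ready_flags);
	if (!vioch->ready) {
		spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
		return;					/* channel torn down */
	}

	spin_lock(&vioch->lock);
	msg = virtqueue_get_buf(vqueue, &length);	/* one used buffer, if any */
	spin_unlock(&vioch->lock);

	if (msg) {
		msg->rx_len = length;
		scmi_rx_callback(vioch->cinfo, msg_read_header(msg->input), msg);
		scmi_finalize_message(vioch, msg);
	}

	/* Drop ready_lock between iterations so teardown can mark the channel not ready. */
	spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);

	if (!msg)
		return;					/* queue drained for now */
}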
188 struct scmi_vio_channel *vioch = base_cinfo->transport_info; in virtio_get_max_msg()
190 return vioch->max_msg; in virtio_get_max_msg()
193 static int virtio_link_supplier(struct device *dev) in virtio_link_supplier()
197 "Deferring probe after not finding a bound scmi-virtio device\n"); in virtio_link_supplier()
198 return -EPROBE_DEFER; in virtio_link_supplier()
201 if (!device_link_add(dev, &scmi_vdev->dev, in virtio_link_supplier()
203 dev_err(dev, "Adding link to supplier virtio device failed\n"); in virtio_link_supplier()
204 return -ECANCELED; in virtio_link_supplier()
210 static bool virtio_chan_available(struct device *dev, int idx) in virtio_chan_available()
217 channels = (struct scmi_vio_channel *)scmi_vdev->priv; in virtio_chan_available()
231 return vioch && !vioch->cinfo; in virtio_chan_available()
234 static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, in virtio_chan_setup()
243 return -EPROBE_DEFER; in virtio_chan_setup()
245 vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; in virtio_chan_setup()
247 for (i = 0; i < vioch->max_msg; i++) { in virtio_chan_setup()
250 msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL); in virtio_chan_setup()
252 return -ENOMEM; in virtio_chan_setup()
255 msg->request = devm_kzalloc(cinfo->dev, in virtio_chan_setup()
258 if (!msg->request) in virtio_chan_setup()
259 return -ENOMEM; in virtio_chan_setup()
262 msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, in virtio_chan_setup()
264 if (!msg->input) in virtio_chan_setup()
265 return -ENOMEM; in virtio_chan_setup()
268 spin_lock_irqsave(&vioch->lock, flags); in virtio_chan_setup()
269 list_add_tail(&msg->list, &vioch->free_list); in virtio_chan_setup()
270 spin_unlock_irqrestore(&vioch->lock, flags); in virtio_chan_setup()
276 spin_lock_irqsave(&vioch->lock, flags); in virtio_chan_setup()
277 cinfo->transport_info = vioch; in virtio_chan_setup()
279 vioch->cinfo = cinfo; in virtio_chan_setup()
280 spin_unlock_irqrestore(&vioch->lock, flags); in virtio_chan_setup()
282 spin_lock_irqsave(&vioch->ready_lock, flags); in virtio_chan_setup()
283 vioch->ready = true; in virtio_chan_setup()
284 spin_unlock_irqrestore(&vioch->ready_lock, flags); in virtio_chan_setup()
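One detail the matches leave out is where the freshly allocated messages end up on the Rx channel. Presumably (an assumption, not shown above) Tx messages are parked on free_list as seen at line 269, while Rx messages are posted straight to the eventq through scmi_vio_feed_vq_rx() so the device always has buffers to fill:

/* Sketch: per-message destination during channel setup (Rx branch assumed). */
if (vioch->is_rx) {
	scmi_vio_feed_vq_rx(vioch, msg);	/* pre-post the buffer on the eventq */
} else {
	spin_lock_irqsave(&vioch->lock, flags);
	list_add_tail(&msg->list, &vioch->free_list);
	spin_unlock_irqrestore(&vioch->lock, flags);
}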
293 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_chan_free()
295 spin_lock_irqsave(&vioch->ready_lock, flags); in virtio_chan_free()
296 vioch->ready = false; in virtio_chan_free()
297 spin_unlock_irqrestore(&vioch->ready_lock, flags); in virtio_chan_free()
301 spin_lock_irqsave(&vioch->lock, flags); in virtio_chan_free()
302 vioch->cinfo = NULL; in virtio_chan_free()
303 spin_unlock_irqrestore(&vioch->lock, flags); in virtio_chan_free()
311 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_send_message()
319 spin_lock_irqsave(&vioch->lock, flags); in virtio_send_message()
321 if (list_empty(&vioch->free_list)) { in virtio_send_message()
322 spin_unlock_irqrestore(&vioch->lock, flags); in virtio_send_message()
323 return -EBUSY; in virtio_send_message()
326 msg = list_first_entry(&vioch->free_list, typeof(*msg), list); in virtio_send_message()
327 list_del(&msg->list); in virtio_send_message()
329 msg_tx_prepare(msg->request, xfer); in virtio_send_message()
331 sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); in virtio_send_message()
332 sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); in virtio_send_message()
334 rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); in virtio_send_message()
336 list_add(&msg->list, &vioch->free_list); in virtio_send_message()
337 dev_err_once(vioch->cinfo->dev, in virtio_send_message()
341 virtqueue_kick(vioch->vqueue); in virtio_send_message()
344 spin_unlock_irqrestore(&vioch->lock, flags); in virtio_send_message()
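The virtqueue_add_sgs() call at line 334 passes one out-sg and one in-sg, but the sgs array itself is not among the matches. From the two sg_init_one() calls it is presumably just the request followed by the response buffer; the array name and its fixed size of two are assumptions:

/* Sketch: one device-readable (request) plus one device-writable (response) sg per Tx message. */
struct scatterlist sg_out, sg_in;
struct scatterlist *sgs[2] = { &sg_out, &sg_in };

sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);	/* 1 out, 1 in, token = msg */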
352 struct scmi_vio_msg *msg = xfer->priv; in virtio_fetch_response()
355 msg_fetch_response(msg->input, msg->rx_len, xfer); in virtio_fetch_response()
356 xfer->priv = NULL; in virtio_fetch_response()
363 struct scmi_vio_msg *msg = xfer->priv; in virtio_fetch_notification()
366 msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); in virtio_fetch_notification()
367 xfer->priv = NULL; in virtio_fetch_notification()
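A note for readers new to the SCMI core: the msg pointer passed to scmi_rx_callback() at line 157 is an opaque priv argument, and it plausibly reappears here as xfer->priv, which is what lines 352 and 363 consume and then clear. That round trip through the core is an assumption based on how the priv argument is ordinarily used; it is not visible in these matches.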
384 struct device *dev = &vdev->dev; in scmi_vio_probe()
392 /* Only one SCMI VirtIO device allowed */ in scmi_vio_probe()
395 "One SCMI Virtio device was already initialized: only one allowed.\n"); in scmi_vio_probe()
396 return -EBUSY; in scmi_vio_probe()
404 return -ENOMEM; in scmi_vio_probe()
439 vdev->priv = channels; in scmi_vio_probe()
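Between the channel allocation at line 404 and the vdev->priv assignment at line 439 the probe presumably discovers the virtqueues and sizes each channel, which is also where the max_msg value returned by virtio_get_max_msg() at line 190 would come from. None of that appears in the matches; the sketch below follows the usual virtio pattern, with the callback and name arrays, the two-descriptors-per-Tx-message divisor, and all local variable names being assumptions:

/* Sketch only: assumed virtqueue discovery and per-channel sizing. */
ret = virtio_find_vqs(vdev, vq_cnt, vqs, callbacks, names, NULL);
if (ret)
	return ret;

for (i = 0; i < vq_cnt; i++) {
	unsigned int sz = virtqueue_get_vring_size(vqs[i]);

	spin_lock_init(&channels[i].lock);
	spin_lock_init(&channels[i].ready_lock);
	INIT_LIST_HEAD(&channels[i].free_list);
	channels[i].vqueue = vqs[i];
	/* A Tx message needs two descriptors (request + response), so halve the ring.
	 * is_rx is assumed to have been set for the eventq channel before this loop. */
	if (!channels[i].is_rx)
		sz /= 2;
	channels[i].max_msg = sz;
}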
451 * virtio channels will have been already marked NOT ready, causing any in scmi_vio_remove()
455 vdev->config->reset(vdev); in scmi_vio_remove()
456 vdev->config->del_vqs(vdev); in scmi_vio_remove()
464 dev_err(&vdev->dev, in scmi_vio_validate()
465 "device does not comply with spec version 1.x\n"); in scmi_vio_validate()
466 return -EINVAL; in scmi_vio_validate()
482 .driver.name = "scmi-virtio",
506 .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
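The functions in these matches form the transport's callback set; presumably they are wired into the SCMI core through a scmi_transport_ops table referenced by the descriptor that carries the max_rx_timeout_ms value at line 506. A sketch of that wiring, limited to the callbacks actually visible above; the real table and descriptor very likely have additional fields:

/* Sketch: only callbacks seen in the matches above are listed. */
static const struct scmi_transport_ops scmi_virtio_ops = {
	.link_supplier		= virtio_link_supplier,
	.chan_available		= virtio_chan_available,
	.chan_setup		= virtio_chan_setup,
	.chan_free		= virtio_chan_free,
	.get_max_msg		= virtio_get_max_msg,
	.send_message		= virtio_send_message,
	.fetch_response		= virtio_fetch_response,
	.fetch_notification	= virtio_fetch_notification,
};

const struct scmi_desc scmi_virtio_desc = {
	.ops			= &scmi_virtio_ops,
	.max_rx_timeout_ms	= 60000,	/* for non-realtime virtio devices */
};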