Lines Matching +full:mhu +full:- +full:tx
1 // SPDX-License-Identifier: GPL-2.0
6 * and the Application Processors (AP). The Message Handling Unit (MHU)
7 * provides a mechanism for inter-processor communication between SCP's
14 * Copyright (C) 2018-2021 ARM Ltd.
22 #include <linux/io-64-nonatomic-hi-lo.h>
42 SCMI_ERR_SUPPORT = -1, /* Not supported */
43 SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
44 SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
45 SCMI_ERR_ENTRY = -4, /* Not found */
46 SCMI_ERR_RANGE = -5, /* Value out of range */
47 SCMI_ERR_BUSY = -6, /* Device busy */
48 SCMI_ERR_COMMS = -7, /* Communication Error */
49 SCMI_ERR_GENERIC = -8, /* Generic Error */
50 SCMI_ERR_HARDWARE = -9, /* Hardware Error */
51 SCMI_ERR_PROTOCOL = -10, /* Protocol Error */
75 * struct scmi_xfers_info - Structure to manage transfer information
83 * a number of xfers equal to the maximum allowed in-flight
86 * currently in-flight messages.
97 * struct scmi_protocol_instance - Describe an initialized protocol instance.
100 * @gid: A reference for per-protocol devres management.
121 * struct scmi_info - Structure representing an SCMI instance
126 * implementation version and (sub-)vendor identification.
130 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
141 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
144 * to have an execution latency less than or equal to the threshold
176 -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
177 -EINVAL, /* SCMI_ERR_PARAMS */
178 -EACCES, /* SCMI_ERR_ACCESS */
179 -ENOENT, /* SCMI_ERR_ENTRY */
180 -ERANGE, /* SCMI_ERR_RANGE */
181 -EBUSY, /* SCMI_ERR_BUSY */
182 -ECOMM, /* SCMI_ERR_COMMS */
183 -EIO, /* SCMI_ERR_GENERIC */
184 -EREMOTEIO, /* SCMI_ERR_HARDWARE */
185 -EPROTO, /* SCMI_ERR_PROTOCOL */
190 int err_idx = -errno; in scmi_to_linux_errno()
194 return -EIO; in scmi_to_linux_errno()
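
The two fragments above are the heart of the status translation: negate the SCMI status to index scmi_linux_errmap[], falling back to -EIO for anything unrecognized. A minimal sketch of the full routine, reconstructed from these fragments (the exact bounds check is an assumption):

	/* Sketch: map a (negative) SCMI status onto a Linux errno.
	 * Anything outside the known table falls back to -EIO. */
	static inline int scmi_to_linux_errno(int errno)
	{
		int err_idx = -errno;

		if (err_idx >= SCMI_SUCCESS &&
		    err_idx < ARRAY_SIZE(scmi_linux_errmap))
			return scmi_linux_errmap[err_idx];
		return -EIO;
	}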
202 info->notify_priv = priv; in scmi_notification_instance_data_set()
213 return info->notify_priv; in scmi_notification_instance_data_get()
217 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
219 * @minfo: Pointer to Tx/Rx Message management info based on channel type
223 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
224 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
225 * of incorrect association of a late and expired xfer with a live in-flight
226 * transaction, both happening to re-use the same token identifier.
228 * Since the platform is NOT required to answer our requests in order, we should
231 * - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
234 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
238 * X = used in-flight
241 * ------
243 * |- xfer_id picked
244 * -----------+----------------------------------------------------------
246 * ----------------------------------------------------------------------
248 * |- next_token
250 * Out-of-order pending at start
251 * -----------------------------
253 * |- xfer_id picked, last_token fixed
254 * -----+----------------------------------------------------------------
256 * ----------------------------------------------------------------------
258 * |- next_token
261 * Out-of-order pending at end
262 * ---------------------------
264 * |- xfer_id picked, last_token fixed
265 * -----+----------------------------------------------------------------
267 * ----------------------------------------------------------------------
269 * |- next_token
281 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1] in scmi_xfer_token_set()
282 * using the pre-allocated transfer_id as a base. in scmi_xfer_token_set()
288 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1)); in scmi_xfer_token_set()
291 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
295 * After heavily out-of-order responses, there are no free in scmi_xfer_token_set()
299 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
303 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages in scmi_xfer_token_set()
304 * but we have not found any free token [0, MSG_TOKEN_MAX - 1]. in scmi_xfer_token_set()
307 return -ENOMEM; in scmi_xfer_token_set()
310 /* Update +/- last_token accordingly if we skipped some hole */ in scmi_xfer_token_set()
312 atomic_add((int)(xfer_id - next_token), &transfer_last_id); in scmi_xfer_token_set()
314 /* Set in-flight */ in scmi_xfer_token_set()
315 set_bit(xfer_id, minfo->xfer_alloc_table); in scmi_xfer_token_set()
316 xfer->hdr.seq = (u16)xfer_id; in scmi_xfer_token_set()
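
Condensing the long comment and the fragments above, the token allocator is a two-pass bitmap scan with wrap-around over xfer_alloc_table. A sketch of that core (the elided kernel code may differ in detail; locking is handled by the caller):

	/* Candidate token: low bits of the monotonic transfer_id */
	next_token = xfer->transfer_id & (MSG_TOKEN_MAX - 1);

	/* Pass 1: first free token at or above the candidate */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/* Pass 2: wrap around and retry from the start */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
		if (xfer_id == MSG_TOKEN_MAX)
			return -ENOMEM;	/* every token is in-flight */
	}

	/* Keep the monotonic counter in sync if a hole was skipped */
	if (xfer_id != next_token)
		atomic_add((int)(xfer_id - next_token), &transfer_last_id);

	set_bit(xfer_id, minfo->xfer_alloc_table);
	xfer->hdr.seq = (u16)xfer_id;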
322 * scmi_xfer_token_clear - Release the token
324 * @minfo: Pointer to Tx/Rx Message management info based on channel type
330 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_token_clear()
334 * scmi_xfer_get() - Allocate one message
337 * @minfo: Pointer to Tx/Rx Message management info based on channel type
363 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_get()
364 if (hlist_empty(&minfo->free_xfers)) { in scmi_xfer_get()
365 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
366 return ERR_PTR(-ENOMEM); in scmi_xfer_get()
370 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node); in scmi_xfer_get()
371 hlist_del_init(&xfer->node); in scmi_xfer_get()
377 xfer->transfer_id = atomic_inc_return(&transfer_last_id); in scmi_xfer_get()
383 hash_add(minfo->pending_xfers, &xfer->node, in scmi_xfer_get()
384 xfer->hdr.seq); in scmi_xfer_get()
385 xfer->pending = true; in scmi_xfer_get()
387 dev_err(handle->dev, in scmi_xfer_get()
389 hlist_add_head(&xfer->node, &minfo->free_xfers); in scmi_xfer_get()
395 refcount_set(&xfer->users, 1); in scmi_xfer_get()
396 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_get()
398 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
404 * __scmi_xfer_put() - Release a message
406 * @minfo: Pointer to Tx/Rx Message management info based on channel type
419 spin_lock_irqsave(&minfo->xfer_lock, flags); in __scmi_xfer_put()
420 if (refcount_dec_and_test(&xfer->users)) { in __scmi_xfer_put()
421 if (xfer->pending) { in __scmi_xfer_put()
423 hash_del(&xfer->node); in __scmi_xfer_put()
424 xfer->pending = false; in __scmi_xfer_put()
426 hlist_add_head(&xfer->node, &minfo->free_xfers); in __scmi_xfer_put()
428 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in __scmi_xfer_put()
432 * scmi_xfer_lookup_unlocked - Helper to look up an xfer_id
434 * @minfo: Pointer to Tx/Rx Message management info based on channel type
448 if (test_bit(xfer_id, minfo->xfer_alloc_table)) in scmi_xfer_lookup_unlocked()
449 xfer = XFER_FIND(minfo->pending_xfers, xfer_id); in scmi_xfer_lookup_unlocked()
451 return xfer ?: ERR_PTR(-EINVAL); in scmi_xfer_lookup_unlocked()
455 * scmi_msg_response_validate - Validate message type against state of related
464 * related synchronous response (Out-of-Order Delayed Response) the missing
467 * SCMI transport can deliver such out-of-order responses.
469 * Context: Assumes to be called with xfer->lock already acquired.
480 * delayed response we're not prepared to handle: bail-out safely in scmi_msg_response_validate()
483 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) { in scmi_msg_response_validate()
484 dev_err(cinfo->dev, in scmi_msg_response_validate()
486 xfer->hdr.seq); in scmi_msg_response_validate()
487 return -EINVAL; in scmi_msg_response_validate()
490 switch (xfer->state) { in scmi_msg_response_validate()
497 xfer->hdr.status = SCMI_SUCCESS; in scmi_msg_response_validate()
498 xfer->state = SCMI_XFER_RESP_OK; in scmi_msg_response_validate()
499 complete(&xfer->done); in scmi_msg_response_validate()
500 dev_warn(cinfo->dev, in scmi_msg_response_validate()
502 xfer->hdr.seq); in scmi_msg_response_validate()
507 return -EINVAL; in scmi_msg_response_validate()
511 return -EINVAL; in scmi_msg_response_validate()
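
Read together, the fragments above enforce a small state machine; the rows marked "inferred" correspond to branches elided in this view:

	incoming type      xfer->state    outcome
	delayed response   (any)          -EINVAL if no async_done was armed
	command response   SENT_OK        accept (the normal path)
	delayed response   SENT_OK        synthesize the missing synchronous
	                                  response as shown above, then accept
	delayed response   RESP_OK        accept (inferred)
	command response   RESP_OK        -EINVAL, duplicate (inferred)
	(any)              DRESP_OK       -EINVAL (inferred)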
518 * scmi_xfer_state_update - Update xfer state
531 xfer->hdr.type = msg_type; in scmi_xfer_state_update()
534 if (xfer->hdr.type == MSG_TYPE_COMMAND) in scmi_xfer_state_update()
535 xfer->state = SCMI_XFER_RESP_OK; in scmi_xfer_state_update()
537 xfer->state = SCMI_XFER_DRESP_OK; in scmi_xfer_state_update()
544 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY); in scmi_xfer_acquired()
550 * scmi_xfer_command_acquire - Helper to look up and acquire a command xfer
567 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_command_acquire()
568 struct scmi_xfers_info *minfo = &info->tx_minfo; in scmi_xfer_command_acquire()
573 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
576 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
579 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
582 refcount_inc(&xfer->users); in scmi_xfer_command_acquire()
583 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
585 spin_lock_irqsave(&xfer->lock, flags); in scmi_xfer_command_acquire()
598 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_xfer_command_acquire()
601 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
602 "Invalid message type:%d for %d - HDR:0x%X state:%d\n", in scmi_xfer_command_acquire()
603 msg_type, xfer_id, msg_hdr, xfer->state); in scmi_xfer_command_acquire()
606 xfer = ERR_PTR(-EINVAL); in scmi_xfer_command_acquire()
615 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_command_release()
616 __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_command_release()
622 if (info->desc->ops->clear_channel) in scmi_clear_channel()
623 info->desc->ops->clear_channel(cinfo); in scmi_clear_channel()
629 return cinfo->no_completion_irq || info->desc->force_polling; in is_polling_required()
634 return info->desc->ops->poll_done || in is_transport_polling_capable()
635 info->desc->sync_cmds_completed_on_ret; in is_transport_polling_capable()
649 struct device *dev = cinfo->dev; in scmi_handle_notification()
650 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_notification()
651 struct scmi_xfers_info *minfo = &info->rx_minfo; in scmi_handle_notification()
655 xfer = scmi_xfer_get(cinfo->handle, minfo, false); in scmi_handle_notification()
663 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_handle_notification()
665 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_notification()
666 smp_store_mb(xfer->priv, priv); in scmi_handle_notification()
667 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, in scmi_handle_notification()
670 trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", in scmi_handle_notification()
671 xfer->hdr.seq, xfer->hdr.status, in scmi_handle_notification()
672 xfer->rx.buf, xfer->rx.len); in scmi_handle_notification()
674 scmi_notify(cinfo->handle, xfer->hdr.protocol_id, in scmi_handle_notification()
675 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); in scmi_handle_notification()
677 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_notification()
678 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_notification()
690 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_response()
700 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) in scmi_handle_response()
701 xfer->rx.len = info->desc->max_msg_size; in scmi_handle_response()
704 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_response()
705 smp_store_mb(xfer->priv, priv); in scmi_handle_response()
706 info->desc->ops->fetch_response(cinfo, xfer); in scmi_handle_response()
708 trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, in scmi_handle_response()
709 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? in scmi_handle_response()
711 xfer->hdr.seq, xfer->hdr.status, in scmi_handle_response()
712 xfer->rx.buf, xfer->rx.len); in scmi_handle_response()
714 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_response()
715 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_response()
716 xfer->hdr.type); in scmi_handle_response()
718 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { in scmi_handle_response()
720 complete(xfer->async_done); in scmi_handle_response()
722 complete(&xfer->done); in scmi_handle_response()
729 * scmi_rx_callback() - callback for receiving messages
760 * xfer_put() - Release a transmit message
769 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_put()
771 __scmi_xfer_put(&info->tx_minfo, xfer); in xfer_put()
777 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_done_no_timeout()
780 * Poll also on xfer->done so that polling can be forcibly terminated in scmi_xfer_done_no_timeout()
781 * in case of out-of-order receptions of delayed responses in scmi_xfer_done_no_timeout()
783 return info->desc->ops->poll_done(cinfo, xfer) || in scmi_xfer_done_no_timeout()
784 try_wait_for_completion(&xfer->done) || in scmi_xfer_done_no_timeout()
789 * scmi_wait_for_message_response - A helper to group all the possible ways of
795 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
796 * configuration flags like xfer->hdr.poll_completion.
803 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_wait_for_message_response()
804 struct device *dev = info->dev; in scmi_wait_for_message_response()
805 int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms; in scmi_wait_for_message_response()
807 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, in scmi_wait_for_message_response()
808 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_wait_for_message_response()
810 xfer->hdr.poll_completion); in scmi_wait_for_message_response()
812 if (xfer->hdr.poll_completion) { in scmi_wait_for_message_response()
817 if (!info->desc->sync_cmds_completed_on_ret) { in scmi_wait_for_message_response()
828 "timed out in resp(caller: %pS) - polling\n", in scmi_wait_for_message_response()
830 ret = -ETIMEDOUT; in scmi_wait_for_message_response()
838 * Do not fetch_response if an out-of-order delayed in scmi_wait_for_message_response()
841 spin_lock_irqsave(&xfer->lock, flags); in scmi_wait_for_message_response()
842 if (xfer->state == SCMI_XFER_SENT_OK) { in scmi_wait_for_message_response()
843 info->desc->ops->fetch_response(cinfo, xfer); in scmi_wait_for_message_response()
844 xfer->state = SCMI_XFER_RESP_OK; in scmi_wait_for_message_response()
846 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_wait_for_message_response()
849 trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, in scmi_wait_for_message_response()
851 xfer->hdr.seq, xfer->hdr.status, in scmi_wait_for_message_response()
852 xfer->rx.buf, xfer->rx.len); in scmi_wait_for_message_response()
856 if (!wait_for_completion_timeout(&xfer->done, in scmi_wait_for_message_response()
860 ret = -ETIMEDOUT; in scmi_wait_for_message_response()
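
Stripped of detail, the waiter picks between busy-waiting and sleeping. A skeleton of that choice, inferred from the fragments (the deadline argument to scmi_xfer_done_no_timeout() is an assumption, since its third condition is elided above):

	if (xfer->hdr.poll_completion) {
		if (!info->desc->sync_cmds_completed_on_ret) {
			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

			/* Busy-wait on transport poll_done(), a racing
			 * completion, or the deadline, whichever first. */
			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
								  xfer, stop));
		}
		/* ... then fetch the response under xfer->lock only if
		 * still SCMI_XFER_SENT_OK, as shown above ... */
	} else {
		/* Sleep-wait on the transfer completion */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms)))
			ret = -ETIMEDOUT;
	}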
868 * do_xfer() - Do one transfer
873 * Return: -ETIMEDOUT in case of no response, if transmit error,
882 struct scmi_info *info = handle_to_scmi_info(pi->handle); in do_xfer()
883 struct device *dev = info->dev; in do_xfer()
887 if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) { in do_xfer()
890 return -EINVAL; in do_xfer()
893 cinfo = idr_find(&info->tx_idr, pi->proto->id); in do_xfer()
895 return -EINVAL; in do_xfer()
899 xfer->hdr.poll_completion = true; in do_xfer()
906 xfer->hdr.protocol_id = pi->proto->id; in do_xfer()
907 reinit_completion(&xfer->done); in do_xfer()
909 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, in do_xfer()
910 xfer->hdr.protocol_id, xfer->hdr.seq, in do_xfer()
911 xfer->hdr.poll_completion); in do_xfer()
913 xfer->state = SCMI_XFER_SENT_OK; in do_xfer()
916 * on xfer->state due to the monotonically increasing tokens allocation, in do_xfer()
917 * we must anyway ensure xfer->state initialization is not re-ordered in do_xfer()
919 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state. in do_xfer()
923 ret = info->desc->ops->send_message(cinfo, xfer); in do_xfer()
929 trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND", in do_xfer()
930 xfer->hdr.seq, xfer->hdr.status, in do_xfer()
931 xfer->tx.buf, xfer->tx.len); in do_xfer()
934 if (!ret && xfer->hdr.status) in do_xfer()
935 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer()
937 if (info->desc->ops->mark_txdone) in do_xfer()
938 info->desc->ops->mark_txdone(cinfo, ret, xfer); in do_xfer()
940 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, in do_xfer()
941 xfer->hdr.protocol_id, xfer->hdr.seq, ret); in do_xfer()
950 struct scmi_info *info = handle_to_scmi_info(pi->handle); in reset_rx_to_maxsz()
952 xfer->rx.len = info->desc->max_msg_size; in reset_rx_to_maxsz()
958 * do_xfer_with_response() - Do one transfer and wait until the delayed
965 * it could cause long busy-waiting here, so ignore polling for the delayed
978 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
987 xfer->async_done = &async_response; in do_xfer_with_response()
995 WARN_ON_ONCE(xfer->hdr.poll_completion); in do_xfer_with_response()
999 if (!wait_for_completion_timeout(xfer->async_done, timeout)) { in do_xfer_with_response()
1000 dev_err(ph->dev, in do_xfer_with_response()
1003 ret = -ETIMEDOUT; in do_xfer_with_response()
1004 } else if (xfer->hdr.status) { in do_xfer_with_response()
1005 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer_with_response()
1009 xfer->async_done = NULL; in do_xfer_with_response()
1014 * xfer_get_init() - Allocate and initialise one message for transmit
1035 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_get_init()
1036 struct scmi_xfers_info *minfo = &info->tx_minfo; in xfer_get_init()
1037 struct device *dev = info->dev; in xfer_get_init()
1040 if (rx_size > info->desc->max_msg_size || in xfer_get_init()
1041 tx_size > info->desc->max_msg_size) in xfer_get_init()
1042 return -ERANGE; in xfer_get_init()
1044 xfer = scmi_xfer_get(pi->handle, minfo, true); in xfer_get_init()
1051 xfer->tx.len = tx_size; in xfer_get_init()
1052 xfer->rx.len = rx_size ? : info->desc->max_msg_size; in xfer_get_init()
1053 xfer->hdr.type = MSG_TYPE_COMMAND; in xfer_get_init()
1054 xfer->hdr.id = msg_id; in xfer_get_init()
1055 xfer->hdr.poll_completion = false; in xfer_get_init()
1063 * version_get() - command to get the revision of the SCMI entity
1084 rev_info = t->rx.buf; in version_get()
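
Only one line of version_get() survives the filter, but it is the canonical consumer of the xfer API above: allocate, send, read, release. A hedged reconstruction (PROTOCOL_VERSION and the exact error handling are assumptions):

	static int version_get(const struct scmi_protocol_handle *ph,
			       u32 *version)
	{
		int ret;
		__le32 *rev_info;
		struct scmi_xfer *t;

		ret = xfer_get_init(ph, PROTOCOL_VERSION, 0,
				    sizeof(*version), &t);
		if (ret)
			return ret;

		ret = do_xfer(ph, t);
		if (!ret) {
			rev_info = t->rx.buf;
			*version = le32_to_cpu(*rev_info);
		}

		xfer_put(ph, t);
		return ret;
	}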
1093 * scmi_set_protocol_priv - Set protocol specific data at init time
1105 pi->priv = priv; in scmi_set_protocol_priv()
1111 * scmi_get_protocol_priv - Get protocol specific data set at init time
1121 return pi->priv; in scmi_get_protocol_priv()
1139 * scmi_common_extended_name_get - Common helper to get extended resource names
1157 ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id), in scmi_common_extended_name_get()
1162 put_unaligned_le32(res_id, t->tx.buf); in scmi_common_extended_name_get()
1163 resp = t->rx.buf; in scmi_common_extended_name_get()
1165 ret = ph->xops->do_xfer(ph, t); in scmi_common_extended_name_get()
1167 strscpy(name, resp->name, len); in scmi_common_extended_name_get()
1169 ph->xops->xfer_put(ph, t); in scmi_common_extended_name_get()
1172 dev_warn(ph->dev, in scmi_common_extended_name_get()
1173 "Failed to get extended name - id:%u (ret:%d). Using %s\n", in scmi_common_extended_name_get()
1179 * struct scmi_iterator - Iterator descriptor
1180 * @msg: A reference to the message TX buffer; filled by @prepare_message with
1181 * a proper custom command payload for each multi-part command request.
1183 * @process_response to parse the multi-part replies.
1189 * internal routines and by the caller-provided @scmi_iterator_ops.
1211 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL); in scmi_iterator_init()
1213 return ERR_PTR(-ENOMEM); in scmi_iterator_init()
1215 i->ph = ph; in scmi_iterator_init()
1216 i->ops = ops; in scmi_iterator_init()
1217 i->priv = priv; in scmi_iterator_init()
1219 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t); in scmi_iterator_init()
1221 devm_kfree(ph->dev, i); in scmi_iterator_init()
1225 i->state.max_resources = max_resources; in scmi_iterator_init()
1226 i->msg = i->t->tx.buf; in scmi_iterator_init()
1227 i->resp = i->t->rx.buf; in scmi_iterator_init()
1234 int ret = -EINVAL; in scmi_iterator_run()
1240 if (!i || !i->ops || !i->ph) in scmi_iterator_run()
1243 iops = i->ops; in scmi_iterator_run()
1244 ph = i->ph; in scmi_iterator_run()
1245 st = &i->state; in scmi_iterator_run()
1248 iops->prepare_message(i->msg, st->desc_index, i->priv); in scmi_iterator_run()
1249 ret = ph->xops->do_xfer(ph, i->t); in scmi_iterator_run()
1253 st->rx_len = i->t->rx.len; in scmi_iterator_run()
1254 ret = iops->update_state(st, i->resp, i->priv); in scmi_iterator_run()
1258 if (st->num_returned > st->max_resources - st->desc_index) { in scmi_iterator_run()
1259 dev_err(ph->dev, in scmi_iterator_run()
1261 st->max_resources); in scmi_iterator_run()
1262 ret = -EINVAL; in scmi_iterator_run()
1266 for (st->loop_idx = 0; st->loop_idx < st->num_returned; in scmi_iterator_run()
1267 st->loop_idx++) { in scmi_iterator_run()
1268 ret = iops->process_response(ph, i->resp, st, i->priv); in scmi_iterator_run()
1273 st->desc_index += st->num_returned; in scmi_iterator_run()
1274 ph->xops->reset_rx_to_maxsz(ph, i->t); in scmi_iterator_run()
1279 } while (st->num_returned && st->num_remaining); in scmi_iterator_run()
1283 ph->xops->xfer_put(ph, i->t); in scmi_iterator_run()
1284 devm_kfree(ph->dev, i); in scmi_iterator_run()
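
The iterator is driven entirely by the three caller-supplied callbacks visible above (prepare_message, update_state, process_response). A hypothetical protocol-side user of the init/run pair; the message layouts and field names here are invented for illustration:

	struct my_list_msg  { __le32 desc_index; };
	struct my_list_resp { __le32 num_returned; __le32 num_remaining; };

	static void my_prepare_message(void *message, unsigned int desc_index,
				       const void *priv)
	{
		struct my_list_msg *msg = message;

		/* Ask the platform for the next chunk of descriptors */
		msg->desc_index = cpu_to_le32(desc_index);
	}

	static int my_update_state(struct scmi_iterator_state *st,
				   const void *response, void *priv)
	{
		const struct my_list_resp *r = response;

		/* Tell the core how many entries arrived and remain */
		st->num_returned = le32_to_cpu(r->num_returned);
		st->num_remaining = le32_to_cpu(r->num_remaining);
		return 0;
	}

	static int my_process_response(const struct scmi_protocol_handle *ph,
				       const void *response,
				       struct scmi_iterator_state *st,
				       void *priv)
	{
		/* Consume entry st->loop_idx of the current chunk */
		return 0;
	}

These callbacks are bundled in a struct scmi_iterator_ops and handed to the init/run helpers shown above.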
1328 ret = -EINVAL; in scmi_common_fastchannel_init()
1332 ret = ph->xops->xfer_get_init(ph, describe_id, in scmi_common_fastchannel_init()
1337 info = t->tx.buf; in scmi_common_fastchannel_init()
1338 info->domain = cpu_to_le32(domain); in scmi_common_fastchannel_init()
1339 info->message_id = cpu_to_le32(message_id); in scmi_common_fastchannel_init()
1346 ret = ph->xops->do_xfer(ph, t); in scmi_common_fastchannel_init()
1350 resp = t->rx.buf; in scmi_common_fastchannel_init()
1351 flags = le32_to_cpu(resp->attr); in scmi_common_fastchannel_init()
1352 size = le32_to_cpu(resp->chan_size); in scmi_common_fastchannel_init()
1354 ret = -EINVAL; in scmi_common_fastchannel_init()
1358 phys_addr = le32_to_cpu(resp->chan_addr_low); in scmi_common_fastchannel_init()
1359 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; in scmi_common_fastchannel_init()
1360 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1362 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1369 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); in scmi_common_fastchannel_init()
1371 ret = -ENOMEM; in scmi_common_fastchannel_init()
1376 phys_addr = le32_to_cpu(resp->db_addr_low); in scmi_common_fastchannel_init()
1377 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; in scmi_common_fastchannel_init()
1378 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1380 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1384 db->addr = addr; in scmi_common_fastchannel_init()
1385 db->width = size; in scmi_common_fastchannel_init()
1386 db->set = le32_to_cpu(resp->db_set_lmask); in scmi_common_fastchannel_init()
1387 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; in scmi_common_fastchannel_init()
1388 db->mask = le32_to_cpu(resp->db_preserve_lmask); in scmi_common_fastchannel_init()
1389 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; in scmi_common_fastchannel_init()
1394 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
1396 dev_dbg(ph->dev, in scmi_common_fastchannel_init()
1398 pi->proto->id, message_id, domain); in scmi_common_fastchannel_init()
1403 devm_kfree(ph->dev, db); in scmi_common_fastchannel_init()
1409 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
1412 dev_warn(ph->dev, in scmi_common_fastchannel_init()
1413 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n", in scmi_common_fastchannel_init()
1414 pi->proto->id, message_id, domain, ret); in scmi_common_fastchannel_init()
1421 if (db->mask) \
1422 val = ioread##w(db->addr) & db->mask; \
1423 iowrite##w((u##w)db->set | val, db->addr); \
1428 if (!db || !db->addr) in scmi_common_fastchannel_db_ring()
1431 if (db->width == 1) in scmi_common_fastchannel_db_ring()
1433 else if (db->width == 2) in scmi_common_fastchannel_db_ring()
1435 else if (db->width == 4) in scmi_common_fastchannel_db_ring()
1437 else /* db->width == 8 */ in scmi_common_fastchannel_db_ring()
1444 if (db->mask) in scmi_common_fastchannel_db_ring()
1445 val = ioread64_hi_lo(db->addr) & db->mask; in scmi_common_fastchannel_db_ring()
1446 iowrite64_hi_lo(db->set | val, db->addr); in scmi_common_fastchannel_db_ring()
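
The ring is thus a read-modify-write that preserves the db->mask bits and raises the db->set bits. A worked example with hypothetical values:

	/* Worked example (hypothetical values) for a 32-bit doorbell:
	 *   db->set  = 0x00000001, db->mask = 0xffff0000
	 *   ioread32(db->addr)          -> 0xabcd1234
	 *   val = 0xabcd1234 & db->mask -> 0xabcd0000  (preserved bits)
	 *   iowrite32(db->set | val)    -> 0xabcd0001  (ring bit raised,
	 *                                               upper half intact)
	 */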
1460 * scmi_revision_area_get - Retrieve version memory area.
1475 return pi->handle->version; in scmi_revision_area_get()
1479 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
1486 * all resource management is handled via a dedicated per-protocol devres
1498 int ret = -ENOMEM; in scmi_alloc_init_protocol_instance()
1501 const struct scmi_handle *handle = &info->handle; in scmi_alloc_init_protocol_instance()
1504 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); in scmi_alloc_init_protocol_instance()
1506 scmi_protocol_put(proto->id); in scmi_alloc_init_protocol_instance()
1510 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL); in scmi_alloc_init_protocol_instance()
1514 pi->gid = gid; in scmi_alloc_init_protocol_instance()
1515 pi->proto = proto; in scmi_alloc_init_protocol_instance()
1516 pi->handle = handle; in scmi_alloc_init_protocol_instance()
1517 pi->ph.dev = handle->dev; in scmi_alloc_init_protocol_instance()
1518 pi->ph.xops = &xfer_ops; in scmi_alloc_init_protocol_instance()
1519 pi->ph.hops = &helpers_ops; in scmi_alloc_init_protocol_instance()
1520 pi->ph.set_priv = scmi_set_protocol_priv; in scmi_alloc_init_protocol_instance()
1521 pi->ph.get_priv = scmi_get_protocol_priv; in scmi_alloc_init_protocol_instance()
1522 refcount_set(&pi->users, 1); in scmi_alloc_init_protocol_instance()
1523 /* proto->init is assured NON NULL by scmi_protocol_register */ in scmi_alloc_init_protocol_instance()
1524 ret = pi->proto->instance_init(&pi->ph); in scmi_alloc_init_protocol_instance()
1528 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1, in scmi_alloc_init_protocol_instance()
1530 if (ret != proto->id) in scmi_alloc_init_protocol_instance()
1537 if (pi->proto->events) { in scmi_alloc_init_protocol_instance()
1538 ret = scmi_register_protocol_events(handle, pi->proto->id, in scmi_alloc_init_protocol_instance()
1539 &pi->ph, in scmi_alloc_init_protocol_instance()
1540 pi->proto->events); in scmi_alloc_init_protocol_instance()
1542 dev_warn(handle->dev, in scmi_alloc_init_protocol_instance()
1543 "Protocol:%X - Events Registration Failed - err:%d\n", in scmi_alloc_init_protocol_instance()
1544 pi->proto->id, ret); in scmi_alloc_init_protocol_instance()
1547 devres_close_group(handle->dev, pi->gid); in scmi_alloc_init_protocol_instance()
1548 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id); in scmi_alloc_init_protocol_instance()
1554 scmi_protocol_put(proto->id); in scmi_alloc_init_protocol_instance()
1555 devres_release_group(handle->dev, gid); in scmi_alloc_init_protocol_instance()
1561 * scmi_get_protocol_instance - Protocol initialization helper.
1567 * resource allocation with a dedicated per-protocol devres subgroup.
1570 * in particular returns -EPROBE_DEFER when the desired protocol could
1579 mutex_lock(&info->protocols_mtx); in scmi_get_protocol_instance()
1580 pi = idr_find(&info->protocols, protocol_id); in scmi_get_protocol_instance()
1583 refcount_inc(&pi->users); in scmi_get_protocol_instance()
1592 pi = ERR_PTR(-EPROBE_DEFER); in scmi_get_protocol_instance()
1594 mutex_unlock(&info->protocols_mtx); in scmi_get_protocol_instance()
1600 * scmi_protocol_acquire - Protocol acquire
1615 * scmi_protocol_release - Protocol de-initialization helper.
1619 * Remove one user for the specified protocol and trigger de-initialization
1620 * and resource de-allocation once the last user has gone.
1627 mutex_lock(&info->protocols_mtx); in scmi_protocol_release()
1628 pi = idr_find(&info->protocols, protocol_id); in scmi_protocol_release()
1632 if (refcount_dec_and_test(&pi->users)) { in scmi_protocol_release()
1633 void *gid = pi->gid; in scmi_protocol_release()
1635 if (pi->proto->events) in scmi_protocol_release()
1638 if (pi->proto->instance_deinit) in scmi_protocol_release()
1639 pi->proto->instance_deinit(&pi->ph); in scmi_protocol_release()
1641 idr_remove(&info->protocols, protocol_id); in scmi_protocol_release()
1645 devres_release_group(handle->dev, gid); in scmi_protocol_release()
1646 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n", in scmi_protocol_release()
1651 mutex_unlock(&info->protocols_mtx); in scmi_protocol_release()
1658 struct scmi_info *info = handle_to_scmi_info(pi->handle); in scmi_setup_protocol_implemented()
1660 info->protocols_imp = prot_imp; in scmi_setup_protocol_implemented()
1668 struct scmi_revision_info *rev = handle->version; in scmi_is_protocol_implemented()
1670 if (!info->protocols_imp) in scmi_is_protocol_implemented()
1673 for (i = 0; i < rev->num_protocols; i++) in scmi_is_protocol_implemented()
1674 if (info->protocols_imp[i] == prot_id) in scmi_is_protocol_implemented()
1688 scmi_protocol_release(dres->handle, dres->protocol_id); in scmi_devm_release_protocol()
1700 return ERR_PTR(-ENOMEM); in scmi_devres_protocol_instance_get()
1702 pi = scmi_get_protocol_instance(sdev->handle, protocol_id); in scmi_devres_protocol_instance_get()
1708 dres->handle = sdev->handle; in scmi_devres_protocol_instance_get()
1709 dres->protocol_id = protocol_id; in scmi_devres_protocol_instance_get()
1710 devres_add(&sdev->dev, dres); in scmi_devres_protocol_instance_get()
1716 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
1727 * released, and possibly de-initialized on last user, once the SCMI driver
1740 return ERR_PTR(-EINVAL); in scmi_devm_protocol_get()
1746 *ph = &pi->ph; in scmi_devm_protocol_get()
1748 return pi->proto->ops; in scmi_devm_protocol_get()
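
From an SCMI driver's probe(), the devres-managed getter above is used roughly as follows (the clock protocol and its ops type are illustrative choices, not mandated by this file):

	static int my_scmi_probe(struct scmi_device *sdev)
	{
		const struct scmi_clk_proto_ops *clk_ops;
		struct scmi_protocol_handle *ph;

		if (!sdev->handle)
			return -ENODEV;

		clk_ops = sdev->handle->devm_protocol_get(sdev,
							  SCMI_PROTOCOL_CLOCK,
							  &ph);
		if (IS_ERR(clk_ops))
			return PTR_ERR(clk_ops);

		/* clk_ops and ph stay valid until unbind; the protocol
		 * user count drops automatically via devres. */
		return 0;
	}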
1752 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
1762 * released, and possibly de-initialized on last user, once the SCMI driver
1786 return dres->protocol_id == *((u8 *)data); in scmi_devm_protocol_match()
1790 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
1802 ret = devres_release(&sdev->dev, scmi_devm_release_protocol, in scmi_devm_protocol_put()
1808 * scmi_is_transport_atomic - Method to check if underlying transport for an
1823 ret = info->desc->atomic_enabled && is_transport_polling_capable(info); in scmi_is_transport_atomic()
1825 *atomic_threshold = info->atomic_threshold; in scmi_is_transport_atomic()
1833 info->users++; in scmi_handle_get_from_info_unlocked()
1834 return &info->handle; in scmi_handle_get_from_info_unlocked()
1838 * scmi_handle_get() - Get the SCMI handle for a device
1857 if (dev->parent == info->dev) { in scmi_handle_get()
1868 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
1877 * if NULL was passed, it returns -EINVAL;
1884 return -EINVAL; in scmi_handle_put()
1888 if (!WARN_ON(!info->users)) in scmi_handle_put()
1889 info->users--; in scmi_handle_put()
1900 struct device *dev = sinfo->dev; in __scmi_xfer_info_init()
1901 const struct scmi_desc *desc = sinfo->desc; in __scmi_xfer_info_init()
1903 /* Pre-allocated messages, no more than what hdr.seq can support */ in __scmi_xfer_info_init()
1904 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) { in __scmi_xfer_info_init()
1906 "Invalid maximum messages %d, not in range [1 - %lu]\n", in __scmi_xfer_info_init()
1907 info->max_msg, MSG_TOKEN_MAX); in __scmi_xfer_info_init()
1908 return -EINVAL; in __scmi_xfer_info_init()
1911 hash_init(info->pending_xfers); in __scmi_xfer_info_init()
1914 info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX), in __scmi_xfer_info_init()
1916 if (!info->xfer_alloc_table) in __scmi_xfer_info_init()
1917 return -ENOMEM; in __scmi_xfer_info_init()
1921 * pre-initialize the buffer pointer to pre-allocated buffers and in __scmi_xfer_info_init()
1924 INIT_HLIST_HEAD(&info->free_xfers); in __scmi_xfer_info_init()
1925 for (i = 0; i < info->max_msg; i++) { in __scmi_xfer_info_init()
1928 return -ENOMEM; in __scmi_xfer_info_init()
1930 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, in __scmi_xfer_info_init()
1932 if (!xfer->rx.buf) in __scmi_xfer_info_init()
1933 return -ENOMEM; in __scmi_xfer_info_init()
1935 xfer->tx.buf = xfer->rx.buf; in __scmi_xfer_info_init()
1936 init_completion(&xfer->done); in __scmi_xfer_info_init()
1937 spin_lock_init(&xfer->lock); in __scmi_xfer_info_init()
1940 hlist_add_head(&xfer->node, &info->free_xfers); in __scmi_xfer_info_init()
1943 spin_lock_init(&info->xfer_lock); in __scmi_xfer_info_init()
1950 const struct scmi_desc *desc = sinfo->desc; in scmi_channels_max_msg_configure()
1952 if (!desc->ops->get_max_msg) { in scmi_channels_max_msg_configure()
1953 sinfo->tx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
1954 sinfo->rx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
1958 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
1960 return -EINVAL; in scmi_channels_max_msg_configure()
1961 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
1964 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
1966 sinfo->rx_minfo.max_msg = in scmi_channels_max_msg_configure()
1967 desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
1981 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); in scmi_xfer_info_init()
1982 if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE)) in scmi_xfer_info_init()
1983 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo); in scmi_xfer_info_init()
1989 int prot_id, bool tx) in scmi_chan_setup() argument
1996 idx = tx ? 0 : 1; in scmi_chan_setup()
1997 idr = tx ? &info->tx_idr : &info->rx_idr; in scmi_chan_setup()
2004 if (!info->desc->ops->chan_available(dev, idx)) { in scmi_chan_setup()
2007 return -EINVAL; in scmi_chan_setup()
2011 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL); in scmi_chan_setup()
2013 return -ENOMEM; in scmi_chan_setup()
2015 cinfo->dev = dev; in scmi_chan_setup()
2016 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; in scmi_chan_setup()
2018 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); in scmi_chan_setup()
2022 if (tx && is_polling_required(cinfo, info)) { in scmi_chan_setup()
2025 "Enabled polling mode TX channel - prot_id:%d\n", in scmi_chan_setup()
2039 cinfo->handle = &info->handle; in scmi_chan_setup()
2051 if (ret && ret != -ENOMEM) in scmi_txrx_setup()
2059 * scmi_get_protocol_device - Helper to get/create an SCMI device.
2083 sdev = scmi_child_dev_find(info->dev, prot_id, name); in scmi_get_protocol_device()
2089 dev_warn(info->dev, in scmi_get_protocol_device()
2098 sdev = scmi_device_create(np, info->dev, prot_id, name); in scmi_get_protocol_device()
2100 dev_err(info->dev, "failed to create %d protocol device\n", in scmi_get_protocol_device()
2107 if (scmi_txrx_setup(info, &sdev->dev, prot_id)) { in scmi_get_protocol_device()
2108 dev_err(&sdev->dev, "failed to setup transport\n"); in scmi_get_protocol_device()
2138 * scmi_create_protocol_devices - Create devices for all pending requests for
2160 rdev->id_table->name); in scmi_create_protocol_devices()
2166 * scmi_protocol_device_request - Helper to request a device
2178 * This way the requested device is created straight-away for all the already
2197 id_table->name, id_table->protocol_id); in scmi_protocol_device_request()
2209 if (rdev->id_table->protocol_id == in scmi_protocol_device_request()
2210 id_table->protocol_id) in scmi_protocol_device_request()
2214 if (!strcmp(rdev->id_table->name, id_table->name)) { in scmi_protocol_device_request()
2216 rdev->id_table->protocol_id, in scmi_protocol_device_request()
2217 rdev->id_table->name); in scmi_protocol_device_request()
2218 ret = -EINVAL; in scmi_protocol_device_request()
2230 ret = -ENOMEM; in scmi_protocol_device_request()
2233 rdev->id_table = id_table; in scmi_protocol_device_request()
2244 ret = -ENOMEM; in scmi_protocol_device_request()
2250 id_table->protocol_id, in scmi_protocol_device_request()
2251 id_table->protocol_id + 1, GFP_KERNEL); in scmi_protocol_device_request()
2252 if (ret != id_table->protocol_id) { in scmi_protocol_device_request()
2253 pr_err("Failed to save SCMI device - ret:%d\n", ret); in scmi_protocol_device_request()
2256 ret = -EINVAL; in scmi_protocol_device_request()
2261 list_add(&rdev->node, phead); in scmi_protocol_device_request()
2273 child = idr_find(&info->active_protocols, in scmi_protocol_device_request()
2274 id_table->protocol_id); in scmi_protocol_device_request()
2279 id_table->protocol_id, in scmi_protocol_device_request()
2280 id_table->name); in scmi_protocol_device_request()
2283 if (!sdev->handle) in scmi_protocol_device_request()
2284 sdev->handle = in scmi_protocol_device_request()
2287 if (sdev->handle) in scmi_protocol_device_request()
2288 scmi_device_link_add(&sdev->dev, in scmi_protocol_device_request()
2289 sdev->handle->dev); in scmi_protocol_device_request()
2292 dev_err(info->dev, in scmi_protocol_device_request()
2294 id_table->protocol_id); in scmi_protocol_device_request()
2306 * scmi_protocol_device_unrequest - Helper to unrequest a device
2324 id_table->name, id_table->protocol_id); in scmi_protocol_device_unrequest()
2327 phead = idr_find(&scmi_requested_devices, id_table->protocol_id); in scmi_protocol_device_unrequest()
2332 if (!strcmp(victim->id_table->name, id_table->name)) { in scmi_protocol_device_unrequest()
2333 list_del(&victim->node); in scmi_protocol_device_unrequest()
2341 id_table->protocol_id); in scmi_protocol_device_unrequest()
2351 struct idr *idr = &info->tx_idr; in scmi_cleanup_txrx_channels()
2353 ret = idr_for_each(idr, info->desc->ops->chan_free, idr); in scmi_cleanup_txrx_channels()
2354 idr_destroy(&info->tx_idr); in scmi_cleanup_txrx_channels()
2356 idr = &info->rx_idr; in scmi_cleanup_txrx_channels()
2357 ret = idr_for_each(idr, info->desc->ops->chan_free, idr); in scmi_cleanup_txrx_channels()
2358 idr_destroy(&info->rx_idr); in scmi_cleanup_txrx_channels()
2369 struct device *dev = &pdev->dev; in scmi_probe()
2370 struct device_node *child, *np = dev->of_node; in scmi_probe()
2374 return -EINVAL; in scmi_probe()
2378 return -ENOMEM; in scmi_probe()
2380 info->dev = dev; in scmi_probe()
2381 info->desc = desc; in scmi_probe()
2382 INIT_LIST_HEAD(&info->node); in scmi_probe()
2383 idr_init(&info->protocols); in scmi_probe()
2384 mutex_init(&info->protocols_mtx); in scmi_probe()
2385 idr_init(&info->active_protocols); in scmi_probe()
2388 idr_init(&info->tx_idr); in scmi_probe()
2389 idr_init(&info->rx_idr); in scmi_probe()
2391 handle = &info->handle; in scmi_probe()
2392 handle->dev = info->dev; in scmi_probe()
2393 handle->version = &info->version; in scmi_probe()
2394 handle->devm_protocol_acquire = scmi_devm_protocol_acquire; in scmi_probe()
2395 handle->devm_protocol_get = scmi_devm_protocol_get; in scmi_probe()
2396 handle->devm_protocol_put = scmi_devm_protocol_put; in scmi_probe()
2399 if (!of_property_read_u32(np, "atomic-threshold-us", in scmi_probe()
2400 &info->atomic_threshold)) in scmi_probe()
2403 info->atomic_threshold); in scmi_probe()
2404 handle->is_transport_atomic = scmi_is_transport_atomic; in scmi_probe()
2406 if (desc->ops->link_supplier) { in scmi_probe()
2407 ret = desc->ops->link_supplier(dev); in scmi_probe()
2423 if (info->desc->atomic_enabled && !is_transport_polling_capable(info)) in scmi_probe()
2439 list_add_tail(&info->node, &scmi_list); in scmi_probe()
2461 ret = idr_alloc(&info->active_protocols, child, in scmi_probe()
2476 scmi_notification_exit(&info->handle); in scmi_probe()
2494 if (info->users) in scmi_remove()
2495 dev_warn(&pdev->dev, in scmi_remove()
2497 list_del(&info->node); in scmi_remove()
2500 scmi_notification_exit(&info->handle); in scmi_remove()
2502 mutex_lock(&info->protocols_mtx); in scmi_remove()
2503 idr_destroy(&info->protocols); in scmi_remove()
2504 mutex_unlock(&info->protocols_mtx); in scmi_remove()
2506 idr_for_each_entry(&info->active_protocols, child, id) in scmi_remove()
2508 idr_destroy(&info->active_protocols); in scmi_remove()
2513 dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n"); in scmi_remove()
2523 return sprintf(buf, "%u.%u\n", info->version.major_ver, in protocol_version_show()
2524 info->version.minor_ver); in protocol_version_show()
2533 return sprintf(buf, "0x%x\n", info->version.impl_ver); in firmware_version_show()
2542 return sprintf(buf, "%s\n", info->version.vendor_id); in vendor_id_show()
2551 return sprintf(buf, "%s\n", info->version.sub_vendor_id); in sub_vendor_id_show()
2570 { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2573 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2576 { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
2585 .name = "arm-scmi",
2595 * __scmi_transports_setup - Common helper to call transport-specific
2610 for (trans = scmi_of_match; trans->data; trans++) { in __scmi_transports_setup()
2611 const struct scmi_desc *tdesc = trans->data; in __scmi_transports_setup()
2613 if ((init && !tdesc->transport_init) || in __scmi_transports_setup()
2614 (!init && !tdesc->transport_exit)) in __scmi_transports_setup()
2618 ret = tdesc->transport_init(); in __scmi_transports_setup()
2620 tdesc->transport_exit(); in __scmi_transports_setup()
2624 trans->compatible); in __scmi_transports_setup()
2648 return -EINVAL; in scmi_driver_init()
2652 /* Initialize any compiled-in transport which provided an init/exit */ in scmi_driver_init()
2693 MODULE_ALIAS("platform:arm-scmi");